gcc/config/m32c/m32c.cc
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2025 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #define IN_TARGET_CODE 1
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "stringpool.h"
31 #include "attribs.h"
32 #include "df.h"
33 #include "memmodel.h"
34 #include "tm_p.h"
35 #include "optabs.h"
36 #include "regs.h"
37 #include "emit-rtl.h"
38 #include "recog.h"
39 #include "diagnostic-core.h"
40 #include "output.h"
41 #include "insn-attr.h"
42 #include "flags.h"
43 #include "reload.h"
44 #include "stor-layout.h"
45 #include "varasm.h"
46 #include "calls.h"
47 #include "explow.h"
48 #include "expr.h"
49 #include "tm-constrs.h"
50 #include "builtins.h"
51 #include "opts.h"
53 /* This file should be included last. */
54 #include "target-def.h"
56 /* Prototypes */
58 /* Used by m32c_pushm_popm. */
59 typedef enum
61 PP_pushm,
62 PP_popm,
63 PP_justcount
64 } Push_Pop_Type;
66 static bool m32c_function_needs_enter (void);
67 static tree interrupt_handler (tree *, tree, tree, int, bool *);
68 static tree function_vector_handler (tree *, tree, tree, int, bool *);
69 static int interrupt_p (tree node);
70 static int bank_switch_p (tree node);
71 static int fast_interrupt_p (tree node);
72 static int interrupt_p (tree node);
73 static bool m32c_asm_integer (rtx, unsigned int, int);
74 static int m32c_comp_type_attributes (const_tree, const_tree);
75 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
76 static struct machine_function *m32c_init_machine_status (void);
77 static void m32c_insert_attributes (tree, tree *);
78 static bool m32c_legitimate_address_p (machine_mode, rtx, bool,
79 code_helper = ERROR_MARK);
80 static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool,
81 addr_space_t,
82 code_helper = ERROR_MARK);
83 static rtx m32c_function_arg (cumulative_args_t, const function_arg_info &);
84 static bool m32c_pass_by_reference (cumulative_args_t,
85 const function_arg_info &);
86 static void m32c_function_arg_advance (cumulative_args_t,
87 const function_arg_info &);
88 static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
89 static int m32c_pushm_popm (Push_Pop_Type);
90 static bool m32c_strict_argument_naming (cumulative_args_t);
91 static rtx m32c_struct_value_rtx (tree, int);
92 static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
93 static int need_to_save (int);
94 static rtx m32c_function_value (const_tree, const_tree, bool);
95 static rtx m32c_libcall_value (machine_mode, const_rtx);
97 /* Returns true if an address is specified, else false. */
98 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
100 static bool m32c_hard_regno_mode_ok (unsigned int, machine_mode);
102 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
104 #define streq(a,b) (strcmp ((a), (b)) == 0)
106 /* Internal support routines */
108 /* Debugging statements are tagged with DEBUG0 only so that they can
109 be easily enabled individually, by replacing the '0' with '1' as
110 needed. */
111 #define DEBUG0 0
112 #define DEBUG1 1
114 #if DEBUG0
115 #include "print-tree.h"
116 /* This is needed by some of the commented-out debug statements
117 below. */
118 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
119 #endif
120 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
122 /* These are all to support encode_pattern(). */
123 static char pattern[30], *patternp;
124 static GTY(()) rtx patternr[30];
125 #define RTX_IS(x) (streq (pattern, x))
127 /* Some macros to simplify the logic throughout this file. */
128 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
129 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
131 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
132 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
134 static int
135 far_addr_space_p (rtx x)
137 if (GET_CODE (x) != MEM)
138 return 0;
139 #if DEBUG0
140 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
141 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
142 #endif
143 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
146 /* We do most RTX matching by converting the RTX into a string, and
147 using string compares. This vastly simplifies the logic in many of
148 the functions in this file.
150 On exit, pattern[] has the encoded string (use RTX_IS("...") to
151 compare it) and patternr[] has pointers to the nodes in the RTX
152 corresponding to each character in the encoded string. The latter
153 is mostly used by print_operand().
155 Unrecognized patterns have '?' in them; this shows up when the
156 assembler complains about syntax errors.
159 static void
160 encode_pattern_1 (rtx x)
162 int i;
164 if (patternp == pattern + sizeof (pattern) - 2)
166 patternp[-1] = '?';
167 return;
170 patternr[patternp - pattern] = x;
172 switch (GET_CODE (x))
174 case REG:
175 *patternp++ = 'r';
176 break;
177 case SUBREG:
178 if (GET_MODE_SIZE (GET_MODE (x)) !=
179 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
180 *patternp++ = 'S';
181 if (GET_MODE (x) == PSImode
182 && GET_CODE (XEXP (x, 0)) == REG)
183 *patternp++ = 'S';
184 encode_pattern_1 (XEXP (x, 0));
185 break;
186 case MEM:
187 *patternp++ = 'm';
188 /* FALLTHRU */
189 case CONST:
190 encode_pattern_1 (XEXP (x, 0));
191 break;
192 case SIGN_EXTEND:
193 *patternp++ = '^';
194 *patternp++ = 'S';
195 encode_pattern_1 (XEXP (x, 0));
196 break;
197 case ZERO_EXTEND:
198 *patternp++ = '^';
199 *patternp++ = 'Z';
200 encode_pattern_1 (XEXP (x, 0));
201 break;
202 case PLUS:
203 *patternp++ = '+';
204 encode_pattern_1 (XEXP (x, 0));
205 encode_pattern_1 (XEXP (x, 1));
206 break;
207 case PRE_DEC:
208 *patternp++ = '>';
209 encode_pattern_1 (XEXP (x, 0));
210 break;
211 case POST_INC:
212 *patternp++ = '<';
213 encode_pattern_1 (XEXP (x, 0));
214 break;
215 case LO_SUM:
216 *patternp++ = 'L';
217 encode_pattern_1 (XEXP (x, 0));
218 encode_pattern_1 (XEXP (x, 1));
219 break;
220 case HIGH:
221 *patternp++ = 'H';
222 encode_pattern_1 (XEXP (x, 0));
223 break;
224 case SYMBOL_REF:
225 *patternp++ = 's';
226 break;
227 case LABEL_REF:
228 *patternp++ = 'l';
229 break;
230 case CODE_LABEL:
231 *patternp++ = 'c';
232 break;
233 case CONST_INT:
234 case CONST_DOUBLE:
235 *patternp++ = 'i';
236 break;
237 case UNSPEC:
238 *patternp++ = 'u';
239 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
240 for (i = 0; i < XVECLEN (x, 0); i++)
241 encode_pattern_1 (XVECEXP (x, 0, i));
242 break;
243 case USE:
244 *patternp++ = 'U';
245 break;
246 case PARALLEL:
247 *patternp++ = '|';
248 for (i = 0; i < XVECLEN (x, 0); i++)
249 encode_pattern_1 (XVECEXP (x, 0, i));
250 break;
251 case EXPR_LIST:
252 *patternp++ = 'E';
253 encode_pattern_1 (XEXP (x, 0));
254 if (XEXP (x, 1))
255 encode_pattern_1 (XEXP (x, 1));
256 break;
257 default:
258 *patternp++ = '?';
259 #if DEBUG0
260 fprintf (stderr, "can't encode pattern %s\n",
261 GET_RTX_NAME (GET_CODE (x)));
262 debug_rtx (x);
263 #endif
264 break;
268 static void
269 encode_pattern (rtx x)
271 patternp = pattern;
272 encode_pattern_1 (x);
273 *patternp = 0;
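/* Editorial illustration (not part of the upstream file): how the
   encoding is typically consumed.  Per the switch above (MEM -> 'm',
   PLUS -> '+', REG -> 'r', CONST_INT -> 'i', SYMBOL_REF -> 's'), an
   address like (mem:QI (plus:HI (reg:HI a0) (const_int 4))) encodes as
   "m+ri", and patternr[2] points at the REG node.  A hypothetical
   caller, using the same idiom as m32c_matches_constraint_p below:  */
#if 0
  encode_pattern (operand);
  if (RTX_IS ("m+ri"))		/* mem of (base reg + constant offset) */
    base_reg = patternr[2];	/* the REG node inside the PLUS */
#endif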
276 /* Since register names indicate the mode they're used in, we need a
277 way to determine which name to refer to the register with. Called
278 by print_operand(). */
280 static const char *
281 reg_name_with_mode (int regno, machine_mode mode)
283 int mlen = GET_MODE_SIZE (mode);
284 if (regno == R0_REGNO && mlen == 1)
285 return "r0l";
286 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
287 return "r2r0";
288 if (regno == R0_REGNO && mlen == 6)
289 return "r2r1r0";
290 if (regno == R0_REGNO && mlen == 8)
291 return "r3r1r2r0";
292 if (regno == R1_REGNO && mlen == 1)
293 return "r1l";
294 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
295 return "r3r1";
296 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
297 return "a1a0";
298 return reg_names[regno];
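/* Illustrative examples (editorial addition): following the rules
   above, r0 is printed as "r0l" when used in QImode, as the default
   name from reg_names[] in HImode, and as "r2r0" when it holds a
   4-byte SImode value.  */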
301 /* How many bytes a register uses on stack when it's pushed. We need
302 to know this because the push opcode needs to explicitly indicate
303 the size of the register, even though the name of the register
304 already tells it that. Used by m32c_output_reg_{push,pop}, which
305 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
307 static int
308 reg_push_size (int regno)
310 switch (regno)
312 case R0_REGNO:
313 case R1_REGNO:
314 return 2;
315 case R2_REGNO:
316 case R3_REGNO:
317 case FLG_REGNO:
318 return 2;
319 case A0_REGNO:
320 case A1_REGNO:
321 case SB_REGNO:
322 case FB_REGNO:
323 case SP_REGNO:
324 if (TARGET_A16)
325 return 2;
326 else
327 return 3;
328 default:
329 gcc_unreachable ();
333 /* Given two register classes, find the largest intersection between
334 them. If there is no intersection, return RETURNED_IF_EMPTY
335 instead. */
336 static reg_class_t
337 reduce_class (reg_class_t original_class, reg_class_t limiting_class,
338 reg_class_t returned_if_empty)
340 HARD_REG_SET cc;
341 int i;
342 reg_class_t best = NO_REGS;
343 unsigned int best_size = 0;
345 if (original_class == limiting_class)
346 return original_class;
348 cc = reg_class_contents[original_class] & reg_class_contents[limiting_class];
350 for (i = 0; i < LIM_REG_CLASSES; i++)
352 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
353 if (best_size < reg_class_size[i])
355 best = (reg_class_t) i;
356 best_size = reg_class_size[i];
360 if (best == NO_REGS)
361 return returned_if_empty;
362 return best;
365 /* Used by m32c_register_move_cost to determine if a move is
366 impossibly expensive. */
367 static bool
368 class_can_hold_mode (reg_class_t rclass, machine_mode mode)
370 /* Cache the results: 0=untested 1=no 2=yes */
371 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
373 if (results[(int) rclass][mode] == 0)
375 int r;
376 results[rclass][mode] = 1;
377 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
378 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
379 && m32c_hard_regno_mode_ok (r, mode))
381 results[rclass][mode] = 2;
382 break;
386 #if DEBUG0
387 fprintf (stderr, "class %s can hold %s? %s\n",
388 class_names[(int) rclass], mode_name[mode],
389 (results[rclass][mode] == 2) ? "yes" : "no");
390 #endif
391 return results[(int) rclass][mode] == 2;
394 /* Run-time Target Specification. */
396 /* Memregs are memory locations that gcc treats like general
397 registers, as there are a limited number of true registers and the
398 m32c families can use memory in most places that registers can be
399 used.
401 However, since memory accesses are more expensive than registers,
402 we allow the user to limit the number of memregs available, in
403 order to try to persuade gcc to try harder to use real registers.
405 Memregs are provided by lib1funcs.S.
408 int ok_to_change_target_memregs = TRUE;
410 /* Implements TARGET_OPTION_OVERRIDE. */
412 #undef TARGET_OPTION_OVERRIDE
413 #define TARGET_OPTION_OVERRIDE m32c_option_override
415 static void
416 m32c_option_override (void)
418 /* We limit memregs to 0..16, and provide a default. */
419 if (OPTION_SET_P (target_memregs))
421 if (target_memregs < 0 || target_memregs > 16)
422 error ("invalid target memregs value %<%d%>", target_memregs);
424 else
425 target_memregs = 16;
427 if (TARGET_A24)
428 flag_ivopts = 0;
430 /* This target defaults to strict volatile bitfields. */
431 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
432 flag_strict_volatile_bitfields = 1;
434 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
435 This is always worse than an absolute call. */
436 if (TARGET_A16)
437 flag_no_function_cse = 1;
439 /* This wants to put insns between compares and their jumps. */
440 /* FIXME: The right solution is to properly trace the flags register
441 values, but that is too much work for stage 4. */
442 flag_combine_stack_adjustments = 0;
445 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
446 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
448 static void
449 m32c_override_options_after_change (void)
451 if (TARGET_A16)
452 flag_no_function_cse = 1;
455 /* Defining data structures for per-function information */
457 /* The usual; we set up our machine_function data. */
458 static struct machine_function *
459 m32c_init_machine_status (void)
461 return ggc_cleared_alloc<machine_function> ();
464 /* Implements INIT_EXPANDERS. We just set up to call the above
465 function. */
466 void
467 m32c_init_expanders (void)
469 init_machine_status = m32c_init_machine_status;
472 /* Storage Layout */
474 /* Register Basics */
476 /* Basic Characteristics of Registers */
478 /* Whether a mode fits in a register is complex enough to warrant a
479 table. */
480 static struct
482 char qi_regs;
483 char hi_regs;
484 char pi_regs;
485 char si_regs;
486 char di_regs;
487 } nregs_table[FIRST_PSEUDO_REGISTER] =
489 { 1, 1, 2, 2, 4 }, /* r0 */
490 { 0, 1, 0, 0, 0 }, /* r2 */
491 { 1, 1, 2, 2, 0 }, /* r1 */
492 { 0, 1, 0, 0, 0 }, /* r3 */
493 { 0, 1, 1, 0, 0 }, /* a0 */
494 { 0, 1, 1, 0, 0 }, /* a1 */
495 { 0, 1, 1, 0, 0 }, /* sb */
496 { 0, 1, 1, 0, 0 }, /* fb */
497 { 0, 1, 1, 0, 0 }, /* sp */
498 { 1, 1, 1, 0, 0 }, /* pc */
499 { 0, 0, 0, 0, 0 }, /* fl */
500 { 1, 1, 1, 0, 0 }, /* ap */
501 { 1, 1, 2, 2, 4 }, /* mem0 */
502 { 1, 1, 2, 2, 4 }, /* mem1 */
503 { 1, 1, 2, 2, 4 }, /* mem2 */
504 { 1, 1, 2, 2, 4 }, /* mem3 */
505 { 1, 1, 2, 2, 4 }, /* mem4 */
506 { 1, 1, 2, 2, 0 }, /* mem5 */
507 { 1, 1, 2, 2, 0 }, /* mem6 */
508 { 1, 1, 0, 0, 0 }, /* mem7 */
511 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
512 of available memregs, and select which registers need to be preserved
513 across calls based on the chip family. */
515 #undef TARGET_CONDITIONAL_REGISTER_USAGE
516 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
517 void
518 m32c_conditional_register_usage (void)
520 int i;
522 if (target_memregs >= 0 && target_memregs <= 16)
524 /* The command line option is bytes, but our "registers" are
525 16-bit words. */
526 for (i = (target_memregs+1)/2; i < 8; i++)
528 fixed_regs[MEM0_REGNO + i] = 1;
529 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
533 /* M32CM and M32C preserve more registers across function calls. */
534 if (TARGET_A24)
536 call_used_regs[R1_REGNO] = 0;
537 call_used_regs[R2_REGNO] = 0;
538 call_used_regs[R3_REGNO] = 0;
539 call_used_regs[A0_REGNO] = 0;
540 call_used_regs[A1_REGNO] = 0;
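/* Editorial illustration, assuming the -memregs= command-line option
   that sets target_memregs: building with -memregs=4 makes the loop
   above start at (4+1)/2 == 2, so mem2..mem7 become fixed and only the
   16-bit memregs mem0 and mem1 remain available to the register
   allocator.  */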
544 /* How Values Fit in Registers */
546 /* Implements TARGET_HARD_REGNO_NREGS. This is complicated by the fact that
547 different registers are different sizes from each other, *and* may
548 be different sizes in different chip families. */
549 static unsigned int
550 m32c_hard_regno_nregs_1 (unsigned int regno, machine_mode mode)
552 if (regno == FLG_REGNO && mode == CCmode)
553 return 1;
554 if (regno >= FIRST_PSEUDO_REGISTER)
555 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
557 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
558 return (GET_MODE_SIZE (mode) + 1) / 2;
560 if (GET_MODE_SIZE (mode) <= 1)
561 return nregs_table[regno].qi_regs;
562 if (GET_MODE_SIZE (mode) <= 2)
563 return nregs_table[regno].hi_regs;
564 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
565 return 2;
566 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
567 return nregs_table[regno].pi_regs;
568 if (GET_MODE_SIZE (mode) <= 4)
569 return nregs_table[regno].si_regs;
570 if (GET_MODE_SIZE (mode) <= 8)
571 return nregs_table[regno].di_regs;
572 return 0;
575 static unsigned int
576 m32c_hard_regno_nregs (unsigned int regno, machine_mode mode)
578 unsigned int rv = m32c_hard_regno_nregs_1 (regno, mode);
579 return rv ? rv : 1;
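/* Worked examples (editorial addition), reading nregs_table above:
   r0 holding an SImode value needs si_regs == 2 hard registers (r2r0),
   while a0 in HImode needs 1.  a0 in SImode is normally disallowed
   (si_regs == 0), except for the TARGET_A16 special case above, which
   returns 2 (a1a0).  */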
582 /* Implement TARGET_HARD_REGNO_MODE_OK. The above function does the work
583 already; just test its return value. */
584 static bool
585 m32c_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
587 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
590 /* Implement TARGET_MODES_TIEABLE_P. In general, modes aren't tieable since
591 registers are all different sizes. However, since most modes are
592 bigger than our registers anyway, it's easier to implement this
593 function that way, leaving QImode as the only unique case. */
594 static bool
595 m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
597 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
598 return 1;
600 #if 0
601 if (m1 == QImode || m2 == QImode)
602 return 0;
603 #endif
605 return 1;
608 /* Register Classes */
610 /* Implements REGNO_REG_CLASS. */
611 enum reg_class
612 m32c_regno_reg_class (int regno)
614 switch (regno)
616 case R0_REGNO:
617 return R0_REGS;
618 case R1_REGNO:
619 return R1_REGS;
620 case R2_REGNO:
621 return R2_REGS;
622 case R3_REGNO:
623 return R3_REGS;
624 case A0_REGNO:
625 return A0_REGS;
626 case A1_REGNO:
627 return A1_REGS;
628 case SB_REGNO:
629 return SB_REGS;
630 case FB_REGNO:
631 return FB_REGS;
632 case SP_REGNO:
633 return SP_REGS;
634 case FLG_REGNO:
635 return FLG_REGS;
636 default:
637 if (IS_MEM_REGNO (regno))
638 return MEM_REGS;
639 return ALL_REGS;
643 /* Implements REGNO_OK_FOR_BASE_P. */
645 m32c_regno_ok_for_base_p (int regno)
647 if (regno == A0_REGNO
648 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
649 return 1;
650 return 0;
653 /* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
654 registers of the appropriate size. */
656 #undef TARGET_PREFERRED_RELOAD_CLASS
657 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
659 static reg_class_t
660 m32c_preferred_reload_class (rtx x, reg_class_t rclass)
662 reg_class_t newclass = rclass;
664 #if DEBUG0
665 fprintf (stderr, "\npreferred_reload_class for %s is ",
666 class_names[rclass]);
667 #endif
668 if (rclass == NO_REGS)
669 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
671 if (reg_classes_intersect_p (rclass, CR_REGS))
673 switch (GET_MODE (x))
675 case E_QImode:
676 newclass = HL_REGS;
677 break;
678 default:
679 /* newclass = HI_REGS; */
680 break;
684 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
685 newclass = SI_REGS;
686 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
687 && ! reg_class_subset_p (R03_REGS, rclass))
688 newclass = DI_REGS;
690 rclass = reduce_class (rclass, newclass, rclass);
692 if (GET_MODE (x) == QImode)
693 rclass = reduce_class (rclass, HL_REGS, rclass);
695 #if DEBUG0
696 fprintf (stderr, "%s\n", class_names[rclass]);
697 debug_rtx (x);
699 if (GET_CODE (x) == MEM
700 && GET_CODE (XEXP (x, 0)) == PLUS
701 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
702 fprintf (stderr, "Glorm!\n");
703 #endif
704 return rclass;
707 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
709 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
710 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
712 static reg_class_t
713 m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
715 return m32c_preferred_reload_class (x, rclass);
718 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
719 address registers for reloads since they're needed for address
720 reloads. */
722 m32c_limit_reload_class (machine_mode mode, int rclass)
724 #if DEBUG0
725 fprintf (stderr, "limit_reload_class for %s: %s ->",
726 mode_name[mode], class_names[rclass]);
727 #endif
729 if (mode == QImode)
730 rclass = reduce_class (rclass, HL_REGS, rclass);
731 else if (mode == HImode)
732 rclass = reduce_class (rclass, HI_REGS, rclass);
733 else if (mode == SImode)
734 rclass = reduce_class (rclass, SI_REGS, rclass);
736 if (rclass != A_REGS)
737 rclass = reduce_class (rclass, DI_REGS, rclass);
739 #if DEBUG0
740 fprintf (stderr, " %s\n", class_names[rclass]);
741 #endif
742 return rclass;
745 /* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
746 r0 or r1, as those are the only real QImode registers. CR regs get
747 reloaded through appropriately sized general or address
748 registers. */
750 m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
752 int cc = class_contents[rclass][0];
753 #if DEBUG0
754 fprintf (stderr, "\nsecondary reload class %s %s\n",
755 class_names[rclass], mode_name[mode]);
756 debug_rtx (x);
757 #endif
758 if (mode == QImode
759 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
760 return QI_REGS;
761 if (reg_classes_intersect_p (rclass, CR_REGS)
762 && GET_CODE (x) == REG
763 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
764 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
765 return NO_REGS;
768 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
769 reloads. */
771 #undef TARGET_CLASS_LIKELY_SPILLED_P
772 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
774 static bool
775 m32c_class_likely_spilled_p (reg_class_t regclass)
777 if (regclass == A_REGS)
778 return true;
780 return (reg_class_size[(int) regclass] == 1);
783 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
784 documented meaning, to avoid potential inconsistencies with actual
785 class definitions. */
787 #undef TARGET_CLASS_MAX_NREGS
788 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
790 static unsigned char
791 m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
793 int rn;
794 unsigned char max = 0;
796 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
797 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
799 unsigned char n = m32c_hard_regno_nregs (rn, mode);
800 if (max < n)
801 max = n;
803 return max;
806 /* Implements TARGET_CAN_CHANGE_MODE_CLASS. Only r0 and r1 can change to
807 QI (r0l, r1l) because the chip doesn't support QI ops on other
808 registers (well, it does on a0/a1 but if we let gcc do that, reload
809 suffers). Otherwise, we allow changes to larger modes. */
810 static bool
811 m32c_can_change_mode_class (machine_mode from,
812 machine_mode to, reg_class_t rclass)
814 int rn;
815 #if DEBUG0
816 fprintf (stderr, "can change from %s to %s in %s\n",
817 mode_name[from], mode_name[to], class_names[rclass]);
818 #endif
820 /* If the larger mode isn't allowed in any of these registers, we
821 can't allow the change. */
822 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
823 if (class_contents[rclass][0] & (1 << rn))
824 if (! m32c_hard_regno_mode_ok (rn, to))
825 return false;
827 if (to == QImode)
828 return (class_contents[rclass][0] & 0x1ffa) == 0;
830 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
831 && GET_MODE_SIZE (from) > 1)
832 return true;
833 if (GET_MODE_SIZE (from) > 2) /* all other regs */
834 return true;
836 return false;
839 /* Helpers for the rest of the file. */
840 /* TRUE if the rtx is a REG rtx for the given register. */
841 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
842 && REGNO (rtx) == regno)
843 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
844 base register in address calculations (hence the "strict"
845 argument). */
846 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
847 && (REGNO (rtx) == AP_REGNO \
848 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
850 #define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
852 /* Implements matching for constraints (see next function too). 'S' is
853 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
854 call return values. */
855 bool
856 m32c_matches_constraint_p (rtx value, int constraint)
858 encode_pattern (value);
860 switch (constraint) {
861 case CONSTRAINT_SF:
862 return (far_addr_space_p (value)
863 && ((RTX_IS ("mr")
864 && A0_OR_PSEUDO (patternr[1])
865 && GET_MODE (patternr[1]) == SImode)
866 || (RTX_IS ("m+^Sri")
867 && A0_OR_PSEUDO (patternr[4])
868 && GET_MODE (patternr[4]) == HImode)
869 || (RTX_IS ("m+^Srs")
870 && A0_OR_PSEUDO (patternr[4])
871 && GET_MODE (patternr[4]) == HImode)
872 || (RTX_IS ("m+^S+ris")
873 && A0_OR_PSEUDO (patternr[5])
874 && GET_MODE (patternr[5]) == HImode)
875 || RTX_IS ("ms")));
876 case CONSTRAINT_Sd:
878 /* This is the common "src/dest" address */
879 rtx r;
880 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
881 return true;
882 if (RTX_IS ("ms") || RTX_IS ("m+si"))
883 return true;
884 if (RTX_IS ("m++rii"))
886 if (REGNO (patternr[3]) == FB_REGNO
887 && INTVAL (patternr[4]) == 0)
888 return true;
890 if (RTX_IS ("mr"))
891 r = patternr[1];
892 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
893 r = patternr[2];
894 else
895 return false;
896 if (REGNO (r) == SP_REGNO)
897 return false;
898 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
900 case CONSTRAINT_Sa:
902 rtx r;
903 if (RTX_IS ("mr"))
904 r = patternr[1];
905 else if (RTX_IS ("m+ri"))
906 r = patternr[2];
907 else
908 return false;
909 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
911 case CONSTRAINT_Si:
912 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
913 case CONSTRAINT_Ss:
914 return ((RTX_IS ("mr")
915 && (IS_REG (patternr[1], SP_REGNO)))
916 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
917 case CONSTRAINT_Sf:
918 return ((RTX_IS ("mr")
919 && (IS_REG (patternr[1], FB_REGNO)))
920 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
921 case CONSTRAINT_Sb:
922 return ((RTX_IS ("mr")
923 && (IS_REG (patternr[1], SB_REGNO)))
924 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
925 case CONSTRAINT_Sp:
926 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
927 return (RTX_IS ("mi")
928 && !(INTVAL (patternr[1]) & ~0x1fff));
929 case CONSTRAINT_S1:
930 return r1h_operand (value, QImode);
931 case CONSTRAINT_Rpa:
932 return GET_CODE (value) == PARALLEL;
933 default:
934 return false;
938 /* STACK AND CALLING */
940 /* Frame Layout */
942 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
943 (yes, THREE bytes) onto the stack for the return address, but we
944 don't support pointers bigger than 16 bits on those chips. This
945 will likely wreak havoc with exception unwinding. FIXME. */
947 m32c_return_addr_rtx (int count)
949 machine_mode mode;
950 int offset;
951 rtx ra_mem;
953 if (count)
954 return NULL_RTX;
955 /* we want 2[$fb] */
957 if (TARGET_A24)
959 /* It's four bytes */
960 mode = PSImode;
961 offset = 4;
963 else
965 /* FIXME: it's really 3 bytes */
966 mode = HImode;
967 offset = 2;
970 ra_mem =
971 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
972 offset));
973 return copy_to_mode_reg (mode, ra_mem);
976 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
978 m32c_incoming_return_addr_rtx (void)
980 /* we want [sp] */
981 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
984 /* Exception Handling Support */
986 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
987 pointers. */
989 m32c_eh_return_data_regno (int n)
991 switch (n)
993 case 0:
994 return MEM0_REGNO;
995 case 1:
996 return MEM0_REGNO+4;
997 default:
998 return INVALID_REGNUM;
1002 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1003 m32c_emit_eh_epilogue. */
1005 m32c_eh_return_stackadj_rtx (void)
1007 if (!cfun->machine->eh_stack_adjust)
1009 rtx sa;
1011 sa = gen_rtx_REG (Pmode, R0_REGNO);
1012 cfun->machine->eh_stack_adjust = sa;
1014 return cfun->machine->eh_stack_adjust;
1017 /* Registers That Address the Stack Frame */
1019 /* Implements DWARF_FRAME_REGNUM and DEBUGGER_REGNO. Note that
1020 the original spec called for dwarf numbers to vary with register
1021 width as well, for example, r0l, r0, and r2r0 would each have
1022 different dwarf numbers. GCC doesn't support this, and we don't do
1023 it, and gdb seems to like it this way anyway. */
1024 unsigned int
1025 m32c_dwarf_frame_regnum (int n)
1027 switch (n)
1029 case R0_REGNO:
1030 return 5;
1031 case R1_REGNO:
1032 return 6;
1033 case R2_REGNO:
1034 return 7;
1035 case R3_REGNO:
1036 return 8;
1037 case A0_REGNO:
1038 return 9;
1039 case A1_REGNO:
1040 return 10;
1041 case FB_REGNO:
1042 return 11;
1043 case SB_REGNO:
1044 return 19;
1046 case SP_REGNO:
1047 return 12;
1048 case PC_REGNO:
1049 return 13;
1050 default:
1051 return DWARF_FRAME_REGISTERS + 1;
1055 /* The frame looks like this:
1057 ap -> +------------------------------
1058 | Return address (3 or 4 bytes)
1059 | Saved FB (2 or 4 bytes)
1060 fb -> +------------------------------
1061 | local vars
1062 | register saves fb
1063 | through r0 as needed
1064 sp -> +------------------------------
1067 /* We use this to wrap all emitted insns in the prologue. */
1068 static rtx
1069 F (rtx x)
1071 RTX_FRAME_RELATED_P (x) = 1;
1072 return x;
1075 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1076 how much the stack pointer moves for each, for each cpu family. */
1077 static struct
1079 int reg1;
1080 int bit;
1081 int a16_bytes;
1082 int a24_bytes;
1083 } pushm_info[] =
1085 /* These are in reverse push (nearest-to-sp) order. */
1086 { R0_REGNO, 0x80, 2, 2 },
1087 { R1_REGNO, 0x40, 2, 2 },
1088 { R2_REGNO, 0x20, 2, 2 },
1089 { R3_REGNO, 0x10, 2, 2 },
1090 { A0_REGNO, 0x08, 2, 4 },
1091 { A1_REGNO, 0x04, 2, 4 },
1092 { SB_REGNO, 0x02, 2, 4 },
1093 { FB_REGNO, 0x01, 2, 4 }
1096 #define PUSHM_N (ARRAY_SIZE (pushm_info))
1098 /* Returns TRUE if we need to save/restore the given register. We
1099 save everything for exception handlers, so that any register can be
1100 unwound. For interrupt handlers, we save everything if the handler
1101 calls something else (because we don't know what *that* function
1102 might do), but try to be a bit smarter if the handler is a leaf
1103 function. We always save $a0, though, because we use that in the
1104 epilogue to copy $fb to $sp. */
1105 static int
1106 need_to_save (int regno)
1108 if (fixed_regs[regno])
1109 return 0;
1110 if (crtl->calls_eh_return)
1111 return 1;
1112 if (regno == FP_REGNO)
1113 return 0;
1114 if (cfun->machine->is_interrupt
1115 && (!cfun->machine->is_leaf
1116 || (regno == A0_REGNO
1117 && m32c_function_needs_enter ())
1119 return 1;
1120 if (df_regs_ever_live_p (regno)
1121 && (!call_used_or_fixed_reg_p (regno) || cfun->machine->is_interrupt))
1122 return 1;
1123 return 0;
1126 /* This function contains all the intelligence about saving and
1127 restoring registers. It always figures out the register save set.
1128 When called with PP_justcount, it merely returns the size of the
1129 save set (for eliminating the frame pointer, for example). When
1130 called with PP_pushm or PP_popm, it emits the appropriate
1131 instructions for saving (pushm) or restoring (popm) the
1132 registers. */
1133 static int
1134 m32c_pushm_popm (Push_Pop_Type ppt)
1136 int reg_mask = 0;
1137 int byte_count = 0, bytes;
1138 int i;
1139 rtx dwarf_set[PUSHM_N];
1140 int n_dwarfs = 0;
1141 int nosave_mask = 0;
1143 if (crtl->return_rtx
1144 && GET_CODE (crtl->return_rtx) == PARALLEL
1145 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1147 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1148 rtx rv = XEXP (exp, 0);
1149 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1151 if (rv_bytes > 2)
1152 nosave_mask |= 0x20; /* PSI, SI */
1153 else
1154 nosave_mask |= 0xf0; /* DF */
1155 if (rv_bytes > 4)
1156 nosave_mask |= 0x50; /* DI */
1159 for (i = 0; i < (int) PUSHM_N; i++)
1161 /* Skip if neither register needs saving. */
1162 if (!need_to_save (pushm_info[i].reg1))
1163 continue;
1165 if (pushm_info[i].bit & nosave_mask)
1166 continue;
1168 reg_mask |= pushm_info[i].bit;
1169 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1171 if (ppt == PP_pushm)
1173 machine_mode mode = (bytes == 2) ? HImode : SImode;
1174 rtx addr;
1176 /* Always use stack_pointer_rtx instead of calling
1177 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1178 that there is a single rtx representing the stack pointer,
1179 namely stack_pointer_rtx, and uses == to recognize it. */
1180 addr = stack_pointer_rtx;
1182 if (byte_count != 0)
1183 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1185 dwarf_set[n_dwarfs++] =
1186 gen_rtx_SET (gen_rtx_MEM (mode, addr),
1187 gen_rtx_REG (mode, pushm_info[i].reg1));
1188 F (dwarf_set[n_dwarfs - 1]);
1191 byte_count += bytes;
1194 if (cfun->machine->is_interrupt)
1196 cfun->machine->intr_pushm = reg_mask & 0xfe;
1197 reg_mask = 0;
1198 byte_count = 0;
1201 if (cfun->machine->is_interrupt)
1202 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1203 if (need_to_save (i))
1205 byte_count += 2;
1206 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1209 if (ppt == PP_pushm && byte_count)
1211 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1212 rtx pushm;
1214 if (reg_mask)
1216 XVECEXP (note, 0, 0)
1217 = gen_rtx_SET (stack_pointer_rtx,
1218 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1219 stack_pointer_rtx,
1220 GEN_INT (-byte_count)));
1221 F (XVECEXP (note, 0, 0));
1223 for (i = 0; i < n_dwarfs; i++)
1224 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1226 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1228 add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
1231 if (cfun->machine->is_interrupt)
1232 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1233 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1235 if (TARGET_A16)
1236 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1237 else
1238 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1239 F (pushm);
1242 if (ppt == PP_popm && byte_count)
1244 if (cfun->machine->is_interrupt)
1245 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1246 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1248 if (TARGET_A16)
1249 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1250 else
1251 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1253 if (reg_mask)
1254 emit_insn (gen_popm (GEN_INT (reg_mask)));
1257 return byte_count;
1260 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1261 diagrams our call frame. */
1263 m32c_initial_elimination_offset (int from, int to)
1265 int ofs = 0;
1267 if (from == AP_REGNO)
1269 if (TARGET_A16)
1270 ofs += 5;
1271 else
1272 ofs += 8;
1275 if (to == SP_REGNO)
1277 ofs += m32c_pushm_popm (PP_justcount);
1278 ofs += get_frame_size ();
1281 /* Account for push rounding. */
1282 if (TARGET_A24)
1283 ofs = (ofs + 1) & ~1;
1284 #if DEBUG0
1285 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1286 to, ofs);
1287 #endif
1288 return ofs;
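/* Worked example (editorial addition): on an A16 target with a 10-byte
   frame and two 2-byte registers saved by pushm, eliminating $ap to
   $sp gives ofs = 5 (return address + saved fb) + 4 (pushm) + 10
   (frame) = 19; the word-rounding adjustment just above applies only
   to A24 parts.  */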
1291 /* Passing Function Arguments on the Stack */
1293 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1294 M32C has word stacks. */
1295 poly_int64
1296 m32c_push_rounding (poly_int64 n)
1298 if (TARGET_R8C || TARGET_M16C)
1299 return n;
1300 return (n + 1) & ~1;
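/* Editorial illustration: pushing a single QImode byte therefore takes
   1 byte of stack on R8C/M16C, but is rounded up to a word on the
   M32C family, e.g. m32c_push_rounding (1) == 2 there.  */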
1303 #undef TARGET_PUSH_ARGUMENT
1304 #define TARGET_PUSH_ARGUMENT hook_bool_uint_true
1306 /* Passing Arguments in Registers */
1308 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1309 registers, partly on stack. If our function returns a struct, a
1310 pointer to a buffer for it is at the top of the stack (last thing
1311 pushed). The first few real arguments may be in registers as
1312 follows:
1314 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1315 arg2 in r2 if it's HI (else pushed on stack)
1316 rest on stack
1317 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1318 rest on stack
1320 Structs are not passed in registers, even if they fit. Only
1321 integer and pointer types are passed in registers.
1323 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1324 r2 if it fits. */
1325 #undef TARGET_FUNCTION_ARG
1326 #define TARGET_FUNCTION_ARG m32c_function_arg
1327 static rtx
1328 m32c_function_arg (cumulative_args_t ca_v, const function_arg_info &arg)
1330 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1332 /* Can return a reg, parallel, or 0 for stack */
1333 rtx rv = NULL_RTX;
1334 #if DEBUG0
1335 fprintf (stderr, "func_arg %d (%s, %d)\n",
1336 ca->parm_num, mode_name[arg.mode], arg.named);
1337 debug_tree (arg.type);
1338 #endif
1340 if (arg.end_marker_p ())
1341 return GEN_INT (0);
1343 if (ca->force_mem || !arg.named)
1345 #if DEBUG0
1346 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1347 arg.named);
1348 #endif
1349 return NULL_RTX;
1352 if (arg.type && INTEGRAL_TYPE_P (arg.type) && POINTER_TYPE_P (arg.type))
1353 return NULL_RTX;
1355 if (arg.aggregate_type_p ())
1356 return NULL_RTX;
1358 switch (ca->parm_num)
1360 case 1:
1361 if (GET_MODE_SIZE (arg.mode) == 1 || GET_MODE_SIZE (arg.mode) == 2)
1362 rv = gen_rtx_REG (arg.mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1363 break;
1365 case 2:
1366 if (TARGET_A16 && GET_MODE_SIZE (arg.mode) == 2)
1367 rv = gen_rtx_REG (arg.mode, R2_REGNO);
1368 break;
1371 #if DEBUG0
1372 debug_rtx (rv);
1373 #endif
1374 return rv;
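/* Worked example (editorial addition): for f (int a, short b, long c),
   where int and short are HImode here and long is SImode, R8C/M16C
   passes parm 1 in r1 and parm 2 in r2 while the long goes on the
   stack; M32C passes only parm 1 in a register (r0).  Struct arguments
   always go on the stack, per the comment above.  */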
1377 #undef TARGET_PASS_BY_REFERENCE
1378 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1379 static bool
1380 m32c_pass_by_reference (cumulative_args_t, const function_arg_info &)
1382 return 0;
1385 /* Implements INIT_CUMULATIVE_ARGS. */
1386 void
1387 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1388 tree fntype,
1389 rtx libname ATTRIBUTE_UNUSED,
1390 tree fndecl,
1391 int n_named_args ATTRIBUTE_UNUSED)
1393 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1394 ca->force_mem = 1;
1395 else
1396 ca->force_mem = 0;
1397 ca->parm_num = 1;
1400 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1401 functions returning structures, so we always reset that. Otherwise,
1402 we only need to know the sequence number of the argument to know what
1403 to do with it. */
1404 #undef TARGET_FUNCTION_ARG_ADVANCE
1405 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1406 static void
1407 m32c_function_arg_advance (cumulative_args_t ca_v,
1408 const function_arg_info &)
1410 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1412 if (ca->force_mem)
1413 ca->force_mem = 0;
1414 else
1415 ca->parm_num++;
1418 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1419 #undef TARGET_FUNCTION_ARG_BOUNDARY
1420 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1421 static unsigned int
1422 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1423 const_tree type ATTRIBUTE_UNUSED)
1425 return (TARGET_A16 ? 8 : 16);
1428 /* Implements FUNCTION_ARG_REGNO_P. */
1430 m32c_function_arg_regno_p (int r)
1432 if (TARGET_A24)
1433 return (r == R0_REGNO);
1434 return (r == R1_REGNO || r == R2_REGNO);
1437 /* HImode and PSImode are the two "native" modes as far as GCC is
1438 concerned, but the chips also support a 32-bit mode which is used
1439 for some opcodes in R8C/M16C and for reset vectors and such. */
1440 #undef TARGET_VALID_POINTER_MODE
1441 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1442 static bool
1443 m32c_valid_pointer_mode (scalar_int_mode mode)
1445 if (mode == HImode
1446 || mode == PSImode
1447 || mode == SImode
1449 return 1;
1450 return 0;
1453 /* How Scalar Function Values Are Returned */
1455 /* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1456 combination of registers starting there (r2r0 for longs, r3r1r2r0
1457 for long long, r3r2r1r0 for doubles), except that that ABI
1458 currently doesn't work because it ends up using all available
1459 general registers and gcc often can't compile it. So, instead, we
1460 return anything bigger than 16 bits in "mem0" (effectively, a
1461 memory location). */
1463 #undef TARGET_LIBCALL_VALUE
1464 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1466 static rtx
1467 m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1469 /* return reg or parallel */
1470 #if 0
1471 /* FIXME: GCC has difficulty returning large values in registers,
1472 because that ties up most of the general registers and gives the
1473 register allocator little to work with. Until we can resolve
1474 this, large values are returned in memory. */
1475 if (mode == DFmode)
1477 rtx rv;
1479 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1480 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1481 gen_rtx_REG (HImode,
1482 R0_REGNO),
1483 GEN_INT (0));
1484 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1485 gen_rtx_REG (HImode,
1486 R1_REGNO),
1487 GEN_INT (2));
1488 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1489 gen_rtx_REG (HImode,
1490 R2_REGNO),
1491 GEN_INT (4));
1492 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1493 gen_rtx_REG (HImode,
1494 R3_REGNO),
1495 GEN_INT (6));
1496 return rv;
1499 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1501 rtx rv;
1503 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1504 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1505 gen_rtx_REG (mode,
1506 R0_REGNO),
1507 GEN_INT (0));
1508 return rv;
1510 #endif
1512 if (GET_MODE_SIZE (mode) > 2)
1513 return gen_rtx_REG (mode, MEM0_REGNO);
1514 return gen_rtx_REG (mode, R0_REGNO);
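/* Editorial illustration: a 16-bit int return value comes back in r0,
   while anything wider than 2 bytes (long, long long, float, double)
   is returned in the mem0 location, as selected above.  */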
1517 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1518 conventions. */
1520 #undef TARGET_FUNCTION_VALUE
1521 #define TARGET_FUNCTION_VALUE m32c_function_value
1523 static rtx
1524 m32c_function_value (const_tree valtype,
1525 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1526 bool outgoing ATTRIBUTE_UNUSED)
1528 /* return reg or parallel */
1529 const machine_mode mode = TYPE_MODE (valtype);
1530 return m32c_libcall_value (mode, NULL_RTX);
1533 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1535 #undef TARGET_FUNCTION_VALUE_REGNO_P
1536 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1538 static bool
1539 m32c_function_value_regno_p (const unsigned int regno)
1541 return (regno == R0_REGNO || regno == MEM0_REGNO);
1544 /* How Large Values Are Returned */
1546 /* We return structures by pushing the address on the stack, even if
1547 we use registers for the first few "real" arguments. */
1548 #undef TARGET_STRUCT_VALUE_RTX
1549 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1550 static rtx
1551 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1552 int incoming ATTRIBUTE_UNUSED)
1554 return 0;
1557 /* Function Entry and Exit */
1559 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1561 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1563 if (cfun->machine->is_interrupt)
1564 return 1;
1565 return 0;
1568 /* Implementing the Varargs Macros */
1570 #undef TARGET_STRICT_ARGUMENT_NAMING
1571 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1572 static bool
1573 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1575 return 1;
1578 /* Trampolines for Nested Functions */
1581 m16c:
1582 1 0000 75C43412 mov.w #0x1234,a0
1583 2 0004 FC000000 jmp.a label
1585 m32c:
1586 1 0000 BC563412 mov.l:s #0x123456,a0
1587 2 0004 CC000000 jmp.a label
1590 /* Implements TRAMPOLINE_SIZE. */
1592 m32c_trampoline_size (void)
1594 /* Allocate extra space so we can avoid the messy shifts when we
1595 initialize the trampoline; we just write past the end of the
1596 opcode. */
1597 return TARGET_A16 ? 8 : 10;
1600 /* Implements TRAMPOLINE_ALIGNMENT. */
1602 m32c_trampoline_alignment (void)
1604 return 2;
1607 /* Implements TARGET_TRAMPOLINE_INIT. */
1609 #undef TARGET_TRAMPOLINE_INIT
1610 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1611 static void
1612 m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
1614 rtx function = XEXP (DECL_RTL (fndecl), 0);
1616 #define A0(m,i) adjust_address (m_tramp, m, i)
1617 if (TARGET_A16)
1619 /* Note: we subtract a "word" because the moves want signed
1620 constants, not unsigned constants. */
1621 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1622 emit_move_insn (A0 (HImode, 2), chainval);
1623 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1624 /* We use 16-bit addresses here, but store the zero to turn it
1625 into a 24-bit offset. */
1626 emit_move_insn (A0 (HImode, 5), function);
1627 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1629 else
1631 /* Note that the PSI moves actually write 4 bytes. Make sure we
1632 write stuff out in the right order, and leave room for the
1633 extra byte at the end. */
1634 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1635 emit_move_insn (A0 (PSImode, 1), chainval);
1636 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1637 emit_move_insn (A0 (PSImode, 5), function);
1639 #undef A0
1642 #undef TARGET_LRA_P
1643 #define TARGET_LRA_P hook_bool_void_false
1645 /* Addressing Modes */
1647 /* The r8c/m32c family supports a wide range of non-orthogonal
1648 addressing modes, including the ability to double-indirect on *some*
1649 of them. Not all insns support all modes, either, but we rely on
1650 predicates and constraints to deal with that. */
1651 #undef TARGET_LEGITIMATE_ADDRESS_P
1652 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1653 bool
1654 m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict, code_helper)
1656 int mode_adjust;
1657 if (CONSTANT_P (x))
1658 return 1;
1660 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1661 return 0;
1662 if (TARGET_A24 && GET_MODE (x) != PSImode)
1663 return 0;
1665 /* Wide references to memory will be split after reload, so we must
1666 ensure that all parts of such splits remain legitimate
1667 addresses. */
1668 mode_adjust = GET_MODE_SIZE (mode) - 1;
1670 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1671 if (GET_CODE (x) == PRE_DEC
1672 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1674 return (GET_CODE (XEXP (x, 0)) == REG
1675 && REGNO (XEXP (x, 0)) == SP_REGNO);
1678 #if 0
1679 /* This is the double indirection detection, but it currently
1680 doesn't work as cleanly as this code implies, so until we've had
1681 a chance to debug it, leave it disabled. */
1682 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1684 #if DEBUG_DOUBLE
1685 fprintf (stderr, "double indirect\n");
1686 #endif
1687 x = XEXP (x, 0);
1689 #endif
1691 encode_pattern (x);
1692 if (RTX_IS ("r"))
1694 /* Most indexable registers can be used without displacements,
1695 although some of them will be emitted with an explicit zero
1696 to please the assembler. */
1697 switch (REGNO (patternr[0]))
1699 case A1_REGNO:
1700 case SB_REGNO:
1701 case FB_REGNO:
1702 case SP_REGNO:
1703 if (TARGET_A16 && GET_MODE (x) == SImode)
1704 return 0;
1705 /* FALLTHRU */
1706 case A0_REGNO:
1707 return 1;
1709 default:
1710 if (IS_PSEUDO (patternr[0], strict))
1711 return 1;
1712 return 0;
1716 if (TARGET_A16 && GET_MODE (x) == SImode)
1717 return 0;
1719 if (RTX_IS ("+ri"))
1721 /* This is more interesting, because different base registers
1722 allow for different displacements - both range and signedness
1723 - and it differs from chip series to chip series too. */
1724 int rn = REGNO (patternr[1]);
1725 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1726 switch (rn)
1728 case A0_REGNO:
1729 case A1_REGNO:
1730 case SB_REGNO:
1731 /* The syntax only allows positive offsets, but when the
1732 offsets span the entire memory range, we can simulate
1733 negative offsets by wrapping. */
1734 if (TARGET_A16)
1735 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1736 if (rn == SB_REGNO)
1737 return (offs >= 0 && offs <= 65535 - mode_adjust);
1738 /* A0 or A1 */
1739 return (offs >= -16777216 && offs <= 16777215);
1741 case FB_REGNO:
1742 if (TARGET_A16)
1743 return (offs >= -128 && offs <= 127 - mode_adjust);
1744 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1746 case SP_REGNO:
1747 return (offs >= -128 && offs <= 127 - mode_adjust);
1749 default:
1750 if (IS_PSEUDO (patternr[1], strict))
1751 return 1;
1752 return 0;
1755 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1757 rtx reg = patternr[1];
1759 /* We don't know where the symbol is, so only allow base
1760 registers which support displacements spanning the whole
1761 address range. */
1762 switch (REGNO (reg))
1764 case A0_REGNO:
1765 case A1_REGNO:
1766 /* $sb needs a secondary reload, but since it's involved in
1767 memory address reloads too, we don't deal with it very
1768 well. */
1769 /* case SB_REGNO: */
1770 return 1;
1771 default:
1772 if (GET_CODE (reg) == SUBREG)
1773 return 0;
1774 if (IS_PSEUDO (reg, strict))
1775 return 1;
1776 return 0;
1779 return 0;
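/* Worked example (editorial addition): with the ranges above, a QImode
   access to -100[$fb] is a legitimate address on A16 (within
   -128..127), but 300[$fb] is not; the latter is fixed up by
   m32c_legitimize_address() further below, which copies $fb into a
   temporary register so the large displacement applies to that
   register instead.  */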
1782 /* Implements REG_OK_FOR_BASE_P. */
1784 m32c_reg_ok_for_base_p (rtx x, int strict)
1786 if (GET_CODE (x) != REG)
1787 return 0;
1788 switch (REGNO (x))
1790 case A0_REGNO:
1791 case A1_REGNO:
1792 case SB_REGNO:
1793 case FB_REGNO:
1794 case SP_REGNO:
1795 return 1;
1796 default:
1797 if (IS_PSEUDO (x, strict))
1798 return 1;
1799 return 0;
1803 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1804 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1805 like this:
1806 EB 4B FF mova -128[$fb],$a0
1807 D8 0C FF FF mov.w:Q #0,-1[$a0]
1809 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1810 displacements:
1811 7B F4 stc $fb,$a0
1812 77 54 00 01 sub #256,$a0
1813 D8 08 01 mov.w:Q #0,1[$a0]
1815 If we don't offset (i.e. offset by zero), we end up with:
1816 7B F4 stc $fb,$a0
1817 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1819 We have to subtract *something* so that we have a PLUS rtx to mark
1820 that we've done this reload. The -128 offset will never result in
1821 an 8-bit aN offset, and the payoff for the second case is five
1822 loads *if* those loads are within 256 bytes of the other end of the
1823 frame, so the third case seems best. Note that we subtract the
1824 zero, but detect that in the addhi3 pattern. */
1826 #define BIG_FB_ADJ 0
1828 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1829 worry about is frame base offsets, as $fb has a limited
1830 displacement range. We deal with this by attempting to reload $fb
1831 itself into an address register; that seems to result in the best
1832 code. */
1833 #undef TARGET_LEGITIMIZE_ADDRESS
1834 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1835 static rtx
1836 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1837 machine_mode mode)
1839 #if DEBUG0
1840 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1841 debug_rtx (x);
1842 fprintf (stderr, "\n");
1843 #endif
1845 if (GET_CODE (x) == PLUS
1846 && GET_CODE (XEXP (x, 0)) == REG
1847 && REGNO (XEXP (x, 0)) == FB_REGNO
1848 && GET_CODE (XEXP (x, 1)) == CONST_INT
1849 && (INTVAL (XEXP (x, 1)) < -128
1850 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1852 /* reload FB to A_REGS */
1853 rtx temp = gen_reg_rtx (Pmode);
1854 x = copy_rtx (x);
1855 emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
1856 XEXP (x, 0) = temp;
1859 return x;
1862 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1864 m32c_legitimize_reload_address (rtx * x,
1865 machine_mode mode,
1866 int opnum,
1867 int type, int ind_levels ATTRIBUTE_UNUSED)
1869 #if DEBUG0
1870 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1871 mode_name[mode]);
1872 debug_rtx (*x);
1873 #endif
1875 /* At one point, this function tried to get $fb copied to an address
1876 register, which in theory would maximize sharing, but gcc was
1877 *also* still trying to reload the whole address, and we'd run out
1878 of address registers. So we let gcc do the naive (but safe)
 1879 reload instead, when the above function doesn't handle it for us.
1882 The code below is a second attempt at the above. */
1884 if (GET_CODE (*x) == PLUS
1885 && GET_CODE (XEXP (*x, 0)) == REG
1886 && REGNO (XEXP (*x, 0)) == FB_REGNO
1887 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1888 && (INTVAL (XEXP (*x, 1)) < -128
1889 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1891 rtx sum;
1892 int offset = INTVAL (XEXP (*x, 1));
1893 int adjustment = -BIG_FB_ADJ;
1895 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1896 GEN_INT (adjustment));
1897 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1898 if (type == RELOAD_OTHER)
1899 type = RELOAD_FOR_OTHER_ADDRESS;
1900 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1901 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1902 (enum reload_type) type);
1903 return 1;
1906 if (GET_CODE (*x) == PLUS
1907 && GET_CODE (XEXP (*x, 0)) == PLUS
1908 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1909 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1910 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1911 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1914 if (type == RELOAD_OTHER)
1915 type = RELOAD_FOR_OTHER_ADDRESS;
1916 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1917 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1918 (enum reload_type) type);
1919 return 1;
1922 if (TARGET_A24 && GET_MODE (*x) == PSImode)
1924 push_reload (*x, NULL_RTX, x, NULL,
1925 A_REGS, PSImode, VOIDmode, 0, 0, opnum,
1926 (enum reload_type) type);
1927 return 1;
1930 return 0;
1933 /* Return the appropriate mode for a named address pointer. */
1934 #undef TARGET_ADDR_SPACE_POINTER_MODE
1935 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1936 static scalar_int_mode
1937 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1939 switch (addrspace)
1941 case ADDR_SPACE_GENERIC:
1942 return TARGET_A24 ? PSImode : HImode;
1943 case ADDR_SPACE_FAR:
1944 return SImode;
1945 default:
1946 gcc_unreachable ();
1950 /* Return the appropriate mode for a named address address. */
1951 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1952 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1953 static scalar_int_mode
1954 m32c_addr_space_address_mode (addr_space_t addrspace)
1956 switch (addrspace)
1958 case ADDR_SPACE_GENERIC:
1959 return TARGET_A24 ? PSImode : HImode;
1960 case ADDR_SPACE_FAR:
1961 return SImode;
1962 default:
1963 gcc_unreachable ();
1967 /* Like m32c_legitimate_address_p, except with named addresses. */
1968 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1969 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1970 m32c_addr_space_legitimate_address_p
1971 static bool
1972 m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x, bool strict,
1973 addr_space_t as, code_helper ch)
1975 if (as == ADDR_SPACE_FAR)
1977 if (TARGET_A24)
1978 return 0;
1979 encode_pattern (x);
1980 if (RTX_IS ("r"))
1982 if (GET_MODE (x) != SImode)
1983 return 0;
1984 switch (REGNO (patternr[0]))
1986 case A0_REGNO:
1987 return 1;
1989 default:
1990 if (IS_PSEUDO (patternr[0], strict))
1991 return 1;
1992 return 0;
1995 if (RTX_IS ("+^Sri"))
1997 int rn = REGNO (patternr[3]);
1998 HOST_WIDE_INT offs = INTVAL (patternr[4]);
1999 if (GET_MODE (patternr[3]) != HImode)
2000 return 0;
2001 switch (rn)
2003 case A0_REGNO:
2004 return (offs >= 0 && offs <= 0xfffff);
2006 default:
2007 if (IS_PSEUDO (patternr[3], strict))
2008 return 1;
2009 return 0;
2012 if (RTX_IS ("+^Srs"))
2014 int rn = REGNO (patternr[3]);
2015 if (GET_MODE (patternr[3]) != HImode)
2016 return 0;
2017 switch (rn)
2019 case A0_REGNO:
2020 return 1;
2022 default:
2023 if (IS_PSEUDO (patternr[3], strict))
2024 return 1;
2025 return 0;
2028 if (RTX_IS ("+^S+ris"))
2030 int rn = REGNO (patternr[4]);
2031 if (GET_MODE (patternr[4]) != HImode)
2032 return 0;
2033 switch (rn)
2035 case A0_REGNO:
2036 return 1;
2038 default:
2039 if (IS_PSEUDO (patternr[4], strict))
2040 return 1;
2041 return 0;
2044 if (RTX_IS ("s"))
2046 return 1;
2048 return 0;
2051 else if (as != ADDR_SPACE_GENERIC)
2052 gcc_unreachable ();
2054 return m32c_legitimate_address_p (mode, x, strict, ch);
2057 /* Like m32c_legitimate_address, except with named address support. */
2058 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2059 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2060 static rtx
2061 m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2062 addr_space_t as)
2064 if (as != ADDR_SPACE_GENERIC)
2066 #if DEBUG0
2067 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2068 debug_rtx (x);
2069 fprintf (stderr, "\n");
2070 #endif
2072 if (GET_CODE (x) != REG)
2074 x = force_reg (SImode, x);
2076 return x;
2079 return m32c_legitimize_address (x, oldx, mode);
2082 /* Determine if one named address space is a subset of another. */
2083 #undef TARGET_ADDR_SPACE_SUBSET_P
2084 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2085 static bool
2086 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2088 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2089 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2091 if (subset == superset)
2092 return true;
2094 else
2095 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2098 #undef TARGET_ADDR_SPACE_CONVERT
2099 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2100 /* Convert from one address space to another. */
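/* For example (per the cases below): converting a far pointer to a
   generic pointer truncates the SImode address to HImode and drops the
   upper address bits, while converting a generic pointer to a far
   pointer zero-extends HImode to SImode and always works. */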
2101 static rtx
2102 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2104 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2105 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2106 rtx result;
2108 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2109 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2111 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2113 /* This is unpredictable, as we're truncating off usable address
2114 bits. */
2116 result = gen_reg_rtx (HImode);
2117 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2118 return result;
2120 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2122 /* This always works. */
2123 result = gen_reg_rtx (SImode);
2124 emit_insn (gen_zero_extendhisi2 (result, op));
2125 return result;
2127 else
2128 gcc_unreachable ();
2131 /* Condition Code Status */
2133 #undef TARGET_FIXED_CONDITION_CODE_REGS
2134 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2135 static bool
2136 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2138 *p1 = FLG_REGNO;
2139 *p2 = INVALID_REGNUM;
2140 return true;
2143 /* Describing Relative Costs of Operations */
2145 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2146 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2147 no opcodes to do that). We also discourage use of mem* registers
2148 since they're really memory. */
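/* For example, with the values chosen below, a QImode move confined
   entirely to R23_REGS costs COSTS_N_INSNS (1000), one that only
   partly involves R23_REGS costs COSTS_N_INSNS (80), CR_REGS adds
   COSTS_N_INSNS (5) per side, and using MEM_REGS itself adds
   COSTS_N_INSNS (50). */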
2150 #undef TARGET_REGISTER_MOVE_COST
2151 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2153 static int
2154 m32c_register_move_cost (machine_mode mode, reg_class_t from,
2155 reg_class_t to)
2157 int cost = COSTS_N_INSNS (3);
2158 HARD_REG_SET cc;
2160 /* FIXME: pick real values, but not 2 for now. */
2161 cc = reg_class_contents[from] | reg_class_contents[(int) to];
2163 if (mode == QImode
2164 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
2166 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2167 cost = COSTS_N_INSNS (1000);
2168 else
2169 cost = COSTS_N_INSNS (80);
2172 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2173 cost = COSTS_N_INSNS (1000);
2175 if (reg_classes_intersect_p (from, CR_REGS))
2176 cost += COSTS_N_INSNS (5);
2178 if (reg_classes_intersect_p (to, CR_REGS))
2179 cost += COSTS_N_INSNS (5);
2181 if (from == MEM_REGS || to == MEM_REGS)
2182 cost += COSTS_N_INSNS (50);
2183 else if (reg_classes_intersect_p (from, MEM_REGS)
2184 || reg_classes_intersect_p (to, MEM_REGS))
2185 cost += COSTS_N_INSNS (10);
2187 #if DEBUG0
2188 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2189 mode_name[mode], class_names[(int) from], class_names[(int) to],
2190 cost);
2191 #endif
2192 return cost;
2195 /* Implements TARGET_MEMORY_MOVE_COST. */
2197 #undef TARGET_MEMORY_MOVE_COST
2198 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2200 static int
2201 m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2202 reg_class_t rclass ATTRIBUTE_UNUSED,
2203 bool in ATTRIBUTE_UNUSED)
2205 /* FIXME: pick real values. */
2206 return COSTS_N_INSNS (10);
2209 /* Here we try to describe when we use multiple opcodes for one RTX so
2210 that gcc knows when to use them. */
2211 #undef TARGET_RTX_COSTS
2212 #define TARGET_RTX_COSTS m32c_rtx_costs
2213 static bool
2214 m32c_rtx_costs (rtx x, machine_mode mode, int outer_code,
2215 int opno ATTRIBUTE_UNUSED,
2216 int *total, bool speed ATTRIBUTE_UNUSED)
2218 int code = GET_CODE (x);
2219 switch (code)
2221 case REG:
2222 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2223 *total += COSTS_N_INSNS (500);
2224 else
2225 *total += COSTS_N_INSNS (1);
2226 return true;
2228 case ASHIFT:
2229 case LSHIFTRT:
2230 case ASHIFTRT:
2231 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2233 /* mov.b r1l, r1h */
2234 *total += COSTS_N_INSNS (1);
2235 return true;
2237 if (INTVAL (XEXP (x, 1)) > 8
2238 || INTVAL (XEXP (x, 1)) < -8)
2240 /* mov.b #N, r1l */
2241 /* mov.b r1l, r1h */
2242 *total += COSTS_N_INSNS (2);
2243 return true;
2245 return true;
2247 case LE:
2248 case LEU:
2249 case LT:
2250 case LTU:
2251 case GT:
2252 case GTU:
2253 case GE:
2254 case GEU:
2255 case NE:
2256 case EQ:
2257 if (outer_code == SET)
2259 *total += COSTS_N_INSNS (2);
2260 return true;
2262 break;
2264 case ZERO_EXTRACT:
2266 rtx dest = XEXP (x, 0);
2267 rtx addr = XEXP (dest, 0);
2268 switch (GET_CODE (addr))
2270 case CONST_INT:
2271 *total += COSTS_N_INSNS (1);
2272 break;
2273 case SYMBOL_REF:
2274 *total += COSTS_N_INSNS (3);
2275 break;
2276 default:
2277 *total += COSTS_N_INSNS (2);
2278 break;
2280 return true;
2282 break;
2284 default:
2285 /* Reasonable default. */
2286 if (TARGET_A16 && mode == SImode)
2287 *total += COSTS_N_INSNS (2);
2288 break;
2290 return false;
2293 #undef TARGET_ADDRESS_COST
2294 #define TARGET_ADDRESS_COST m32c_address_cost
2295 static int
2296 m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2297 addr_space_t as ATTRIBUTE_UNUSED,
2298 bool speed ATTRIBUTE_UNUSED)
2300 int i;
2301 /* fprintf(stderr, "\naddress_cost\n");
2302 debug_rtx(addr);*/
2303 switch (GET_CODE (addr))
2305 case CONST_INT:
2306 i = INTVAL (addr);
2307 if (i == 0)
2308 return COSTS_N_INSNS(1);
2309 if (i > 0 && i <= 255)
2310 return COSTS_N_INSNS(2);
2311 if (i > 0 && i <= 65535)
2312 return COSTS_N_INSNS(3);
2313 return COSTS_N_INSNS(4);
2314 case SYMBOL_REF:
2315 return COSTS_N_INSNS(4);
2316 case REG:
2317 return COSTS_N_INSNS(1);
2318 case PLUS:
2319 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2321 i = INTVAL (XEXP (addr, 1));
2322 if (i == 0)
2323 return COSTS_N_INSNS(1);
2324 if (i > 0 && i <= 255)
2325 return COSTS_N_INSNS(2);
2326 if (i > 0 && i <= 65535)
2327 return COSTS_N_INSNS(3);
2329 return COSTS_N_INSNS(4);
2330 default:
2331 return 0;
2335 /* Defining the Output Assembler Language */
2337 /* Output of Data */
2339 /* We may have 24-bit sizes, which is the native address size.
2340 Currently unused, but provided for completeness. */
2341 #undef TARGET_ASM_INTEGER
2342 #define TARGET_ASM_INTEGER m32c_asm_integer
2343 static bool
2344 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2346 switch (size)
2348 case 3:
2349 fprintf (asm_out_file, "\t.3byte\t");
2350 output_addr_const (asm_out_file, x);
2351 fputc ('\n', asm_out_file);
2352 return true;
2353 case 4:
2354 if (GET_CODE (x) == SYMBOL_REF)
2356 fprintf (asm_out_file, "\t.long\t");
2357 output_addr_const (asm_out_file, x);
2358 fputc ('\n', asm_out_file);
2359 return true;
2361 break;
2363 return default_assemble_integer (x, size, aligned_p);
2366 /* Output of Assembler Instructions */
2368 /* We use a lookup table because the addressing modes are non-orthogonal. */
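/* Each entry gives an operand-code letter (0 for none), the string
   produced by encode_pattern, and an output template in which digits
   name the corresponding patternr[] element. For example,
   { 0, "m+ri", "3[2]" } prints a (mem (plus reg const_int)) operand as
   "<displacement>[<register>]". */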
2370 static struct
2372 char code;
2373 char const *pattern;
2374 char const *format;
2376 const conversions[] = {
2377 { 0, "r", "0" },
2379 { 0, "mr", "z[1]" },
2380 { 0, "m+ri", "3[2]" },
2381 { 0, "m+rs", "3[2]" },
2382 { 0, "m+^Zrs", "5[4]" },
2383 { 0, "m+^Zri", "5[4]" },
2384 { 0, "m+^Z+ris", "7+6[5]" },
2385 { 0, "m+^Srs", "5[4]" },
2386 { 0, "m+^Sri", "5[4]" },
2387 { 0, "m+^S+ris", "7+6[5]" },
2388 { 0, "m+r+si", "4+5[2]" },
2389 { 0, "ms", "1" },
2390 { 0, "mi", "1" },
2391 { 0, "m+si", "2+3" },
2393 { 0, "mmr", "[z[2]]" },
2394 { 0, "mm+ri", "[4[3]]" },
2395 { 0, "mm+rs", "[4[3]]" },
2396 { 0, "mm+r+si", "[5+6[3]]" },
2397 { 0, "mms", "[[2]]" },
2398 { 0, "mmi", "[[2]]" },
2399 { 0, "mm+si", "[4[3]]" },
2401 { 0, "i", "#0" },
2402 { 0, "s", "#0" },
2403 { 0, "+si", "#1+2" },
2404 { 0, "l", "#0" },
2406 { 'l', "l", "0" },
2407 { 'd', "i", "0" },
2408 { 'd', "s", "0" },
2409 { 'd', "+si", "1+2" },
2410 { 'D', "i", "0" },
2411 { 'D', "s", "0" },
2412 { 'D', "+si", "1+2" },
2413 { 'x', "i", "#0" },
2414 { 'X', "i", "#0" },
2415 { 'm', "i", "#0" },
2416 { 'b', "i", "#0" },
2417 { 'B', "i", "0" },
2418 { 'p', "i", "0" },
2420 { 0, 0, 0 }
2423 /* This is ordered according to the bitfield that pushm/popm use. */
2424 static char const *pushm_regs[] = {
2425 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2428 /* Implements TARGET_PRINT_OPERAND. */
2430 #undef TARGET_PRINT_OPERAND
2431 #define TARGET_PRINT_OPERAND m32c_print_operand
2433 static void
2434 m32c_print_operand (FILE * file, rtx x, int code)
2436 int i, j, b;
2437 const char *comma;
2438 HOST_WIDE_INT ival;
2439 int unsigned_const = 0;
2440 int force_sign;
2442 /* Multiplies; constants are converted to sign-extended format but
2443 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2444 need. */
2445 if (code == 'u')
2447 unsigned_const = 2;
2448 code = 0;
2450 if (code == 'U')
2452 unsigned_const = 1;
2453 code = 0;
2455 /* This one is only for debugging; you can put it in a pattern to
2456 force this error. */
2457 if (code == '!')
2459 fprintf (stderr, "dj: unreviewed pattern:");
2460 if (current_output_insn)
2461 debug_rtx (current_output_insn);
2462 gcc_unreachable ();
2464 /* PSImode operations are either .w or .l depending on the target. */
2465 if (code == '&')
2467 if (TARGET_A16)
2468 fprintf (file, "w");
2469 else
2470 fprintf (file, "l");
2471 return;
2473 /* Inverted conditionals. */
2474 if (code == 'C')
2476 switch (GET_CODE (x))
2478 case LE:
2479 fputs ("gt", file);
2480 break;
2481 case LEU:
2482 fputs ("gtu", file);
2483 break;
2484 case LT:
2485 fputs ("ge", file);
2486 break;
2487 case LTU:
2488 fputs ("geu", file);
2489 break;
2490 case GT:
2491 fputs ("le", file);
2492 break;
2493 case GTU:
2494 fputs ("leu", file);
2495 break;
2496 case GE:
2497 fputs ("lt", file);
2498 break;
2499 case GEU:
2500 fputs ("ltu", file);
2501 break;
2502 case NE:
2503 fputs ("eq", file);
2504 break;
2505 case EQ:
2506 fputs ("ne", file);
2507 break;
2508 default:
2509 gcc_unreachable ();
2511 return;
2513 /* Regular conditionals. */
2514 if (code == 'c')
2516 switch (GET_CODE (x))
2518 case LE:
2519 fputs ("le", file);
2520 break;
2521 case LEU:
2522 fputs ("leu", file);
2523 break;
2524 case LT:
2525 fputs ("lt", file);
2526 break;
2527 case LTU:
2528 fputs ("ltu", file);
2529 break;
2530 case GT:
2531 fputs ("gt", file);
2532 break;
2533 case GTU:
2534 fputs ("gtu", file);
2535 break;
2536 case GE:
2537 fputs ("ge", file);
2538 break;
2539 case GEU:
2540 fputs ("geu", file);
2541 break;
2542 case NE:
2543 fputs ("ne", file);
2544 break;
2545 case EQ:
2546 fputs ("eq", file);
2547 break;
2548 default:
2549 gcc_unreachable ();
2551 return;
2553 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2554 operand. */
2555 if (code == 'h' && GET_MODE (x) == SImode)
2557 x = m32c_subreg (HImode, x, SImode, 0);
2558 code = 0;
2560 if (code == 'H' && GET_MODE (x) == SImode)
2562 x = m32c_subreg (HImode, x, SImode, 2);
2563 code = 0;
2565 if (code == 'h' && GET_MODE (x) == HImode)
2567 x = m32c_subreg (QImode, x, HImode, 0);
2568 code = 0;
2570 if (code == 'H' && GET_MODE (x) == HImode)
2572 /* We can't actually represent this as an rtx. Do it here. */
2573 if (GET_CODE (x) == REG)
2575 switch (REGNO (x))
2577 case R0_REGNO:
2578 fputs ("r0h", file);
2579 return;
2580 case R1_REGNO:
2581 fputs ("r1h", file);
2582 return;
2583 default:
2584 gcc_unreachable();
2587 /* This should be a MEM. */
2588 x = m32c_subreg (QImode, x, HImode, 1);
2589 code = 0;
2591 /* This is for BMcond, which always wants word register names. */
2592 if (code == 'h' && GET_MODE (x) == QImode)
2594 if (GET_CODE (x) == REG)
2595 x = gen_rtx_REG (HImode, REGNO (x));
2596 code = 0;
2598 /* 'x' and 'X' need to be ignored for non-immediates. */
2599 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2600 code = 0;
2602 encode_pattern (x);
2603 force_sign = 0;
2604 for (i = 0; conversions[i].pattern; i++)
2605 if (conversions[i].code == code
2606 && streq (conversions[i].pattern, pattern))
2608 for (j = 0; conversions[i].format[j]; j++)
2609 /* backslash quotes the next character in the output pattern. */
2610 if (conversions[i].format[j] == '\\')
2612 fputc (conversions[i].format[j + 1], file);
2613 j++;
2615 /* Digits in the output pattern indicate that the
2616 corresponding RTX is to be output at that point. */
2617 else if (ISDIGIT (conversions[i].format[j]))
2619 rtx r = patternr[conversions[i].format[j] - '0'];
2620 switch (GET_CODE (r))
2622 case REG:
2623 fprintf (file, "%s",
2624 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2625 break;
2626 case CONST_INT:
2627 switch (code)
2629 case 'b':
2630 case 'B':
2632 int v = INTVAL (r);
2633 int i = (int) exact_log2 (v);
2634 if (i == -1)
2635 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2636 if (i == -1)
2637 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2638 /* Bit position. */
2639 fprintf (file, "%d", i);
2641 break;
2642 case 'x':
2643 /* Unsigned byte. */
2644 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2645 INTVAL (r) & 0xff);
2646 break;
2647 case 'X':
2648 /* Unsigned word. */
2649 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2650 INTVAL (r) & 0xffff);
2651 break;
2652 case 'p':
2653 /* pushm and popm encode a register set into a single byte. */
2654 comma = "";
2655 for (b = 7; b >= 0; b--)
2656 if (INTVAL (r) & (1 << b))
2658 fprintf (file, "%s%s", comma, pushm_regs[b]);
2659 comma = ",";
2661 break;
2662 case 'm':
2663 /* "Minus". Output -X */
2664 ival = (-INTVAL (r) & 0xffff);
2665 if (ival & 0x8000)
2666 ival = ival - 0x10000;
2667 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2668 break;
2669 default:
2670 ival = INTVAL (r);
2671 if (conversions[i].format[j + 1] == '[' && ival < 0)
2673 /* We can simulate negative displacements by
2674 taking advantage of address space
2675 wrapping when the offset can span the
2676 entire address range. */
2677 rtx base =
2678 patternr[conversions[i].format[j + 2] - '0'];
2679 if (GET_CODE (base) == REG)
2680 switch (REGNO (base))
2682 case A0_REGNO:
2683 case A1_REGNO:
2684 if (TARGET_A24)
2685 ival = 0x1000000 + ival;
2686 else
2687 ival = 0x10000 + ival;
2688 break;
2689 case SB_REGNO:
2690 if (TARGET_A16)
2691 ival = 0x10000 + ival;
2692 break;
2695 else if (code == 'd' && ival < 0 && j == 0)
2696 /* The "mova" opcode is used to do addition by
2697 computing displacements, but again, we need
2698 displacements to be unsigned *if* they're
2699 the only component of the displacement
2700 (i.e. no "symbol-4" type displacement). */
2701 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2703 if (conversions[i].format[j] == '0')
2705 /* More conversions to unsigned. */
2706 if (unsigned_const == 2)
2707 ival &= 0xffff;
2708 if (unsigned_const == 1)
2709 ival &= 0xff;
2711 if (streq (conversions[i].pattern, "mi")
2712 || streq (conversions[i].pattern, "mmi"))
2714 /* Integers used as addresses are unsigned. */
2715 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2717 if (force_sign && ival >= 0)
2718 fputc ('+', file);
2719 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2720 break;
2722 break;
2723 case CONST_DOUBLE:
2724 /* We don't have const_double constants. If it
2725 happens, make it obvious. */
2726 fprintf (file, "[const_double 0x%lx]",
2727 (unsigned long) CONST_DOUBLE_HIGH (r));
2728 break;
2729 case SYMBOL_REF:
2730 assemble_name (file, XSTR (r, 0));
2731 break;
2732 case LABEL_REF:
2733 output_asm_label (r);
2734 break;
2735 default:
2736 fprintf (stderr, "don't know how to print this operand:");
2737 debug_rtx (r);
2738 gcc_unreachable ();
2741 else
2743 if (conversions[i].format[j] == 'z')
2745 /* Some addressing modes *must* have a displacement,
2746 so insert a zero here if needed. */
2747 int k;
2748 for (k = j + 1; conversions[i].format[k]; k++)
2749 if (ISDIGIT (conversions[i].format[k]))
2751 rtx reg = patternr[conversions[i].format[k] - '0'];
2752 if (GET_CODE (reg) == REG
2753 && (REGNO (reg) == SB_REGNO
2754 || REGNO (reg) == FB_REGNO
2755 || REGNO (reg) == SP_REGNO))
2756 fputc ('0', file);
2758 continue;
2760 /* Signed displacements off symbols need to have signs
2761 blended cleanly. */
2762 if (conversions[i].format[j] == '+'
2763 && (!code || code == 'D' || code == 'd')
2764 && ISDIGIT (conversions[i].format[j + 1])
2765 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2766 == CONST_INT))
2768 force_sign = 1;
2769 continue;
2771 fputc (conversions[i].format[j], file);
2773 break;
2775 if (!conversions[i].pattern)
2777 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2778 pattern);
2779 debug_rtx (x);
2780 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2783 return;
2786 /* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2788 See m32c_print_operand above for descriptions of what these do. */
2790 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2791 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2793 static bool
2794 m32c_print_operand_punct_valid_p (unsigned char c)
2796 if (c == '&' || c == '!')
2797 return true;
2799 return false;
2802 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2804 #undef TARGET_PRINT_OPERAND_ADDRESS
2805 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2807 static void
2808 m32c_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx address)
2810 if (GET_CODE (address) == MEM)
2811 address = XEXP (address, 0);
2812 else
2813 /* cf: gcc.dg/asm-4.c. */
2814 gcc_assert (GET_CODE (address) == REG);
2816 m32c_print_operand (stream, address, 0);
2819 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2820 differently than general registers. */
2821 void
2822 m32c_output_reg_push (FILE * s, int regno)
2824 if (regno == FLG_REGNO)
2825 fprintf (s, "\tpushc\tflg\n");
2826 else
2827 fprintf (s, "\tpush.%c\t%s\n",
2828 " bwll"[reg_push_size (regno)], reg_names[regno]);
2831 /* Likewise for ASM_OUTPUT_REG_POP. */
2832 void
2833 m32c_output_reg_pop (FILE * s, int regno)
2835 if (regno == FLG_REGNO)
2836 fprintf (s, "\tpopc\tflg\n");
2837 else
2838 fprintf (s, "\tpop.%c\t%s\n",
2839 " bwll"[reg_push_size (regno)], reg_names[regno]);
2842 /* Defining target-specific uses of `__attribute__' */
2844 /* Used to simplify the logic below. Find the attributes wherever
2845 they may be. */
2846 #define M32C_ATTRIBUTES(decl) \
2847 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2848 : DECL_ATTRIBUTES (decl) \
2849 ? (DECL_ATTRIBUTES (decl)) \
2850 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2852 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2853 static int
2854 interrupt_p (tree node ATTRIBUTE_UNUSED)
2856 tree list = M32C_ATTRIBUTES (node);
2857 while (list)
2859 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2860 return 1;
2861 list = TREE_CHAIN (list);
2863 return fast_interrupt_p (node);
2866 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2867 static int
2868 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2870 tree list = M32C_ATTRIBUTES (node);
2871 while (list)
2873 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2874 return 1;
2875 list = TREE_CHAIN (list);
2877 return 0;
2880 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2881 static int
2882 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2884 tree list = M32C_ATTRIBUTES (node);
2885 while (list)
2887 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2888 return 1;
2889 list = TREE_CHAIN (list);
2891 return 0;
2894 static tree
2895 interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2896 tree name ATTRIBUTE_UNUSED,
2897 tree args ATTRIBUTE_UNUSED,
2898 int flags ATTRIBUTE_UNUSED,
2899 bool * no_add_attrs ATTRIBUTE_UNUSED)
2901 return NULL_TREE;
2904 /* Returns TRUE if given tree has the "function_vector" attribute. */
2906 m32c_special_page_vector_p (tree func)
2908 tree list;
2910 if (TREE_CODE (func) != FUNCTION_DECL)
2911 return 0;
2913 list = M32C_ATTRIBUTES (func);
2914 while (list)
2916 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2917 return 1;
2918 list = TREE_CHAIN (list);
2920 return 0;
2923 static tree
2924 function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2925 tree name ATTRIBUTE_UNUSED,
2926 tree args ATTRIBUTE_UNUSED,
2927 int flags ATTRIBUTE_UNUSED,
2928 bool * no_add_attrs ATTRIBUTE_UNUSED)
2930 if (TARGET_R8C)
2932 /* The attribute is not supported for the R8C target. */
2933 warning (OPT_Wattributes,
2934 "%qE attribute is not supported for R8C target",
2935 name);
2936 *no_add_attrs = true;
2938 else if (TREE_CODE (*node) != FUNCTION_DECL)
2940 /* The attribute must be applied to functions only. */
2941 warning (OPT_Wattributes,
2942 "%qE attribute applies only to functions",
2943 name);
2944 *no_add_attrs = true;
2946 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2948 /* The argument must be a constant integer. */
2949 warning (OPT_Wattributes,
2950 "%qE attribute argument not an integer constant",
2951 name);
2952 *no_add_attrs = true;
2954 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2955 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2957 /* The argument value must be between 18 and 255. */
2958 warning (OPT_Wattributes,
2959 "%qE attribute argument should be between 18 to 255",
2960 name);
2961 *no_add_attrs = true;
2963 return NULL_TREE;
2966 /* If the function is assigned the 'function_vector' attribute, return
2967 the function vector number; otherwise return zero. */
2969 current_function_special_page_vector (rtx x)
2971 int num;
2973 if ((GET_CODE(x) == SYMBOL_REF)
2974 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2976 tree list;
2977 tree t = SYMBOL_REF_DECL (x);
2979 if (TREE_CODE (t) != FUNCTION_DECL)
2980 return 0;
2982 list = M32C_ATTRIBUTES (t);
2983 while (list)
2985 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2987 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2988 return num;
2991 list = TREE_CHAIN (list);
2994 return 0;
2996 else
2997 return 0;
3000 #undef TARGET_ATTRIBUTE_TABLE
3001 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3002 TARGET_GNU_ATTRIBUTES (m32c_attribute_table, {
3003 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
3004 affects_type_identity, handler, exclude } */
3005 { "interrupt", 0, 0, false, false, false, false, interrupt_handler, NULL },
3006 { "bank_switch", 0, 0, false, false, false, false, interrupt_handler, NULL },
3007 { "fast_interrupt", 0, 0, false, false, false, false,
3008 interrupt_handler, NULL },
3009 { "function_vector", 1, 1, true, false, false, false,
3010 function_vector_handler, NULL }
3013 #undef TARGET_COMP_TYPE_ATTRIBUTES
3014 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3015 static int
3016 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3017 const_tree type2 ATTRIBUTE_UNUSED)
3019 /* 0=incompatible 1=compatible 2=warning */
3020 return 1;
3023 #undef TARGET_INSERT_ATTRIBUTES
3024 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3025 static void
3026 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3027 tree * attr_ptr ATTRIBUTE_UNUSED)
3029 unsigned addr;
3030 /* See if we need to make #pragma address variables volatile. */
3032 if (VAR_P (node))
3034 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3035 if (m32c_get_pragma_address (name, &addr))
3037 TREE_THIS_VOLATILE (node) = true;
3042 /* Hash table of pragma info. */
3043 static GTY(()) hash_map<nofree_string_hash, unsigned> *pragma_htab;
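/* Illustrative example: if a #pragma address assigns a variable named
   port0 (a made-up name) to address 0x3aa, m32c_note_pragma_address
   records the pair, m32c_insert_attributes above marks the variable
   volatile, and m32c_output_aligned_common below emits "port0 = 0x03aa"
   instead of a .comm directive. */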
3045 void
3046 m32c_note_pragma_address (const char *varname, unsigned address)
3048 if (!pragma_htab)
3049 pragma_htab = hash_map<nofree_string_hash, unsigned>::create_ggc (31);
3051 const char *name = ggc_strdup (varname);
3052 unsigned int *slot = &pragma_htab->get_or_insert (name);
3053 *slot = address;
3056 static bool
3057 m32c_get_pragma_address (const char *varname, unsigned *address)
3059 if (!pragma_htab)
3060 return false;
3062 unsigned int *slot = pragma_htab->get (varname);
3063 if (slot)
3065 *address = *slot;
3066 return true;
3068 return false;
3071 void
3072 m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3073 const char *name,
3074 int size, int align, int global)
3076 unsigned address;
3078 if (m32c_get_pragma_address (name, &address))
3080 /* We never output these as global. */
3081 assemble_name (stream, name);
3082 fprintf (stream, " = 0x%04x\n", address);
3083 return;
3085 if (!global)
3087 fprintf (stream, "\t.local\t");
3088 assemble_name (stream, name);
3089 fprintf (stream, "\n");
3091 fprintf (stream, "\t.comm\t");
3092 assemble_name (stream, name);
3093 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3096 /* Predicates */
3098 /* This is a list of legal subregs of hard regs. */
3099 static const struct {
3100 unsigned char outer_mode_size;
3101 unsigned char inner_mode_size;
3102 unsigned char byte_mask;
3103 unsigned char legal_when;
3104 unsigned int regno;
3105 } legal_subregs[] = {
3106 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3107 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3108 {1, 2, 0x01, 1, A0_REGNO},
3109 {1, 2, 0x01, 1, A1_REGNO},
3111 {1, 4, 0x01, 1, A0_REGNO},
3112 {1, 4, 0x01, 1, A1_REGNO},
3114 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3115 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3116 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3117 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3118 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
3120 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
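/* Reading the table above: {1, 2, 0x03, 1, R0_REGNO} says a one-byte
   subreg of the two-byte r0 is legal at byte offset 0 or 1 (mask 0x03)
   on every target (legal_when 1); entries with legal_when 16 or 24
   apply only to -A16 or -A24 targets respectively. */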
3123 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3124 support. We also bail on MEMs with illegal addresses. */
3125 bool
3126 m32c_illegal_subreg_p (rtx op)
3128 int offset;
3129 unsigned int i;
3130 machine_mode src_mode, dest_mode;
3132 if (GET_CODE (op) == MEM
3133 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3135 return true;
3138 if (GET_CODE (op) != SUBREG)
3139 return false;
3141 dest_mode = GET_MODE (op);
3142 offset = SUBREG_BYTE (op);
3143 op = SUBREG_REG (op);
3144 src_mode = GET_MODE (op);
3146 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3147 return false;
3148 if (GET_CODE (op) != REG)
3149 return false;
3150 if (REGNO (op) >= MEM0_REGNO)
3151 return false;
3153 offset = (1 << offset);
3155 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
3156 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3157 && legal_subregs[i].regno == REGNO (op)
3158 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3159 && legal_subregs[i].byte_mask & offset)
3161 switch (legal_subregs[i].legal_when)
3163 case 1:
3164 return false;
3165 case 16:
3166 if (TARGET_A16)
3167 return false;
3168 break;
3169 case 24:
3170 if (TARGET_A24)
3171 return false;
3172 break;
3175 return true;
3178 /* Returns TRUE if we support a move between the first two operands.
3179 At the moment, we just want to discourage mem to mem moves until
3180 after reload, because reload has a hard time with our limited
3181 number of address registers, and we can get into a situation where
3182 we need three of them when we only have two. */
3183 bool
3184 m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
3186 rtx op0 = operands[0];
3187 rtx op1 = operands[1];
3189 if (TARGET_A24)
3190 return true;
3192 #define DEBUG_MOV_OK 0
3193 #if DEBUG_MOV_OK
3194 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3195 debug_rtx (op0);
3196 debug_rtx (op1);
3197 #endif
3199 if (GET_CODE (op0) == SUBREG)
3200 op0 = XEXP (op0, 0);
3201 if (GET_CODE (op1) == SUBREG)
3202 op1 = XEXP (op1, 0);
3204 if (GET_CODE (op0) == MEM
3205 && GET_CODE (op1) == MEM
3206 && ! reload_completed)
3208 #if DEBUG_MOV_OK
3209 fprintf (stderr, " - no, mem to mem\n");
3210 #endif
3211 return false;
3214 #if DEBUG_MOV_OK
3215 fprintf (stderr, " - ok\n");
3216 #endif
3217 return true;
3220 /* Returns TRUE if two consecutive HImode mov instructions, generated
3221 for moving an immediate double-word value to a double-word variable
3222 location, can be combined into a single SImode mov instruction. */
3223 bool
3224 m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
3225 machine_mode mode ATTRIBUTE_UNUSED)
3227 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3228 flags. */
3229 return false;
3232 /* Expanders */
3234 /* Subregs are non-orthogonal for us, because our registers are all
3235 different sizes. */
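/* For example, per the mapping below, the high HImode half (byte
   offset 2) of an SImode value held in r0 is r2, and byte offset 2 of
   a value held in a0 maps to a1. */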
3236 static rtx
3237 m32c_subreg (machine_mode outer,
3238 rtx x, machine_mode inner, int byte)
3240 int r, nr = -1;
3242 /* When converting MEMs to different modes of the same size, we
3243 just rewrite them. */
3244 if (GET_CODE (x) == SUBREG
3245 && SUBREG_BYTE (x) == 0
3246 && GET_CODE (SUBREG_REG (x)) == MEM
3247 && (GET_MODE_SIZE (GET_MODE (x))
3248 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3250 rtx oldx = x;
3251 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3252 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3255 /* Push/pop get done as smaller push/pops. */
3256 if (GET_CODE (x) == MEM
3257 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3258 || GET_CODE (XEXP (x, 0)) == POST_INC))
3259 return gen_rtx_MEM (outer, XEXP (x, 0));
3260 if (GET_CODE (x) == SUBREG
3261 && GET_CODE (XEXP (x, 0)) == MEM
3262 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3263 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3264 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3266 if (GET_CODE (x) != REG)
3268 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3269 if (GET_CODE (r) == SUBREG
3270 && GET_CODE (x) == MEM
3271 && MEM_VOLATILE_P (x))
3273 /* Volatile MEMs don't get simplified, but we need them to
3274 be. We are little endian, so the subreg byte is the
3275 offset. */
3276 r = adjust_address_nv (x, outer, byte);
3278 return r;
3281 r = REGNO (x);
3282 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3283 return simplify_gen_subreg (outer, x, inner, byte);
3285 if (IS_MEM_REGNO (r))
3286 return simplify_gen_subreg (outer, x, inner, byte);
3288 /* This is where the complexities of our register layout are
3289 described. */
3290 if (byte == 0)
3291 nr = r;
3292 else if (outer == HImode)
3294 if (r == R0_REGNO && byte == 2)
3295 nr = R2_REGNO;
3296 else if (r == R0_REGNO && byte == 4)
3297 nr = R1_REGNO;
3298 else if (r == R0_REGNO && byte == 6)
3299 nr = R3_REGNO;
3300 else if (r == R1_REGNO && byte == 2)
3301 nr = R3_REGNO;
3302 else if (r == A0_REGNO && byte == 2)
3303 nr = A1_REGNO;
3305 else if (outer == SImode)
3307 if (r == R0_REGNO && byte == 0)
3308 nr = R0_REGNO;
3309 else if (r == R0_REGNO && byte == 4)
3310 nr = R1_REGNO;
3312 if (nr == -1)
3314 fprintf (stderr, "m32c_subreg %s %s %d\n",
3315 mode_name[outer], mode_name[inner], byte);
3316 debug_rtx (x);
3317 gcc_unreachable ();
3319 return gen_rtx_REG (outer, nr);
3322 /* Used to emit move instructions. We split some moves,
3323 and avoid mem-mem moves. */
3325 m32c_prepare_move (rtx * operands, machine_mode mode)
3327 if (far_addr_space_p (operands[0])
3328 && CONSTANT_P (operands[1]))
3330 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3332 if (TARGET_A16 && mode == PSImode)
3333 return m32c_split_move (operands, mode, 1);
3334 if ((GET_CODE (operands[0]) == MEM)
3335 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3337 rtx pmv = XEXP (operands[0], 0);
3338 rtx dest_reg = XEXP (pmv, 0);
3339 rtx dest_mod = XEXP (pmv, 1);
3341 emit_insn (gen_rtx_SET (dest_reg, dest_mod));
3342 operands[0] = gen_rtx_MEM (mode, dest_reg);
3344 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3345 operands[1] = copy_to_mode_reg (mode, operands[1]);
3346 return 0;
3349 #define DEBUG_SPLIT 0
3351 /* Returns TRUE if the given PSImode move should be split. We split
3352 all R8C/M16C moves, since those chips don't support PSImode moves,
3353 and POP.L, as we can only *push* SImode. */
3355 m32c_split_psi_p (rtx * operands)
3357 #if DEBUG_SPLIT
3358 fprintf (stderr, "\nm32c_split_psi_p\n");
3359 debug_rtx (operands[0]);
3360 debug_rtx (operands[1]);
3361 #endif
3362 if (TARGET_A16)
3364 #if DEBUG_SPLIT
3365 fprintf (stderr, "yes, A16\n");
3366 #endif
3367 return 1;
3369 if (GET_CODE (operands[1]) == MEM
3370 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3372 #if DEBUG_SPLIT
3373 fprintf (stderr, "yes, pop.l\n");
3374 #endif
3375 return 1;
3377 #if DEBUG_SPLIT
3378 fprintf (stderr, "no, default\n");
3379 #endif
3380 return 0;
3383 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3384 (define_expand), 1 if it is not optional (define_insn_and_split),
3385 and 3 for define_split (alternate api). */
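/* For example, a DImode move is handled as two SImode halves and an
   SImode move as two HImode halves; a split push emits the high half
   first, so the low half ends up at the lower address. */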
3387 m32c_split_move (rtx * operands, machine_mode mode, int split_all)
3389 rtx s[4], d[4];
3390 int parts, si, di, rev = 0;
3391 int rv = 0, opi = 2;
3392 machine_mode submode = HImode;
3393 rtx *ops, local_ops[10];
3395 /* define_split modifies the existing operands, but the other two
3396 emit new insns. OPS is where we store the operand pairs, which
3397 we emit later. */
3398 if (split_all == 3)
3399 ops = operands;
3400 else
3401 ops = local_ops;
3403 /* Else HImode. */
3404 if (mode == DImode)
3405 submode = SImode;
3407 /* Before splitting mem-mem moves, force one operand into a
3408 register. */
3409 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3411 #if DEBUG0
3412 fprintf (stderr, "force_reg...\n");
3413 debug_rtx (operands[1]);
3414 #endif
3415 operands[1] = force_reg (mode, operands[1]);
3416 #if DEBUG0
3417 debug_rtx (operands[1]);
3418 #endif
3421 parts = 2;
3423 #if DEBUG_SPLIT
3424 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3425 split_all);
3426 debug_rtx (operands[0]);
3427 debug_rtx (operands[1]);
3428 #endif
3430 /* Note that split_all is not used to select the api after this
3431 point, so it's safe to set it to 3 even with define_insn. */
3432 /* None of the chips can move SI operands to sp-relative addresses,
3433 so we always split those. */
3434 if (satisfies_constraint_Ss (operands[0]))
3435 split_all = 3;
3437 if (TARGET_A16
3438 && (far_addr_space_p (operands[0])
3439 || far_addr_space_p (operands[1])))
3440 split_all |= 1;
3442 /* We don't need to split these. */
3443 if (TARGET_A24
3444 && split_all != 3
3445 && (mode == SImode || mode == PSImode)
3446 && !(GET_CODE (operands[1]) == MEM
3447 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3448 return 0;
3450 /* First, enumerate the subregs we'll be dealing with. */
3451 for (si = 0; si < parts; si++)
3453 d[si] =
3454 m32c_subreg (submode, operands[0], mode,
3455 si * GET_MODE_SIZE (submode));
3456 s[si] =
3457 m32c_subreg (submode, operands[1], mode,
3458 si * GET_MODE_SIZE (submode));
3461 /* Split pushes by emitting a sequence of smaller pushes. */
3462 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3464 for (si = parts - 1; si >= 0; si--)
3466 ops[opi++] = gen_rtx_MEM (submode,
3467 gen_rtx_PRE_DEC (Pmode,
3468 gen_rtx_REG (Pmode,
3469 SP_REGNO)));
3470 ops[opi++] = s[si];
3473 rv = 1;
3475 /* Likewise for pops. */
3476 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3478 for (di = 0; di < parts; di++)
3480 ops[opi++] = d[di];
3481 ops[opi++] = gen_rtx_MEM (submode,
3482 gen_rtx_POST_INC (Pmode,
3483 gen_rtx_REG (Pmode,
3484 SP_REGNO)));
3486 rv = 1;
3488 else if (split_all)
3490 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3491 for (di = 0; di < parts - 1; di++)
3492 for (si = di + 1; si < parts; si++)
3493 if (reg_mentioned_p (d[di], s[si]))
3494 rev = 1;
3496 if (rev)
3497 for (si = 0; si < parts; si++)
3499 ops[opi++] = d[si];
3500 ops[opi++] = s[si];
3502 else
3503 for (si = parts - 1; si >= 0; si--)
3505 ops[opi++] = d[si];
3506 ops[opi++] = s[si];
3508 rv = 1;
3510 /* Now emit any moves we may have accumulated. */
3511 if (rv && split_all != 3)
3513 int i;
3514 for (i = 2; i < opi; i += 2)
3515 emit_move_insn (ops[i], ops[i + 1]);
3517 return rv;
3520 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3521 the like. For the R8C they expect one of the addresses to be in
3522 R1L:An so we need to arrange for that. Otherwise, it's just a
3523 matter of picking out the operands we want and emitting the right
3524 pattern for them. All these expanders, which correspond to
3525 patterns in blkmov.md, must return nonzero if they expand the insn,
3526 or zero if they should FAIL. */
3528 /* This is a memset() opcode. All operands are implied, so we need to
3529 arrange for them to be in the right registers. The opcode wants
3530 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3531 the count (HI), and $2 the value (QI). */
3533 m32c_expand_setmemhi(rtx *operands)
3535 rtx desta, count, val;
3536 rtx desto, counto;
3538 desta = XEXP (operands[0], 0);
3539 count = operands[1];
3540 val = operands[2];
3542 desto = gen_reg_rtx (Pmode);
3543 counto = gen_reg_rtx (HImode);
3545 if (GET_CODE (desta) != REG
3546 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3547 desta = copy_to_mode_reg (Pmode, desta);
3549 /* This looks like an arbitrary restriction, but this is by far the
3550 most common case. For counts 8..14 this actually results in
3551 smaller code with no speed penalty because the half-sized
3552 constant can be loaded with a shorter opcode. */
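/* For example, an 8-byte memset with the value 0x05 becomes a fill of
   four HImode words with the constant 0x0505. */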
3553 if (GET_CODE (count) == CONST_INT
3554 && GET_CODE (val) == CONST_INT
3555 && ! (INTVAL (count) & 1)
3556 && (INTVAL (count) > 1)
3557 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3559 unsigned v = INTVAL (val) & 0xff;
3560 v = v | (v << 8);
3561 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3562 val = copy_to_mode_reg (HImode, GEN_INT (v));
3563 if (TARGET_A16)
3564 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3565 else
3566 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3567 return 1;
3570 /* This is the generalized memset() case. */
3571 if (GET_CODE (val) != REG
3572 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3573 val = copy_to_mode_reg (QImode, val);
3575 if (GET_CODE (count) != REG
3576 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3577 count = copy_to_mode_reg (HImode, count);
3579 if (TARGET_A16)
3580 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3581 else
3582 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3584 return 1;
3587 /* This is a memcpy() opcode. All operands are implied, so we need to
3588 arrange for them to be in the right registers. The opcode wants
3589 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3590 is the source (MEM:BLK), and $2 the count (HI). */
3592 m32c_expand_cpymemhi(rtx *operands)
3594 rtx desta, srca, count;
3595 rtx desto, srco, counto;
3597 desta = XEXP (operands[0], 0);
3598 srca = XEXP (operands[1], 0);
3599 count = operands[2];
3601 desto = gen_reg_rtx (Pmode);
3602 srco = gen_reg_rtx (Pmode);
3603 counto = gen_reg_rtx (HImode);
3605 if (GET_CODE (desta) != REG
3606 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3607 desta = copy_to_mode_reg (Pmode, desta);
3609 if (GET_CODE (srca) != REG
3610 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3611 srca = copy_to_mode_reg (Pmode, srca);
3613 /* Similar to setmem, but we don't need to check the value. */
3614 if (GET_CODE (count) == CONST_INT
3615 && ! (INTVAL (count) & 1)
3616 && (INTVAL (count) > 1))
3618 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3619 if (TARGET_A16)
3620 emit_insn (gen_cpymemhi_whi_op (desto, srco, counto, desta, srca, count));
3621 else
3622 emit_insn (gen_cpymemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3623 return 1;
3626 /* This is the generalized memcpy() case. */
3627 if (GET_CODE (count) != REG
3628 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3629 count = copy_to_mode_reg (HImode, count);
3631 if (TARGET_A16)
3632 emit_insn (gen_cpymemhi_bhi_op (desto, srco, counto, desta, srca, count));
3633 else
3634 emit_insn (gen_cpymemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3636 return 1;
3639 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3640 the copy, which should point to the NUL at the end of the string,
3641 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3642 Since our opcode leaves the destination pointing *after* the NUL,
3643 we must emit an adjustment. */
3645 m32c_expand_movstr(rtx *operands)
3647 rtx desta, srca;
3648 rtx desto, srco;
3650 desta = XEXP (operands[1], 0);
3651 srca = XEXP (operands[2], 0);
3653 desto = gen_reg_rtx (Pmode);
3654 srco = gen_reg_rtx (Pmode);
3656 if (GET_CODE (desta) != REG
3657 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3658 desta = copy_to_mode_reg (Pmode, desta);
3660 if (GET_CODE (srca) != REG
3661 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3662 srca = copy_to_mode_reg (Pmode, srca);
3664 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3665 /* desto ends up being a1, which allows this type of add through MOVA. */
3666 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3668 return 1;
3671 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3672 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3673 $2 is the other (MEM:BLK). We must do the comparison, and then
3674 convert the flags to a signed integer result. */
3676 m32c_expand_cmpstr(rtx *operands)
3678 rtx src1a, src2a;
3680 src1a = XEXP (operands[1], 0);
3681 src2a = XEXP (operands[2], 0);
3683 if (GET_CODE (src1a) != REG
3684 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3685 src1a = copy_to_mode_reg (Pmode, src1a);
3687 if (GET_CODE (src2a) != REG
3688 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3689 src2a = copy_to_mode_reg (Pmode, src2a);
3691 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3692 emit_insn (gen_cond_to_int (operands[0]));
3694 return 1;
3698 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3700 static shift_gen_func
3701 shift_gen_func_for (int mode, int code)
3703 #define GFF(m,c,f) if (mode == m && code == c) return f
3704 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3705 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3706 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3707 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3708 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3709 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3710 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3711 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3712 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3713 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3714 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3715 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3716 #undef GFF
3717 gcc_unreachable ();
3720 /* The m32c only has one shift, but it takes a signed count. GCC
3721 doesn't want this, so we fake it by negating any shift count when
3722 we're pretending to shift the other way. Also, the shift count is
3723 limited to -8..8. It's slightly better to use two shifts for 9..15
3724 than to load the count into r1h, so we do that too. */
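/* For example, where the count limit is 8, a constant left shift by 20
   is emitted as three shifts: by 8, by 8, and by 4. */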
3726 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3728 machine_mode mode = GET_MODE (operands[0]);
3729 shift_gen_func func = shift_gen_func_for (mode, shift_code);
3730 rtx temp;
3732 if (GET_CODE (operands[2]) == CONST_INT)
3734 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3735 int count = INTVAL (operands[2]) * scale;
3737 while (count > maxc)
3739 temp = gen_reg_rtx (mode);
3740 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3741 operands[1] = temp;
3742 count -= maxc;
3744 while (count < -maxc)
3746 temp = gen_reg_rtx (mode);
3747 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3748 operands[1] = temp;
3749 count += maxc;
3751 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3752 return 1;
3755 temp = gen_reg_rtx (QImode);
3756 if (scale < 0)
3757 /* The pattern has a NEG that corresponds to this. */
3758 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3759 else if (TARGET_A16 && mode == SImode)
3760 /* We do this because the code below may modify this value, and we
3761 don't want to modify its origin. */
3762 emit_move_insn (temp, operands[2]);
3763 else
3764 /* We'll only use it for the shift, no point emitting a move. */
3765 temp = operands[2];
3767 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3769 /* The m16c has a limit of -16..16 for SI shifts, even when the
3770 shift count is in a register. Since there are so many targets
3771 of these shifts, it's better to expand the RTL here than to
3772 call a helper function.
3774 The resulting code looks something like this:
3776 cmp.b r1h,-16
3777 jge.b 1f
3778 shl.l -16,dest
3779 add.b r1h,16
3780 1f: cmp.b r1h,16
3781 jle.b 1f
3782 shl.l 16,dest
3783 sub.b r1h,16
3784 1f: shl.l r1h,dest
3786 We take advantage of the fact that "negative" shifts are
3787 undefined to skip one of the comparisons. */
3789 rtx count;
3790 rtx tempvar;
3791 rtx_insn *insn;
3793 emit_move_insn (operands[0], operands[1]);
3795 count = temp;
3796 rtx_code_label *label = gen_label_rtx ();
3797 LABEL_NUSES (label) ++;
3799 tempvar = gen_reg_rtx (mode);
3801 if (shift_code == ASHIFT)
3803 /* This is a left shift. We only need to check positive counts. */
3804 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3805 count, GEN_INT (16), label));
3806 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3807 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3808 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3809 emit_label_after (label, insn);
3811 else
3813 /* This is a right shift. We only need to check negative counts. */
3814 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3815 count, GEN_INT (-16), label));
3816 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3817 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3818 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3819 emit_label_after (label, insn);
3821 operands[1] = operands[0];
3822 emit_insn (func (operands[0], operands[0], count));
3823 return 1;
3826 operands[2] = temp;
3827 return 0;
3830 /* The m32c has a limited range of operations that work on PSImode
3831 values; we have to expand to SI, do the math, and truncate back to
3832 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3833 those cases. */
3834 void
3835 m32c_expand_neg_mulpsi3 (rtx * operands)
3837 /* operands: a = b * i */
3838 rtx temp1; /* b as SI */
3839 rtx scale; /* i as SI */
3840 rtx temp2; /* a*b as SI */
3842 temp1 = gen_reg_rtx (SImode);
3843 temp2 = gen_reg_rtx (SImode);
3844 if (GET_CODE (operands[2]) != CONST_INT)
3846 scale = gen_reg_rtx (SImode);
3847 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3849 else
3850 scale = copy_to_mode_reg (SImode, operands[2]);
3852 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3853 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3854 emit_insn (gen_truncsipsi2 (operands[0], temp2));
3857 /* Pattern Output Functions */
3860 m32c_expand_movcc (rtx *operands)
3862 rtx rel = operands[1];
3864 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3865 return 1;
3866 if (GET_CODE (operands[2]) != CONST_INT
3867 || GET_CODE (operands[3]) != CONST_INT)
3868 return 1;
3869 if (GET_CODE (rel) == NE)
3871 rtx tmp = operands[2];
3872 operands[2] = operands[3];
3873 operands[3] = tmp;
3874 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3877 emit_move_insn (operands[0],
3878 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3879 rel,
3880 operands[2],
3881 operands[3]));
3882 return 0;
3885 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3887 m32c_expand_insv (rtx *operands)
3889 rtx op0, src0, p;
3890 int mask;
3892 if (INTVAL (operands[1]) != 1)
3893 return 1;
3895 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3896 if (GET_CODE (operands[3]) != CONST_INT)
3897 return 1;
3898 if (INTVAL (operands[3]) != 0
3899 && INTVAL (operands[3]) != 1
3900 && INTVAL (operands[3]) != -1)
3901 return 1;
3903 mask = 1 << INTVAL (operands[2]);
3905 op0 = operands[0];
3906 if (GET_CODE (op0) == SUBREG
3907 && SUBREG_BYTE (op0) == 0)
3909 rtx sub = SUBREG_REG (op0);
3910 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3911 op0 = sub;
3914 if (!can_create_pseudo_p ()
3915 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3916 src0 = op0;
3917 else
3919 src0 = gen_reg_rtx (GET_MODE (op0));
3920 emit_move_insn (src0, op0);
3923 if (GET_MODE (op0) == HImode
3924 && INTVAL (operands[2]) >= 8
3925 && GET_CODE (op0) == MEM)
3927 /* We are little endian. */
3928 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3929 XEXP (op0, 0), 1));
3930 MEM_COPY_ATTRIBUTES (new_mem, op0);
3931 mask >>= 8;
3934 /* First, we generate a mask with the correct polarity. If we are
3935 storing a zero, we want an AND mask, so invert it. */
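/* For example, storing a 1 into bit 3 of a QImode destination ORs in
   the mask 0x08, while storing a 0 ANDs with the inverted mask 0xf7
   (represented as -9 after the sign extension below); these single-bit
   IOR/AND forms are what the bset/bclr patterns mentioned above
   match. */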
3936 if (INTVAL (operands[3]) == 0)
3938 /* Storing a zero, use an AND mask */
3939 if (GET_MODE (op0) == HImode)
3940 mask ^= 0xffff;
3941 else
3942 mask ^= 0xff;
3944 /* Now we need to properly sign-extend the mask in case we need to
3945 fall back to an AND or OR opcode. */
3946 if (GET_MODE (op0) == HImode)
3948 if (mask & 0x8000)
3949 mask -= 0x10000;
3951 else
3953 if (mask & 0x80)
3954 mask -= 0x100;
3957 switch ( (INTVAL (operands[3]) ? 4 : 0)
3958 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3959 + (TARGET_A24 ? 1 : 0))
3961 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3962 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3963 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3964 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3965 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3966 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3967 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3968 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3969 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
3972 emit_insn (p);
3973 return 0;
3976 const char *
3977 m32c_scc_pattern(rtx *operands, RTX_CODE code)
3979 static char buf[30];
3980 if (GET_CODE (operands[0]) == REG
3981 && REGNO (operands[0]) == R0_REGNO)
3983 if (code == EQ)
3984 return "stzx\t#1,#0,r0l";
3985 if (code == NE)
3986 return "stzx\t#0,#1,r0l";
3988 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3989 return buf;
3992 /* Encode symbol attributes of a SYMBOL_REF into its
3993 SYMBOL_REF_FLAGS. */
3994 static void
3995 m32c_encode_section_info (tree decl, rtx rtl, int first)
3997 int extra_flags = 0;
3999 default_encode_section_info (decl, rtl, first);
4000 if (TREE_CODE (decl) == FUNCTION_DECL
4001 && m32c_special_page_vector_p (decl))
4003 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4005 if (extra_flags)
4006 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4009 /* Returns TRUE if the current function is a leaf, and thus we can
4010 determine which registers an interrupt function really needs to
4011 save. The logic below is mostly about finding the insn sequence
4012 that's the function, versus any sequence that might be open for the
4013 current insn. */
4014 static int
4015 m32c_leaf_function_p (void)
4017 int rv;
4019 push_topmost_sequence ();
4020 rv = leaf_function_p ();
4021 pop_topmost_sequence ();
4022 return rv;
4025 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4026 opcodes. If the function doesn't need the frame base or stack
4027 pointer, it can use the simpler RTS opcode. */
4028 static bool
4029 m32c_function_needs_enter (void)
4031 rtx_insn *insn;
4032 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4033 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4035 for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
4036 if (NONDEBUG_INSN_P (insn))
4038 if (reg_mentioned_p (sp, insn))
4039 return true;
4040 if (reg_mentioned_p (fb, insn))
4041 return true;
4043 return false;
4046 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4047 frame-related. Return PAR.
4049 dwarf2out.cc:dwarf2out_frame_debug_expr ignores sub-expressions of a
4050 PARALLEL rtx other than the first if they do not have the
4051 FRAME_RELATED flag set on them. So this function is handy for
4052 marking up 'enter' instructions. */
4053 static rtx
4054 m32c_all_frame_related (rtx par)
4056 int len = XVECLEN (par, 0);
4057 int i;
4059 for (i = 0; i < len; i++)
4060 F (XVECEXP (par, 0, i));
4062 return par;
4065 /* Emits the prologue. See the frame layout comment earlier in this
4066 file. We can reserve up to 256 bytes with the ENTER opcode; beyond
4067 that we manually update sp. */
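/* For example, a 300-byte frame is reserved with an ENTER covering 254
   bytes followed by an explicit 46-byte adjustment of the stack
   pointer. */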
4068 void
4069 m32c_emit_prologue (void)
4071 int frame_size, extra_frame_size = 0, reg_save_size;
4072 int complex_prologue = 0;
4074 cfun->machine->is_leaf = m32c_leaf_function_p ();
4075 if (interrupt_p (cfun->decl))
4077 cfun->machine->is_interrupt = 1;
4078 complex_prologue = 1;
4080 else if (bank_switch_p (cfun->decl))
4081 warning (OPT_Wattributes,
4082 "%<bank_switch%> has no effect on non-interrupt functions");
4084 reg_save_size = m32c_pushm_popm (PP_justcount);
4086 if (interrupt_p (cfun->decl))
4088 if (bank_switch_p (cfun->decl))
4089 emit_insn (gen_fset_b ());
4090 else if (cfun->machine->intr_pushm)
4091 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4094 frame_size =
4095 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4096 if (frame_size == 0
4097 && !m32c_function_needs_enter ())
4098 cfun->machine->use_rts = 1;
4100 if (flag_stack_usage_info)
4101 current_function_static_stack_size = frame_size;
4103 if (frame_size > 254)
4105 extra_frame_size = frame_size - 254;
4106 frame_size = 254;
4108 if (cfun->machine->use_rts == 0)
4109 F (emit_insn (m32c_all_frame_related
4110 (TARGET_A16
4111 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4112 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4114 if (extra_frame_size)
4116 complex_prologue = 1;
4117 if (TARGET_A16)
4118 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4119 gen_rtx_REG (HImode, SP_REGNO),
4120 GEN_INT (-extra_frame_size))));
4121 else
4122 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4123 gen_rtx_REG (PSImode, SP_REGNO),
4124 GEN_INT (-extra_frame_size))));
4127 complex_prologue += m32c_pushm_popm (PP_pushm);
4129 /* This just emits a comment into the .s file for debugging. */
4130 if (complex_prologue)
4131 emit_insn (gen_prologue_end ());
4134 /* Likewise, for the epilogue. The only exception is that, for
4135 interrupts, we must manually unwind the frame as the REIT opcode
4136 doesn't do that. */
4137 void
4138 m32c_emit_epilogue (void)
4140 int popm_count = m32c_pushm_popm (PP_justcount);
4142 /* This just emits a comment into the .s file for debugging. */
4143 if (popm_count > 0 || cfun->machine->is_interrupt)
4144 emit_insn (gen_epilogue_start ());
4146 if (popm_count > 0)
4147 m32c_pushm_popm (PP_popm);
4149 if (cfun->machine->is_interrupt)
4151 machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4153 /* REIT clears the B flag and restores $fp for us, but we still
4154 have to fix up the stack. USE_RTS just means we didn't
4155 emit ENTER. */
4156 if (!cfun->machine->use_rts)
4158 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4159 gen_rtx_REG (spmode, FP_REGNO));
4160 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4161 gen_rtx_REG (spmode, A0_REGNO));
4162 /* We can't just add this to the POPM because it would be in
4163 the wrong order, and wouldn't fix the stack if we're bank
4164 switching. */
4165 if (TARGET_A16)
4166 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4167 else
4168 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4170 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4171 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4173 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4174 generated only for M32C/M32CM targets (generate the REIT
4175 instruction otherwise). */
4176 if (fast_interrupt_p (cfun->decl))
4178 /* Check if the fast_interrupt attribute is set for M32C or M32CM. */
4179 if (TARGET_A24)
4181 emit_jump_insn (gen_epilogue_freit ());
4183 /* If the fast_interrupt attribute is set for an R8C or M16C
4184 target, ignore this attribute and generate the REIT
4185 instruction. */
4186 else
4188 warning (OPT_Wattributes,
4189 "%<fast_interrupt%> attribute directive ignored");
4190 emit_jump_insn (gen_epilogue_reit_16 ());
4193 else if (TARGET_A16)
4194 emit_jump_insn (gen_epilogue_reit_16 ());
4195 else
4196 emit_jump_insn (gen_epilogue_reit_24 ());
4198 else if (cfun->machine->use_rts)
4199 emit_jump_insn (gen_epilogue_rts ());
4200 else if (TARGET_A16)
4201 emit_jump_insn (gen_epilogue_exitd_16 ());
4202 else
4203 emit_jump_insn (gen_epilogue_exitd_24 ());
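/* To recap the interrupt path above: when ENTER was used, SP is
   restored from FB (via A0) and FB is popped explicitly, any PUSHM'd
   registers are popped unless bank_switch was in effect, and the
   function returns with FREIT (fast_interrupt on A24 parts) or REIT.
   Ordinary functions return with RTS when no frame was needed, or
   EXITD otherwise.  */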
4206 void
4207 m32c_emit_eh_epilogue (rtx ret_addr)
4209 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4210 return to. We have to fudge the stack, pop everything, pop SP
4211 (fudged), and return (fudged). This is actually easier to do in
4212 assembler, so punt to libgcc. */
4213 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4214 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4217 /* Indicate which flags must be properly set for a given conditional. */
4218 static int
4219 flags_needed_for_conditional (rtx cond)
4221 switch (GET_CODE (cond))
4223 case LE:
4224 case GT:
4225 return FLAGS_OSZ;
4226 case LEU:
4227 case GTU:
4228 return FLAGS_ZC;
4229 case LT:
4230 case GE:
4231 return FLAGS_OS;
4232 case LTU:
4233 case GEU:
4234 return FLAGS_C;
4235 case EQ:
4236 case NE:
4237 return FLAGS_Z;
4238 default:
4239 return FLAGS_N;
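/* For example, a signed greater-than branch (GT) needs the O, S and Z
   flags to be valid, its unsigned counterpart (GTU) needs only Z and
   C, and EQ/NE need just Z.  The FLAGS_N default makes
   m32c_compare_redundant keep the compare.  */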
4243 #define DEBUG_CMP 0
4245 /* Returns true if a compare insn is redundant because it would only
4246 set flags that are already set correctly. */
4247 static bool
4248 m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4250 int flags_needed;
4251 int pflags;
4252 rtx_insn *prev;
4253 rtx pp, next;
4254 rtx op0, op1;
4255 #if DEBUG_CMP
4256 int prev_icode, i;
4257 #endif
4259 op0 = operands[0];
4260 op1 = operands[1];
4262 #if DEBUG_CMP
4263 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4264 debug_rtx(cmp);
4265 for (i=0; i<2; i++)
4267 fprintf(stderr, "operands[%d] = ", i);
4268 debug_rtx(operands[i]);
4270 #endif
4272 next = next_nonnote_insn (cmp);
4273 if (!next || !INSN_P (next))
4275 #if DEBUG_CMP
4276 fprintf(stderr, "compare not followed by insn\n");
4277 debug_rtx(next);
4278 #endif
4279 return false;
4281 if (GET_CODE (PATTERN (next)) == SET
4282 && GET_CODE (XEXP (PATTERN (next), 1)) == IF_THEN_ELSE)
4284 next = XEXP (XEXP (PATTERN (next), 1), 0);
4286 else if (GET_CODE (PATTERN (next)) == SET)
4288 /* If this is a conditional, flags_needed will be something
4289 other than FLAGS_N, which we test below. */
4290 next = XEXP (PATTERN (next), 1);
4292 else
4294 #if DEBUG_CMP
4295 fprintf(stderr, "compare not followed by conditional\n");
4296 debug_rtx(next);
4297 #endif
4298 return false;
4300 #if DEBUG_CMP
4301 fprintf(stderr, "conditional is: ");
4302 debug_rtx(next);
4303 #endif
4305 flags_needed = flags_needed_for_conditional (next);
4306 if (flags_needed == FLAGS_N)
4308 #if DEBUG_CMP
4309 fprintf(stderr, "compare not followed by conditional\n");
4310 debug_rtx(next);
4311 #endif
4312 return false;
4315 /* Compare doesn't set overflow and carry the same way that
4316 arithmetic instructions do, so we can't replace those. */
4317 if (flags_needed & FLAGS_OC)
4318 return false;
4320 prev = cmp;
4321 do {
4322 prev = prev_nonnote_insn (prev);
4323 if (!prev)
4325 #if DEBUG_CMP
4326 fprintf(stderr, "No previous insn.\n");
4327 #endif
4328 return false;
4330 if (!INSN_P (prev))
4332 #if DEBUG_CMP
4333 fprintf(stderr, "Previous insn is a non-insn.\n");
4334 #endif
4335 return false;
4337 pp = PATTERN (prev);
4338 if (GET_CODE (pp) != SET)
4340 #if DEBUG_CMP
4341 fprintf(stderr, "Previous insn is not a SET.\n");
4342 #endif
4343 return false;
4345 pflags = get_attr_flags (prev);
4347 /* Looking up attributes of previous insns corrupted the recog
4348 tables. */
4349 INSN_UID (cmp) = -1;
4350 recog (PATTERN (cmp), cmp, 0);
4352 if (pflags == FLAGS_N
4353 && reg_mentioned_p (op0, pp))
4355 #if DEBUG_CMP
4356 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4357 debug_rtx(prev);
4358 #endif
4359 return false;
4362 /* Check for comparisons against memory - between volatiles and
4363 aliases, we just can't risk this one. */
4364 if (GET_CODE (operands[0]) == MEM
4365 || GET_CODE (operands[1]) == MEM)
4367 #if DEBUG_CMP
4368 fprintf(stderr, "comparisons with memory:\n");
4369 debug_rtx(prev);
4370 #endif
4371 return false;
4374 /* Check for PREV changing a register that's used to compute a
4375 value in CMP, even if it doesn't otherwise change flags. */
4376 if (GET_CODE (operands[0]) == REG
4377 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4379 #if DEBUG_CMP
4380 fprintf(stderr, "sub-value affected, op0:\n");
4381 debug_rtx(prev);
4382 #endif
4383 return false;
4385 if (GET_CODE (operands[1]) == REG
4386 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4388 #if DEBUG_CMP
4389 fprintf(stderr, "sub-value affected, op1:\n");
4390 debug_rtx(prev);
4391 #endif
4392 return false;
4395 } while (pflags == FLAGS_N);
4396 #if DEBUG_CMP
4397 fprintf(stderr, "previous flag-setting insn:\n");
4398 debug_rtx(prev);
4399 debug_rtx(pp);
4400 #endif
4402 if (GET_CODE (pp) == SET
4403 && GET_CODE (XEXP (pp, 0)) == REG
4404 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4405 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4407 /* Adjacent cbranches must have the same operands to be
4408 redundant. */
4409 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4410 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4411 #if DEBUG_CMP
4412 fprintf(stderr, "adjacent cbranches\n");
4413 debug_rtx(pop0);
4414 debug_rtx(pop1);
4415 #endif
4416 if (rtx_equal_p (op0, pop0)
4417 && rtx_equal_p (op1, pop1))
4418 return true;
4419 #if DEBUG_CMP
4420 fprintf(stderr, "prev cmp not same\n");
4421 #endif
4422 return false;
4425 /* Else the previous insn must be a SET, with either the source or
4426 dest equal to operands[0], and operands[1] must be zero. */
4428 if (!rtx_equal_p (op1, const0_rtx))
4430 #if DEBUG_CMP
4431 fprintf(stderr, "operands[1] not const0_rtx\n");
4432 #endif
4433 return false;
4435 if (GET_CODE (pp) != SET)
4437 #if DEBUG_CMP
4438 fprintf (stderr, "pp not set\n");
4439 #endif
4440 return false;
4442 if (!rtx_equal_p (op0, SET_SRC (pp))
4443 && !rtx_equal_p (op0, SET_DEST (pp)))
4445 #if DEBUG_CMP
4446 fprintf(stderr, "operands[0] not found in set\n");
4447 #endif
4448 return false;
4451 #if DEBUG_CMP
4452 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4453 #endif
4454 if ((pflags & flags_needed) == flags_needed)
4455 return true;
4457 return false;
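/* A minimal sketch of the common case this catches (register names and
   assembler syntax are illustrative, and it assumes the mov pattern's
   flags attribute records that it sets Z):

     mov.w  r1,r0
     cmp.w  #0,r0
     jeq    .L1

   The EQ branch only needs Z, operands[1] is zero and operands[0] is
   the destination of the preceding SET, so the cmp is reported
   redundant and emitted only as a comment.  */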
4460 /* Return the pattern for a compare. This will be commented out if
4461 the compare is redundant, else a normal pattern is returned. Thus,
4462 the assembler output says where the compare would have been. */
4463 char *
4464 m32c_output_compare (rtx_insn *insn, rtx *operands)
4466 static char templ[] = ";cmp.b\t%1,%0";
4467 /* ^ 5 */
4469 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4470 if (m32c_compare_redundant (insn, operands))
4472 #if DEBUG_CMP
4473 fprintf(stderr, "cbranch: cmp not needed\n");
4474 #endif
4475 return templ;
4478 #if DEBUG_CMP
4479 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4480 #endif
4481 return templ + 1;
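/* For example, with HImode operands the template becomes
   ";cmp.w\t%1,%0".  Returning TEMPL keeps the leading ';' so the
   redundant compare shows up only as an assembler comment; TEMPL + 1
   skips the ';' and emits the real instruction.  */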
4484 #undef TARGET_ENCODE_SECTION_INFO
4485 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4487 /* If the frame pointer isn't used, we detect it manually. But the
4488 stack pointer doesn't have as flexible addressing as the frame
4489 pointer, so we always assume we have it. */
4491 #undef TARGET_FRAME_POINTER_REQUIRED
4492 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4494 #undef TARGET_HARD_REGNO_NREGS
4495 #define TARGET_HARD_REGNO_NREGS m32c_hard_regno_nregs
4496 #undef TARGET_HARD_REGNO_MODE_OK
4497 #define TARGET_HARD_REGNO_MODE_OK m32c_hard_regno_mode_ok
4498 #undef TARGET_MODES_TIEABLE_P
4499 #define TARGET_MODES_TIEABLE_P m32c_modes_tieable_p
4501 #undef TARGET_CAN_CHANGE_MODE_CLASS
4502 #define TARGET_CAN_CHANGE_MODE_CLASS m32c_can_change_mode_class
4504 #undef TARGET_DOCUMENTATION_NAME
4505 #define TARGET_DOCUMENTATION_NAME "M32C"
4507 /* The Global `targetm' Variable. */
4509 struct gcc_target targetm = TARGET_INITIALIZER;
4511 #include "gt-m32c.h"