/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || get_attr_type (in_insn) != TYPE_FPSTORE
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static struct machine_function * pa_init_machine_status (void);
/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2}* where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
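
/* For illustration only (this note is not from the original sources):
   a command line such as -mfixed-range=fr20-fr23 marks fr20 through
   fr23 as fixed, while -mfixed-range=fr4-fr31 fixes every floating
   point argument and temporary register, which also turns on
   MASK_DISABLE_FPREGS via the check above.  */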
/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mpa_risc_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_mpa_risc_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
	pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
	pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
	pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
	pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
	pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
	pa_cpu = PROCESSOR_7300;
      else
	return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}
void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
#endif
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT intval)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
	  || CONST_OK_FOR_LETTER_P (intval, 'N')
	  || CONST_OK_FOR_LETTER_P (intval, 'K'));
}
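
/* Illustrative examples only (the authoritative ranges live in the 'J',
   'N' and 'K' constraint definitions): 2047 fits in 14 signed bits and
   loads with a single ldo; a value such as 0x12345000, whose low 11 bits
   are zero, loads with ldil alone; and a short contiguous bitstring such
   as 0x01f00000 can be built with one zdepi.  */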
/* Return truth value of whether OP can be used as an operand in a
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT
	      && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5 bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
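
/* Worked example of the test above (illustrative only): for
   x = 0x1f0, five contiguous ones, lsb_mask = 0x10 and
   t = ((0x1f0 >> 4) + 0x10) & ~0xf = 0x20, a power of two, so the
   function returns true.  For x = 0x210, bits 4 and 9 set,
   t = ((0x210 >> 4) + 0x10) & ~0xf = 0x30, not a power of two, so
   no zdepi encoding exists.  */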
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
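
/* Worked example of the contiguity trick shared by both predicates
   (illustrative only): for mask = 0x3f0, mask & -mask = 0x10 and
   0x3f0 + 0x10 = 0x400, a power of two, so the set bits are contiguous
   and depi can handle them.  For mask = 0x505, mask & -mask = 1 and
   0x505 + 1 = 0x506, not a power of two, so the mask is rejected.  */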
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      /* During and after reload, we need to generate a REG_LABEL note and
	 update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, orig,
						REG_NOTES (insn));
	  LABEL_NUSES (orig)++;
	}
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
		      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
				    gen_rtx_HIGH (word_mode, orig)));
      pic_ref
	= gen_const_mem (Pmode,
			 gen_rtx_LO_SUM (Pmode, tmp_reg,
					 gen_rtx_UNSPEC (Pmode,
							 gen_rtvec (1, orig),
							 UNSPEC_DLTIND14R)));

      current_function_uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)


   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
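
/* A worked instance of the transformation above (illustrative only):
   for an integer mode reference memory (X + 0x12345), mask = 0x3fff and
   (0x12345 & 0x3fff) = 0x2345 >= 0x2000, so we round up and take
   Y = 0x14000.  Z = X + 0x14000 becomes the shared base register and
   the reference is rewritten as memory (Z + (0x12345 - 0x14000)),
   i.e. memory (Z - 0x1cbb), whose displacement fits in 14 bits.  */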
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      HOST_WIDE_INT newoffset;
      HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (ptr_reg, offset - newoffset);
    }
  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }
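
  /* For illustration only (this note is not from the original sources):
     an address such as (plus (mult (reg A) (const_int 8)) (reg B)) is
     kept in this scaled form so it can later be emitted as a single
     sh3add instruction; the scale factors accepted by shadd_constant_p
     are 2, 4 and 8.  */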
  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  orig_base = XEXP (XEXP (x, 0), 1);
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  orig_base = XEXP (x, 1);
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  HOST_WIDE_INT val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  HOST_WIDE_INT val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));

    }
  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case SYMBOL_REF:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
      else
	*total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */

static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}
/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      /* This is only safe up to the beginning of life analysis.  */
      gcc_assert (!no_new_pseudos);

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we loose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);
  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
				 XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p ((GET_MODE_SIZE (mode) == 4
				      ? SFmode : DFmode),
				     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& !memory_address_p ((GET_MODE_SIZE (mode) == 4
				       ? SFmode : DFmode),
				      XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
					  ? SFmode : DFmode),
					 XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								        0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM
	       || GET_CODE (operand1) == CONST_INT
	       || (GET_CODE (operand1) == REG
		   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
								        0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0),
							     0),
						       scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     operand0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	      else if (REG_POINTER (operand0)
		       && !REG_POINTER (operand1)
		       && !HARD_REGISTER_P (operand1))
		copy_reg_pointer (operand1, operand0);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  Fortran indirect argument references
		 are ignored.  */
	      if (decl
		  && !(flag_argument_noalias > 1
		       && TREE_CODE (decl) == INDIRECT_REF
		       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  if (TREE_CODE (type) == ARRAY_TYPE)
		    type = get_inner_array_type (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }
  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
	      || function_label_operand (operand1, mode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}


	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }

	  if (flag_pic)
	    {
	      rtx temp;
	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (Pmode);

	      /* (const (plus (symbol) (const_int))) must be forced to
		 memory during/after reload if the const_int will not fit
		 in 14 bits.  */
	      if (GET_CODE (operand1) == CONST
		  && GET_CODE (XEXP (operand1, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
		  && (reload_completed || reload_in_progress)
		  && flag_pic)
		{
		  rtx const_mem = force_const_mem (mode, operand1);
		  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
							mode, temp);
		  operands[1] = replace_equiv_address (const_mem, operands[1]);
		  emit_move_sequence (operands, mode, temp);
		}
	      else
		{
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
		}
	    }
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */
	  else
	    {
	      rtx temp, set;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);

	      if (ishighonly)
		set = gen_rtx_SET (mode, operand0, temp);
	      else
		set = gen_rtx_SET (VOIDmode,
				   operand0,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (VOIDmode,
				      temp,
				      gen_rtx_HIGH (mode, operand1)));
	      emit_insn (set);

	    }
	  return 1;
	}
      else if (pa_tls_referenced_p (operand1))
	{
	  rtx tmp = operand1;
	  rtx addend = NULL;

	  if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	    {
	      addend = XEXP (XEXP (tmp, 0), 1);
	      tmp = XEXP (XEXP (tmp, 0), 0);
	    }

	  gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
	  tmp = legitimize_tls_address (tmp);
	  if (addend)
	    {
	      tmp = gen_rtx_PLUS (mode, tmp, addend);
	      tmp = force_operand (tmp, operands[0]);
	    }
	  operands[1] = tmp;
	}
      else if (GET_CODE (operand1) != CONST_INT
	       || !cint_ok_for_move (INTVAL (operand1)))
	{
	  rtx insn, temp;
	  rtx op1 = operand1;
	  HOST_WIDE_INT value = 0;
	  HOST_WIDE_INT insv = 0;
	  int insert = 0;

	  if (GET_CODE (operand1) == CONST_INT)
	    value = INTVAL (operand1);

	  if (TARGET_64BIT
	      && GET_CODE (operand1) == CONST_INT
	      && HOST_BITS_PER_WIDE_INT > 32
	      && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
	    {
	      HOST_WIDE_INT nval;

	      /* Extract the low order 32 bits of the value and sign extend.
		 If the new value is the same as the original value, we can
		 can use the original value as-is.  If the new value is
		 different, we use it and insert the most-significant 32-bits
		 of the original value into the final result.  */
	      nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
		      ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
	      if (value != nval)
		{
#if HOST_BITS_PER_WIDE_INT > 32
		  insv = value >= 0 ? value >> 32 : ~(~value >> 32);
#endif
		  insert = 1;
		  value = nval;
		  operand1 = GEN_INT (nval);
		}
	    }

	  if (reload_in_progress || reload_completed)
	    temp = scratch_reg ? scratch_reg : operand0;
	  else
	    temp = gen_reg_rtx (mode);

	  /* We don't directly split DImode constants on 32-bit targets
	     because PLUS uses an 11-bit immediate and the insn sequence
	     generated is not as efficient as the one using HIGH/LO_SUM.  */
	  if (GET_CODE (operand1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && !insert)
	    {
	      /* Directly break constant into high and low parts.  This
		 provides better optimization opportunities because various
		 passes recognize constants split with PLUS but not LO_SUM.
		 We use a 14-bit signed low part except when the addition
		 of 0x4000 to the high part might change the sign of the
		 high part.  */
	      HOST_WIDE_INT low = value & 0x3fff;
	      HOST_WIDE_INT high = value & ~ 0x3fff;

	      if (low >= 0x2000)
		{
		  if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
		    high += 0x2000;
		  else
		    high += 0x4000;
		}

	      low = value - high;

	      emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
	      operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	    }
	  else
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, temp,
				      gen_rtx_HIGH (mode, operand1)));
	      operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
	    }

	  insn = emit_move_insn (operands[0], operands[1]);

	  /* Now insert the most significant 32 bits of the value
	     into the register.  When we don't have a second register
	     available, it could take up to nine instructions to load
	     a 64-bit integer constant.  Prior to reload, we force
	     constants that would take more than three instructions
	     to load to the constant pool.  During and after reload,
	     we have to handle all possible values.  */
	  if (insert)
	    {
	      /* Use a HIGH/LO_SUM/INSV sequence if we have a second
		 register and the value to be inserted is outside the
		 range that can be loaded with three depdi instructions.  */
	      if (temp != operand0 && (insv >= 16384 || insv < -16384))
		{
		  operand1 = GEN_INT (insv);

		  emit_insn (gen_rtx_SET (VOIDmode, temp,
					  gen_rtx_HIGH (mode, operand1)));
		  emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
		  emit_insn (gen_insv (operand0, GEN_INT (32),
				       const0_rtx, temp));
		}
	      else
		{
		  int len = 5, pos = 27;

		  /* Insert the bits using the depdi instruction.  */
		  while (pos >= 0)
		    {
		      HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
		      HOST_WIDE_INT sign = v5 < 0;

		      /* Left extend the insertion.  */
		      insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
		      while (pos > 0 && (insv & 1) == sign)
			{
			  insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
			  len += 1;
			  pos -= 1;
			}

		      emit_insn (gen_insv (operand0, GEN_INT (len),
					   GEN_INT (pos), GEN_INT (v5)));

		      len = pos > 0 && pos < 5 ? pos : 5;
		      pos -= len;
		    }
		}
	    }

	  REG_NOTES (insn)
	    = gen_rtx_EXPR_LIST (REG_EQUAL, op1, REG_NOTES (insn));

	  return 1;
	}
    }
  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
/* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
   it will need a link/runtime reloc).  */

int
reloc_needed (tree exp)
{
  int reloc = 0;

  switch (TREE_CODE (exp))
    {
    case ADDR_EXPR:
      return 1;

    case PLUS_EXPR:
    case MINUS_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      reloc |= reloc_needed (TREE_OPERAND (exp, 1));
      break;

    case NOP_EXPR:
    case CONVERT_EXPR:
    case NON_LVALUE_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      break;

    case CONSTRUCTOR:
      {
	tree value;
	unsigned HOST_WIDE_INT ix;

	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
	  if (value)
	    reloc |= reloc_needed (value);
      }
      break;

    case ERROR_MARK:
      break;

    default:
      break;
    }
  return reloc;
}
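
/* For illustration only (this note is not from the original sources):
   a static initializer such as `int *p = &x;' contains an ADDR_EXPR and
   therefore needs a runtime relocation, while `int n = 42 + 1;' folds
   to a plain constant and does not.  */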
/* Does operand (which is a symbolic_operand) live in text space?
   If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
   will be true.  */

int
read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (operand) == CONST)
    operand = XEXP (XEXP (operand, 0), 0);
  if (flag_pic)
    {
      if (GET_CODE (operand) == SYMBOL_REF)
	return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
    }
  else
    {
      if (GET_CODE (operand) == SYMBOL_REF)
	return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
    }
  return 1;
}
/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.  */
const char *
singlemove_string (rtx *operands)
{
  HOST_WIDE_INT intval;

  if (GET_CODE (operands[0]) == MEM)
    return "stw %r1,%0";
  if (GET_CODE (operands[1]) == MEM)
    return "ldw %1,%0";
  if (GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      long i;
      REAL_VALUE_TYPE d;

      gcc_assert (GET_MODE (operands[1]) == SFmode);

      /* Translate the CONST_DOUBLE to a CONST_INT with the same target
	 bit pattern.  */
      REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
      REAL_VALUE_TO_TARGET_SINGLE (d, i);
      operands[1] = GEN_INT (i);

      /* Fall through to CONST_INT case.  */
    }
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      intval = INTVAL (operands[1]);

      if (VAL_14_BITS_P (intval))
	return "ldi %1,%0";
      else if ((intval & 0x7ff) == 0)
	return "ldil L'%1,%0";
      else if (zdepi_cint_p (intval))
	return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
      else
	return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
    }
  return "copy %1,%0";
}
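/* An illustrative sketch of how the template choice above falls out for
   sample 32-bit constants.  The range test stands in for VAL_14_BITS_P,
   the zdepi_cint_p case is omitted for brevity, and classify() is made
   up for the example.  Guarded out of the build.  */
#if 0
#include <stdio.h>

static const char *
classify (long v)
{
  if (v >= -8192 && v <= 8191)
    return "ldi";			/* fits a 14-bit signed immediate */
  if ((v & 0x7ff) == 0)
    return "ldil";			/* low 11 bits zero: one insn */
  return "ldil + ldo";			/* general two-insn sequence */
}

int
main (void)
{
  printf ("%ld -> %s\n", 42L, classify (42L));
  printf ("%#lx -> %s\n", 0x12345800L, classify (0x12345800L));
  printf ("%#lx -> %s\n", 0x12345678L, classify (0x12345678L));
  return 0;
}
#endif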
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the zdepi
   instructions.  Store the immediate value to insert in OP[0].  */
static void
compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < 32; lsb++)
    {
      if ((imm & 1) != 0)
	break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= 28) ? 4 : 32 - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < 32; len++)
	{
	  if ((imm & (1 << len)) == 0)
	    break;
	}

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 31 - lsb - len + 1;
  op[2] = len;
}
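/* An illustrative, standalone copy of the operand computation above,
   run on one sample immediate (zdepwi_demo is made up for the example).
   For imm = 0x00f00000, a contiguous bitstring, it finds lsb = 20 and a
   4-bit field, so a single depwi,z can materialize the constant.
   Guarded out of the build.  */
#if 0
#include <stdio.h>

static void
zdepwi_demo (unsigned long imm)
{
  unsigned long orig = imm;
  int lsb, len;

  /* Find the least significant set bit.  */
  for (lsb = 0; lsb < 32; lsb++)
    {
      if ((imm & 1) != 0)
	break;
      imm >>= 1;
    }

  /* Same variant choice as above, keyed on the 5-bit field's sign.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= 28) ? 4 : 32 - lsb;
  else
    {
      for (len = 5; len < 32; len++)
	if ((imm & (1UL << len)) == 0)
	  break;
      imm = (imm & 0xf) - 0x10;		/* sign extend as 5 bits */
    }

  printf ("imm=%#lx -> insert %ld at pos %d, len %d\n",
	  orig, (long) imm, 31 - lsb - len + 1, len);
}

int
main (void)
{
  zdepwi_demo (0x00f00000UL);
  return 0;
}
#endif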
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the depdi,z
   instructions.  Store the immediate value to insert in OP[0].  */
void
compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  HOST_WIDE_INT lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
    {
      if ((imm & 1) != 0)
	break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
	   ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
	{
	  if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
	    break;
	}

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 63 - lsb - len + 1;
  op[2] = len;
}
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  */

const char *
output_move_double (rtx *operands)
{
  enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
  rtx latehalf[2];
  rtx addreg0 = 0, addreg1 = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not
     supposed to allow to happen.  */
  gcc_assert (optype0 == REGOP || optype1 == REGOP);

  /* Handle copies between general and floating registers.  */

  if (optype0 == REGOP && optype1 == REGOP
      && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
    {
      if (FP_REG_P (operands[0]))
	{
	  output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
	  output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
	  return "{fldds|fldd} -16(%%sp),%0";
	}
      else
	{
	  output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
	  output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
	  return "{ldws|ldw} -12(%%sp),%R0";
	}
    }

  /* Handle auto decrementing and incrementing loads and stores
     specifically, since the structure of the function doesn't work
     for them without major modification.  Do it better when we learn
     this port about the general inc/dec addressing of PA.
     (This was written by tege.  Chide him if it doesn't work.)  */

  if (optype0 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
	 doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[0], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

	  operands[0] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[1]) == REG
		      && GET_CODE (operands[0]) == REG);

	  gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));

	  /* No overlap between high target register and address
	     register.  (We do this in a non-obvious way to
	     save a register file writeback)  */
	  if (GET_CODE (addr) == POST_INC)
	    return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
	  return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
	}
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

	  operands[0] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[1]) == REG
		      && GET_CODE (operands[0]) == REG);

	  gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
	  /* No overlap between high target register and address
	     register.  (We do this in a non-obvious way to save a
	     register file writeback)  */
	  if (GET_CODE (addr) == PRE_INC)
	    return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
	  return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
	}
    }
  if (optype1 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
	 doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[1], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  operands[1] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[0]) == REG
		      && GET_CODE (operands[1]) == REG);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      /* No overlap between high target register and address
		 register.  (We do this in a non-obvious way to
		 save a register file writeback)  */
	      if (GET_CODE (addr) == POST_INC)
		return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
	      return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
	    }
	  else
	    {
	      /* This is an undefined situation.  We should load into the
		 address register *and* update that register.  Probably
		 we don't need to handle this at all.  */
	      if (GET_CODE (addr) == POST_INC)
		return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
	      return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
	    }
	}
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  operands[1] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[0]) == REG
		      && GET_CODE (operands[1]) == REG);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      /* No overlap between high target register and address
		 register.  (We do this in a non-obvious way to
		 save a register file writeback)  */
	      if (GET_CODE (addr) == PRE_INC)
		return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
	      return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
	    }
	  else
	    {
	      /* This is an undefined situation.  We should load into the
		 address register *and* update that register.  Probably
		 we don't need to handle this at all.  */
	      if (GET_CODE (addr) == PRE_INC)
		return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
	      return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
	    }
	}
      else if (GET_CODE (addr) == PLUS
	       && GET_CODE (XEXP (addr, 0)) == MULT)
	{
	  rtx xoperands[4];
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      xoperands[0] = high_reg;
	      xoperands[1] = XEXP (addr, 1);
	      xoperands[2] = XEXP (XEXP (addr, 0), 0);
	      xoperands[3] = XEXP (XEXP (addr, 0), 1);
	      output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
			       xoperands);
	      return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
	    }
	  else
	    {
	      xoperands[0] = high_reg;
	      xoperands[1] = XEXP (addr, 1);
	      xoperands[2] = XEXP (XEXP (addr, 0), 0);
	      xoperands[3] = XEXP (XEXP (addr, 0), 1);
	      output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
			       xoperands);
	      return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
	    }
	}
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (optype0 == REGOP)
    latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (optype0 == OFFSOP)
    latehalf[0] = adjust_address (operands[0], SImode, 4);
  else
    latehalf[0] = operands[0];

  if (optype1 == REGOP)
    latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
  else if (optype1 == OFFSOP)
    latehalf[1] = adjust_address (operands[1], SImode, 4);
  else if (optype1 == CNSTOP)
    split_double (operands[1], &operands[1], &latehalf[1]);
  else
    latehalf[1] = operands[1];

  /* If the first move would clobber the source of the second one,
     do them in the other order.

     This can happen in two cases:

	mem -> register where the first half of the destination register
	is the same register used in the memory's address.  Reload
	can create such insns.

	mem in this case will be either register indirect or register
	indirect plus a valid offset.

	register -> register move where REGNO(dst) == REGNO(src + 1)
	someone (Tim/Tege?) claimed this can happen for parameter loads.

     Handle mem -> register case first.  */
  if (optype0 == REGOP
      && (optype1 == MEMOP || optype1 == OFFSOP)
      && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
			    operands[1], 0))
    {
      /* Do the late half first.  */
      if (addreg1)
	output_asm_insn ("ldo 4(%0),%0", &addreg1);
      output_asm_insn (singlemove_string (latehalf), latehalf);

      /* Then clobber.  */
      if (addreg1)
	output_asm_insn ("ldo -4(%0),%0", &addreg1);
      return singlemove_string (operands);
    }

  /* Now handle register -> register case.  */
  if (optype0 == REGOP && optype1 == REGOP
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    {
      output_asm_insn (singlemove_string (latehalf), latehalf);
      return singlemove_string (operands);
    }

  /* Normal case: do the two words, low-numbered first.  */

  output_asm_insn (singlemove_string (operands), operands);

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    output_asm_insn ("ldo 4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo 4(%0),%0", &addreg1);

  /* Do that word.  */
  output_asm_insn (singlemove_string (latehalf), latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    output_asm_insn ("ldo -4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo -4(%0),%0", &addreg1);

  return "";
}
const char *
output_fp_move_double (rtx *operands)
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1])
	  || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
	output_asm_insn ("fcpy,dbl %f1,%0", operands);
      else
	output_asm_insn ("fldd%F1 %1,%0", operands);
    }
  else if (FP_REG_P (operands[1]))
    {
      output_asm_insn ("fstd%F0 %1,%0", operands);
    }
  else
    {
      rtx xoperands[2];

      gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));

      /* This is a pain.  You have to be prepared to deal with an
	 arbitrary address here including pre/post increment/decrement.

	 so avoid this in the MD.  */
      gcc_assert (GET_CODE (operands[0]) == REG);

      xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      xoperands[0] = operands[0];
      output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
    }
  return "";
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

static rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG);
  return addr;
}
/* Emit code to perform a block move.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is the source pointer as a REG, clobbered.
   OPERANDS[2] is a register for temporary storage.
   OPERANDS[3] is a register for temporary storage.
   OPERANDS[4] is the size as a CONST_INT
   OPERANDS[5] is the alignment safe to use, as a CONST_INT.
   OPERANDS[6] is another temporary register.  */

const char *
output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT align = INTVAL (operands[5]);
  unsigned HOST_WIDE_INT n_bytes = INTVAL (operands[4]);

  /* We can't move more than a word at a time because the PA
     has no longer integer move insns.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
      case 8:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 16);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("ldd,ma 8(%1),%3", operands);
	output_asm_insn ("ldd,ma 8(%1),%6", operands);
	output_asm_insn ("std,ma %3,8(%0)", operands);
	output_asm_insn ("addib,>= -16,%2,.-12", operands);
	output_asm_insn ("std,ma %6,8(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to copy!  */
	if (n_bytes % 16 != 0)
	  {
	    operands[4] = GEN_INT (n_bytes % 8);
	    if (n_bytes % 16 >= 8)
	      output_asm_insn ("ldd,ma 8(%1),%3", operands);
	    if (n_bytes % 8 != 0)
	      output_asm_insn ("ldd 0(%1),%6", operands);
	    if (n_bytes % 16 >= 8)
	      output_asm_insn ("std,ma %3,8(%0)", operands);
	    if (n_bytes % 8 != 0)
	      output_asm_insn ("stdby,e %6,%4(%0)", operands);
	  }
	return "";

      case 4:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 8);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
	output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
	output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
	output_asm_insn ("addib,>= -8,%2,.-12", operands);
	output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to copy!  */
	if (n_bytes % 8 != 0)
	  {
	    operands[4] = GEN_INT (n_bytes % 4);
	    if (n_bytes % 8 >= 4)
	      output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
	    if (n_bytes % 4 != 0)
	      output_asm_insn ("ldw 0(%1),%6", operands);
	    if (n_bytes % 8 >= 4)
	      output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
	    if (n_bytes % 4 != 0)
	      output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
	  }
	return "";

      case 2:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 4);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
	output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
	output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
	output_asm_insn ("addib,>= -4,%2,.-12", operands);
	output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 4 != 0)
	  {
	    if (n_bytes % 4 >= 2)
	      output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
	    if (n_bytes % 2 != 0)
	      output_asm_insn ("ldb 0(%1),%6", operands);
	    if (n_bytes % 4 >= 2)
	      output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
	    if (n_bytes % 2 != 0)
	      output_asm_insn ("stb %6,0(%0)", operands);
	  }
	return "";

      case 1:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 2);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
	output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
	output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
	output_asm_insn ("addib,>= -2,%2,.-12", operands);
	output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 2 != 0)
	  {
	    output_asm_insn ("ldb 0(%1),%3", operands);
	    output_asm_insn ("stb %3,0(%0)", operands);
	  }
	return "";

      default:
	gcc_unreachable ();
    }
}
/* Count the number of insns necessary to handle this block move.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_movmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned HOST_WIDE_INT align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
  unsigned HOST_WIDE_INT n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
  unsigned int n_insns = 0;

  /* We can't move more than four bytes at a time because the PA
     has no longer integer move insns.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic copying loop.  */
  n_insns = 6;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
	n_insns += 2;

      if ((n_bytes % align) != 0)
	n_insns += 2;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
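/* An illustrative, standalone copy of the length formula above
   (movmem_len is made up for the example): the copy loop costs 6 insns,
   each residual transfer adds a load/store pair, and the total is
   scaled by 4 bytes per insn.  Guarded out of the build.  */
#if 0
#include <stdio.h>

static unsigned int
movmem_len (unsigned long n_bytes, unsigned long align)
{
  unsigned int n_insns = 6;	/* the basic copying loop */

  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
	n_insns += 2;
      if ((n_bytes % align) != 0)
	n_insns += 2;
    }
  return n_insns * 4;		/* lengths are in bytes; insns are 4 bytes */
}

int
main (void)
{
  printf ("(16, 4) -> %u bytes\n", movmem_len (16, 4));	/* 24 */
  printf ("(23, 4) -> %u bytes\n", movmem_len (23, 4));	/* 40 */
  return 0;
}
#endif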
/* Emit code to perform a block clear.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is a register for temporary storage.
   OPERANDS[2] is the size as a CONST_INT
   OPERANDS[3] is the alignment safe to use, as a CONST_INT.  */

const char *
output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT align = INTVAL (operands[3]);
  unsigned HOST_WIDE_INT n_bytes = INTVAL (operands[2]);

  /* We can't clear more than a word at a time because the PA
     has no longer integer move insns.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
      case 8:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 16);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("std,ma %%r0,8(%0)", operands);
	output_asm_insn ("addib,>= -16,%1,.-4", operands);
	output_asm_insn ("std,ma %%r0,8(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to copy!  */
	if (n_bytes % 16 != 0)
	  {
	    operands[2] = GEN_INT (n_bytes % 8);
	    if (n_bytes % 16 >= 8)
	      output_asm_insn ("std,ma %%r0,8(%0)", operands);
	    if (n_bytes % 8 != 0)
	      output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
	  }
	return "";

      case 4:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 8);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
	output_asm_insn ("addib,>= -8,%1,.-4", operands);
	output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to copy!  */
	if (n_bytes % 8 != 0)
	  {
	    operands[2] = GEN_INT (n_bytes % 4);
	    if (n_bytes % 8 >= 4)
	      output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
	    if (n_bytes % 4 != 0)
	      output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
	  }
	return "";

      case 2:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 4);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
	output_asm_insn ("addib,>= -4,%1,.-4", operands);
	output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 4 != 0)
	  {
	    if (n_bytes % 4 >= 2)
	      output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
	    if (n_bytes % 2 != 0)
	      output_asm_insn ("stb %%r0,0(%0)", operands);
	  }
	return "";

      case 1:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 2);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
	output_asm_insn ("addib,>= -2,%1,.-4", operands);
	output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 2 != 0)
	  output_asm_insn ("stb %%r0,0(%0)", operands);

	return "";

      default:
	gcc_unreachable ();
    }
}
/* Count the number of insns necessary to handle this block clear.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_clrmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned HOST_WIDE_INT align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
  unsigned HOST_WIDE_INT n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
  unsigned int n_insns = 0;

  /* We can't clear more than a word at a time because the PA
     has no longer integer move insns.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic loop.  */
  n_insns = 4;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
	n_insns++;

      if ((n_bytes % align) != 0)
	n_insns++;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
output_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < 32; ls0++)
	if ((mask & (1 << ls0)) == 0)
	  break;

      for (ls1 = ls0; ls1 < 32; ls1++)
	if ((mask & (1 << ls1)) != 0)
	  break;

      for (ms0 = ls1; ms0 < 32; ms0++)
	if ((mask & (1 << ms0)) == 0)
	  break;

      gcc_assert (ms0 == 32);

      if (ls1 == 32)
	{
	  len = ls0;

	  gcc_assert (len);

	  operands[2] = GEN_INT (len);
	  return "{extru|extrw,u} %1,31,%2,%0";
	}
      else
	{
	  /* We could use this `depi' for the case above as well, but `depi'
	     requires one more register file access than an `extru'.  */

	  p = 31 - ls0;
	  len = ls1 - ls0;

	  operands[2] = GEN_INT (p);
	  operands[3] = GEN_INT (len);
	  return "{depi|depwi} 0,%2,%3,%0";
	}
    }
  else
    return "and %1,%2,%0";
}
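/* An illustrative, standalone copy of the bit-range scan above (scan()
   is made up for the example).  For a mask whose clear bits form one
   contiguous low or mid field, the scan classifies it as an extru or a
   depi 0; a mask like 0x00ffff00 hits ms0 != 32 and is left to the
   generic "and", which the operand predicates arrange in the real
   compiler.  Guarded out of the build.  */
#if 0
#include <stdio.h>

static void
scan (unsigned long mask)
{
  int ls0, ls1, ms0;

  for (ls0 = 0; ls0 < 32; ls0++)
    if ((mask & (1UL << ls0)) == 0)
      break;
  for (ls1 = ls0; ls1 < 32; ls1++)
    if ((mask & (1UL << ls1)) != 0)
      break;
  for (ms0 = ls1; ms0 < 32; ms0++)
    if ((mask & (1UL << ms0)) == 0)
      break;

  printf ("mask=%#lx ls0=%d ls1=%d ms0=%d -> %s\n", mask, ls0, ls1, ms0,
	  ms0 != 32 ? "generic and" : (ls1 == 32 ? "extru" : "depi 0"));
}

int
main (void)
{
  scan (0x000000ffUL);
  scan (0xffffff00UL);
  scan (0x00ffff00UL);
  return 0;
}
#endif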
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
output_64bit_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
	  break;

      for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
	  break;

      for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
	  break;

      gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);

      if (ls1 == HOST_BITS_PER_WIDE_INT)
	{
	  len = ls0;

	  gcc_assert (len);

	  operands[2] = GEN_INT (len);
	  return "extrd,u %1,63,%2,%0";
	}
      else
	{
	  /* We could use this `depi' for the case above as well, but `depi'
	     requires one more register file access than an `extru'.  */

	  p = 63 - ls0;
	  len = ls1 - ls0;

	  operands[2] = GEN_INT (p);
	  operands[3] = GEN_INT (len);
	  return "depdi 0,%2,%3,%0";
	}
    }
  else
    return "and %1,%2,%0";
}
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
output_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < 32; bs0++)
    if ((mask & (1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < 32; bs1++)
    if ((mask & (1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 31 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "{depi|depwi} -1,%2,%3,%0";
}
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
output_64bit_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
	      || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 63 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "depdi -1,%2,%3,%0";
}
/* Target hook for assembling integer objects.  This code handles
   aligned SI and DI integers specially since function references
   must be preceded by P%.  */

static bool
pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == UNITS_PER_WORD
      && aligned_p
      && function_label_operand (x, VOIDmode))
    {
      fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
      output_addr_const (asm_out_file, x);
      fputc ('\n', asm_out_file);
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}
/* Output an ascii string.  */
void
output_ascii (FILE *file, const char *p, int size)
{
  int i;
  int chars_output;
  unsigned char partial_output[16];	/* Max space 4 chars can occupy.  */

  /* The HP assembler can only take strings of 256 characters at one
     time.  This is a limitation on input line length, *not* the
     length of the string.  Sigh.  Even worse, it seems that the
     restriction is in number of input characters (see \xnn &
     \whatever).  So we have to do this very carefully.  */

  fputs ("\t.STRING \"", file);

  chars_output = 0;
  for (i = 0; i < size; i += 4)
    {
      int co = 0;
      int io = 0;
      for (io = 0, co = 0; io < MIN (4, size - i); io++)
	{
	  register unsigned int c = (unsigned char) p[i + io];

	  if (c == '\"' || c == '\\')
	    partial_output[co++] = '\\';
	  if (c >= ' ' && c < 0177)
	    partial_output[co++] = c;
	  else
	    {
	      unsigned int hexd;
	      partial_output[co++] = '\\';
	      partial_output[co++] = 'x';
	      hexd = c / 16 - 0 + '0';
	      if (hexd > '9')
		hexd -= '9' - 'a' + 1;
	      partial_output[co++] = hexd;
	      hexd = c % 16 - 0 + '0';
	      if (hexd > '9')
		hexd -= '9' - 'a' + 1;
	      partial_output[co++] = hexd;
	    }
	}
      if (chars_output + co > 243)
	{
	  fputs ("\"\n\t.STRING \"", file);
	  chars_output = 0;
	}
      fwrite (partial_output, 1, (size_t) co, file);
      chars_output += co;
      co = 0;
    }
  fputs ("\"\n", file);
}
/* Try to rewrite floating point comparisons & branches to avoid
   useless add,tr insns.

   CHECK_NOTES is nonzero if we should examine REG_DEAD notes
   to see if FPCC is dead.  CHECK_NOTES is nonzero for the
   first attempt to remove useless add,tr insns.  It is zero
   for the second pass as reorg sometimes leaves bogus REG_DEAD
   notes lying around.

   When CHECK_NOTES is zero we can only eliminate add,tr insns
   when there's a 1:1 correspondence between fcmp and ftest/fbranch
   instructions.  */
static void
remove_useless_addtr_insns (int check_notes)
{
  rtx insn;
  static int pass = 0;

  /* This is fairly cheap, so always run it when optimizing.  */
  if (optimize > 0)
    {
      int fcmp_count = 0;
      int fbranch_count = 0;

      /* Walk all the insns in this function looking for fcmp & fbranch
	 instructions.  Keep track of how many of each we find.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
	{
	  rtx tmp;

	  /* Ignore anything that isn't an INSN or a JUMP_INSN.  */
	  if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
	    continue;

	  tmp = PATTERN (insn);

	  /* It must be a set.  */
	  if (GET_CODE (tmp) != SET)
	    continue;

	  /* If the destination is CCFP, then we've found an fcmp insn.  */
	  tmp = SET_DEST (tmp);
	  if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
	    {
	      fcmp_count++;
	      continue;
	    }

	  tmp = PATTERN (insn);
	  /* If this is an fbranch instruction, bump the fbranch counter.  */
	  if (GET_CODE (tmp) == SET
	      && SET_DEST (tmp) == pc_rtx
	      && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
	      && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
	      && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
	      && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
	    {
	      fbranch_count++;
	      continue;
	    }
	}

      /* Find all floating point compare + branch insns.  If possible,
	 reverse the comparison & the branch to avoid add,tr insns.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
	{
	  rtx tmp, next;

	  /* Ignore anything that isn't an INSN.  */
	  if (GET_CODE (insn) != INSN)
	    continue;

	  tmp = PATTERN (insn);

	  /* It must be a set.  */
	  if (GET_CODE (tmp) != SET)
	    continue;

	  /* The destination must be CCFP, which is register zero.  */
	  tmp = SET_DEST (tmp);
	  if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
	    continue;

	  /* INSN should be a set of CCFP.

	     See if the result of this insn is used in a reversed FP
	     conditional branch.  If so, reverse our condition and
	     the branch.  Doing so avoids useless add,tr insns.  */
	  next = next_insn (insn);
	  while (next)
	    {
	      /* Jumps, calls and labels stop our search.  */
	      if (GET_CODE (next) == JUMP_INSN
		  || GET_CODE (next) == CALL_INSN
		  || GET_CODE (next) == CODE_LABEL)
		break;

	      /* As does another fcmp insn.  */
	      if (GET_CODE (next) == INSN
		  && GET_CODE (PATTERN (next)) == SET
		  && GET_CODE (SET_DEST (PATTERN (next))) == REG
		  && REGNO (SET_DEST (PATTERN (next))) == 0)
		break;

	      next = next_insn (next);
	    }

	  /* Is NEXT_INSN a branch?  */
	  if (next
	      && GET_CODE (next) == JUMP_INSN)
	    {
	      rtx pattern = PATTERN (next);

	      /* If it a reversed fp conditional branch (e.g. uses add,tr)
		 and CCFP dies, then reverse our conditional and the branch
		 to avoid the add,tr.  */
	      if (GET_CODE (pattern) == SET
		  && SET_DEST (pattern) == pc_rtx
		  && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
		  && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
		  && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
		  && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
		  && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
		  && (fcmp_count == fbranch_count
		      || (check_notes
			  && find_regno_note (next, REG_DEAD, 0))))
		{
		  /* Reverse the branch.  */
		  tmp = XEXP (SET_SRC (pattern), 1);
		  XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
		  XEXP (SET_SRC (pattern), 2) = tmp;
		  INSN_CODE (next) = -1;

		  /* Reverse our condition.  */
		  tmp = PATTERN (insn);
		  PUT_CODE (XEXP (tmp, 1),
			    (reverse_condition_maybe_unordered
			     (GET_CODE (XEXP (tmp, 1)))));
		}
	    }
	}

      pass = !pass;
    }
}
/* You may have trouble believing this, but this is the 32 bit HP-PA
   stack layout.  Wow.

   Offset		Contents

   Variable arguments	(optional; any number may be allocated)

   SP-(4*(N+9))		arg word N
	:		    :
      SP-56		arg word 5
      SP-52		arg word 4

   Fixed arguments	(must be allocated; may remain unused)

      SP-48		arg word 3
      SP-44		arg word 2
      SP-40		arg word 1
      SP-36		arg word 0
      SP-32		External Data Pointer (DP)
      SP-28		External sr4
      SP-24		External/stub RP (RP')
      SP-20		Current RP
      SP-16		Static Link
      SP-12		Clean up
      SP-8		Calling Stub RP (RP'')
      SP-4		Previous SP
      SP-0		Stack Pointer (points to next available address)
*/

/* This function saves registers as follows.  Registers marked with ' are
   this function's registers (as opposed to the previous function's).
   If a frame_pointer isn't needed, r4 is saved as a general register;
   the space for the frame pointer is still allocated, though, to keep
   things simple.

   Top of Frame

       SP (FP')		Previous FP
       SP + 4		Alignment filler (sigh)
       SP + 8		Space for locals reserved here.
       .
       .
       .
       SP + n		All call saved register used.
       .
       .
       .
       SP + o		All call saved fp registers used.
       .
       .
       .
       SP + p (SP')	points to next available address.
*/

/* Global variables set by output_function_prologue().  */
/* Size of frame.  Need to know this to emit return insns from
   leaf procedures.  */
static HOST_WIDE_INT actual_fsize, local_fsize;
static int save_fregs;
static int gr_saved, fr_saved;
/* Emit RTL to store REG at the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.

   Note in DISP > 8k case, we will leave the high part of the address
   in %r1.  There is code in expand_hppa_{prologue,epilogue} that knows this.  */

static void
store_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx insn, dest, src, basereg;

  src = gen_rtx_REG (word_mode, reg);
  basereg = gen_rtx_REG (Pmode, base);
  if (VAL_14_BITS_P (disp))
    {
      dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
      insn = emit_move_insn (dest, src);
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
	{
	  REG_NOTES (insn)
	    = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
				 gen_rtx_SET (VOIDmode, tmpreg,
					      gen_rtx_PLUS (Pmode, basereg,
							    delta)),
				 REG_NOTES (insn));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      dest = gen_rtx_MEM (word_mode, tmpreg);
      insn = emit_move_insn (dest, src);
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
      insn = emit_move_insn (dest, src);
      if (DO_FRAME_NOTES)
	{
	  REG_NOTES (insn)
	    = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
				 gen_rtx_SET (VOIDmode,
					      gen_rtx_MEM (word_mode,
							   gen_rtx_PLUS (word_mode, basereg,
									 delta)),
					      src),
				 REG_NOTES (insn));
	}
    }

  if (DO_FRAME_NOTES)
    RTX_FRAME_RELATED_P (insn) = 1;
}
/* Emit RTL to store REG at the memory location specified by BASE and then
   add MOD to BASE.  MOD must be <= 8k.  */

static void
store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
{
  rtx insn, basereg, srcreg, delta;

  gcc_assert (VAL_14_BITS_P (mod));

  basereg = gen_rtx_REG (Pmode, base);
  srcreg = gen_rtx_REG (word_mode, reg);
  delta = GEN_INT (mod);

  insn = emit_insn (gen_post_store (basereg, srcreg, delta));
  if (DO_FRAME_NOTES)
    {
      RTX_FRAME_RELATED_P (insn) = 1;

      /* RTX_FRAME_RELATED_P must be set on each frame related set
	 in a parallel with more than one element.  */
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
    }
}
/* Emit RTL to set REG to the value specified by BASE+DISP.  Handle case
   where DISP > 8k by using the add_high_const patterns.  NOTE indicates
   whether to add a frame note or not.

   In the DISP > 8k case, we leave the high part of the address in %r1.
   There is code in expand_hppa_{prologue,epilogue} that knows about this.  */

static void
set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
{
  rtx insn;

  if (VAL_14_BITS_P (disp))
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     plus_constant (gen_rtx_REG (Pmode, base), disp));
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
	REG_NOTES (insn)
	  = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			       gen_rtx_SET (VOIDmode, tmpreg,
					    gen_rtx_PLUS (Pmode, basereg,
							  delta)),
			       REG_NOTES (insn));
    }
  else
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg,
		      gen_rtx_PLUS (Pmode, basereg,
				    gen_rtx_HIGH (Pmode, delta)));
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  if (DO_FRAME_NOTES && note)
    RTX_FRAME_RELATED_P (insn) = 1;
}
HOST_WIDE_INT
compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
{
  int freg_saved = 0;
  int i, j;

  /* The code in hppa_expand_prologue and hppa_expand_epilogue must
     be consistent with the rounding and size calculation done here.
     Change them at the same time.  */

  /* We do our own stack alignment.  First, round the size of the
     stack locals up to a word boundary.  */
  size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);

  /* Space for previous frame pointer + filler.  If any frame is
     allocated, we need to add in the STARTING_FRAME_OFFSET.  We
     waste some space here for the sake of HP compatibility.  The
     first slot is only used when the frame pointer is needed.  */
  if (size || frame_pointer_needed)
    size += STARTING_FRAME_OFFSET;

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (DO_FRAME_NOTES && current_function_calls_eh_return)
    {
      unsigned int i;

      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;
      size += i * UNITS_PER_WORD;
    }

  /* Account for space used by the callee general register saves.  */
  for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
    if (regs_ever_live[i])
      size += UNITS_PER_WORD;

  /* Account for space used by the callee floating point register saves.  */
  for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
    if (regs_ever_live[i]
	|| (!TARGET_64BIT && regs_ever_live[i + 1]))
      {
	freg_saved = 1;

	/* We always save both halves of the FP register, so always
	   increment the frame size by 8 bytes.  */
	size += 8;
      }

  /* If any of the floating registers are saved, account for the
     alignment needed for the floating point register save block.  */
  if (freg_saved)
    {
      size = (size + 7) & ~7;
      if (fregs_live)
	*fregs_live = 1;
    }

  /* The various ABIs include space for the outgoing parameters in the
     size of the current function's stack frame.  We don't need to align
     for the outgoing arguments as their alignment is set by the final
     rounding for the frame as a whole.  */
  size += current_function_outgoing_args_size;

  /* Allocate space for the fixed frame marker.  This space must be
     allocated for any function that makes calls or allocates
     stack space.  */
  if (!current_function_is_leaf || size)
    size += TARGET_64BIT ? 48 : 32;

  /* Finally, round to the preferred stack boundary.  */
  return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
	  & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
}
/* Generate the assembly code for function entry.  FILE is a stdio
   stream to output the code to.  SIZE is an int: how many units of
   temporary storage to allocate.

   Refer to the array `regs_ever_live' to determine which registers to
   save; `regs_ever_live[I]' is nonzero if register number I is ever
   used in the function.  This function is responsible for knowing
   which registers should not be saved even if used.  */

/* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
   of memory.  If any fpu reg is used in the function, we allocate
   such a block here, at the bottom of the frame, just in case it's needed.

   If this function is a leaf procedure, then we may choose not
   to do a "save" insn.  The decision about whether or not
   to do this is made in regclass.c.  */

static void
pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* The function's label and associated .PROC must never be
     separated and must be output *after* any profiling declarations
     to avoid changing spaces/subspaces within a procedure.  */
  ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
  fputs ("\t.PROC\n", file);

  /* hppa_expand_prologue does the dirty work now.  We just need
     to output the assembler directives which denote the start
     of a function.  */
  fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
  if (regs_ever_live[2])
    fputs (",CALLS,SAVE_RP", file);
  else
    fputs (",NO_CALLS", file);

  /* The SAVE_SP flag is used to indicate that register %r3 is stored
     at the beginning of the frame and that it is used as the frame
     pointer for the frame.  We do this because our current frame
     layout doesn't conform to that specified in the HP runtime
     documentation and we need a way to indicate to programs such as
     GDB where %r3 is saved.  The SAVE_SP flag was chosen because it
     isn't used by HP compilers but is supported by the assembler.
     However, SAVE_SP is supposed to indicate that the previous stack
     pointer has been saved in the frame marker.  */
  if (frame_pointer_needed)
    fputs (",SAVE_SP", file);

  /* Pass on information about the number of callee register saves
     performed in the prologue.

     The compiler is supposed to pass the highest register number
     saved, the assembler then has to adjust that number before
     entering it into the unwind descriptor (to account for any
     caller saved registers with lower register numbers than the
     first callee saved register).  */
  if (gr_saved)
    fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);

  if (fr_saved)
    fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);

  fputs ("\n\t.ENTRY\n", file);

  remove_useless_addtr_insns (0);
}
void
hppa_expand_prologue (void)
{
  int merge_sp_adjust_with_store = 0;
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT offset;
  int i;
  rtx insn, tmpreg;

  gr_saved = 0;
  fr_saved = 0;
  save_fregs = 0;

  /* Compute total size for frame pointer, filler, locals and rounding to
     the next word boundary.  Similar code appears in compute_frame_size
     and must be changed in tandem with this code.  */
  local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
  if (local_fsize || frame_pointer_needed)
    local_fsize += STARTING_FRAME_OFFSET;

  actual_fsize = compute_frame_size (size, &save_fregs);

  /* Compute a few things we will use often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Save RP first.  The calling conventions manual states RP will
     always be stored into the caller's frame at sp - 20 or sp - 16
     depending on which ABI is in use.  */
  if (regs_ever_live[2] || current_function_calls_eh_return)
    store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);

  /* Allocate the local frame and set up the frame pointer if needed.  */
  if (actual_fsize != 0)
    {
      if (frame_pointer_needed)
	{
	  /* Copy the old frame pointer temporarily into %r1.  Set up the
	     new stack pointer, then store away the saved old frame pointer
	     into the stack at sp and at the same time update the stack
	     pointer by actual_fsize bytes.  Two versions, first
	     handles small (<8k) frames.  The second handles large (>=8k)
	     frames.  */
	  insn = emit_move_insn (tmpreg, frame_pointer_rtx);
	  if (DO_FRAME_NOTES)
	    RTX_FRAME_RELATED_P (insn) = 1;

	  insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
	  if (DO_FRAME_NOTES)
	    RTX_FRAME_RELATED_P (insn) = 1;

	  if (VAL_14_BITS_P (actual_fsize))
	    store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
	  else
	    {
	      /* It is incorrect to store the saved frame pointer at *sp,
		 then increment sp (writes beyond the current stack boundary).

		 So instead use stwm to store at *sp and post-increment the
		 stack pointer as an atomic operation.  Then increment sp to
		 finish allocating the new frame.  */
	      HOST_WIDE_INT adjust1 = 8192 - 64;
	      HOST_WIDE_INT adjust2 = actual_fsize - adjust1;

	      store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
	      set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
			      adjust2, 1);
	    }

	  /* We set SAVE_SP in frames that need a frame pointer.  Thus,
	     we need to store the previous stack pointer (frame pointer)
	     into the frame marker on targets that use the HP unwind
	     library.  This allows the HP unwind library to be used to
	     unwind GCC frames.  However, we are not fully compatible
	     with the HP library because our frame layout differs from
	     that specified in the HP runtime specification.

	     We don't want a frame note on this instruction as the frame
	     marker moves during dynamic stack allocation.

	     This instruction also serves as a blockage to prevent
	     register spills from being scheduled before the stack
	     pointer is raised.  This is necessary as we store
	     registers using the frame pointer as a base register,
	     and the frame pointer is set before sp is raised.  */
	  if (TARGET_HPUX_UNWIND_LIBRARY)
	    {
	      rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
				       GEN_INT (TARGET_64BIT ? -8 : -4));

	      emit_move_insn (gen_rtx_MEM (word_mode, addr),
			      frame_pointer_rtx);
	    }
	  else
	    emit_insn (gen_blockage ());
	}
      /* no frame pointer needed.  */
      else
	{
	  /* In some cases we can perform the first callee register save
	     and allocating the stack frame at the same time.   If so, just
	     make a note of it and defer allocating the frame until saving
	     the callee registers.  */
	  if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
	    merge_sp_adjust_with_store = 1;
	  /* Can not optimize.  Adjust the stack frame by actual_fsize
	     bytes.  */
	  else
	    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
			    actual_fsize, 1);
	}
    }

  /* Normal register save.

     Do not save the frame pointer in the frame_pointer_needed case.  It
     was done earlier.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* Saving the EH return data registers in the frame is the simplest
	 way to get the frame unwind information emitted.  We put them
	 just before the general registers.  */
      if (DO_FRAME_NOTES && current_function_calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      store_reg (regno, offset, FRAME_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 4; i--)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  {
	    store_reg (i, offset, FRAME_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	    gr_saved++;
	  }

      /* Account for %r3 which is saved in a special place.  */
      gr_saved++;
    }
  /* No frame pointer needed.  */
  else
    {
      offset = local_fsize - actual_fsize;

      /* Saving the EH return data registers in the frame is the simplest
	 way to get the frame unwind information emitted.  */
      if (DO_FRAME_NOTES && current_function_calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      /* If merge_sp_adjust_with_store is nonzero, then we can
		 optimize the first save.  */
	      if (merge_sp_adjust_with_store)
		{
		  store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
		  merge_sp_adjust_with_store = 0;
		}
	      else
		store_reg (regno, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 3; i--)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  {
	    /* If merge_sp_adjust_with_store is nonzero, then we can
	       optimize the first GR save.  */
	    if (merge_sp_adjust_with_store)
	      {
		store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
		merge_sp_adjust_with_store = 0;
	      }
	    else
	      store_reg (i, offset, STACK_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	    gr_saved++;
	  }

      /* If we wanted to merge the SP adjustment with a GR save, but we never
	 did any GR saves, then just emit the adjustment here.  */
      if (merge_sp_adjust_with_store)
	set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
			actual_fsize, 1);
    }

  /* The hppa calling conventions say that %r19, the pic offset
     register, is saved at sp - 32 (in this function's frame)
     when generating PIC code.  FIXME:  What is the correct thing
     to do for functions which make no calls and allocate no
     frame?  Do we need to allocate a frame, or can we just omit
     the save?   For now we'll just omit the save.

     We don't want a note on this insn as the frame marker can
     move if there is a dynamic stack allocation.  */
  if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
    {
      rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));

      emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* Floating point register store.  */
  if (save_fregs)
    {
      rtx base;

      /* First get the frame or stack pointer to the start of the FP register
	 save area.  */
      if (frame_pointer_needed)
	{
	  set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
	  base = frame_pointer_rtx;
	}
      else
	{
	  set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
	  base = stack_pointer_rtx;
	}

      /* Now actually save the FP registers.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
	{
	  if (regs_ever_live[i]
	      || (! TARGET_64BIT && regs_ever_live[i + 1]))
	    {
	      rtx addr, insn, reg;
	      addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
	      reg = gen_rtx_REG (DFmode, i);
	      insn = emit_move_insn (addr, reg);
	      if (DO_FRAME_NOTES)
		{
		  RTX_FRAME_RELATED_P (insn) = 1;
		  if (TARGET_64BIT)
		    {
		      rtx mem = gen_rtx_MEM (DFmode,
					     plus_constant (base, offset));
		      REG_NOTES (insn)
			= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
					     gen_rtx_SET (VOIDmode, mem, reg),
					     REG_NOTES (insn));
		    }
		  else
		    {
		      rtx meml = gen_rtx_MEM (SFmode,
					      plus_constant (base, offset));
		      rtx memr = gen_rtx_MEM (SFmode,
					      plus_constant (base, offset + 4));
		      rtx regl = gen_rtx_REG (SFmode, i);
		      rtx regr = gen_rtx_REG (SFmode, i + 1);
		      rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
		      rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
		      rtvec vec;

		      RTX_FRAME_RELATED_P (setl) = 1;
		      RTX_FRAME_RELATED_P (setr) = 1;
		      vec = gen_rtvec (2, setl, setr);
		      REG_NOTES (insn)
			= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
					     gen_rtx_SEQUENCE (VOIDmode, vec),
					     REG_NOTES (insn));
		    }
		}
	      offset += GET_MODE_SIZE (DFmode);
	      fr_saved++;
	    }
	}
    }
}
/* Emit RTL to load REG from the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.  */

static void
load_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx dest = gen_rtx_REG (word_mode, reg);
  rtx basereg = gen_rtx_REG (Pmode, base);
  rtx src;

  if (VAL_14_BITS_P (disp))
    src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      if (TARGET_DISABLE_INDEXING)
	{
	  emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
	  src = gen_rtx_MEM (word_mode, tmpreg);
	}
      else
	src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  emit_move_insn (dest, src);
}
/* Update the total code bytes output to the text section.  */

static void
update_total_code_bytes (int nbytes)
{
  if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
      && !IN_NAMED_SECTION_P (cfun->decl))
    {
      if (INSN_ADDRESSES_SET_P ())
	{
	  unsigned long old_total = total_code_bytes;

	  total_code_bytes += nbytes;

	  /* Be prepared to handle overflows.  */
	  if (old_total > total_code_bytes)
	    total_code_bytes = -1;
	}
      else
	total_code_bytes = -1;
    }
}
/* This function generates the assembly code for function exit.
   Args are as for output_function_prologue ().

   The function epilogue should not depend on the current stack
   pointer!  It should use the frame pointer only.  This is mandatory
   because of alloca; we also take advantage of it to omit stack
   adjustments before returning.  */

static void
pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rtx insn = get_last_insn ();

  last_address = 0;

  /* hppa_expand_epilogue does the dirty work now.  We just need
     to output the assembler directives which denote the end
     of a function.

     To make debuggers happy, emit a nop if the epilogue was completely
     eliminated due to a volatile call as the last insn in the
     current function.  That way the return address (in %r2) will
     always point to a valid instruction in the current function.  */

  /* Get the last real insn.  */
  if (GET_CODE (insn) == NOTE)
    insn = prev_real_insn (insn);

  /* If it is a sequence, then look inside.  */
  if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
    insn = XVECEXP (PATTERN (insn), 0, 0);

  /* If insn is a CALL_INSN, then it must be a call to a volatile
     function (otherwise there would be epilogue insns).  */
  if (insn && GET_CODE (insn) == CALL_INSN)
    {
      fputs ("\tnop\n", file);
      last_address += 4;
    }

  fputs ("\t.EXIT\n\t.PROCEND\n", file);

  if (TARGET_SOM && TARGET_GAS)
    {
      /* We done with this subspace except possibly for some additional
	 debug information.  Forget that we are in this subspace to ensure
	 that the next function is output in its own subspace.  */
    }

  if (INSN_ADDRESSES_SET_P ())
    {
      insn = get_last_nonnote_insn ();
      last_address += INSN_ADDRESSES (INSN_UID (insn));
      if (INSN_P (insn))
	last_address += insn_default_length (insn);
      last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
		      & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
    }

  /* Finally, update the total number of code bytes output so far.  */
  update_total_code_bytes (last_address);
}
void
hppa_expand_epilogue (void)
{
  rtx tmpreg;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT ret_off = 0;
  int i;
  int merge_sp_adjust_with_load = 0;

  /* We will use this often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Try to restore RP early to avoid load/use interlocks when
     RP gets used in the return (bv) instruction.  This appears to still
     be necessary even when we schedule the prologue and epilogue.  */
  if (regs_ever_live[2] || current_function_calls_eh_return)
    {
      ret_off = TARGET_64BIT ? -16 : -20;
      if (frame_pointer_needed)
	{
	  load_reg (2, ret_off, FRAME_POINTER_REGNUM);
	  ret_off = 0;
	}
      else
	{
	  /* No frame pointer, and stack is smaller than 8k.  */
	  if (VAL_14_BITS_P (ret_off - actual_fsize))
	    {
	      load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
	      ret_off = 0;
	    }
	}
    }

  /* General register restores.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* If the current function calls __builtin_eh_return, then we need
	 to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && current_function_calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      load_reg (regno, offset, FRAME_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 4; i--)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  {
	    load_reg (i, offset, FRAME_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	  }
    }
  else
    {
      offset = local_fsize - actual_fsize;

      /* If the current function calls __builtin_eh_return, then we need
	 to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && current_function_calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      /* Only for the first load.
	         merge_sp_adjust_with_load holds the register load
	         with which we will merge the sp adjustment.  */
	      if (merge_sp_adjust_with_load == 0
		  && local_fsize == 0
		  && VAL_14_BITS_P (-actual_fsize))
	        merge_sp_adjust_with_load = regno;
	      else
		load_reg (regno, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 3; i--)
	{
	  if (regs_ever_live[i] && ! call_used_regs[i])
	    {
	      /* Only for the first load.
	         merge_sp_adjust_with_load holds the register load
	         with which we will merge the sp adjustment.  */
	      if (merge_sp_adjust_with_load == 0
		  && local_fsize == 0
		  && VAL_14_BITS_P (-actual_fsize))
	        merge_sp_adjust_with_load = i;
	      else
		load_reg (i, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* FP register restores.  */
  if (save_fregs)
    {
      /* Adjust the register to index off of.  */
      if (frame_pointer_needed)
	set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
      else
	set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);

      /* Actually do the restores now.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
	if (regs_ever_live[i]
	    || (! TARGET_64BIT && regs_ever_live[i + 1]))
	  {
	    rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
	    rtx dest = gen_rtx_REG (DFmode, i);
	    emit_move_insn (dest, src);
	  }
    }

  /* Emit a blockage insn here to keep these insns from being moved to
     an earlier spot in the epilogue, or into the main instruction stream.

     This is necessary as we must not cut the stack back before all the
     restores are finished.  */
  emit_insn (gen_blockage ());

  /* Reset stack pointer (and possibly frame pointer).  The stack
     pointer is initially set to fp + 64 to avoid a race condition.  */
  if (frame_pointer_needed)
    {
      rtx delta = GEN_INT (-64);

      set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
      emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
    }
  /* If we were deferring a callee register restore, do it now.  */
  else if (merge_sp_adjust_with_load)
    {
      rtx delta = GEN_INT (-actual_fsize);
      rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);

      emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
    }
  else if (actual_fsize != 0)
    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
		    - actual_fsize, 0);

  /* If we haven't restored %r2 yet (no frame pointer, and a stack
     frame greater than 8k), do so now.  */
  if (ret_off != 0)
    load_reg (2, ret_off, STACK_POINTER_REGNUM);

  if (DO_FRAME_NOTES && current_function_calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;

      emit_insn (gen_blockage ());
      emit_insn (TARGET_64BIT
		 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
		 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
    }
}
4163 hppa_pic_save_rtx (void)
4165 return get_hard_reg_initial_val (word_mode
, PIC_OFFSET_TABLE_REGNUM
);
4168 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4169 #define NO_DEFERRED_PROFILE_COUNTERS 0
4172 /* Define heap vector type for funcdef numbers. */
4174 DEF_VEC_ALLOC_I(int,heap
);
4176 /* Vector of funcdef numbers. */
4177 static VEC(int,heap
) *funcdef_nos
;
4179 /* Output deferred profile counters. */
4181 output_deferred_profile_counters (void)
4186 if (VEC_empty (int, funcdef_nos
))
4190 align
= MIN (BIGGEST_ALIGNMENT
, LONG_TYPE_SIZE
);
4191 ASM_OUTPUT_ALIGN (asm_out_file
, floor_log2 (align
/ BITS_PER_UNIT
));
4193 for (i
= 0; VEC_iterate (int, funcdef_nos
, i
, n
); i
++)
4195 targetm
.asm_out
.internal_label (asm_out_file
, "LP", n
);
4196 assemble_integer (const0_rtx
, LONG_TYPE_SIZE
/ BITS_PER_UNIT
, align
, 1);
4199 VEC_free (int, heap
, funcdef_nos
);
4203 hppa_profile_hook (int label_no
)
4205 /* We use SImode for the address of the function in both 32 and
4206 64-bit code to avoid having to provide DImode versions of the
4207 lcla2 and load_offset_label_address insn patterns. */
4208 rtx reg
= gen_reg_rtx (SImode
);
4209 rtx label_rtx
= gen_label_rtx ();
4210 rtx begin_label_rtx
, call_insn
;
4211 char begin_label_name
[16];
4213 ASM_GENERATE_INTERNAL_LABEL (begin_label_name
, FUNC_BEGIN_PROLOG_LABEL
,
4215 begin_label_rtx
= gen_rtx_SYMBOL_REF (SImode
, ggc_strdup (begin_label_name
));
4218 emit_move_insn (arg_pointer_rtx
,
4219 gen_rtx_PLUS (word_mode
, virtual_outgoing_args_rtx
,
4222 emit_move_insn (gen_rtx_REG (word_mode
, 26), gen_rtx_REG (word_mode
, 2));
4224 /* The address of the function is loaded into %r25 with a instruction-
4225 relative sequence that avoids the use of relocations. The sequence
4226 is split so that the load_offset_label_address instruction can
4227 occupy the delay slot of the call to _mcount. */
4229 emit_insn (gen_lcla2 (reg
, label_rtx
));
4231 emit_insn (gen_lcla1 (reg
, label_rtx
));
4233 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode
, 25),
4234 reg
, begin_label_rtx
, label_rtx
));
4236 #if !NO_DEFERRED_PROFILE_COUNTERS
4238 rtx count_label_rtx
, addr
, r24
;
4239 char count_label_name
[16];
4241 VEC_safe_push (int, heap
, funcdef_nos
, label_no
);
4242 ASM_GENERATE_INTERNAL_LABEL (count_label_name
, "LP", label_no
);
4243 count_label_rtx
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (count_label_name
));
4245 addr
= force_reg (Pmode
, count_label_rtx
);
4246 r24
= gen_rtx_REG (Pmode
, 24);
4247 emit_move_insn (r24
, addr
);
4250 emit_call_insn (gen_call (gen_rtx_MEM (Pmode
,
4251 gen_rtx_SYMBOL_REF (Pmode
,
4253 GEN_INT (TARGET_64BIT
? 24 : 12)));
4255 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
), r24
);
4260 emit_call_insn (gen_call (gen_rtx_MEM (Pmode
,
4261 gen_rtx_SYMBOL_REF (Pmode
,
4263 GEN_INT (TARGET_64BIT
? 16 : 8)));
4267 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
), gen_rtx_REG (SImode
, 25));
4268 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
), gen_rtx_REG (SImode
, 26));
4270 /* Indicate the _mcount call cannot throw, nor will it execute a
4272 REG_NOTES (call_insn
)
4273 = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
, REG_NOTES (call_insn
));
4276 /* Fetch the return address for the frame COUNT steps up from
4277 the current frame, after the prologue. FRAMEADDR is the
4278 frame pointer of the COUNT frame.
4280 We want to ignore any export stub remnants here. To handle this,
4281 we examine the code at the return address, and if it is an export
4282 stub, we return a memory rtx for the stub return address stored
4285 The value returned is used in two different ways:
4287 1. To find a function's caller.
4289 2. To change the return address for a function.
4291 This function handles most instances of case 1; however, it will
4292 fail if there are two levels of stubs to execute on the return
4293 path. The only way I believe that can happen is if the return value
4294 needs a parameter relocation, which never happens for C code.
4296 This function handles most instances of case 2; however, it will
4297 fail if we did not originally have stub code on the return path
4298 but will need stub code on the new return path. This can happen if
4299 the caller & callee are both in the main program, but the new
4300 return location is in a shared library. */
4303 return_addr_rtx (int count
, rtx frameaddr
)
4313 rp
= get_hard_reg_initial_val (Pmode
, 2);
4315 if (TARGET_64BIT
|| TARGET_NO_SPACE_REGS
)
4318 saved_rp
= gen_reg_rtx (Pmode
);
4319 emit_move_insn (saved_rp
, rp
);
4321 /* Get pointer to the instruction stream. We have to mask out the
4322 privilege level from the two low order bits of the return address
4323 pointer here so that ins will point to the start of the first
4324 instruction that would have been executed if we returned. */
4325 ins
= copy_to_reg (gen_rtx_AND (Pmode
, rp
, MASK_RETURN_ADDR
));
4326 label
= gen_label_rtx ();
4328 /* Check the instruction stream at the normal return address for the
4331 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4332 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4333 0x00011820 | stub+16: mtsp r1,sr0
4334 0xe0400002 | stub+20: be,n 0(sr0,rp)
4336 If it is an export stub, than our return address is really in
4339 emit_cmp_insn (gen_rtx_MEM (SImode
, ins
), GEN_INT (0x4bc23fd1), NE
,
4340 NULL_RTX
, SImode
, 1);
4341 emit_jump_insn (gen_bne (label
));
4343 emit_cmp_insn (gen_rtx_MEM (SImode
, plus_constant (ins
, 4)),
4344 GEN_INT (0x004010a1), NE
, NULL_RTX
, SImode
, 1);
4345 emit_jump_insn (gen_bne (label
));
4347 emit_cmp_insn (gen_rtx_MEM (SImode
, plus_constant (ins
, 8)),
4348 GEN_INT (0x00011820), NE
, NULL_RTX
, SImode
, 1);
4349 emit_jump_insn (gen_bne (label
));
4351 /* 0xe0400002 must be specified as -532676606 so that it won't be
4352 rejected as an invalid immediate operand on 64-bit hosts. */
4353 emit_cmp_insn (gen_rtx_MEM (SImode
, plus_constant (ins
, 12)),
4354 GEN_INT (-532676606), NE
, NULL_RTX
, SImode
, 1);
4356 /* If there is no export stub then just use the value saved from
4357 the return pointer register. */
4359 emit_jump_insn (gen_bne (label
));
4361 /* Here we know that our return address points to an export
4362 stub. We don't want to return the address of the export stub,
4363 but rather the return address of the export stub. That return
4364 address is stored at -24[frameaddr]. */
4366 emit_move_insn (saved_rp
,
4368 memory_address (Pmode
,
4369 plus_constant (frameaddr
,
4376 /* This is only valid once reload has completed because it depends on
4377 knowing exactly how much (if any) frame there is and...
4379 It's only valid if there is no frame marker to de-allocate and...
4381 It's only valid if %r2 hasn't been saved into the caller's frame
4382 (we're not profiling and %r2 isn't live anywhere). */
4384 hppa_can_use_return_insn_p (void)
4386 return (reload_completed
4387 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4388 && ! regs_ever_live
[2]
4389 && ! frame_pointer_needed
);
4393 emit_bcond_fp (enum rtx_code code
, rtx operand0
)
4395 emit_jump_insn (gen_rtx_SET (VOIDmode
, pc_rtx
,
4396 gen_rtx_IF_THEN_ELSE (VOIDmode
,
4397 gen_rtx_fmt_ee (code
,
4399 gen_rtx_REG (CCFPmode
, 0),
4401 gen_rtx_LABEL_REF (VOIDmode
, operand0
),
4407 gen_cmp_fp (enum rtx_code code
, rtx operand0
, rtx operand1
)
4409 return gen_rtx_SET (VOIDmode
, gen_rtx_REG (CCFPmode
, 0),
4410 gen_rtx_fmt_ee (code
, CCFPmode
, operand0
, operand1
));
4413 /* Adjust the cost of a scheduling dependency. Return the new cost of
4414 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4417 pa_adjust_cost (rtx insn
, rtx link
, rtx dep_insn
, int cost
)
4419 enum attr_type attr_type
;
4421 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4422 true dependencies as they are described with bypasses now. */
4423 if (pa_cpu
>= PROCESSOR_8000
|| REG_NOTE_KIND (link
) == 0)
4426 if (! recog_memoized (insn
))
4429 attr_type
= get_attr_type (insn
);
4431 switch (REG_NOTE_KIND (link
))
4434 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4437 if (attr_type
== TYPE_FPLOAD
)
4439 rtx pat
= PATTERN (insn
);
4440 rtx dep_pat
= PATTERN (dep_insn
);
4441 if (GET_CODE (pat
) == PARALLEL
)
4443 /* This happens for the fldXs,mb patterns. */
4444 pat
= XVECEXP (pat
, 0, 0);
4446 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
)
4447 /* If this happens, we have to extend this to schedule
4448 optimally. Return 0 for now. */
4451 if (reg_mentioned_p (SET_DEST (pat
), SET_SRC (dep_pat
)))
4453 if (! recog_memoized (dep_insn
))
4455 switch (get_attr_type (dep_insn
))
4462 case TYPE_FPSQRTSGL
:
4463 case TYPE_FPSQRTDBL
:
4464 /* A fpload can't be issued until one cycle before a
4465 preceding arithmetic operation has finished if
4466 the target of the fpload is any of the sources
4467 (or destination) of the arithmetic operation. */
4468 return insn_default_latency (dep_insn
) - 1;
4475 else if (attr_type
== TYPE_FPALU
)
4477 rtx pat
= PATTERN (insn
);
4478 rtx dep_pat
= PATTERN (dep_insn
);
4479 if (GET_CODE (pat
) == PARALLEL
)
4481 /* This happens for the fldXs,mb patterns. */
4482 pat
= XVECEXP (pat
, 0, 0);
4484 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
)
4485 /* If this happens, we have to extend this to schedule
4486 optimally. Return 0 for now. */
4489 if (reg_mentioned_p (SET_DEST (pat
), SET_SRC (dep_pat
)))
4491 if (! recog_memoized (dep_insn
))
4493 switch (get_attr_type (dep_insn
))
4497 case TYPE_FPSQRTSGL
:
4498 case TYPE_FPSQRTDBL
:
4499 /* An ALU flop can't be issued until two cycles before a
4500 preceding divide or sqrt operation has finished if
4501 the target of the ALU flop is any of the sources
4502 (or destination) of the divide or sqrt operation. */
4503 return insn_default_latency (dep_insn
) - 2;
4511 /* For other anti dependencies, the cost is 0. */
4514 case REG_DEP_OUTPUT
:
4515 /* Output dependency; DEP_INSN writes a register that INSN writes some
4517 if (attr_type
== TYPE_FPLOAD
)
4519 rtx pat
= PATTERN (insn
);
4520 rtx dep_pat
= PATTERN (dep_insn
);
4521 if (GET_CODE (pat
) == PARALLEL
)
4523 /* This happens for the fldXs,mb patterns. */
4524 pat
= XVECEXP (pat
, 0, 0);
4526 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
)
4527 /* If this happens, we have to extend this to schedule
4528 optimally. Return 0 for now. */
4531 if (reg_mentioned_p (SET_DEST (pat
), SET_DEST (dep_pat
)))
4533 if (! recog_memoized (dep_insn
))
4535 switch (get_attr_type (dep_insn
))
4542 case TYPE_FPSQRTSGL
:
4543 case TYPE_FPSQRTDBL
:
4544 /* A fpload can't be issued until one cycle before a
4545 preceding arithmetic operation has finished if
4546 the target of the fpload is the destination of the
4547 arithmetic operation.
4549 Exception: For PA7100LC, PA7200 and PA7300, the cost
4550 is 3 cycles, unless they bundle together. We also
4551 pay the penalty if the second insn is a fpload. */
4552 return insn_default_latency (dep_insn
) - 1;
4559 else if (attr_type
== TYPE_FPALU
)
4561 rtx pat
= PATTERN (insn
);
4562 rtx dep_pat
= PATTERN (dep_insn
);
4563 if (GET_CODE (pat
) == PARALLEL
)
4565 /* This happens for the fldXs,mb patterns. */
4566 pat
= XVECEXP (pat
, 0, 0);
4568 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
)
4569 /* If this happens, we have to extend this to schedule
4570 optimally. Return 0 for now. */
4573 if (reg_mentioned_p (SET_DEST (pat
), SET_DEST (dep_pat
)))
4575 if (! recog_memoized (dep_insn
))
4577 switch (get_attr_type (dep_insn
))
4581 case TYPE_FPSQRTSGL
:
4582 case TYPE_FPSQRTDBL
:
4583 /* An ALU flop can't be issued until two cycles before a
4584 preceding divide or sqrt operation has finished if
4585 the target of the ALU flop is also the target of
4586 the divide or sqrt operation. */
4587 return insn_default_latency (dep_insn
) - 2;
4595 /* For other output dependencies, the cost is 0. */
4603 /* Adjust scheduling priorities. We use this to try and keep addil
4604 and the next use of %r1 close together. */
4606 pa_adjust_priority (rtx insn
, int priority
)
4608 rtx set
= single_set (insn
);
4612 src
= SET_SRC (set
);
4613 dest
= SET_DEST (set
);
4614 if (GET_CODE (src
) == LO_SUM
4615 && symbolic_operand (XEXP (src
, 1), VOIDmode
)
4616 && ! read_only_operand (XEXP (src
, 1), VOIDmode
))
4619 else if (GET_CODE (src
) == MEM
4620 && GET_CODE (XEXP (src
, 0)) == LO_SUM
4621 && symbolic_operand (XEXP (XEXP (src
, 0), 1), VOIDmode
)
4622 && ! read_only_operand (XEXP (XEXP (src
, 0), 1), VOIDmode
))
4625 else if (GET_CODE (dest
) == MEM
4626 && GET_CODE (XEXP (dest
, 0)) == LO_SUM
4627 && symbolic_operand (XEXP (XEXP (dest
, 0), 1), VOIDmode
)
4628 && ! read_only_operand (XEXP (XEXP (dest
, 0), 1), VOIDmode
))
4634 /* The 700 can only issue a single insn at a time.
4635 The 7XXX processors can issue two insns at a time.
4636 The 8000 can issue 4 insns at a time. */
4638 pa_issue_rate (void)
4642 case PROCESSOR_700
: return 1;
4643 case PROCESSOR_7100
: return 2;
4644 case PROCESSOR_7100LC
: return 2;
4645 case PROCESSOR_7200
: return 2;
4646 case PROCESSOR_7300
: return 2;
4647 case PROCESSOR_8000
: return 4;
4656 /* Return any length adjustment needed by INSN which already has its length
4657 computed as LENGTH. Return zero if no adjustment is necessary.
4659 For the PA: function calls, millicode calls, and backwards short
4660 conditional branches with unfilled delay slots need an adjustment by +1
4661 (to account for the NOP which will be inserted into the instruction stream).
4663 Also compute the length of an inline block move here as it is too
4664 complicated to express as a length attribute in pa.md. */
4666 pa_adjust_insn_length (rtx insn
, int length
)
4668 rtx pat
= PATTERN (insn
);
4670 /* Jumps inside switch tables which have unfilled delay slots need
4672 if (GET_CODE (insn
) == JUMP_INSN
4673 && GET_CODE (pat
) == PARALLEL
4674 && get_attr_type (insn
) == TYPE_BTABLE_BRANCH
)
4676 /* Millicode insn with an unfilled delay slot. */
4677 else if (GET_CODE (insn
) == INSN
4678 && GET_CODE (pat
) != SEQUENCE
4679 && GET_CODE (pat
) != USE
4680 && GET_CODE (pat
) != CLOBBER
4681 && get_attr_type (insn
) == TYPE_MILLI
)
4683 /* Block move pattern. */
4684 else if (GET_CODE (insn
) == INSN
4685 && GET_CODE (pat
) == PARALLEL
4686 && GET_CODE (XVECEXP (pat
, 0, 0)) == SET
4687 && GET_CODE (XEXP (XVECEXP (pat
, 0, 0), 0)) == MEM
4688 && GET_CODE (XEXP (XVECEXP (pat
, 0, 0), 1)) == MEM
4689 && GET_MODE (XEXP (XVECEXP (pat
, 0, 0), 0)) == BLKmode
4690 && GET_MODE (XEXP (XVECEXP (pat
, 0, 0), 1)) == BLKmode
)
4691 return compute_movmem_length (insn
) - 4;
4692 /* Block clear pattern. */
4693 else if (GET_CODE (insn
) == INSN
4694 && GET_CODE (pat
) == PARALLEL
4695 && GET_CODE (XVECEXP (pat
, 0, 0)) == SET
4696 && GET_CODE (XEXP (XVECEXP (pat
, 0, 0), 0)) == MEM
4697 && XEXP (XVECEXP (pat
, 0, 0), 1) == const0_rtx
4698 && GET_MODE (XEXP (XVECEXP (pat
, 0, 0), 0)) == BLKmode
)
4699 return compute_clrmem_length (insn
) - 4;
4700 /* Conditional branch with an unfilled delay slot. */
4701 else if (GET_CODE (insn
) == JUMP_INSN
&& ! simplejump_p (insn
))
4703 /* Adjust a short backwards conditional with an unfilled delay slot. */
4704 if (GET_CODE (pat
) == SET
4706 && ! forward_branch_p (insn
))
4708 else if (GET_CODE (pat
) == PARALLEL
4709 && get_attr_type (insn
) == TYPE_PARALLEL_BRANCH
4712 /* Adjust dbra insn with short backwards conditional branch with
4713 unfilled delay slot -- only for case where counter is in a
4714 general register register. */
4715 else if (GET_CODE (pat
) == PARALLEL
4716 && GET_CODE (XVECEXP (pat
, 0, 1)) == SET
4717 && GET_CODE (XEXP (XVECEXP (pat
, 0, 1), 0)) == REG
4718 && ! FP_REG_P (XEXP (XVECEXP (pat
, 0, 1), 0))
4720 && ! forward_branch_p (insn
))
4728 /* Print operand X (an rtx) in assembler syntax to file FILE.
4729 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4730 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4733 print_operand (FILE *file
, rtx x
, int code
)
4738 /* Output a 'nop' if there's nothing for the delay slot. */
4739 if (dbr_sequence_length () == 0)
4740 fputs ("\n\tnop", file
);
4743 /* Output a nullification completer if there's nothing for the */
4744 /* delay slot or nullification is requested. */
4745 if (dbr_sequence_length () == 0 ||
4747 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence
, 0, 0))))
4751 /* Print out the second register name of a register pair.
4752 I.e., R (6) => 7. */
4753 fputs (reg_names
[REGNO (x
) + 1], file
);
4756 /* A register or zero. */
4758 || (x
== CONST0_RTX (DFmode
))
4759 || (x
== CONST0_RTX (SFmode
)))
4761 fputs ("%r0", file
);
4767 /* A register or zero (floating point). */
4769 || (x
== CONST0_RTX (DFmode
))
4770 || (x
== CONST0_RTX (SFmode
)))
4772 fputs ("%fr0", file
);
4781 xoperands
[0] = XEXP (XEXP (x
, 0), 0);
4782 xoperands
[1] = XVECEXP (XEXP (XEXP (x
, 0), 1), 0, 0);
4783 output_global_address (file
, xoperands
[1], 0);
4784 fprintf (file
, "(%s)", reg_names
[REGNO (xoperands
[0])]);
4788 case 'C': /* Plain (C)ondition */
4790 switch (GET_CODE (x
))
4793 fputs ("=", file
); break;
4795 fputs ("<>", file
); break;
4797 fputs (">", file
); break;
4799 fputs (">=", file
); break;
4801 fputs (">>=", file
); break;
4803 fputs (">>", file
); break;
4805 fputs ("<", file
); break;
4807 fputs ("<=", file
); break;
4809 fputs ("<<=", file
); break;
4811 fputs ("<<", file
); break;
4816 case 'N': /* Condition, (N)egated */
4817 switch (GET_CODE (x
))
4820 fputs ("<>", file
); break;
4822 fputs ("=", file
); break;
4824 fputs ("<=", file
); break;
4826 fputs ("<", file
); break;
4828 fputs ("<<", file
); break;
4830 fputs ("<<=", file
); break;
4832 fputs (">=", file
); break;
4834 fputs (">", file
); break;
4836 fputs (">>", file
); break;
4838 fputs (">>=", file
); break;
4843 /* For floating point comparisons. Note that the output
4844 predicates are the complement of the desired mode. The
4845 conditions for GT, GE, LT, LE and LTGT cause an invalid
4846 operation exception if the result is unordered and this
4847 exception is enabled in the floating-point status register. */
4849 switch (GET_CODE (x
))
4852 fputs ("!=", file
); break;
4854 fputs ("=", file
); break;
4856 fputs ("!>", file
); break;
4858 fputs ("!>=", file
); break;
4860 fputs ("!<", file
); break;
4862 fputs ("!<=", file
); break;
4864 fputs ("!<>", file
); break;
4866 fputs ("!?<=", file
); break;
4868 fputs ("!?<", file
); break;
4870 fputs ("!?>=", file
); break;
4872 fputs ("!?>", file
); break;
4874 fputs ("!?=", file
); break;
4876 fputs ("!?", file
); break;
4878 fputs ("?", file
); break;
4883 case 'S': /* Condition, operands are (S)wapped. */
4884 switch (GET_CODE (x
))
4887 fputs ("=", file
); break;
4889 fputs ("<>", file
); break;
4891 fputs ("<", file
); break;
4893 fputs ("<=", file
); break;
4895 fputs ("<<=", file
); break;
4897 fputs ("<<", file
); break;
4899 fputs (">", file
); break;
4901 fputs (">=", file
); break;
4903 fputs (">>=", file
); break;
4905 fputs (">>", file
); break;
4910 case 'B': /* Condition, (B)oth swapped and negate. */
4911 switch (GET_CODE (x
))
4914 fputs ("<>", file
); break;
4916 fputs ("=", file
); break;
4918 fputs (">=", file
); break;
4920 fputs (">", file
); break;
4922 fputs (">>", file
); break;
4924 fputs (">>=", file
); break;
4926 fputs ("<=", file
); break;
4928 fputs ("<", file
); break;
4930 fputs ("<<", file
); break;
4932 fputs ("<<=", file
); break;
4938 gcc_assert (GET_CODE (x
) == CONST_INT
);
4939 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~INTVAL (x
));
4942 gcc_assert (GET_CODE (x
) == CONST_INT
);
4943 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, 64 - (INTVAL (x
) & 63));
4946 gcc_assert (GET_CODE (x
) == CONST_INT
);
4947 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, 32 - (INTVAL (x
) & 31));
4950 gcc_assert (GET_CODE (x
) == CONST_INT
&& exact_log2 (INTVAL (x
)) >= 0);
4951 fprintf (file
, "%d", exact_log2 (INTVAL (x
)));
4954 gcc_assert (GET_CODE (x
) == CONST_INT
);
4955 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, 63 - (INTVAL (x
) & 63));
4958 gcc_assert (GET_CODE (x
) == CONST_INT
);
4959 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, 31 - (INTVAL (x
) & 31));
4962 if (GET_CODE (x
) == CONST_INT
)
4967 switch (GET_CODE (XEXP (x
, 0)))
4971 if (ASSEMBLER_DIALECT
== 0)
4972 fputs ("s,mb", file
);
4974 fputs (",mb", file
);
4978 if (ASSEMBLER_DIALECT
== 0)
4979 fputs ("s,ma", file
);
4981 fputs (",ma", file
);
4984 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
4985 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == REG
)
4987 if (ASSEMBLER_DIALECT
== 0)
4990 else if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
4991 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == MULT
)
4993 if (ASSEMBLER_DIALECT
== 0)
4994 fputs ("x,s", file
);
4998 else if (code
== 'F' && ASSEMBLER_DIALECT
== 0)
5002 if (code
== 'F' && ASSEMBLER_DIALECT
== 0)
5008 output_global_address (file
, x
, 0);
5011 output_global_address (file
, x
, 1);
5013 case 0: /* Don't do anything special */
5018 compute_zdepwi_operands (INTVAL (x
), op
);
5019 fprintf (file
, "%d,%d,%d", op
[0], op
[1], op
[2]);
5025 compute_zdepdi_operands (INTVAL (x
), op
);
5026 fprintf (file
, "%d,%d,%d", op
[0], op
[1], op
[2]);
5030 /* We can get here from a .vtable_inherit due to our
5031 CONSTANT_ADDRESS_P rejecting perfectly good constant
5037 if (GET_CODE (x
) == REG
)
5039 fputs (reg_names
[REGNO (x
)], file
);
5040 if (TARGET_64BIT
&& FP_REG_P (x
) && GET_MODE_SIZE (GET_MODE (x
)) <= 4)
5046 && GET_MODE_SIZE (GET_MODE (x
)) <= 4
5047 && (REGNO (x
) & 1) == 0)
5050 else if (GET_CODE (x
) == MEM
)
5052 int size
= GET_MODE_SIZE (GET_MODE (x
));
5053 rtx base
= NULL_RTX
;
5054 switch (GET_CODE (XEXP (x
, 0)))
5058 base
= XEXP (XEXP (x
, 0), 0);
5059 fprintf (file
, "-%d(%s)", size
, reg_names
[REGNO (base
)]);
5063 base
= XEXP (XEXP (x
, 0), 0);
5064 fprintf (file
, "%d(%s)", size
, reg_names
[REGNO (base
)]);
5067 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
)
5068 fprintf (file
, "%s(%s)",
5069 reg_names
[REGNO (XEXP (XEXP (XEXP (x
, 0), 0), 0))],
5070 reg_names
[REGNO (XEXP (XEXP (x
, 0), 1))]);
5071 else if (GET_CODE (XEXP (XEXP (x
, 0), 1)) == MULT
)
5072 fprintf (file
, "%s(%s)",
5073 reg_names
[REGNO (XEXP (XEXP (XEXP (x
, 0), 1), 0))],
5074 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
5075 else if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
5076 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == REG
)
5078 /* Because the REG_POINTER flag can get lost during reload,
5079 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5080 index and base registers in the combined move patterns. */
5081 rtx base
= XEXP (XEXP (x
, 0), 1);
5082 rtx index
= XEXP (XEXP (x
, 0), 0);
5084 fprintf (file
, "%s(%s)",
5085 reg_names
[REGNO (index
)], reg_names
[REGNO (base
)]);
5088 output_address (XEXP (x
, 0));
5091 output_address (XEXP (x
, 0));
5096 output_addr_const (file
, x
);
5099 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5102 output_global_address (FILE *file
, rtx x
, int round_constant
)
5105 /* Imagine (high (const (plus ...))). */
5106 if (GET_CODE (x
) == HIGH
)
5109 if (GET_CODE (x
) == SYMBOL_REF
&& read_only_operand (x
, VOIDmode
))
5110 output_addr_const (file
, x
);
5111 else if (GET_CODE (x
) == SYMBOL_REF
&& !flag_pic
)
5113 output_addr_const (file
, x
);
5114 fputs ("-$global$", file
);
5116 else if (GET_CODE (x
) == CONST
)
5118 const char *sep
= "";
5119 int offset
= 0; /* assembler wants -$global$ at end */
5120 rtx base
= NULL_RTX
;
5122 switch (GET_CODE (XEXP (XEXP (x
, 0), 0)))
5125 base
= XEXP (XEXP (x
, 0), 0);
5126 output_addr_const (file
, base
);
5129 offset
= INTVAL (XEXP (XEXP (x
, 0), 0));
5135 switch (GET_CODE (XEXP (XEXP (x
, 0), 1)))
5138 base
= XEXP (XEXP (x
, 0), 1);
5139 output_addr_const (file
, base
);
5142 offset
= INTVAL (XEXP (XEXP (x
, 0), 1));
5148 /* How bogus. The compiler is apparently responsible for
5149 rounding the constant if it uses an LR field selector.
5151 The linker and/or assembler seem a better place since
5152 they have to do this kind of thing already.
5154 If we fail to do this, HP's optimizing linker may eliminate
5155 an addil, but not update the ldw/stw/ldo instruction that
5156 uses the result of the addil. */
5158 offset
= ((offset
+ 0x1000) & ~0x1fff);
5160 switch (GET_CODE (XEXP (x
, 0)))
5173 gcc_assert (GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
);
5181 if (!read_only_operand (base
, VOIDmode
) && !flag_pic
)
5182 fputs ("-$global$", file
);
5184 fprintf (file
, "%s%d", sep
, offset
);
5187 output_addr_const (file
, x
);
5190 /* Output boilerplate text to appear at the beginning of the file.
5191 There are several possible versions. */
5192 #define aputs(x) fputs(x, asm_out_file)
5194 pa_file_start_level (void)
5197 aputs ("\t.LEVEL 2.0w\n");
5198 else if (TARGET_PA_20
)
5199 aputs ("\t.LEVEL 2.0\n");
5200 else if (TARGET_PA_11
)
5201 aputs ("\t.LEVEL 1.1\n");
5203 aputs ("\t.LEVEL 1.0\n");
5207 pa_file_start_space (int sortspace
)
5209 aputs ("\t.SPACE $PRIVATE$");
5212 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5213 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5214 "\n\t.SPACE $TEXT$");
5217 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5218 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5222 pa_file_start_file (int want_version
)
5224 if (write_symbols
!= NO_DEBUG
)
5226 output_file_directive (asm_out_file
, main_input_filename
);
5228 aputs ("\t.version\t\"01.01\"\n");
5233 pa_file_start_mcount (const char *aswhat
)
5236 fprintf (asm_out_file
, "\t.IMPORT _mcount,%s\n", aswhat
);
5240 pa_elf_file_start (void)
5242 pa_file_start_level ();
5243 pa_file_start_mcount ("ENTRY");
5244 pa_file_start_file (0);
5248 pa_som_file_start (void)
5250 pa_file_start_level ();
5251 pa_file_start_space (0);
5252 aputs ("\t.IMPORT $global$,DATA\n"
5253 "\t.IMPORT $$dyncall,MILLICODE\n");
5254 pa_file_start_mcount ("CODE");
5255 pa_file_start_file (0);
5259 pa_linux_file_start (void)
5261 pa_file_start_file (1);
5262 pa_file_start_level ();
5263 pa_file_start_mcount ("CODE");
5267 pa_hpux64_gas_file_start (void)
5269 pa_file_start_level ();
5270 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5272 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file
, "_mcount", "function");
5274 pa_file_start_file (1);
5278 pa_hpux64_hpas_file_start (void)
5280 pa_file_start_level ();
5281 pa_file_start_space (1);
5282 pa_file_start_mcount ("CODE");
5283 pa_file_start_file (0);
5287 /* Search the deferred plabel list for SYMBOL and return its internal
5288 label. If an entry for SYMBOL is not found, a new entry is created. */
5291 get_deferred_plabel (rtx symbol
)
5293 const char *fname
= XSTR (symbol
, 0);
5296 /* See if we have already put this function on the list of deferred
5297 plabels. This list is generally small, so a liner search is not
5298 too ugly. If it proves too slow replace it with something faster. */
5299 for (i
= 0; i
< n_deferred_plabels
; i
++)
5300 if (strcmp (fname
, XSTR (deferred_plabels
[i
].symbol
, 0)) == 0)
5303 /* If the deferred plabel list is empty, or this entry was not found
5304 on the list, create a new entry on the list. */
5305 if (deferred_plabels
== NULL
|| i
== n_deferred_plabels
)
5309 if (deferred_plabels
== 0)
5310 deferred_plabels
= (struct deferred_plabel
*)
5311 ggc_alloc (sizeof (struct deferred_plabel
));
5313 deferred_plabels
= (struct deferred_plabel
*)
5314 ggc_realloc (deferred_plabels
,
5315 ((n_deferred_plabels
+ 1)
5316 * sizeof (struct deferred_plabel
)));
5318 i
= n_deferred_plabels
++;
5319 deferred_plabels
[i
].internal_label
= gen_label_rtx ();
5320 deferred_plabels
[i
].symbol
= symbol
;
5322 /* Gross. We have just implicitly taken the address of this
5323 function. Mark it in the same manner as assemble_name. */
5324 id
= maybe_get_identifier (targetm
.strip_name_encoding (fname
));
5326 mark_referenced (id
);
5329 return deferred_plabels
[i
].internal_label
;
5333 output_deferred_plabels (void)
5336 /* If we have deferred plabels, then we need to switch into the data
5337 section and align it to a 4 byte boundary before we output the
5338 deferred plabels. */
5339 if (n_deferred_plabels
)
5342 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
5345 /* Now output the deferred plabels. */
5346 for (i
= 0; i
< n_deferred_plabels
; i
++)
5348 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
5349 CODE_LABEL_NUMBER (deferred_plabels
[i
].internal_label
));
5350 assemble_integer (deferred_plabels
[i
].symbol
,
5351 TARGET_64BIT
? 8 : 4, TARGET_64BIT
? 64 : 32, 1);
5355 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5356 /* Initialize optabs to point to HPUX long double emulation routines. */
5358 pa_hpux_init_libfuncs (void)
5360 set_optab_libfunc (add_optab
, TFmode
, "_U_Qfadd");
5361 set_optab_libfunc (sub_optab
, TFmode
, "_U_Qfsub");
5362 set_optab_libfunc (smul_optab
, TFmode
, "_U_Qfmpy");
5363 set_optab_libfunc (sdiv_optab
, TFmode
, "_U_Qfdiv");
5364 set_optab_libfunc (smin_optab
, TFmode
, "_U_Qmin");
5365 set_optab_libfunc (smax_optab
, TFmode
, "_U_Qfmax");
5366 set_optab_libfunc (sqrt_optab
, TFmode
, "_U_Qfsqrt");
5367 set_optab_libfunc (abs_optab
, TFmode
, "_U_Qfabs");
5368 set_optab_libfunc (neg_optab
, TFmode
, "_U_Qfneg");
5370 set_optab_libfunc (eq_optab
, TFmode
, "_U_Qfeq");
5371 set_optab_libfunc (ne_optab
, TFmode
, "_U_Qfne");
5372 set_optab_libfunc (gt_optab
, TFmode
, "_U_Qfgt");
5373 set_optab_libfunc (ge_optab
, TFmode
, "_U_Qfge");
5374 set_optab_libfunc (lt_optab
, TFmode
, "_U_Qflt");
5375 set_optab_libfunc (le_optab
, TFmode
, "_U_Qfle");
5376 set_optab_libfunc (unord_optab
, TFmode
, "_U_Qfunord");
5378 set_conv_libfunc (sext_optab
, TFmode
, SFmode
, "_U_Qfcnvff_sgl_to_quad");
5379 set_conv_libfunc (sext_optab
, TFmode
, DFmode
, "_U_Qfcnvff_dbl_to_quad");
5380 set_conv_libfunc (trunc_optab
, SFmode
, TFmode
, "_U_Qfcnvff_quad_to_sgl");
5381 set_conv_libfunc (trunc_optab
, DFmode
, TFmode
, "_U_Qfcnvff_quad_to_dbl");
5383 set_conv_libfunc (sfix_optab
, SImode
, TFmode
, TARGET_64BIT
5384 ? "__U_Qfcnvfxt_quad_to_sgl"
5385 : "_U_Qfcnvfxt_quad_to_sgl");
5386 set_conv_libfunc (sfix_optab
, DImode
, TFmode
, "_U_Qfcnvfxt_quad_to_dbl");
5387 set_conv_libfunc (ufix_optab
, SImode
, TFmode
, "_U_Qfcnvfxt_quad_to_usgl");
5388 set_conv_libfunc (ufix_optab
, DImode
, TFmode
, "_U_Qfcnvfxt_quad_to_udbl");
5390 set_conv_libfunc (sfloat_optab
, TFmode
, SImode
, "_U_Qfcnvxf_sgl_to_quad");
5391 set_conv_libfunc (sfloat_optab
, TFmode
, DImode
, "_U_Qfcnvxf_dbl_to_quad");
5395 /* HP's millicode routines mean something special to the assembler.
5396 Keep track of which ones we have used. */
5398 enum millicodes
{ remI
, remU
, divI
, divU
, mulI
, end1000
};
5399 static void import_milli (enum millicodes
);
5400 static char imported
[(int) end1000
];
5401 static const char * const milli_names
[] = {"remI", "remU", "divI", "divU", "mulI"};
5402 static const char import_string
[] = ".IMPORT $$....,MILLICODE";
5403 #define MILLI_START 10
5406 import_milli (enum millicodes code
)
5408 char str
[sizeof (import_string
)];
5410 if (!imported
[(int) code
])
5412 imported
[(int) code
] = 1;
5413 strcpy (str
, import_string
);
5414 strncpy (str
+ MILLI_START
, milli_names
[(int) code
], 4);
5415 output_asm_insn (str
, 0);
5419 /* The register constraints have put the operands and return value in
5420 the proper registers. */
5423 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED
, rtx insn
)
5425 import_milli (mulI
);
5426 return output_millicode_call (insn
, gen_rtx_SYMBOL_REF (Pmode
, "$$mulI"));
5429 /* Emit the rtl for doing a division by a constant. */
5431 /* Do magic division millicodes exist for this value? */
5432 const int magic_milli
[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5434 /* We'll use an array to keep track of the magic millicodes and
5435 whether or not we've used them already. [n][0] is signed, [n][1] is
5438 static int div_milli
[16][2];
5441 emit_hpdiv_const (rtx
*operands
, int unsignedp
)
5443 if (GET_CODE (operands
[2]) == CONST_INT
5444 && INTVAL (operands
[2]) > 0
5445 && INTVAL (operands
[2]) < 16
5446 && magic_milli
[INTVAL (operands
[2])])
5448 rtx ret
= gen_rtx_REG (SImode
, TARGET_64BIT
? 2 : 31);
5450 emit_move_insn (gen_rtx_REG (SImode
, 26), operands
[1]);
5454 gen_rtvec (6, gen_rtx_SET (VOIDmode
, gen_rtx_REG (SImode
, 29),
5455 gen_rtx_fmt_ee (unsignedp
? UDIV
: DIV
,
5457 gen_rtx_REG (SImode
, 26),
5459 gen_rtx_CLOBBER (VOIDmode
, operands
[4]),
5460 gen_rtx_CLOBBER (VOIDmode
, operands
[3]),
5461 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 26)),
5462 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 25)),
5463 gen_rtx_CLOBBER (VOIDmode
, ret
))));
5464 emit_move_insn (operands
[0], gen_rtx_REG (SImode
, 29));
5471 output_div_insn (rtx
*operands
, int unsignedp
, rtx insn
)
5473 HOST_WIDE_INT divisor
;
5475 /* If the divisor is a constant, try to use one of the special
5477 if (GET_CODE (operands
[0]) == CONST_INT
)
5479 static char buf
[100];
5480 divisor
= INTVAL (operands
[0]);
5481 if (!div_milli
[divisor
][unsignedp
])
5483 div_milli
[divisor
][unsignedp
] = 1;
5485 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands
);
5487 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands
);
5491 sprintf (buf
, "$$divU_" HOST_WIDE_INT_PRINT_DEC
,
5492 INTVAL (operands
[0]));
5493 return output_millicode_call (insn
,
5494 gen_rtx_SYMBOL_REF (SImode
, buf
));
5498 sprintf (buf
, "$$divI_" HOST_WIDE_INT_PRINT_DEC
,
5499 INTVAL (operands
[0]));
5500 return output_millicode_call (insn
,
5501 gen_rtx_SYMBOL_REF (SImode
, buf
));
5504 /* Divisor isn't a special constant. */
5509 import_milli (divU
);
5510 return output_millicode_call (insn
,
5511 gen_rtx_SYMBOL_REF (SImode
, "$$divU"));
5515 import_milli (divI
);
5516 return output_millicode_call (insn
,
5517 gen_rtx_SYMBOL_REF (SImode
, "$$divI"));
5522 /* Output a $$rem millicode to do mod. */
5525 output_mod_insn (int unsignedp
, rtx insn
)
5529 import_milli (remU
);
5530 return output_millicode_call (insn
,
5531 gen_rtx_SYMBOL_REF (SImode
, "$$remU"));
5535 import_milli (remI
);
5536 return output_millicode_call (insn
,
5537 gen_rtx_SYMBOL_REF (SImode
, "$$remI"));
5542 output_arg_descriptor (rtx call_insn
)
5544 const char *arg_regs
[4];
5545 enum machine_mode arg_mode
;
5547 int i
, output_flag
= 0;
5550 /* We neither need nor want argument location descriptors for the
5551 64bit runtime environment or the ELF32 environment. */
5552 if (TARGET_64BIT
|| TARGET_ELF32
)
5555 for (i
= 0; i
< 4; i
++)
5558 /* Specify explicitly that no argument relocations should take place
5559 if using the portable runtime calling conventions. */
5560 if (TARGET_PORTABLE_RUNTIME
)
5562 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5567 gcc_assert (GET_CODE (call_insn
) == CALL_INSN
);
5568 for (link
= CALL_INSN_FUNCTION_USAGE (call_insn
);
5569 link
; link
= XEXP (link
, 1))
5571 rtx use
= XEXP (link
, 0);
5573 if (! (GET_CODE (use
) == USE
5574 && GET_CODE (XEXP (use
, 0)) == REG
5575 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use
, 0)))))
5578 arg_mode
= GET_MODE (XEXP (use
, 0));
5579 regno
= REGNO (XEXP (use
, 0));
5580 if (regno
>= 23 && regno
<= 26)
5582 arg_regs
[26 - regno
] = "GR";
5583 if (arg_mode
== DImode
)
5584 arg_regs
[25 - regno
] = "GR";
5586 else if (regno
>= 32 && regno
<= 39)
5588 if (arg_mode
== SFmode
)
5589 arg_regs
[(regno
- 32) / 2] = "FR";
5592 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5593 arg_regs
[(regno
- 34) / 2] = "FR";
5594 arg_regs
[(regno
- 34) / 2 + 1] = "FU";
5596 arg_regs
[(regno
- 34) / 2] = "FU";
5597 arg_regs
[(regno
- 34) / 2 + 1] = "FR";
5602 fputs ("\t.CALL ", asm_out_file
);
5603 for (i
= 0; i
< 4; i
++)
5608 fputc (',', asm_out_file
);
5609 fprintf (asm_out_file
, "ARGW%d=%s", i
, arg_regs
[i
]);
5612 fputc ('\n', asm_out_file
);
5615 /* Return the class of any secondary reload register that is needed to
5616 move IN into a register in class CLASS using mode MODE.
5618 Profiling has showed this routine and its descendants account for
5619 a significant amount of compile time (~7%). So it has been
5620 optimized to reduce redundant computations and eliminate useless
5623 It might be worthwhile to try and make this a leaf function too. */
5626 secondary_reload_class (enum reg_class
class, enum machine_mode mode
, rtx in
)
5628 int regno
, is_symbolic
;
5630 /* Trying to load a constant into a FP register during PIC code
5631 generation will require %r1 as a scratch register. */
5633 && GET_MODE_CLASS (mode
) == MODE_INT
5634 && FP_REG_CLASS_P (class)
5635 && (GET_CODE (in
) == CONST_INT
|| GET_CODE (in
) == CONST_DOUBLE
))
5638 /* Profiling showed the PA port spends about 1.3% of its compilation
5639 time in true_regnum from calls inside secondary_reload_class. */
5641 if (GET_CODE (in
) == REG
)
5644 if (regno
>= FIRST_PSEUDO_REGISTER
)
5645 regno
= true_regnum (in
);
5647 else if (GET_CODE (in
) == SUBREG
)
5648 regno
= true_regnum (in
);
5652 /* If we have something like (mem (mem (...)), we can safely assume the
5653 inner MEM will end up in a general register after reloading, so there's
5654 no need for a secondary reload. */
5655 if (GET_CODE (in
) == MEM
5656 && GET_CODE (XEXP (in
, 0)) == MEM
)
5659 /* Handle out of range displacement for integer mode loads/stores of
5661 if (((regno
>= FIRST_PSEUDO_REGISTER
|| regno
== -1)
5662 && GET_MODE_CLASS (mode
) == MODE_INT
5663 && FP_REG_CLASS_P (class))
5664 || (class == SHIFT_REGS
&& (regno
<= 0 || regno
>= 32)))
5665 return GENERAL_REGS
;
5667 /* A SAR<->FP register copy requires a secondary register (GPR) as
5668 well as secondary memory. */
5669 if (regno
>= 0 && regno
< FIRST_PSEUDO_REGISTER
5670 && ((REGNO_REG_CLASS (regno
) == SHIFT_REGS
&& FP_REG_CLASS_P (class))
5671 || (class == SHIFT_REGS
&& FP_REG_CLASS_P (REGNO_REG_CLASS (regno
)))))
5672 return GENERAL_REGS
;
5674 if (GET_CODE (in
) == HIGH
)
5677 /* Profiling has showed GCC spends about 2.6% of its compilation
5678 time in symbolic_operand from calls inside secondary_reload_class.
5680 We use an inline copy and only compute its return value once to avoid
5682 switch (GET_CODE (in
))
5692 is_symbolic
= ((GET_CODE (XEXP (tmp
, 0)) == SYMBOL_REF
5693 || GET_CODE (XEXP (tmp
, 0)) == LABEL_REF
)
5694 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
);
5704 && read_only_operand (in
, VOIDmode
))
5707 if (class != R1_REGS
&& is_symbolic
)
5713 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5714 by invisible reference. As a GCC extension, we also pass anything
5715 with a zero or variable size by reference.
5717 The 64-bit runtime does not describe passing any types by invisible
5718 reference. The internals of GCC can't currently handle passing
5719 empty structures, and zero or variable length arrays when they are
5720 not passed entirely on the stack or by reference. Thus, as a GCC
5721 extension, we pass these types by reference. The HP compiler doesn't
5722 support these types, so hopefully there shouldn't be any compatibility
5723 issues. This may have to be revisited when HP releases a C99 compiler
5724 or updates the ABI. */
5727 pa_pass_by_reference (CUMULATIVE_ARGS
*ca ATTRIBUTE_UNUSED
,
5728 enum machine_mode mode
, tree type
,
5729 bool named ATTRIBUTE_UNUSED
)
5734 size
= int_size_in_bytes (type
);
5736 size
= GET_MODE_SIZE (mode
);
5741 return size
<= 0 || size
> 8;
5745 function_arg_padding (enum machine_mode mode
, tree type
)
5748 || (TARGET_64BIT
&& type
&& AGGREGATE_TYPE_P (type
)))
5750 /* Return none if justification is not required. */
5752 && TREE_CODE (TYPE_SIZE (type
)) == INTEGER_CST
5753 && (int_size_in_bytes (type
) * BITS_PER_UNIT
) % PARM_BOUNDARY
== 0)
5756 /* The directions set here are ignored when a BLKmode argument larger
5757 than a word is placed in a register. Different code is used for
5758 the stack and registers. This makes it difficult to have a
5759 consistent data representation for both the stack and registers.
5760 For both runtimes, the justification and padding for arguments on
5761 the stack and in registers should be identical. */
5763 /* The 64-bit runtime specifies left justification for aggregates. */
5766 /* The 32-bit runtime architecture specifies right justification.
5767 When the argument is passed on the stack, the argument is padded
5768 with garbage on the left. The HP compiler pads with zeros. */
5772 if (GET_MODE_BITSIZE (mode
) < PARM_BOUNDARY
)
5779 /* Do what is necessary for `va_start'. We look at the current function
5780 to determine if stdargs or varargs is used and fill in an initial
5781 va_list. A pointer to this constructor is returned. */
5784 hppa_builtin_saveregs (void)
5787 tree fntype
= TREE_TYPE (current_function_decl
);
5788 int argadj
= ((!(TYPE_ARG_TYPES (fntype
) != 0
5789 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype
)))
5790 != void_type_node
)))
5791 ? UNITS_PER_WORD
: 0);
5794 offset
= plus_constant (current_function_arg_offset_rtx
, argadj
);
5796 offset
= current_function_arg_offset_rtx
;
5802 /* Adjust for varargs/stdarg differences. */
5804 offset
= plus_constant (current_function_arg_offset_rtx
, -argadj
);
5806 offset
= current_function_arg_offset_rtx
;
5808 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5809 from the incoming arg pointer and growing to larger addresses. */
5810 for (i
= 26, off
= -64; i
>= 19; i
--, off
+= 8)
5811 emit_move_insn (gen_rtx_MEM (word_mode
,
5812 plus_constant (arg_pointer_rtx
, off
)),
5813 gen_rtx_REG (word_mode
, i
));
5815 /* The incoming args pointer points just beyond the flushback area;
5816 normally this is not a serious concern. However, when we are doing
5817 varargs/stdargs we want to make the arg pointer point to the start
5818 of the incoming argument area. */
5819 emit_move_insn (virtual_incoming_args_rtx
,
5820 plus_constant (arg_pointer_rtx
, -64));
5822 /* Now return a pointer to the first anonymous argument. */
5823 return copy_to_reg (expand_binop (Pmode
, add_optab
,
5824 virtual_incoming_args_rtx
,
5825 offset
, 0, 0, OPTAB_LIB_WIDEN
));
5828 /* Store general registers on the stack. */
5829 dest
= gen_rtx_MEM (BLKmode
,
5830 plus_constant (current_function_internal_arg_pointer
,
5832 set_mem_alias_set (dest
, get_varargs_alias_set ());
5833 set_mem_align (dest
, BITS_PER_WORD
);
5834 move_block_from_reg (23, dest
, 4);
5836 /* move_block_from_reg will emit code to store the argument registers
5837 individually as scalar stores.
5839 However, other insns may later load from the same addresses for
5840 a structure load (passing a struct to a varargs routine).
5842 The alias code assumes that such aliasing can never happen, so we
5843 have to keep memory referencing insns from moving up beyond the
5844 last argument register store. So we emit a blockage insn here. */
5845 emit_insn (gen_blockage ());
5847 return copy_to_reg (expand_binop (Pmode
, add_optab
,
5848 current_function_internal_arg_pointer
,
5849 offset
, 0, 0, OPTAB_LIB_WIDEN
));
5853 hppa_va_start (tree valist
, rtx nextarg
)
5855 nextarg
= expand_builtin_saveregs ();
5856 std_expand_builtin_va_start (valist
, nextarg
);
5860 hppa_gimplify_va_arg_expr (tree valist
, tree type
, tree
*pre_p
, tree
*post_p
)
5864 /* Args grow upward. We can use the generic routines. */
5865 return std_gimplify_va_arg_expr (valist
, type
, pre_p
, post_p
);
5867 else /* !TARGET_64BIT */
5869 tree ptr
= build_pointer_type (type
);
5872 unsigned int size
, ofs
;
5875 indirect
= pass_by_reference (NULL
, TYPE_MODE (type
), type
, 0);
5879 ptr
= build_pointer_type (type
);
5881 size
= int_size_in_bytes (type
);
5882 valist_type
= TREE_TYPE (valist
);
5884 /* Args grow down. Not handled by generic routines. */
5886 u
= fold_convert (valist_type
, size_in_bytes (type
));
5887 t
= build (MINUS_EXPR
, valist_type
, valist
, u
);
5889 /* Copied from va-pa.h, but we probably don't need to align to
5890 word size, since we generate and preserve that invariant. */
5891 u
= build_int_cst (valist_type
, (size
> 4 ? -8 : -4));
5892 t
= build (BIT_AND_EXPR
, valist_type
, t
, u
);
5894 t
= build (MODIFY_EXPR
, valist_type
, valist
, t
);
5896 ofs
= (8 - size
) % 4;
5899 u
= fold_convert (valist_type
, size_int (ofs
));
5900 t
= build (PLUS_EXPR
, valist_type
, t
, u
);
5903 t
= fold_convert (ptr
, t
);
5904 t
= build_va_arg_indirect_ref (t
);
5907 t
= build_va_arg_indirect_ref (t
);
5913 /* True if MODE is valid for the target. By "valid", we mean able to
5914 be manipulated in non-trivial ways. In particular, this means all
5915 the arithmetic is supported.
5917 Currently, TImode is not valid as the HP 64-bit runtime documentation
5918 doesn't document the alignment and calling conventions for this type.
5919 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
5920 2 * BITS_PER_WORD isn't equal LONG_LONG_TYPE_SIZE. */
5923 pa_scalar_mode_supported_p (enum machine_mode mode
)
5925 int precision
= GET_MODE_PRECISION (mode
);
5927 switch (GET_MODE_CLASS (mode
))
5929 case MODE_PARTIAL_INT
:
5931 if (precision
== CHAR_TYPE_SIZE
)
5933 if (precision
== SHORT_TYPE_SIZE
)
5935 if (precision
== INT_TYPE_SIZE
)
5937 if (precision
== LONG_TYPE_SIZE
)
5939 if (precision
== LONG_LONG_TYPE_SIZE
)
5944 if (precision
== FLOAT_TYPE_SIZE
)
5946 if (precision
== DOUBLE_TYPE_SIZE
)
5948 if (precision
== LONG_DOUBLE_TYPE_SIZE
)
5957 /* This routine handles all the normal conditional branch sequences we
5958 might need to generate. It handles compare immediate vs compare
5959 register, nullification of delay slots, varying length branches,
5960 negated branches, and all combinations of the above. It returns the
5961 output appropriate to emit the branch corresponding to all given
5965 output_cbranch (rtx
*operands
, int nullify
, int length
, int negated
, rtx insn
)
5967 static char buf
[100];
5971 /* A conditional branch to the following instruction (e.g. the delay slot)
5972 is asking for a disaster. This can happen when not optimizing and
5973 when jump optimization fails.
5975 While it is usually safe to emit nothing, this can fail if the
5976 preceding instruction is a nullified branch with an empty delay
5977 slot and the same branch target as this branch. We could check
5978 for this but jump optimization should eliminate nop jumps. It
5979 is always safe to emit a nop. */
5980 if (next_real_insn (JUMP_LABEL (insn
)) == next_real_insn (insn
))
5983 /* The doubleword form of the cmpib instruction doesn't have the LEU
5984 and GTU conditions while the cmpb instruction does. Since we accept
5985 zero for cmpb, we must ensure that we use cmpb for the comparison. */
5986 if (GET_MODE (operands
[1]) == DImode
&& operands
[2] == const0_rtx
)
5987 operands
[2] = gen_rtx_REG (DImode
, 0);
5988 if (GET_MODE (operands
[2]) == DImode
&& operands
[1] == const0_rtx
)
5989 operands
[1] = gen_rtx_REG (DImode
, 0);
5991 /* If this is a long branch with its delay slot unfilled, set `nullify'
5992 as it can nullify the delay slot and save a nop. */
5993 if (length
== 8 && dbr_sequence_length () == 0)
5996 /* If this is a short forward conditional branch which did not get
5997 its delay slot filled, the delay slot can still be nullified. */
5998 if (! nullify
&& length
== 4 && dbr_sequence_length () == 0)
5999 nullify
= forward_branch_p (insn
);
6001 /* A forward branch over a single nullified insn can be done with a
6002 comclr instruction. This avoids a single cycle penalty due to
6003 mis-predicted branch if we fall through (branch not taken). */
6005 && next_real_insn (insn
) != 0
6006 && get_attr_length (next_real_insn (insn
)) == 4
6007 && JUMP_LABEL (insn
) == next_nonnote_insn (next_real_insn (insn
))
6013 /* All short conditional branches except backwards with an unfilled
6017 strcpy (buf
, "{com%I2clr,|cmp%I2clr,}");
6019 strcpy (buf
, "{com%I2b,|cmp%I2b,}");
6020 if (GET_MODE (operands
[1]) == DImode
)
6023 strcat (buf
, "%B3");
6025 strcat (buf
, "%S3");
6027 strcat (buf
, " %2,%r1,%%r0");
6029 strcat (buf
, ",n %2,%r1,%0");
6031 strcat (buf
, " %2,%r1,%0");
6034 /* All long conditionals. Note a short backward branch with an
6035 unfilled delay slot is treated just like a long backward branch
6036 with an unfilled delay slot. */
6038 /* Handle weird backwards branch with a filled delay slot
6039 with is nullified. */
6040 if (dbr_sequence_length () != 0
6041 && ! forward_branch_p (insn
)
6044 strcpy (buf
, "{com%I2b,|cmp%I2b,}");
6045 if (GET_MODE (operands
[1]) == DImode
)
6048 strcat (buf
, "%S3");
6050 strcat (buf
, "%B3");
6051 strcat (buf
, ",n %2,%r1,.+12\n\tb %0");
6053 /* Handle short backwards branch with an unfilled delay slot.
6054 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6055 taken and untaken branches. */
6056 else if (dbr_sequence_length () == 0
6057 && ! forward_branch_p (insn
)
6058 && INSN_ADDRESSES_SET_P ()
6059 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn
)))
6060 - INSN_ADDRESSES (INSN_UID (insn
)) - 8))
6062 strcpy (buf
, "{com%I2b,|cmp%I2b,}");
6063 if (GET_MODE (operands
[1]) == DImode
)
6066 strcat (buf
, "%B3 %2,%r1,%0%#");
6068 strcat (buf
, "%S3 %2,%r1,%0%#");
6072 strcpy (buf
, "{com%I2clr,|cmp%I2clr,}");
6073 if (GET_MODE (operands
[1]) == DImode
)
6076 strcat (buf
, "%S3");
6078 strcat (buf
, "%B3");
6080 strcat (buf
, " %2,%r1,%%r0\n\tb,n %0");
6082 strcat (buf
, " %2,%r1,%%r0\n\tb %0");
6088 xoperands
[0] = operands
[0];
6089 xoperands
[1] = operands
[1];
6090 xoperands
[2] = operands
[2];
6091 xoperands
[3] = operands
[3];
6093 /* The reversed conditional branch must branch over one additional
6094 instruction if the delay slot is filled. If the delay slot
6095 is empty, the instruction after the reversed condition branch
6096 must be nullified. */
6097 nullify
= dbr_sequence_length () == 0;
6098 xoperands
[4] = nullify
? GEN_INT (length
) : GEN_INT (length
+ 4);
6100 /* Create a reversed conditional branch which branches around
6101 the following insns. */
6102 if (GET_MODE (operands
[1]) != DImode
)
6108 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6111 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6117 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6120 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6129 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6132 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6138 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6141 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6145 output_asm_insn (buf
, xoperands
);
6146 return output_lbranch (operands
[0], insn
);
6154 /* This routine handles long unconditional branches that exceed the
6155 maximum range of a simple branch instruction. */
6158 output_lbranch (rtx dest
, rtx insn
)
6162 xoperands
[0] = dest
;
6164 /* First, free up the delay slot. */
6165 if (dbr_sequence_length () != 0)
6167 /* We can't handle a jump in the delay slot. */
6168 gcc_assert (GET_CODE (NEXT_INSN (insn
)) != JUMP_INSN
);
6170 final_scan_insn (NEXT_INSN (insn
), asm_out_file
,
6173 /* Now delete the delay insn. */
6174 PUT_CODE (NEXT_INSN (insn
), NOTE
);
6175 NOTE_LINE_NUMBER (NEXT_INSN (insn
)) = NOTE_INSN_DELETED
;
6176 NOTE_SOURCE_FILE (NEXT_INSN (insn
)) = 0;
6179 /* Output an insn to save %r1. The runtime documentation doesn't
6180 specify whether the "Clean Up" slot in the callers frame can
6181 be clobbered by the callee. It isn't copied by HP's builtin
6182 alloca, so this suggests that it can be clobbered if necessary.
6183 The "Static Link" location is copied by HP builtin alloca, so
6184 we avoid using it. Using the cleanup slot might be a problem
6185 if we have to interoperate with languages that pass cleanup
6186 information. However, it should be possible to handle these
6187 situations with GCC's asm feature.
6189 The "Current RP" slot is reserved for the called procedure, so
6190 we try to use it when we don't have a frame of our own. It's
6191 rather unlikely that we won't have a frame when we need to emit
6194 Really the way to go long term is a register scavenger; goto
6195 the target of the jump and find a register which we can use
6196 as a scratch to hold the value in %r1. Then, we wouldn't have
6197 to free up the delay slot or clobber a slot that may be needed
6198 for other purposes. */
6201 if (actual_fsize
== 0 && !regs_ever_live
[2])
6202 /* Use the return pointer slot in the frame marker. */
6203 output_asm_insn ("std %%r1,-16(%%r30)", xoperands
);
6205 /* Use the slot at -40 in the frame marker since HP builtin
6206 alloca doesn't copy it. */
6207 output_asm_insn ("std %%r1,-40(%%r30)", xoperands
);
6211 if (actual_fsize
== 0 && !regs_ever_live
[2])
6212 /* Use the return pointer slot in the frame marker. */
6213 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands
);
6215 /* Use the "Clean Up" slot in the frame marker. In GCC,
6216 the only other use of this location is for copying a
6217 floating point double argument from a floating-point
6218 register to two general registers. The copy is done
6219 as an "atomic" operation when outputting a call, so it
6220 won't interfere with our using the location here. */
6221 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands
);
6224 if (TARGET_PORTABLE_RUNTIME
)
6226 output_asm_insn ("ldil L'%0,%%r1", xoperands
);
6227 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands
);
6228 output_asm_insn ("bv %%r0(%%r1)", xoperands
);
6232 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands
);
6233 if (TARGET_SOM
|| !TARGET_GAS
)
6235 xoperands
[1] = gen_label_rtx ();
6236 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands
);
6237 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
6238 CODE_LABEL_NUMBER (xoperands
[1]));
6239 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands
);
6243 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands
);
6244 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands
);
6246 output_asm_insn ("bv %%r0(%%r1)", xoperands
);
6249 /* Now output a very long branch to the original target. */
6250 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands
);
6252 /* Now restore the value of %r1 in the delay slot. */
6255 if (actual_fsize
== 0 && !regs_ever_live
[2])
6256 return "ldd -16(%%r30),%%r1";
6258 return "ldd -40(%%r30),%%r1";
6262 if (actual_fsize
== 0 && !regs_ever_live
[2])
6263 return "ldw -20(%%r30),%%r1";
6265 return "ldw -12(%%r30),%%r1";
/* This routine handles all the branch-on-bit conditional branch sequences we
   might need to generate.  It handles nullification of delay slots,
   varying length branches, negated branches and all combinations of the
   above.  It returns the appropriate output template to emit the branch.  */

const char *
output_bb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
	   int negated, rtx insn, int which)
{
  static char buf[100];
  int useskip = 0;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */

  if (length == 4
      && next_real_insn (insn) != 0
      && get_attr_length (next_real_insn (insn)) == 4
      && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
      && nullify)
    useskip = 1;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
	 delay slot.  */
      case 4:
	if (useskip)
	  strcpy (buf, "{extrs,|extrw,s,}");
	else
	  strcpy (buf, "bb,");
	if (useskip && GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "extrd,s,*");
	else if (GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "bb,*");
	if ((which == 0 && negated)
	     || (which == 1 && ! negated))
	  strcat (buf, ">=");
	else
	  strcat (buf, "<");
	if (useskip)
	  strcat (buf, " %0,%1,1,%%r0");
	else if (nullify && negated)
	  strcat (buf, ",n %0,%1,%3");
	else if (nullify && ! negated)
	  strcat (buf, ",n %0,%1,%2");
	else if (! nullify && negated)
	  strcat (buf, "%0,%1,%3");
	else if (! nullify && ! negated)
	  strcat (buf, " %0,%1,%2");
	break;

      /* All long conditionals.  Note a short backward branch with an
	 unfilled delay slot is treated just like a long backward branch
	 with an unfilled delay slot.  */
      case 8:
	/* Handle weird backwards branch with a filled delay slot
	   which is nullified.  */
	if (dbr_sequence_length () != 0
	    && ! forward_branch_p (insn)
	    && nullify)
	  {
	    strcpy (buf, "bb,");
	    if (GET_MODE (operands[0]) == DImode)
	      strcat (buf, "*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, "<");
	    else
	      strcat (buf, ">=");
	    if (negated)
	      strcat (buf, ",n %0,%1,.+12\n\tb %3");
	    else
	      strcat (buf, ",n %0,%1,.+12\n\tb %2");
	  }
	/* Handle short backwards branch with an unfilled delay slot.
	   Using a bb;nop rather than extrs;bl saves 1 cycle for both
	   taken and untaken branches.  */
	else if (dbr_sequence_length () == 0
		 && ! forward_branch_p (insn)
		 && INSN_ADDRESSES_SET_P ()
		 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	  {
	    strcpy (buf, "bb,");
	    if (GET_MODE (operands[0]) == DImode)
	      strcat (buf, "*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, ">=");
	    else
	      strcat (buf, "<");
	    if (negated)
	      strcat (buf, " %0,%1,%3%#");
	    else
	      strcat (buf, " %0,%1,%2%#");
	  }
	else
	  {
	    strcpy (buf, "{extrs,|extrw,s,}");
	    if (GET_MODE (operands[0]) == DImode)
	      strcpy (buf, "extrd,s,*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, "<");
	    else
	      strcat (buf, ">=");
	    if (nullify && negated)
	      strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
	    else if (nullify && ! negated)
	      strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
	    else if (negated)
	      strcat (buf, " %0,%1,1,%%r0\n\tb %3");
	    else
	      strcat (buf, " %0,%1,1,%%r0\n\tb %2");
	  }
	break;

      default:
	gcc_unreachable ();
    }
  return buf;
}
/* This routine handles all the branch-on-variable-bit conditional branch
   sequences we might need to generate.  It handles nullification of delay
   slots, varying length branches, negated branches and all combinations
   of the above.  It returns the appropriate output template to emit the
   branch.  */

const char *
output_bvb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
	    int negated, rtx insn, int which)
{
  static char buf[100];
  int useskip = 0;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */

  if (length == 4
      && next_real_insn (insn) != 0
      && get_attr_length (next_real_insn (insn)) == 4
      && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
      && nullify)
    useskip = 1;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
	 delay slot.  */
      case 4:
	if (useskip)
	  strcpy (buf, "{vextrs,|extrw,s,}");
	else
	  strcpy (buf, "{bvb,|bb,}");
	if (useskip && GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "extrd,s,*");
	else if (GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "bb,*");
	if ((which == 0 && negated)
	     || (which == 1 && ! negated))
	  strcat (buf, ">=");
	else
	  strcat (buf, "<");
	if (useskip)
	  strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
	else if (nullify && negated)
	  strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
	else if (nullify && ! negated)
	  strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
	else if (! nullify && negated)
	  strcat (buf, "{%0,%3|%0,%%sar,%3}");
	else if (! nullify && ! negated)
	  strcat (buf, "{ %0,%2| %0,%%sar,%2}");
	break;

      /* All long conditionals.  Note a short backward branch with an
	 unfilled delay slot is treated just like a long backward branch
	 with an unfilled delay slot.  */
      case 8:
	/* Handle weird backwards branch with a filled delay slot
	   which is nullified.  */
	if (dbr_sequence_length () != 0
	    && ! forward_branch_p (insn)
	    && nullify)
	  {
	    strcpy (buf, "{bvb,|bb,}");
	    if (GET_MODE (operands[0]) == DImode)
	      strcat (buf, "*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, "<");
	    else
	      strcat (buf, ">=");
	    if (negated)
	      strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
	    else
	      strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
	  }
	/* Handle short backwards branch with an unfilled delay slot.
	   Using a bb;nop rather than extrs;bl saves 1 cycle for both
	   taken and untaken branches.  */
	else if (dbr_sequence_length () == 0
		 && ! forward_branch_p (insn)
		 && INSN_ADDRESSES_SET_P ()
		 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	  {
	    strcpy (buf, "{bvb,|bb,}");
	    if (GET_MODE (operands[0]) == DImode)
	      strcat (buf, "*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, ">=");
	    else
	      strcat (buf, "<");
	    if (negated)
	      strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
	    else
	      strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
	  }
	else
	  {
	    strcpy (buf, "{vextrs,|extrw,s,}");
	    if (GET_MODE (operands[0]) == DImode)
	      strcpy (buf, "extrd,s,*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, "<");
	    else
	      strcat (buf, ">=");
	    if (nullify && negated)
	      strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
	    else if (nullify && ! negated)
	      strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
	    else if (negated)
	      strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
	    else
	      strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
	  }
	break;

      default:
	gcc_unreachable ();
    }
  return buf;
}
/* Return the output template for emitting a dbra type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */

const char *
output_dbra (rtx *operands, rtx insn, int which_alternative)
{
  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
    {
      if (which_alternative == 0)
	return "ldo %1(%0),%0";
      else if (which_alternative == 1)
	{
	  output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
	  output_asm_insn ("ldw -16(%%r30),%4", operands);
	  output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
	  return "{fldws|fldw} -16(%%r30),%0";
	}
      else
	{
	  output_asm_insn ("ldw %0,%4", operands);
	  return "ldo %1(%4),%4\n\tstw %4,%0";
	}
    }

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int length = get_attr_length (insn);

      /* If this is a long branch with its delay slot unfilled, set `nullify'
	 as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
	nullify = 1;

      /* If this is a short forward conditional branch which did not get
	 its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
	nullify = forward_branch_p (insn);

      switch (length)
	{
	case 4:
	  if (nullify)
	    return "addib,%C2,n %1,%0,%3";
	  else
	    return "addib,%C2 %1,%0,%3";

	case 8:
	  /* Handle weird backwards branch with a filled delay slot
	     which is nullified.  */
	  if (dbr_sequence_length () != 0
	      && ! forward_branch_p (insn)
	      && nullify)
	    return "addib,%N2,n %1,%0,.+12\n\tb %3";
	  /* Handle short backwards branch with an unfilled delay slot.
	     Using a addb;nop rather than addi;bl saves 1 cycle for both
	     taken and untaken branches.  */
	  else if (dbr_sequence_length () == 0
		   && ! forward_branch_p (insn)
		   && INSN_ADDRESSES_SET_P ()
		   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				      - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	    return "addib,%C2 %1,%0,%3%#";

	  /* Handle normal cases.  */
	  if (nullify)
	    return "addi,%N2 %1,%0,%0\n\tb,n %3";
	  else
	    return "addi,%N2 %1,%0,%0\n\tb %3";

	default:
	  gcc_unreachable ();
	}
    }
  /* Deal with gross reload from FP register case.  */
  else if (which_alternative == 1)
    {
      /* Move loop counter from FP register to MEM then into a GR,
	 increment the GR, store the GR into MEM, and finally reload
	 the FP register from MEM from within the branch's delay slot.  */
      output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
		       operands);
      output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
      if (get_attr_length (insn) == 24)
	return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else
	return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
    }
  /* Deal with gross reload from memory case.  */
  else
    {
      /* Reload loop counter from memory, the store back to memory
	 happens in the branch's delay slot.  */
      output_asm_insn ("ldw %0,%4", operands);
      if (get_attr_length (insn) == 12)
	return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
      else
	return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
    }
}
/* Return the output template for emitting a movb type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */

const char *
output_movb (rtx *operands, rtx insn, int which_alternative,
	     int reverse_comparison)
{
  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
    {
      if (which_alternative == 0)
	return "copy %1,%0";
      else if (which_alternative == 1)
	{
	  output_asm_insn ("stw %1,-16(%%r30)", operands);
	  return "{fldws|fldw} -16(%%r30),%0";
	}
      else if (which_alternative == 2)
	return "stw %1,%0";
      else
	return "mtsar %r1";
    }

  /* Support the second variant.  */
  if (reverse_comparison)
    PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int length = get_attr_length (insn);

      /* If this is a long branch with its delay slot unfilled, set `nullify'
	 as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
	nullify = 1;

      /* If this is a short forward conditional branch which did not get
	 its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
	nullify = forward_branch_p (insn);

      switch (length)
	{
	case 4:
	  if (nullify)
	    return "movb,%C2,n %1,%0,%3";
	  else
	    return "movb,%C2 %1,%0,%3";

	case 8:
	  /* Handle weird backwards branch with a filled delay slot
	     which is nullified.  */
	  if (dbr_sequence_length () != 0
	      && ! forward_branch_p (insn)
	      && nullify)
	    return "movb,%N2,n %1,%0,.+12\n\tb %3";

	  /* Handle short backwards branch with an unfilled delay slot.
	     Using a movb;nop rather than or;bl saves 1 cycle for both
	     taken and untaken branches.  */
	  else if (dbr_sequence_length () == 0
		   && ! forward_branch_p (insn)
		   && INSN_ADDRESSES_SET_P ()
		   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				      - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	    return "movb,%C2 %1,%0,%3%#";
	  /* Handle normal cases.  */
	  if (nullify)
	    return "or,%N2 %1,%%r0,%0\n\tb,n %3";
	  else
	    return "or,%N2 %1,%%r0,%0\n\tb %3";

	default:
	  gcc_unreachable ();
	}
    }
  /* Deal with gross reload from FP register case.  */
  else if (which_alternative == 1)
    {
      /* Move loop counter from FP register to MEM then into a GR,
	 increment the GR, store the GR into MEM, and finally reload
	 the FP register from MEM from within the branch's delay slot.  */
      output_asm_insn ("stw %1,-16(%%r30)", operands);
      if (get_attr_length (insn) == 12)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
    }
  /* Deal with gross reload from memory case.  */
  else if (which_alternative == 2)
    {
      /* Reload loop counter from memory, the store back to memory
	 happens in the branch's delay slot.  */
      if (get_attr_length (insn) == 8)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
      else
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
    }
  /* Handle SAR as a destination.  */
  else
    {
      if (get_attr_length (insn) == 8)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
      else
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
    }
}
/* Copy any FP arguments in INSN into integer registers.  */

static void
copy_fp_args (rtx insn)
{
  rtx link;
  rtx xoperands[2];

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	  && GET_CODE (XEXP (use, 0)) == REG
	  && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
	{
	  /* Copy the FP register into an integer register via memory.  */
	  if (arg_mode == SFmode)
	    {
	      xoperands[0] = XEXP (use, 0);
	      xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
	      output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
	      output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
	    }
	  else
	    {
	      xoperands[0] = XEXP (use, 0);
	      xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
	      output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
	      output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
	      output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
	    }
	}
    }
}
/* Compute length of the FP argument copy sequence for INSN.  */

static int
length_fp_args (rtx insn)
{
  int length = 0;
  rtx link;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	  && GET_CODE (XEXP (use, 0)) == REG
	  && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
	{
	  if (arg_mode == SFmode)
	    length += 8;
	  else
	    length += 12;
	}
    }

  return length;
}
/* Return the attribute length for the millicode call instruction INSN.
   The length must match the code generated by output_millicode_call.
   We include the delay slot in the returned length as it is better to
   over estimate the length than to under estimate it.  */

int
attr_length_millicode_call (rtx insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }

  if (TARGET_64BIT)
    {
      if (!TARGET_LONG_CALLS && distance < 7600000)
	return 8;

      return 20;
    }
  else if (TARGET_PORTABLE_RUNTIME)
    return 24;
  else
    {
      if (!TARGET_LONG_CALLS && distance < 240000)
	return 8;

      if (TARGET_LONG_ABS_CALL && !flag_pic)
	return 12;

      return 24;
    }
}
/* INSN is a function call.  It may have an unconditional jump
   in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
output_millicode_call (rtx insn, rtx call_dest)
{
  int attr_length = get_attr_length (insn);
  int seq_length = dbr_sequence_length ();
  int distance;
  rtx seq_insn;
  rtx xoperands[3];

  xoperands[0] = call_dest;
  xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);

  /* Handle the common case where we are sure that the branch will
     reach the beginning of the $CODE$ subspace.  The within reach
     form of the $$sh_func_adrs call has a length of 28.  Because
     it has an attribute type of multi, it never has a nonzero
     sequence length.  The length of the $$sh_func_adrs is the same
     as certain out of reach PIC calls to other routines.  */
  if (!TARGET_LONG_CALLS
      && ((seq_length == 0
	   && (attr_length == 12
	       || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
	  || (seq_length != 0 && attr_length == 8)))
    {
      output_asm_insn ("{bl|b,l} %0,%2", xoperands);
    }
  else
    {
      if (TARGET_64BIT)
	{
	  /* It might seem that one insn could be saved by accessing
	     the millicode function using the linkage table.  However,
	     this doesn't work in shared libraries and other dynamically
	     loaded objects.  Using a pc-relative sequence also avoids
	     problems related to the implicit use of the gp register.  */
	  output_asm_insn ("b,l .+8,%%r1", xoperands);

	  if (TARGET_GAS)
	    {
	      output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
	      output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
	    }
	  else
	    {
	      xoperands[1] = gen_label_rtx ();
	      output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
	      (*targetm.asm_out.internal_label) (asm_out_file, "L",
						 CODE_LABEL_NUMBER (xoperands[1]));
	      output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
	    }

	  output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
	}
      else if (TARGET_PORTABLE_RUNTIME)
	{
	  /* Pure portable runtime doesn't allow be/ble; we also don't
	     have PIC support in the assembler/linker, so this sequence
	     is needed.  */

	  /* Get the address of our target into %r1.  */
	  output_asm_insn ("ldil L'%0,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);

	  /* Get our return address into %r31.  */
	  output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
	  output_asm_insn ("addi 8,%%r31,%%r31", xoperands);

	  /* Jump to our target address in %r1.  */
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	}
      else if (!flag_pic)
	{
	  output_asm_insn ("ldil L'%0,%%r1", xoperands);
	  if (TARGET_PA_20)
	    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
	  else
	    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
	}
      else
	{
	  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
	  output_asm_insn ("addi 16,%%r1,%%r31", xoperands);

	  if (TARGET_SOM || !TARGET_GAS)
	    {
	      /* The HP assembler can generate relocations for the
		 difference of two symbols.  GAS can do this for a
		 millicode symbol but not an arbitrary external
		 symbol when generating SOM output.  */
	      xoperands[1] = gen_label_rtx ();
	      (*targetm.asm_out.internal_label) (asm_out_file, "L",
						 CODE_LABEL_NUMBER (xoperands[1]));
	      output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
	      output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
	    }
	  else
	    {
	      output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
	      output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
			       xoperands);
	    }

	  /* Jump to our target address in %r1.  */
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	}
    }

  if (seq_length == 0)
    output_asm_insn ("nop", xoperands);

  /* We are done if there isn't a jump in the delay slot.  */
  if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
    return "";

  /* This call has an unconditional jump in its delay slot.  */
  xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);

  /* See if the return address can be adjusted.  Use the containing
     sequence insn's address.  */
  if (INSN_ADDRESSES_SET_P ())
    {
      seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
      distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
		  - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);

      if (VAL_14_BITS_P (distance))
	{
	  xoperands[1] = gen_label_rtx ();
	  output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
	  (*targetm.asm_out.internal_label) (asm_out_file, "L",
					     CODE_LABEL_NUMBER (xoperands[1]));
	}
      else
	/* ??? This branch may not reach its target.  */
	output_asm_insn ("nop\n\tb,n %0", xoperands);
    }
  else
    /* ??? This branch may not reach its target.  */
    output_asm_insn ("nop\n\tb,n %0", xoperands);

  /* Delete the jump.  */
  PUT_CODE (NEXT_INSN (insn), NOTE);
  NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
  NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;

  return "";
}
/* Return the attribute length of the call instruction INSN.  The SIBCALL
   flag indicates whether INSN is a regular call or a sibling call.  The
   length returned must be longer than the code actually generated by
   output_call.  Since branch shortening is done before delay branch
   sequencing, there is no way to determine whether or not the delay
   slot will be filled during branch shortening.  Even when the delay
   slot is filled, we may have to add a nop if the delay slot contains
   a branch that can't reach its target.  Thus, we always have to include
   the delay slot in the length estimate.  This used to be done in
   pa_adjust_insn_length but we do it here now as some sequences always
   fill the delay slot and we can save four bytes in the estimate for
   these sequences.  */

int
attr_length_call (rtx insn, int sibcall)
{
  int local_call;
  rtx call_dest;
  tree call_decl;
  int length = 0;
  rtx pat = PATTERN (insn);
  unsigned long distance = -1;

  if (INSN_ADDRESSES_SET_P ())
    {
      unsigned long total;

      total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }

  /* Determine if this is a local call.  */
  if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
    call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
  else
    call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);

  call_decl = SYMBOL_REF_DECL (call_dest);
  local_call = call_decl && (*targetm.binds_local_p) (call_decl);

  /* pc-relative branch.  */
  if (!TARGET_LONG_CALLS
      && ((TARGET_PA_20 && !sibcall && distance < 7600000)
	  || distance < 240000))
    length += 8;

  /* 64-bit plabel sequence.  */
  else if (TARGET_64BIT && !local_call)
    length += sibcall ? 28 : 24;

  /* non-pic long absolute branch sequence.  */
  else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
    length += 12;

  /* long pc-relative branch sequence.  */
  else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
	   || (TARGET_64BIT && !TARGET_GAS)
	   || (TARGET_GAS && !TARGET_SOM
	       && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
    {
      length += 20;

      if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
	length += 8;
    }

  /* 32-bit plabel sequence.  */
  else
    {
      length += 32;

      if (TARGET_SOM)
	length += length_fp_args (insn);

      if (flag_pic)
	length += 4;

      if (!TARGET_PA_20)
	{
	  if (!sibcall)
	    length += 8;

	  if (!TARGET_NO_SPACE_REGS)
	    length += 8;
	}
    }

  return length;
}
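/* Note that the 32-bit plabel estimate also charges for the FP argument
   copies counted by length_fp_args when using the SOM calling
   conventions, since output_call emits those copies inline for this
   sequence.  */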
/* INSN is a function call.  It may have an unconditional jump
   in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
output_call (rtx insn, rtx call_dest, int sibcall)
{
  int delay_insn_deleted = 0;
  int delay_slot_filled = 0;
  int seq_length = dbr_sequence_length ();
  tree call_decl = SYMBOL_REF_DECL (call_dest);
  int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
  rtx xoperands[2];

  xoperands[0] = call_dest;

  /* Handle the common case where we're sure that the branch will reach
     the beginning of the "$CODE$" subspace.  This is the beginning of
     the current function if we are in a named section.  */
  if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
    {
      xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
      output_asm_insn ("{bl|b,l} %0,%1", xoperands);
    }
  else
    {
      if (TARGET_64BIT && !local_call)
	{
	  /* ??? As far as I can tell, the HP linker doesn't support the
	     long pc-relative sequence described in the 64-bit runtime
	     architecture.  So, we use a slightly longer indirect call.  */
	  xoperands[0] = get_deferred_plabel (call_dest);
	  xoperands[1] = gen_label_rtx ();

	  /* If this isn't a sibcall, we put the load of %r27 into the
	     delay slot.  We can't do this in a sibcall as we don't
	     have a second call-clobbered scratch register available.  */
	  if (seq_length != 0
	      && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
	      && !sibcall)
	    {
	      final_scan_insn (NEXT_INSN (insn), asm_out_file,
			       optimize, 0, NULL);

	      /* Now delete the delay insn.  */
	      PUT_CODE (NEXT_INSN (insn), NOTE);
	      NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
	      NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
	      delay_insn_deleted = 1;
	    }

	  output_asm_insn ("addil LT'%0,%%r27", xoperands);
	  output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
	  output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);

	  if (sibcall)
	    {
	      output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
	      output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
	      output_asm_insn ("bve (%%r1)", xoperands);
	    }
	  else
	    {
	      output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
	      output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
	      output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
	      delay_slot_filled = 1;
	    }
	}
      else
	{
	  int indirect_call = 0;

	  /* Emit a long call.  There are several different sequences
	     of increasing length and complexity.  In most cases,
	     they don't allow an instruction in the delay slot.  */
	  if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
	      && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
	      && !(TARGET_GAS && !TARGET_SOM
		   && (TARGET_LONG_PIC_PCREL_CALL || local_call))
	      && !TARGET_64BIT)
	    indirect_call = 1;

	  if (seq_length != 0
	      && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
	      && !sibcall
	      && (!TARGET_PA_20 || indirect_call))
	    {
	      /* A non-jump insn in the delay slot.  By definition we can
		 emit this insn before the call (and in fact before argument
		 relocating).  */
	      final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
			       NULL);

	      /* Now delete the delay insn.  */
	      PUT_CODE (NEXT_INSN (insn), NOTE);
	      NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
	      NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
	      delay_insn_deleted = 1;
	    }

	  if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
	    {
	      /* This is the best sequence for making long calls in
		 non-pic code.  Unfortunately, GNU ld doesn't provide
		 the stub needed for external calls, and GAS's support
		 for this with the SOM linker is buggy.  It is safe
		 to use this for local calls.  */
	      output_asm_insn ("ldil L'%0,%%r1", xoperands);
	      if (sibcall)
		output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
	      else
		{
		  if (TARGET_PA_20)
		    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
				     xoperands);
		  else
		    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);

		  output_asm_insn ("copy %%r31,%%r2", xoperands);
		  delay_slot_filled = 1;
		}
	    }
	  else
	    {
	      if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
		  || (TARGET_64BIT && !TARGET_GAS))
		{
		  /* The HP assembler and linker can handle relocations
		     for the difference of two symbols.  GAS and the HP
		     linker can't do this when one of the symbols is
		     external.  */
		  xoperands[1] = gen_label_rtx ();
		  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
		  output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
		  (*targetm.asm_out.internal_label) (asm_out_file, "L",
					     CODE_LABEL_NUMBER (xoperands[1]));
		  output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
		}
	      else if (TARGET_GAS && !TARGET_SOM
		       && (TARGET_LONG_PIC_PCREL_CALL || local_call))
		{
		  /* GAS currently can't generate the relocations that
		     are needed for the SOM linker under HP-UX using this
		     sequence.  The GNU linker doesn't generate the stubs
		     that are needed for external calls on TARGET_ELF32
		     with this sequence.  For now, we have to use a
		     longer plabel sequence when using GAS.  */
		  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
		  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
				   xoperands);
		  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
				   xoperands);
		}
	      else
		{
		  /* Emit a long plabel-based call sequence.  This is
		     essentially an inline implementation of $$dyncall.
		     We don't actually try to call $$dyncall as this is
		     as difficult as calling the function itself.  */
		  xoperands[0] = get_deferred_plabel (call_dest);
		  xoperands[1] = gen_label_rtx ();

		  /* Since the call is indirect, FP arguments in registers
		     need to be copied to the general registers.  Then, the
		     argument relocation stub will copy them back.  */
		  if (TARGET_SOM)
		    copy_fp_args (insn);

		  if (flag_pic)
		    {
		      output_asm_insn ("addil LT'%0,%%r19", xoperands);
		      output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
		      output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
		    }
		  else
		    {
		      output_asm_insn ("addil LR'%0-$global$,%%r27",
				       xoperands);
		      output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
				       xoperands);
		    }

		  output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
		  output_asm_insn ("depi 0,31,2,%%r1", xoperands);
		  output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
		  output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);

		  if (!sibcall && !TARGET_PA_20)
		    {
		      output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
		      if (TARGET_NO_SPACE_REGS)
			output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
		      else
			output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
		    }
		}

	      if (TARGET_PA_20)
		{
		  if (sibcall)
		    output_asm_insn ("bve (%%r1)", xoperands);
		  else
		    {
		      if (indirect_call)
			{
			  output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
			  output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
			  delay_slot_filled = 1;
			}
		      else
			output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
		    }
		}
	      else
		{
		  if (!TARGET_NO_SPACE_REGS)
		    output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
				     xoperands);

		  if (sibcall)
		    {
		      if (TARGET_NO_SPACE_REGS)
			output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
		      else
			output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
		    }
		  else
		    {
		      if (TARGET_NO_SPACE_REGS)
			output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
		      else
			output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);

		      if (indirect_call)
			output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
		      else
			output_asm_insn ("copy %%r31,%%r2", xoperands);
		      delay_slot_filled = 1;
		    }
		}
	    }
	}
    }

  if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
    output_asm_insn ("nop", xoperands);

  /* We are done if there isn't a jump in the delay slot.  */
  if (seq_length == 0
      || delay_insn_deleted
      || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
    return "";

  /* A sibcall should never have a branch in the delay slot.  */
  gcc_assert (!sibcall);

  /* This call has an unconditional jump in its delay slot.  */
  xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);

  if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
    {
      /* See if the return address can be adjusted.  Use the containing
	 sequence insn's address.  */
      rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
      int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
		      - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);

      if (VAL_14_BITS_P (distance))
	{
	  xoperands[1] = gen_label_rtx ();
	  output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
	  (*targetm.asm_out.internal_label) (asm_out_file, "L",
					     CODE_LABEL_NUMBER (xoperands[1]));
	}
      else
	output_asm_insn ("nop\n\tb,n %0", xoperands);
    }
  else
    output_asm_insn ("b,n %0", xoperands);

  /* Delete the jump.  */
  PUT_CODE (NEXT_INSN (insn), NOTE);
  NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
  NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;

  return "";
}
/* Return the attribute length of the indirect call instruction INSN.
   The length must match the code generated by output_indirect_call.
   The returned length includes the delay slot.  Currently, the delay
   slot of an indirect call sequence is not exposed and it is used by
   the sequence itself.  */

int
attr_length_indirect_call (rtx insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }

  if (TARGET_64BIT)
    return 12;

  if (TARGET_FAST_INDIRECT_CALLS
      || (!TARGET_PORTABLE_RUNTIME
	  && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
	      || distance < 240000)))
    return 8;

  if (flag_pic)
    return 24;

  if (TARGET_PORTABLE_RUNTIME)
    return 20;

  /* Out of reach, can use ble.  */
  return 12;
}
const char *
output_indirect_call (rtx insn, rtx call_dest)
{
  rtx xoperands[1];

  if (TARGET_64BIT)
    {
      xoperands[0] = call_dest;
      output_asm_insn ("ldd 16(%0),%%r2", xoperands);
      output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
      return "";
    }

  /* First the special case for kernels, level 0 systems, etc.  */
  if (TARGET_FAST_INDIRECT_CALLS)
    return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";

  /* Now the normal case -- we can reach $$dyncall directly or
     we're sure that we can get there via a long-branch stub.

     No need to check target flags as the length uniquely identifies
     the remaining cases.  */
  if (attr_length_indirect_call (insn) == 8)
    {
      /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
	 $$dyncall.  Since BLE uses %r31 as the link register, the 22-bit
	 variant of the B,L instruction can't be used on the SOM target.  */
      if (TARGET_PA_20 && !TARGET_SOM)
	return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
      else
	return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
    }

  /* Long millicode call, but we are not generating PIC or portable runtime
     code.  */
  if (attr_length_indirect_call (insn) == 12)
    return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";

  /* Long millicode call for portable runtime.  */
  if (attr_length_indirect_call (insn) == 20)
    return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";

  /* We need a long PIC call to $$dyncall.  */
  xoperands[0] = NULL_RTX;
  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
  if (TARGET_SOM || !TARGET_GAS)
    {
      xoperands[0] = gen_label_rtx ();
      output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (xoperands[0]));
      output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
    }
  else
    {
      output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
      output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
		       xoperands);
    }
  output_asm_insn ("blr %%r0,%%r2", xoperands);
  output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
  return "";
}
/* Return the total length of the save and restore instructions needed for
   the data linkage table pointer (i.e., the PIC register) across the call
   instruction INSN.  No-return calls do not require a save and restore.
   In addition, we may be able to avoid the save and restore for calls
   within the same translation unit.  */

int
attr_length_save_restore_dltp (rtx insn)
{
  if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
    return 0;

  return 8;
}
/* In HPUX 8.0's shared library scheme, special relocations are needed
   for function labels if they might be passed to a function
   in a shared library (because shared libraries don't live in code
   space), and special magic is needed to construct their address.  */

void
hppa_encode_label (rtx sym)
{
  const char *str = XSTR (sym, 0);
  int len = strlen (str) + 1;
  char *newstr, *p;

  p = newstr = alloca (len + 1);
  *p++ = '@';
  strcpy (p, str);

  XSTR (sym, 0) = ggc_alloc_string (newstr, len);
}
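/* Thus a function symbol "foo" becomes "@foo" in the rtl.
   FUNCTION_NAME_P keys off the '@' prefix, and pa_strip_name_encoding
   below removes it again before the name is written out.  */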
static void
pa_encode_section_info (tree decl, rtx rtl, int first)
{
  int old_referenced = 0;

  if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
    old_referenced
      = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;

  default_encode_section_info (decl, rtl, first);

  if (first && TEXT_SPACE_P (decl))
    {
      SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
      if (TREE_CODE (decl) == FUNCTION_DECL)
	hppa_encode_label (XEXP (rtl, 0));
    }
  else if (old_referenced)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
}
/* This is sort of inverse to pa_encode_section_info.  */

static const char *
pa_strip_name_encoding (const char *str)
{
  str += (*str == '@');
  str += (*str == '*');
  return str;
}

int
function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
}
/* Returns 1 if OP is a function label involved in a simple addition
   with a constant.  Used to keep certain patterns from matching
   during instruction combination.  */

int
is_function_label_plus_const (rtx op)
{
  /* Strip off any CONST.  */
  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  return (GET_CODE (op) == PLUS
	  && function_label_operand (XEXP (op, 0), Pmode)
	  && GET_CODE (XEXP (op, 1)) == CONST_INT);
}
/* Output assembly code for a thunk to FUNCTION.  */

static void
pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
			HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			tree function)
{
  static unsigned int current_thunk_number;
  int val_14 = VAL_14_BITS_P (delta);
  int nbytes = 0;
  char label[16];
  rtx xoperands[4];

  xoperands[0] = XEXP (DECL_RTL (function), 0);
  xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
  xoperands[2] = GEN_INT (delta);

  ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
  fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");

  /* Output the thunk.  We know that the function is in the same
     translation unit (i.e., the same space) as the thunk, and that
     thunks are output after their method.  Thus, we don't need an
     external branch to reach the function.  With SOM and GAS,
     functions and thunks are effectively in different sections.
     Thus, we can always use an IA-relative branch and the linker
     will add a long branch stub if necessary.

     However, we have to be careful when generating PIC code on the
     SOM port to ensure that the sequence does not transfer to an
     import stub for the target function as this could clobber the
     return value saved at SP-24.  This would also apply to the
     32-bit linux port if the multi-space model is implemented.  */
  if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
       && !(flag_pic && TREE_PUBLIC (function))
       && (TARGET_GAS || last_address < 262132))
      || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
	  && ((targetm.have_named_sections
	       && DECL_SECTION_NAME (thunk_fndecl) != NULL
	       /* The GNU 64-bit linker has rather poor stub management.
		  So, we use a long branch from thunks that aren't in
		  the same section as the target function.  */
	       && ((!TARGET_64BIT
		    && (DECL_SECTION_NAME (thunk_fndecl)
			!= DECL_SECTION_NAME (function)))
		   || ((DECL_SECTION_NAME (thunk_fndecl)
			== DECL_SECTION_NAME (function))
		       && last_address < 262132)))
	      || (!targetm.have_named_sections && last_address < 262132))))
    {
      if (!val_14)
	output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("b %0", xoperands);

      if (val_14)
	{
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 8;
	}
      else
	{
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	  nbytes += 12;
	}
    }
  else if (TARGET_64BIT)
    {
      /* We only have one call-clobbered scratch register, so we can't
	 make use of the delay slot if delta doesn't fit in 14 bits.  */
      if (!val_14)
	{
	  output_asm_insn ("addil L'%2,%%r26", xoperands);
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	}

      output_asm_insn ("b,l .+8,%%r1", xoperands);

      if (TARGET_GAS)
	{
	  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
	}
      else
	{
	  xoperands[3] = GEN_INT (val_14 ? 8 : 16);
	  output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
	}

      if (val_14)
	{
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 20;
	}
      else
	{
	  output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
	  nbytes += 24;
	}
    }
  else if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);

      if (!val_14)
	output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("bv %%r0(%%r22)", xoperands);

      if (val_14)
	{
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 16;
	}
      else
	{
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	  nbytes += 20;
	}
    }
  else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
    {
      /* The function is accessible from outside this module.  The only
	 way to avoid an import stub between the thunk and function is to
	 call the function directly with an indirect sequence similar to
	 that used by $$dyncall.  This is possible because $$dyncall acts
	 as the import stub in an indirect call.  */
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
      xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
      output_asm_insn ("addil LT'%3,%%r19", xoperands);
      output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
      output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
      output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
      output_asm_insn ("depi 0,31,2,%%r22", xoperands);
      output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
      output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);

      if (!val_14)
	{
	  output_asm_insn ("addil L'%2,%%r26", xoperands);
	  nbytes += 4;
	}

      if (TARGET_PA_20)
	{
	  output_asm_insn ("bve (%%r22)", xoperands);
	  nbytes += 36;
	}
      else if (TARGET_NO_SPACE_REGS)
	{
	  output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
	  nbytes += 36;
	}
      else
	{
	  output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
	  output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
	  output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
	  nbytes += 44;
	}

      if (val_14)
	output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
      else
	output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
    }
  else if (flag_pic)
    {
      output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);

      if (TARGET_SOM || !TARGET_GAS)
	{
	  output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
	}
      else
	{
	  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
	}

      if (!val_14)
	output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("bv %%r0(%%r22)", xoperands);

      if (val_14)
	{
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 20;
	}
      else
	{
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	  nbytes += 24;
	}
    }
  else
    {
      if (!val_14)
	output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("ldil L'%0,%%r22", xoperands);
      output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);

      if (val_14)
	{
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 12;
	}
      else
	{
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	  nbytes += 16;
	}
    }
  fprintf (file, "\t.EXIT\n\t.PROCEND\n");

  if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
    {
      data_section ();
      output_asm_insn (".align 4", xoperands);
      ASM_OUTPUT_LABEL (file, label);
      output_asm_insn (".word P'%0", xoperands);
    }
  else if (TARGET_SOM && TARGET_GAS)
    forget_section ();

  current_thunk_number++;
  nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
	    & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
  last_address += nbytes;
  update_total_code_bytes (nbytes);
}
/* Only direct calls to static functions are allowed to be sibling (tail)
   call optimized.

   This restriction is necessary because some linker generated stubs will
   store return pointers into rp' in some cases which might clobber a
   live value already in rp'.

   In a sibcall the current function and the target function share stack
   space.  Thus if the path to the current function and the path to the
   target function save a value in rp', they save the value into the
   same stack slot, which has undesirable consequences.

   Because of the deferred binding nature of shared libraries any function
   with external scope could be in a different load module and thus require
   rp' to be saved when calling that function.  So sibcall optimizations
   can only be safe for static functions.

   Note that GCC never needs return value relocations, so we don't have to
   worry about static calls with return value relocations (which require
   saving rp').

   It is safe to perform a sibcall optimization when the target function
   will never return.  */

static bool
pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  if (TARGET_PORTABLE_RUNTIME)
    return false;

  /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
     single subspace mode and the call is not indirect.  As far as I know,
     there is no operating system support for the multiple subspace mode.
     It might be possible to support indirect calls if we didn't use
     $$dyncall (see the indirect sequence generated in output_call).  */
  if (TARGET_ELF32)
    return (decl != NULL_TREE);

  /* Sibcalls are not ok because the arg pointer register is not a fixed
     register.  This prevents the sibcall optimization from occurring.  In
     addition, there are problems with stub placement using GNU ld.  This
     is because a normal sibcall branch uses a 17-bit relocation while
     a regular call branch uses a 22-bit relocation.  As a result, more
     care needs to be taken in the placement of long-branch stubs.  */
  if (TARGET_64BIT)
    return false;

  /* Sibcalls are only ok within a translation unit.  */
  return (decl && !TREE_PUBLIC (decl));
}
/* ??? Addition is not commutative on the PA due to the weird implicit
   space register selection rules for memory addresses.  Therefore, we
   don't consider a + b == b + a, as this might be inside a MEM.  */

static bool
pa_commutative_p (rtx x, int outer_code)
{
  return (COMMUTATIVE_P (x)
	  && (TARGET_NO_SPACE_REGS
	      || (outer_code != UNKNOWN && outer_code != MEM)
	      || GET_CODE (x) != PLUS));
}
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpyadd instructions.  */

int
fmpyaddoperands (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
	 && mode == GET_MODE (operands[2])
	 && mode == GET_MODE (operands[3])
	 && mode == GET_MODE (operands[4])
	 && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
	 && GET_CODE (operands[2]) == REG
	 && GET_CODE (operands[3]) == REG
	 && GET_CODE (operands[4]) == REG
	 && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the addition.  One of the input operands must
     be the same as the output operand.  */
  if (! rtx_equal_p (operands[3], operands[4])
      && ! rtx_equal_p (operands[3], operands[5]))
    return 0;

  /* Inout operand of add cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
     || rtx_equal_p (operands[3], operands[1])
     || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* multiply cannot feed into addition operands.  */
  if (rtx_equal_p (operands[4], operands[0])
      || rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpyadd.  */
  return 1;
}
#if !defined(USE_COLLECT2)
static void
pa_asm_out_constructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    hppa_encode_label (symbol);

#ifdef CTORS_SECTION_ASM_OP
  default_ctor_section_asm_out_constructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_constructor (symbol, priority);
# else
  default_stabs_asm_out_constructor (symbol, priority);
# endif
#endif
}

static void
pa_asm_out_destructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    hppa_encode_label (symbol);

#ifdef DTORS_SECTION_ASM_OP
  default_dtor_section_asm_out_destructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_destructor (symbol, priority);
# else
  default_stabs_asm_out_destructor (symbol, priority);
# endif
#endif
}
#endif
/* This function places uninitialized global data in the bss section.
   The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
   function on the SOM port to prevent uninitialized global data from
   being placed in the data section.  */

void
pa_asm_output_aligned_bss (FILE *stream,
			   const char *name,
			   unsigned HOST_WIDE_INT size,
			   unsigned int align)
{
  bss_section ();
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
#endif

#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
  ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
#endif

  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
}
/* Both the HP and GNU assemblers under HP-UX provide a .comm directive
   that doesn't allow the alignment of global common storage to be directly
   specified.  The SOM linker aligns common storage based on the rounded
   value of the NUM_BYTES parameter in the .comm directive.  It's not
   possible to use the .align directive as it doesn't affect the alignment
   of the label associated with a .comm directive.  */

void
pa_asm_output_aligned_common (FILE *stream,
			      const char *name,
			      unsigned HOST_WIDE_INT size,
			      unsigned int align)
{
  unsigned int max_common_align;

  max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
  if (align > max_common_align)
    {
      warning (0, "alignment (%u) for %s exceeds maximum alignment "
	       "for global common data.  Using %u",
	       align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
      align = max_common_align;
    }

  bss_section ();

  assemble_name (stream, name);
  fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
	   MAX (size, align / BITS_PER_UNIT));
}
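/* For example, a request for 8 bytes of common storage with 16-byte
   alignment emits roughly

	sym	.comm 16

   relying on the SOM linker to round the size and align the storage as
   described above.  */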
/* We can't use .comm for local common storage as the SOM linker effectively
   treats the symbol as universal and uses the same storage for local symbols
   with the same name in different object files.  The .block directive
   reserves an uninitialized block of storage.  However, it's not common
   storage.  Fortunately, GCC never requests common storage with the same
   name in any given translation unit.  */

void
pa_asm_output_aligned_local (FILE *stream,
			     const char *name,
			     unsigned HOST_WIDE_INT size,
			     unsigned int align)
{
  bss_section ();
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef LOCAL_ASM_OP
  fprintf (stream, "%s", LOCAL_ASM_OP);
  assemble_name (stream, name);
  fprintf (stream, "\n");
#endif

  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
}
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpysub instructions.  */

int
fmpysuboperands (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
	 && mode == GET_MODE (operands[2])
	 && mode == GET_MODE (operands[3])
	 && mode == GET_MODE (operands[4])
	 && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
	 && GET_CODE (operands[2]) == REG
	 && GET_CODE (operands[3]) == REG
	 && GET_CODE (operands[4]) == REG
	 && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the subtraction.  Subtraction is not a commutative
     operation, so operands[4] must be the same as operands[3].  */
  if (! rtx_equal_p (operands[3], operands[4]))
    return 0;

  /* multiply cannot feed into subtraction.  */
  if (rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* Inout operand of sub cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
     || rtx_equal_p (operands[3], operands[1])
     || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpysub.  */
  return 1;
}
/* Return 1 if the given constant is 2, 4, or 8.  These are the valid
   constants for shadd instructions.  */

int
shadd_constant_p (int val)
{
  if (val == 2 || val == 4 || val == 8)
    return 1;
  else
    return 0;
}
/* Return 1 if OP is valid as a base or index register in a
   REG+REG address.  */

int
borx_reg_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) != REG)
    return 0;

  /* We must reject virtual registers as the only expressions that
     can be instantiated are REG and REG+CONST.  */
  if (op == virtual_incoming_args_rtx
      || op == virtual_stack_vars_rtx
      || op == virtual_stack_dynamic_rtx
      || op == virtual_outgoing_args_rtx
      || op == virtual_cfa_rtx)
    return 0;

  /* While it's always safe to index off the frame pointer, it's not
     profitable to do so when the frame pointer is being eliminated.  */
  if (!reload_completed
      && flag_omit_frame_pointer
      && !current_function_calls_alloca
      && op == frame_pointer_rtx)
    return 0;

  return register_operand (op, mode);
}
/* Return 1 if this operand is anything other than a hard register.  */

int
non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
}
/* Return 1 if INSN branches forward.  Should be using insn_addresses
   to avoid walking through all the insns...  */

static int
forward_branch_p (rtx insn)
{
  rtx label = JUMP_LABEL (insn);

  while (insn)
    {
      if (insn == label)
	break;
      else
	insn = NEXT_INSN (insn);
    }

  return (insn == label);
}
/* Return 1 if OP is an equality comparison, else return 0.  */

int
eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
}
/* Return 1 if INSN is in the delay slot of a call instruction.  */

int
jump_in_call_delay (rtx insn)
{
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;

  if (PREV_INSN (insn)
      && PREV_INSN (PREV_INSN (insn))
      && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
    {
      rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));

      return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
	      && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
    }
  else
    return 0;
}
/* Output an unconditional move and branch insn.  */

const char *
output_parallel_movb (rtx *operands, int length)
{
  /* These are the cases in which we win.  */
  if (length == 4)
    return "mov%I1b,tr %1,%0,%2";

  /* None of these cases wins, but they don't lose either.  */
  if (dbr_sequence_length () == 0)
    {
      /* Nothing in the delay slot, fake it by putting the combined
	 insn (the copy or add) in the delay slot of a bl.  */
      if (GET_CODE (operands[1]) == CONST_INT)
	return "b %2\n\tldi %1,%0";
      else
	return "b %2\n\tcopy %1,%0";
    }
  else
    {
      /* Something in the delay slot, but we've got a long branch.  */
      if (GET_CODE (operands[1]) == CONST_INT)
	return "ldi %1,%0\n\tb %2";
      else
	return "copy %1,%0\n\tb %2";
    }
}
/* Output an unconditional add and branch insn.  */

const char *
output_parallel_addb (rtx *operands, int length)
{
  /* To make life easy we want operand0 to be the shared input/output
     operand and operand1 to be the readonly operand.  */
  if (operands[0] == operands[1])
    operands[1] = operands[2];

  /* These are the cases in which we win.  */
  if (length == 4)
    return "add%I1b,tr %1,%0,%3";

  /* None of these cases win, but they don't lose either.  */
  if (dbr_sequence_length () == 0)
    {
      /* Nothing in the delay slot, fake it by putting the combined
	 insn (the copy or add) in the delay slot of a bl.  */
      return "b %3\n\tadd%I1 %1,%0,%0";
    }
  else
    {
      /* Something in the delay slot, but we've got a long branch.  */
      return "add%I1 %1,%0,%0\n\tb %3";
    }
}
/* Return nonzero if INSN (a jump insn) immediately follows a call
   to a named function.  This is used to avoid filling the delay slot
   of the jump since it can usually be eliminated by modifying RP in
   the delay slot of the call.  */

int
following_call (rtx insn)
{
  if (! TARGET_JUMP_IN_DELAY)
    return 0;

  /* Find the previous real insn, skipping NOTEs.  */
  insn = PREV_INSN (insn);
  while (insn && GET_CODE (insn) == NOTE)
    insn = PREV_INSN (insn);

  /* Check for CALL_INSNs and millicode calls.  */
  if (insn
      && ((GET_CODE (insn) == CALL_INSN
	   && get_attr_type (insn) != TYPE_DYNCALL)
	  || (GET_CODE (insn) == INSN
	      && GET_CODE (PATTERN (insn)) != SEQUENCE
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && get_attr_type (insn) == TYPE_MILLI)))
    return 1;

  return 0;
}
/* We use this hook to perform a PA specific optimization which is difficult
   to do in earlier passes.

   We want the delay slots of branches within jump tables to be filled.
   None of the compiler passes at the moment even has the notion that a
   PA jump table doesn't contain addresses, but instead contains actual
   instructions!

   Because we actually jump into the table, the addresses of each entry
   must stay constant in relation to the beginning of the table (which
   itself must stay constant relative to the instruction to jump into
   it).  I don't believe we can guarantee earlier passes of the compiler
   will adhere to those rules.

   So, late in the compilation process we find all the jump tables, and
   expand them into real code -- e.g. each entry in the jump table vector
   will get an appropriate label followed by a jump to the final target.

   Reorg and the final jump pass can then optimize these branches and
   fill their delay slots.  We end up with smaller, more efficient code.

   The jump instructions within the table are special; we must be able
   to identify them during assembly output (if the jumps don't get filled
   we need to emit a nop rather than nullifying the delay slot).  We
   identify jumps in switch tables by using insns with the attribute
   type TYPE_BTABLE_BRANCH.

   We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
   insns.  This serves two purposes: first, it prevents jump.c from
   noticing that the last N entries in the table jump to the instruction
   immediately after the table and deleting the jumps.  Second, those
   insns mark where we should emit .begin_brtab and .end_brtab directives
   when using GAS (allows for better link time optimizations).  */
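/* A sketch of the transformation (hypothetical labels, assembler
   syntax simplified): a two-entry address table

	L$tbl:	.word L$a
		.word L$b

   is exploded into real code that is branched into directly:

	L$tbl:	b L$a
		nop		; delay slot, may later be filled
		b L$b
		nop

   Each entry then occupies a fixed amount of code, and reorg is free
   to fill the delay slots like those of any other branch.  */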
static void
pa_reorg (void)
{
  rtx insn;

  remove_useless_addtr_insns (1);

  if (pa_cpu < PROCESSOR_8000)
    pa_combine_instructions ();

  /* This is fairly cheap, so always run it if optimizing.  */
  if (optimize > 0 && !TARGET_BIG_SWITCH)
    {
      /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          rtx pattern, tmp, location, label;
          unsigned int length, i;

          /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode.  */
          if (GET_CODE (insn) != JUMP_INSN
              || (GET_CODE (PATTERN (insn)) != ADDR_VEC
                  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
            continue;

          /* Emit marker for the beginning of the branch table.  */
          emit_insn_before (gen_begin_brtab (), insn);

          pattern = PATTERN (insn);
          location = PREV_INSN (insn);
          length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);

          for (i = 0; i < length; i++)
            {
              /* Emit a label before each jump to keep jump.c from
                 removing this code.  */
              tmp = gen_label_rtx ();
              LABEL_NUSES (tmp) = 1;
              emit_label_after (tmp, location);
              location = NEXT_INSN (location);

              if (GET_CODE (pattern) == ADDR_VEC)
                label = XEXP (XVECEXP (pattern, 0, i), 0);
              else
                label = XEXP (XVECEXP (pattern, 1, i), 0);

              tmp = gen_short_jump (label);

              /* Emit the jump itself.  */
              tmp = emit_jump_insn_after (tmp, location);
              JUMP_LABEL (tmp) = label;
              LABEL_NUSES (label)++;
              location = NEXT_INSN (location);

              /* Emit a BARRIER after the jump.  */
              emit_barrier_after (location);
              location = NEXT_INSN (location);
            }

          /* Emit marker for the end of the branch table.  */
          emit_insn_before (gen_end_brtab (), location);
          location = NEXT_INSN (location);
          emit_barrier_after (location);

          /* Delete the ADDR_VEC or ADDR_DIFF_VEC.  */
          delete_insn (insn);
        }
    }
  else
    {
      /* Still need brtab marker insns.  FIXME: the presence of these
         markers disables output of the branch table to readonly memory,
         and any alignment directives that might be needed.  Possibly,
         the begin_brtab insn should be output before the label for the
         table.  This doesn't matter at the moment since the tables are
         always output in the text section.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          /* Find an ADDR_VEC insn.  */
          if (GET_CODE (insn) != JUMP_INSN
              || (GET_CODE (PATTERN (insn)) != ADDR_VEC
                  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
            continue;

          /* Now generate markers for the beginning and end of the
             branch table.  */
          emit_insn_before (gen_begin_brtab (), insn);
          emit_insn_after (gen_end_brtab (), insn);
        }
    }
}
/* The PA has a number of odd instructions which can perform multiple
   tasks at once.  On first generation PA machines (PA1.0 and PA1.1)
   it may be profitable to combine two instructions into one instruction
   with two outputs.  It's not profitable on PA2.0 machines because the
   two outputs would take two slots in the reorder buffers.

   This routine finds instructions which can be combined and combines
   them.  We only support some of the potential combinations, and we
   only try common ways to find suitable instructions.

   * addb can add two registers or a register and a small integer
   and jump to a nearby (+-8k) location.  Normally the jump to the
   nearby location is conditional on the result of the add, but by
   using the "true" condition we can make the jump unconditional.
   Thus addb can perform two independent operations in one insn.

   * movb is similar to addb in that it can perform a reg->reg
   or small immediate->reg copy and jump to a nearby (+-8k) location.

   * fmpyadd and fmpysub can perform an FP multiply and either an
   FP add or FP sub if the operands of the multiply and add/sub are
   independent (there are other minor restrictions).  Note both
   the fmpy and fadd/fsub can in theory move to better spots according
   to data dependencies, but for now we require the fmpy stay at a
   fixed location.

   * Many of the memory operations can perform pre & post updates
   of index registers.  GCC's pre/post increment/decrement addressing
   is far too simple to take advantage of all the possibilities.  This
   pass may not be suitable since those insns may not be independent.

   * comclr can compare two ints or an int and a register, nullify
   the following instruction and zero some other register.  This
   is more difficult to use as it's harder to find an insn which
   will generate a comclr than finding something like an unconditional
   branch.  (conditional moves & long branches create comclr insns).

   * Most arithmetic operations can conditionally skip the next
   instruction.  They can be viewed as "perform this operation
   and conditionally jump to this nearby location" (where nearby
   is an insn away).  These are difficult to use due to the
   branch length restrictions.  */
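/* For instance (a sketch with hypothetical registers; the operand
   ordering follows the fmpyadd patterns in pa.md and should be taken
   as illustrative only), an independent multiply and add such as

	fmpy,dbl %fr4,%fr5,%fr6
	fadd,dbl %fr7,%fr8,%fr8

   can be rewritten as the single two-output insn

	fmpyadd,dbl %fr4,%fr5,%fr6,%fr7,%fr8

   provided neither operation reads a result of the other.  */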
static void
pa_combine_instructions (void)
{
  rtx anchor, new;

  /* This can get expensive since the basic algorithm is on the
     order of O(n^2) (or worse).  Only do it for -O2 or higher
     levels of optimization.  */
  if (optimize < 2)
    return;

  /* Walk down the list of insns looking for "anchor" insns which
     may be combined with "floating" insns.  As the name implies,
     "anchor" instructions don't move, while "floating" insns may
     move around.  */
  new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
  new = make_insn_raw (new);

  for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
    {
      enum attr_pa_combine_type anchor_attr;
      enum attr_pa_combine_type floater_attr;

      /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
         Also ignore any special USE insns.  */
      if ((GET_CODE (anchor) != INSN
           && GET_CODE (anchor) != JUMP_INSN
           && GET_CODE (anchor) != CALL_INSN)
          || GET_CODE (PATTERN (anchor)) == USE
          || GET_CODE (PATTERN (anchor)) == CLOBBER
          || GET_CODE (PATTERN (anchor)) == ADDR_VEC
          || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
        continue;

      anchor_attr = get_attr_pa_combine_type (anchor);
      /* See if anchor is an insn suitable for combination.  */
      if (anchor_attr == PA_COMBINE_TYPE_FMPY
          || anchor_attr == PA_COMBINE_TYPE_FADDSUB
          || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
              && ! forward_branch_p (anchor)))
        {
          rtx floater;

          for (floater = PREV_INSN (anchor);
               floater;
               floater = PREV_INSN (floater))
            {
              if (GET_CODE (floater) == NOTE
                  || (GET_CODE (floater) == INSN
                      && (GET_CODE (PATTERN (floater)) == USE
                          || GET_CODE (PATTERN (floater)) == CLOBBER)))
                continue;

              /* Anything except a regular INSN will stop our search.  */
              if (GET_CODE (floater) != INSN
                  || GET_CODE (PATTERN (floater)) == ADDR_VEC
                  || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
                {
                  floater = NULL_RTX;
                  break;
                }

              /* See if FLOATER is suitable for combination with the
                 anchor.  */
              floater_attr = get_attr_pa_combine_type (floater);
              if ((anchor_attr == PA_COMBINE_TYPE_FMPY
                   && floater_attr == PA_COMBINE_TYPE_FADDSUB)
                  || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
                      && floater_attr == PA_COMBINE_TYPE_FMPY))
                {
                  /* If ANCHOR and FLOATER can be combined, then we're
                     done with this pass.  */
                  if (pa_can_combine_p (new, anchor, floater, 0,
                                        SET_DEST (PATTERN (floater)),
                                        XEXP (SET_SRC (PATTERN (floater)), 0),
                                        XEXP (SET_SRC (PATTERN (floater)), 1)))
                    break;
                }

              else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
                       && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
                {
                  if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
                    {
                      if (pa_can_combine_p (new, anchor, floater, 0,
                                            SET_DEST (PATTERN (floater)),
                                            XEXP (SET_SRC (PATTERN (floater)), 0),
                                            XEXP (SET_SRC (PATTERN (floater)), 1)))
                        break;
                    }
                  else
                    {
                      if (pa_can_combine_p (new, anchor, floater, 0,
                                            SET_DEST (PATTERN (floater)),
                                            SET_SRC (PATTERN (floater)),
                                            SET_SRC (PATTERN (floater))))
                        break;
                    }
                }
            }

          /* If we didn't find anything on the backwards scan try forwards.  */
          if (!floater
              && (anchor_attr == PA_COMBINE_TYPE_FMPY
                  || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
            {
              for (floater = anchor; floater; floater = NEXT_INSN (floater))
                {
                  if (GET_CODE (floater) == NOTE
                      || (GET_CODE (floater) == INSN
                          && (GET_CODE (PATTERN (floater)) == USE
                              || GET_CODE (PATTERN (floater)) == CLOBBER)))
                    continue;

                  /* Anything except a regular INSN will stop our search.  */
                  if (GET_CODE (floater) != INSN
                      || GET_CODE (PATTERN (floater)) == ADDR_VEC
                      || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
                    {
                      floater = NULL_RTX;
                      break;
                    }

                  /* See if FLOATER is suitable for combination with the
                     anchor.  */
                  floater_attr = get_attr_pa_combine_type (floater);
                  if ((anchor_attr == PA_COMBINE_TYPE_FMPY
                       && floater_attr == PA_COMBINE_TYPE_FADDSUB)
                      || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
                          && floater_attr == PA_COMBINE_TYPE_FMPY))
                    {
                      /* If ANCHOR and FLOATER can be combined, then we're
                         done with this pass.  */
                      if (pa_can_combine_p (new, anchor, floater, 1,
                                            SET_DEST (PATTERN (floater)),
                                            XEXP (SET_SRC (PATTERN (floater)),
                                                  0),
                                            XEXP (SET_SRC (PATTERN (floater)),
                                                  1)))
                        break;
                    }
                }
            }

          /* FLOATER will be nonzero if we found a suitable floating
             insn for combination with ANCHOR.  */
          if (floater
              && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
                  || anchor_attr == PA_COMBINE_TYPE_FMPY))
            {
              /* Emit the new instruction and delete the old anchor.  */
              emit_insn_before (gen_rtx_PARALLEL
                                (VOIDmode,
                                 gen_rtvec (2, PATTERN (anchor),
                                            PATTERN (floater))),
                                anchor);

              PUT_CODE (anchor, NOTE);
              NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
              NOTE_SOURCE_FILE (anchor) = 0;

              /* Emit a special USE insn for FLOATER, then delete
                 the floating insn.  */
              emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
              delete_insn (floater);

              continue;
            }
          else if (floater
                   && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
            {
              rtx temp;
              /* Emit the new_jump instruction and delete the old anchor.  */
              temp
                = emit_jump_insn_before (gen_rtx_PARALLEL
                                         (VOIDmode,
                                          gen_rtvec (2, PATTERN (anchor),
                                                     PATTERN (floater))),
                                         anchor);

              JUMP_LABEL (temp) = JUMP_LABEL (anchor);
              PUT_CODE (anchor, NOTE);
              NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
              NOTE_SOURCE_FILE (anchor) = 0;

              /* Emit a special USE insn for FLOATER, then delete
                 the floating insn.  */
              emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
              delete_insn (floater);
              continue;
            }
        }
    }
}
static int
pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
                  rtx src1, rtx src2)
{
  int insn_code_number;
  rtx start, end;

  /* Create a PARALLEL with the patterns of ANCHOR and
     FLOATER, try to recognize it, then test constraints
     for the resulting pattern.

     If the pattern doesn't match or the constraints
     aren't met keep searching for a suitable floater
     insn.  */
  XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
  XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
  INSN_CODE (new) = -1;
  insn_code_number = recog_memoized (new);
  if (insn_code_number < 0
      || (extract_insn (new), ! constrain_operands (1)))
    return 0;

  if (reversed)
    {
      start = anchor;
      end = floater;
    }
  else
    {
      start = floater;
      end = anchor;
    }

  /* There's up to three operands to consider.  One
     output and two inputs.

     The output must not be used between FLOATER & ANCHOR
     exclusive.  The inputs must not be set between
     FLOATER and ANCHOR exclusive.  */

  if (reg_used_between_p (dest, start, end))
    return 0;

  if (reg_set_between_p (src1, start, end))
    return 0;

  if (reg_set_between_p (src2, start, end))
    return 0;

  /* If we get here, then everything is good.  */
  return 1;
}
/* Return nonzero if references for INSN are delayed.

   Millicode insns are actually function calls with some special
   constraints on arguments and register usage.

   Millicode calls always expect their arguments in the integer argument
   registers, and always return their result in %r29 (ret1).  They
   are expected to clobber their arguments, %r1, %r29, and the return
   pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.

   This function tells reorg that the references to arguments and
   millicode calls do not appear to happen until after the millicode call.
   This allows reorg to put insns which set the argument registers into the
   delay slot of the millicode call -- thus they act more like traditional
   CALL_INSNs.

   Note we cannot consider side effects of the insn to be delayed because
   the branch and link insn will clobber the return pointer.  If we happened
   to use the return pointer in the delay slot of the call, then we lose.

   get_attr_type will try to recognize the given insn, so make sure to
   filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
   in this routine.  */
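/* For example, a 32-bit integer multiply may be emitted as a call to
   the $$mulI millicode routine, which takes its arguments in %r26 and
   %r25 and returns its result in %r29.  Because of the convention
   described above, reorg may move the insn that loads %r26 into the
   delay slot of the millicode call itself.  */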
int
insn_refs_are_delayed (rtx insn)
{
  return ((GET_CODE (insn) == INSN
           && GET_CODE (PATTERN (insn)) != SEQUENCE
           && GET_CODE (PATTERN (insn)) != USE
           && GET_CODE (PATTERN (insn)) != CLOBBER
           && get_attr_type (insn) == TYPE_MILLI));
}
/* On the HP-PA the value is found in register(s) 28(-29), unless
   the mode is SF or DF.  Then the value is returned in fr4 (32).

   This must perform the same promotions as PROMOTE_MODE, else
   TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.

   Small structures must be returned in a PARALLEL on PA64 in order
   to match the HP Compiler ABI.  */

rtx
function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode valmode;

  if (AGGREGATE_TYPE_P (valtype)
      || TREE_CODE (valtype) == COMPLEX_TYPE
      || TREE_CODE (valtype) == VECTOR_TYPE)
    {
      if (TARGET_64BIT)
        {
          /* Aggregates with a size less than or equal to 128 bits are
             returned in GR 28(-29).  They are left justified.  The pad
             bits are undefined.  Larger aggregates are returned in
             memory.  */
          rtx loc[2];
          int i, offset = 0;
          int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;

          for (i = 0; i < ub; i++)
            {
              loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                          gen_rtx_REG (DImode, 28 + i),
                                          GEN_INT (offset));
              offset += 8;
            }

          return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
        }
      else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
        {
          /* Aggregates 5 to 8 bytes in size are returned in general
             registers r28-r29 in the same manner as other non
             floating-point objects.  The data is right-justified and
             zero-extended to 64 bits.  This is opposite to the normal
             justification used on big endian targets and requires
             special treatment.  */
          rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
                                       gen_rtx_REG (DImode, 28), const0_rtx);
          return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
        }
    }

  if ((INTEGRAL_TYPE_P (valtype)
       && TYPE_PRECISION (valtype) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    valmode = word_mode;
  else
    valmode = TYPE_MODE (valtype);

  if (TREE_CODE (valtype) == REAL_TYPE
      && !AGGREGATE_TYPE_P (valtype)
      && TYPE_MODE (valtype) != TFmode
      && !TARGET_SOFT_FLOAT)
    return gen_rtx_REG (valmode, 32);

  return gen_rtx_REG (valmode, 28);
}
/* Return the location of a parameter that is passed in a register or NULL
   if the parameter has any component that is passed in memory.

   This is new code and will be pushed into the net sources after
   further testing.

   ??? We might want to restructure this so that it looks more like other
   ports.  */
rtx
function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
              int named ATTRIBUTE_UNUSED)
{
  int max_arg_words = (TARGET_64BIT ? 8 : 4);
  int alignment = 0;
  int arg_size;
  int fpr_reg_base;
  int gpr_reg_base;
  rtx retval;

  if (mode == VOIDmode)
    return NULL_RTX;

  arg_size = FUNCTION_ARG_SIZE (mode, type);

  /* If this arg would be passed partially or totally on the stack, then
     this routine should return zero.  pa_arg_partial_bytes will
     handle arguments which are split between regs and stack slots if
     the ABI mandates split arguments.  */
  if (!TARGET_64BIT)
    {
      /* The 32-bit ABI does not split arguments.  */
      if (cum->words + arg_size > max_arg_words)
        return NULL_RTX;
    }
  else
    {
      if (arg_size > 1)
        alignment = cum->words & 1;
      if (cum->words + alignment >= max_arg_words)
        return NULL_RTX;
    }

  /* The 32bit ABIs and the 64bit ABIs are rather different,
     particularly in their handling of FP registers.  We might
     be able to cleverly share code between them, but I'm not
     going to bother in the hope that splitting them up results
     in code that is more easily understood.  */

  if (TARGET_64BIT)
    {
      /* Advance the base registers to their current locations.

         Remember, gprs grow towards smaller register numbers while
         fprs grow to higher register numbers.  Also remember that
         although FP regs are 32-bit addressable, we pretend that
         the registers are 64-bits wide.  */
      gpr_reg_base = 26 - cum->words;
      fpr_reg_base = 32 + cum->words;

      /* Arguments wider than one word and small aggregates need special
         treatment.  */
      if (arg_size > 1
          || mode == BLKmode
          || (type && (AGGREGATE_TYPE_P (type)
                       || TREE_CODE (type) == COMPLEX_TYPE
                       || TREE_CODE (type) == VECTOR_TYPE)))
        {
          /* Double-extended precision (80-bit), quad-precision (128-bit)
             and aggregates including complex numbers are aligned on
             128-bit boundaries.  The first eight 64-bit argument slots
             are associated one-to-one, with general registers r26
             through r19, and also with floating-point registers fr4
             through fr11.  Arguments larger than one word are always
             passed in general registers.

             Using a PARALLEL with a word mode register results in left
             justified data on a big-endian target.  */

          rtx loc[8];
          int i, offset = 0, ub = arg_size;

          /* Align the base register.  */
          gpr_reg_base -= alignment;

          ub = MIN (ub, max_arg_words - cum->words - alignment);
          for (i = 0; i < ub; i++)
            {
              loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                          gen_rtx_REG (DImode, gpr_reg_base),
                                          GEN_INT (offset));
              gpr_reg_base -= 1;
              offset += 8;
            }

          return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
        }
    }
  else
    {
      /* If the argument is larger than a word, then we know precisely
         which registers we must use.  */
      if (arg_size > 1)
        {
          if (cum->words)
            {
              gpr_reg_base = 23;
              fpr_reg_base = 38;
            }
          else
            {
              gpr_reg_base = 25;
              fpr_reg_base = 34;
            }

          /* Structures 5 to 8 bytes in size are passed in the general
             registers in the same manner as other non floating-point
             objects.  The data is right-justified and zero-extended
             to 64 bits.  This is opposite to the normal justification
             used on big endian targets and requires special treatment.
             We now define BLOCK_REG_PADDING to pad these objects.
             Aggregates, complex and vector types are passed in the same
             manner as structures.  */
          if (mode == BLKmode
              || (type && (AGGREGATE_TYPE_P (type)
                           || TREE_CODE (type) == COMPLEX_TYPE
                           || TREE_CODE (type) == VECTOR_TYPE)))
            {
              rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (DImode, gpr_reg_base),
                                           const0_rtx);
              return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
            }
        }
      else
        {
          /* We have a single word (32 bits).  A simple computation
             will get us the register #s we need.  */
          gpr_reg_base = 26 - cum->words;
          fpr_reg_base = 32 + 2 * cum->words;
        }
    }

  /* Determine if the argument needs to be passed in both general and
     floating point registers.  */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
          is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
          pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
          registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
          sibcall arguments setup with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
         calls with the 32 bit ABI and the HP assembler since there is no
         way to specify argument locations in static functions.  */
      || (!TARGET_64BIT
          && !TARGET_ELF32
          && !cum->incoming
          && cum->indirect
          && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
        = gen_rtx_PARALLEL
            (mode,
             gen_rtvec (2,
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, fpr_reg_base),
                                           const0_rtx),
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, gpr_reg_base),
                                           const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
          /* Indirect calls in the normal 32bit ABI require all arguments
             to be passed in general registers.  */
          || (!TARGET_PORTABLE_RUNTIME
              && !TARGET_64BIT
              && !TARGET_ELF32
              && cum->indirect)
          /* If the parameter is not a scalar floating-point parameter,
             then it belongs in GPRs.  */
          || GET_MODE_CLASS (mode) != MODE_FLOAT
          /* Structure with single SFmode field belongs in GPR.  */
          || (type && AGGREGATE_TYPE_P (type)))
        retval = gen_rtx_REG (mode, gpr_reg_base);
      else
        retval = gen_rtx_REG (mode, fpr_reg_base);
    }
  return retval;
}
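/* A worked example of the GPR numbering above, derived from the code:
   gpr_reg_base is 26 - cum->words, so under the 32-bit ABI the first
   argument word lands in %r26, the second in %r25, and so on down to
   %r23, after which arguments go to the stack.  A single-word scalar
   float passed in FP registers uses fpr_reg_base = 32 + 2 * cum->words,
   i.e. every other entry in the FP register file.  */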
/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                      tree type, bool named ATTRIBUTE_UNUSED)
{
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
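/* Example of the arithmetic (TARGET_64BIT, so max_arg_words is 8):
   a two-word argument starting at cum->words == 7 has 7 + 0 + 2 > 8
   but 7 + 0 < 8, so (8 - 7 - 0) * UNITS_PER_WORD == 8 bytes are passed
   in the last register and the remainder goes on the stack.  */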
/* Return a string to output before text in the current function.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */
static const char *
som_text_section_asm_op (void)
{
  if (!TARGET_SOM)
    return "";

  if (TARGET_GAS)
    {
      if (cfun && !cfun->machine->in_nsubspa)
        {
          /* We only want to emit a .nsubspa directive once at the
             start of the function.  */
          cfun->machine->in_nsubspa = 1;

          /* Create a new subspace for the text.  This provides
             better stub placement and one-only functions.  */
          if (cfun->decl
              && DECL_ONE_ONLY (cfun->decl)
              && !DECL_WEAK (cfun->decl))
            return
 "\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,SORT=24,COMDAT";

          return "\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$";
        }

      /* There isn't a current function or the body of the current
         function has been completed.  So, we are changing to the
         text section to output debugging information.  We need to
         forget that we are in the text section so that the function
         text_section in varasm.c will call us the next time around.  */
      forget_section ();

      return "\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$";
    }

  return "\t.SPACE $TEXT$\n\t.SUBSPA $CODE$";
}
/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc can
   not be placed in the read-only data section.  */

static void
pa_select_section (tree exp, int reloc,
                   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
          || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
          && DECL_ONE_ONLY (exp)
          && !DECL_WEAK (exp))
        som_one_only_readonly_data_section ();
      else
        readonly_data_section ();
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    readonly_data_section ();
  else if (TARGET_SOM
           && TREE_CODE (exp) == VAR_DECL
           && DECL_ONE_ONLY (exp)
           && !DECL_WEAK (exp))
    som_one_only_data_section ();
  else
    data_section ();
}
static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here, functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
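/* For a data symbol "foo" (a hypothetical name) this emits

	.EXPORT foo,DATA

   making the symbol visible to the SOM linker.  */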
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
{
  /* SOM ABI says that objects larger than 64 bits are returned in memory.
     PA64 ABI says that objects larger than 128 bits are returned in memory.
     Note, int_size_in_bytes can return -1 if the size of the object is
     variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
          || int_size_in_bytes (type) <= 0);
}
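/* For example, a 12-byte structure is returned in memory on 32-bit
   targets (12 > 8) but in registers on TARGET_64BIT (12 <= 16); a
   variable-sized object yields an int_size_in_bytes of -1 and is
   therefore always returned in memory.  */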
/* Structure to hold declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct extern_symbol GTY(())
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */
DEF_VEC_O(extern_symbol);
DEF_VEC_ALLOC_O(extern_symbol,gc);

/* Vector of extern_symbol pointers.  */
static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  extern_symbol *p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);

  gcc_assert (file == asm_out_file);
  p->decl = decl;
  p->name = name;
}
/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
          && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
        ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  VEC_free (extern_symbol, gc, extern_symbols);
}
#endif