/* Definitions of target machine for GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.
   Contributed by James E. Wilson <wilson@cygnus.com> and
		  David Mosberger <davidm@hpl.hp.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "ggc.h"
#include "basic-block.h"
#include "toplev.h"
#include "sched-int.h"
#include "timevar.h"
#include "target.h"
#include "target-def.h"
#include "tm_p.h"
#include "hashtab.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "tree-gimple.h"
#include "intl.h"
#include "debug.h"
#include "params.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
   ASM_OUTPUT_LABELREF.  */
int ia64_asm_output_label = 0;

/* Define the information needed to generate branch and scc insns.  This is
   stored from the compare operation.  */
struct rtx_def * ia64_compare_op0;
struct rtx_def * ia64_compare_op1;

/* Register names for ia64_expand_prologue.  */
static const char * const ia64_reg_numbers[96] =
{ "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
  "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
  "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
  "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
  "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
  "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
  "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
  "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
  "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
  "r104","r105","r106","r107","r108","r109","r110","r111",
  "r112","r113","r114","r115","r116","r117","r118","r119",
  "r120","r121","r122","r123","r124","r125","r126","r127"};

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_input_reg_names[8] =
{ "in0",  "in1",  "in2",  "in3",  "in4",  "in5",  "in6",  "in7" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_local_reg_names[80] =
{ "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
  "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
  "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
  "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
  "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
  "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
  "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
  "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
  "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
  "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_output_reg_names[8] =
{ "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
/* Which cpu are we scheduling for.  */
enum processor_type ia64_tune = PROCESSOR_ITANIUM2;

/* Determines whether we run our final scheduling pass or not.  We always
   avoid the normal second scheduling pass.  */
static int ia64_flag_schedule_insns2;

/* Determines whether we run variable tracking in machine dependent
   reorganization.  */
static int ia64_flag_var_tracking;

/* Variables which are this size or smaller are put in the sdata/sbss
   sections.  */
unsigned int ia64_section_threshold;

/* The following variable is used by the DFA insn scheduler.  The value is
   TRUE if we do insn bundling instead of insn scheduling.  */
/* Structure to be filled in by ia64_compute_frame_size with register
   save masks and offsets for the current function.  */

struct ia64_frame_info
{
  HOST_WIDE_INT total_size;	/* size of the stack frame, not including
				   the caller's scratch area.  */
  HOST_WIDE_INT spill_cfa_off;	/* top of the reg spill area from the cfa.  */
  HOST_WIDE_INT spill_size;	/* size of the gr/br/fr spill area.  */
  HOST_WIDE_INT extra_spill_size;  /* size of spill area for others.  */
  HARD_REG_SET mask;		/* mask of saved registers.  */
  unsigned int gr_used_mask;	/* mask of registers in use as gr spill
				   registers or long-term scratches.  */
  int n_spilled;		/* number of spilled registers.  */
  int reg_fp;			/* register for fp.  */
  int reg_save_b0;		/* save register for b0.  */
  int reg_save_pr;		/* save register for prs.  */
  int reg_save_ar_pfs;		/* save register for ar.pfs.  */
  int reg_save_ar_unat;		/* save register for ar.unat.  */
  int reg_save_ar_lc;		/* save register for ar.lc.  */
  int reg_save_gp;		/* save register for gp.  */
  int n_input_regs;		/* number of input registers used.  */
  int n_local_regs;		/* number of local registers used.  */
  int n_output_regs;		/* number of output registers used.  */
  int n_rotate_regs;		/* number of rotating registers used.  */

  char need_regstk;		/* true if a .regstk directive needed.  */
  char initialized;		/* true if the data is finalized.  */
};

/* Current frame information calculated by ia64_compute_frame_size.  */
static struct ia64_frame_info current_frame_info;
static int ia64_first_cycle_multipass_dfa_lookahead (void);
static void ia64_dependencies_evaluation_hook (rtx, rtx);
static void ia64_init_dfa_pre_cycle_insn (void);
static rtx ia64_dfa_pre_cycle_insn (void);
static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
static rtx gen_tls_get_addr (void);
static rtx gen_thread_pointer (void);
static int find_gr_spill (int);
static int next_scratch_gr_reg (void);
static void mark_reg_gr_used_mask (rtx, void *);
static void ia64_compute_frame_size (HOST_WIDE_INT);
static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
static void finish_spill_pointers (void);
static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
static rtx gen_movdi_x (rtx, rtx, rtx);
static rtx gen_fr_spill_x (rtx, rtx, rtx);
static rtx gen_fr_restore_x (rtx, rtx, rtx);

static enum machine_mode hfa_element_mode (tree, bool);
static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					 tree, int *, int);
static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				   tree, bool);
static bool ia64_function_ok_for_sibcall (tree, tree);
static bool ia64_return_in_memory (tree, tree);
static bool ia64_rtx_costs (rtx, int, int, int *);
static void fix_range (const char *);
static bool ia64_handle_option (size_t, const char *, int);
static struct machine_function * ia64_init_machine_status (void);
static void emit_insn_group_barriers (FILE *);
static void emit_all_insn_group_barriers (FILE *);
static void final_emit_insn_group_barriers (FILE *);
static void emit_predicate_relation_info (void);
static void ia64_reorg (void);
static bool ia64_in_small_data_p (tree);
static void process_epilogue (FILE *, rtx, bool, bool);
static int process_set (FILE *, rtx, rtx, bool, bool);

static bool ia64_assemble_integer (rtx, unsigned int, int);
static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_end_prologue (FILE *);

static int ia64_issue_rate (void);
static int ia64_adjust_cost (rtx, rtx, rtx, int);
static void ia64_sched_init (FILE *, int, int);
static void ia64_sched_finish (FILE *, int);
static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
static int ia64_variable_issue (FILE *, int, rtx, int);

static struct bundle_state *get_free_bundle_state (void);
static void free_bundle_state (struct bundle_state *);
static void initiate_bundle_states (void);
static void finish_bundle_states (void);
static unsigned bundle_state_hash (const void *);
static int bundle_state_eq_p (const void *, const void *);
static int insert_bundle_state (struct bundle_state *);
static void initiate_bundle_state_table (void);
static void finish_bundle_state_table (void);
static int try_issue_nops (struct bundle_state *, int);
static int try_issue_insn (struct bundle_state *, rtx);
static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
static int get_max_pos (state_t);
static int get_template (state_t, int);

static rtx get_next_important_insn (rtx, rtx);
static void bundling (FILE *, int, rtx, rtx);

static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static void ia64_file_start (void);

static void ia64_select_rtx_section (enum machine_mode, rtx,
				     unsigned HOST_WIDE_INT);
static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_unique_section (tree, int)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
					     unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static unsigned int ia64_section_type_flags (tree, const char *, int);
static void ia64_hpux_add_extern_decl (tree decl)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_file_end (void)
     ATTRIBUTE_UNUSED;
static void ia64_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_sysv4_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_vms_init_libfuncs (void)
     ATTRIBUTE_UNUSED;

static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
static void ia64_encode_section_info (tree, rtx, int);
static rtx ia64_struct_value_rtx (tree, int);
static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
static bool ia64_vector_mode_supported_p (enum machine_mode mode);
static bool ia64_cannot_force_const_mem (rtx);
static const char *ia64_mangle_fundamental_type (tree);
static const char *ia64_invalid_conversion (tree, tree);
static const char *ia64_invalid_unary_op (int, tree);
static const char *ia64_invalid_binary_op (int, tree, tree);
/* Table of valid machine attributes.  */
static const struct attribute_spec ia64_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "syscall_linkage", 0, 0, false, true, true, NULL },
  { "model",	       1, 1, true, false, false, ia64_handle_model_attribute },
  { NULL,	       0, 0, false, false, false, NULL }
};
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ia64_attribute_table

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ia64_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ia64_expand_builtin

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdata1\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER ia64_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P  ia64_in_small_data_p

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT ia64_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH ia64_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER ia64_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 ia64_sched_reorder2

#undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
#define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
#undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
  ia64_first_cycle_multipass_dfa_lookahead_guard

#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START ia64_file_start

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ia64_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info

#undef  TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS  ia64_section_type_flags

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
#endif

/* ??? ABI doesn't allow us to define this.  */
#if 0
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#endif

/* ??? ABI doesn't allow us to define this.  */
#if 0
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#endif

/* ??? Investigate.  */
#if 0
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#endif

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg

#undef TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT process_for_unwind_directive

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p

/* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
   in an order different from the specified program order.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ia64_handle_option

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem

#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
#define TARGET_MANGLE_FUNDAMENTAL_TYPE ia64_mangle_fundamental_type

#undef TARGET_INVALID_CONVERSION
#define TARGET_INVALID_CONVERSION ia64_invalid_conversion
#undef TARGET_INVALID_UNARY_OP
#define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
#undef TARGET_INVALID_BINARY_OP
#define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op

struct gcc_target targetm = TARGET_INITIALIZER;
typedef enum
  {
    ADDR_AREA_NORMAL,	/* normal address area */
    ADDR_AREA_SMALL	/* addressable by "addl" (-2MB < addr < 2MB) */
  }
ia64_addr_area;

static GTY(()) tree small_ident1;
static GTY(()) tree small_ident2;

static void
init_idents (void)
{
  if (small_ident1 == 0)
    {
      small_ident1 = get_identifier ("small");
      small_ident2 = get_identifier ("__small__");
    }
}
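
/* Usage note (illustrative, not part of the original source): a declaration
   such as

       static int counter __attribute__ ((model ("small")));

   requests the small address area, so COUNTER can be addressed gp-relative
   with a single "addl".  */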
/* Retrieve the address area that has been chosen for the given decl.  */

static ia64_addr_area
ia64_get_addr_area (tree decl)
{
  tree model_attr;

  model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
  if (model_attr)
    {
      tree id;

      init_idents ();
      id = TREE_VALUE (TREE_VALUE (model_attr));
      if (id == small_ident1 || id == small_ident2)
	return ADDR_AREA_SMALL;
    }
  return ADDR_AREA_NORMAL;
}
static tree
ia64_handle_model_attribute (tree *node, tree name, tree args,
			     int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  ia64_addr_area addr_area = ADDR_AREA_NORMAL;
  ia64_addr_area area;
  tree arg, decl = *node;

  init_idents ();
  arg = TREE_VALUE (args);
  if (arg == small_ident1 || arg == small_ident2)
    {
      addr_area = ADDR_AREA_SMALL;
    }
  else
    {
      warning (OPT_Wattributes, "invalid argument of %qs attribute",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  switch (TREE_CODE (decl))
    {
    case VAR_DECL:
      if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
	   == FUNCTION_DECL)
	  && !TREE_STATIC (decl))
	{
	  error ("%Jan address area attribute cannot be specified for "
		 "local variables", decl);
	  *no_add_attrs = true;
	}
      area = ia64_get_addr_area (decl);
      if (area != ADDR_AREA_NORMAL && addr_area != area)
	{
	  error ("address area of %q+D conflicts with previous "
		 "declaration", decl);
	  *no_add_attrs = true;
	}
      break;

    case FUNCTION_DECL:
      error ("%Jaddress area attribute cannot be specified for functions",
	     decl);
      *no_add_attrs = true;
      break;

    default:
      warning (OPT_Wattributes, "%qs attribute ignored",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
      break;
    }

  return NULL_TREE;
}
static void
ia64_encode_addr_area (tree decl, rtx symbol)
{
  int flags;

  flags = SYMBOL_REF_FLAGS (symbol);
  switch (ia64_get_addr_area (decl))
    {
    case ADDR_AREA_NORMAL: break;
    case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
    default: gcc_unreachable ();
    }
  SYMBOL_REF_FLAGS (symbol) = flags;
}

static void
ia64_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (TREE_CODE (decl) == VAR_DECL
      && GET_CODE (DECL_RTL (decl)) == MEM
      && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
      && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
    ia64_encode_addr_area (decl, XEXP (rtl, 0));
}
571 /* Implement CONST_OK_FOR_LETTER_P. */
574 ia64_const_ok_for_letter_p (HOST_WIDE_INT value
, char c
)
579 return CONST_OK_FOR_I (value
);
581 return CONST_OK_FOR_J (value
);
583 return CONST_OK_FOR_K (value
);
585 return CONST_OK_FOR_L (value
);
587 return CONST_OK_FOR_M (value
);
589 return CONST_OK_FOR_N (value
);
591 return CONST_OK_FOR_O (value
);
593 return CONST_OK_FOR_P (value
);
599 /* Implement CONST_DOUBLE_OK_FOR_LETTER_P. */
602 ia64_const_double_ok_for_letter_p (rtx value
, char c
)
607 return CONST_DOUBLE_OK_FOR_G (value
);
613 /* Implement EXTRA_CONSTRAINT. */
616 ia64_extra_constraint (rtx value
, char c
)
621 /* Non-volatile memory for FP_REG loads/stores. */
622 return memory_operand(value
, VOIDmode
) && !MEM_VOLATILE_P (value
);
625 /* 1..4 for shladd arguments. */
626 return (GET_CODE (value
) == CONST_INT
627 && INTVAL (value
) >= 1 && INTVAL (value
) <= 4);
630 /* Non-post-inc memory for asms and other unsavory creatures. */
631 return (GET_CODE (value
) == MEM
632 && GET_RTX_CLASS (GET_CODE (XEXP (value
, 0))) != RTX_AUTOINC
633 && (reload_in_progress
|| memory_operand (value
, VOIDmode
)));
636 /* Symbol ref to small-address-area. */
637 return small_addr_symbolic_operand (value
, VOIDmode
);
641 return value
== CONST0_RTX (GET_MODE (value
));
644 /* An integer vector, such that conversion to an integer yields a
645 value appropriate for an integer 'J' constraint. */
646 if (GET_CODE (value
) == CONST_VECTOR
647 && GET_MODE_CLASS (GET_MODE (value
)) == MODE_VECTOR_INT
)
649 value
= simplify_subreg (DImode
, value
, GET_MODE (value
), 0);
650 return ia64_const_ok_for_letter_p (INTVAL (value
), 'J');
655 /* A V2SF vector containing elements that satisfy 'G'. */
657 (GET_CODE (value
) == CONST_VECTOR
658 && GET_MODE (value
) == V2SFmode
659 && ia64_const_double_ok_for_letter_p (XVECEXP (value
, 0, 0), 'G')
660 && ia64_const_double_ok_for_letter_p (XVECEXP (value
, 0, 1), 'G'));
667 /* Return 1 if the operands of a move are ok. */
670 ia64_move_ok (rtx dst
, rtx src
)
672 /* If we're under init_recog_no_volatile, we'll not be able to use
673 memory_operand. So check the code directly and don't worry about
674 the validity of the underlying address, which should have been
675 checked elsewhere anyway. */
676 if (GET_CODE (dst
) != MEM
)
678 if (GET_CODE (src
) == MEM
)
680 if (register_operand (src
, VOIDmode
))
683 /* Otherwise, this must be a constant, and that either 0 or 0.0 or 1.0. */
684 if (INTEGRAL_MODE_P (GET_MODE (dst
)))
685 return src
== const0_rtx
;
687 return GET_CODE (src
) == CONST_DOUBLE
&& CONST_DOUBLE_OK_FOR_G (src
);
690 /* Return 1 if the operands are ok for a floating point load pair. */
693 ia64_load_pair_ok (rtx dst
, rtx src
)
695 if (GET_CODE (dst
) != REG
|| !FP_REGNO_P (REGNO (dst
)))
697 if (GET_CODE (src
) != MEM
|| MEM_VOLATILE_P (src
))
699 switch (GET_CODE (XEXP (src
, 0)))
708 rtx adjust
= XEXP (XEXP (XEXP (src
, 0), 1), 1);
710 if (GET_CODE (adjust
) != CONST_INT
711 || INTVAL (adjust
) != GET_MODE_SIZE (GET_MODE (src
)))
722 addp4_optimize_ok (rtx op1
, rtx op2
)
724 return (basereg_operand (op1
, GET_MODE(op1
)) !=
725 basereg_operand (op2
, GET_MODE(op2
)));
/* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
   Return the length of the field, or <= 0 on failure.  */

int
ia64_depz_field_mask (rtx rop, rtx rshift)
{
  unsigned HOST_WIDE_INT op = INTVAL (rop);
  unsigned HOST_WIDE_INT shift = INTVAL (rshift);

  /* Get rid of the zero bits we're shifting in.  */
  op >>= shift;

  /* We must now have a solid block of 1's at bit 0.  */
  return exact_log2 (op + 1);
}
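
/* Worked example (illustrative, not part of the original source): for
   ROP == 0xff0 and RSHIFT == 4, op >> shift == 0xff and
   exact_log2 (0xff + 1) == 8, so the dep.z field is 8 bits wide.
   For ROP == 0xf0f the shifted value 0xf0 is not a solid block of 1's;
   exact_log2 (0xf1) returns -1 and the caller treats that as failure.  */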
744 /* Return the TLS model to use for ADDR. */
746 static enum tls_model
747 tls_symbolic_operand_type (rtx addr
)
749 enum tls_model tls_kind
= 0;
751 if (GET_CODE (addr
) == CONST
)
753 if (GET_CODE (XEXP (addr
, 0)) == PLUS
754 && GET_CODE (XEXP (XEXP (addr
, 0), 0)) == SYMBOL_REF
)
755 tls_kind
= SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr
, 0), 0));
757 else if (GET_CODE (addr
) == SYMBOL_REF
)
758 tls_kind
= SYMBOL_REF_TLS_MODEL (addr
);
763 /* Return true if X is a constant that is valid for some immediate
764 field in an instruction. */
767 ia64_legitimate_constant_p (rtx x
)
769 switch (GET_CODE (x
))
776 if (GET_MODE (x
) == VOIDmode
)
778 return CONST_DOUBLE_OK_FOR_G (x
);
782 /* ??? Short term workaround for PR 28490. We must make the code here
783 match the code in ia64_expand_move and move_operand, even though they
784 are both technically wrong. */
785 if (tls_symbolic_operand_type (x
) == 0)
787 HOST_WIDE_INT addend
= 0;
790 if (GET_CODE (op
) == CONST
791 && GET_CODE (XEXP (op
, 0)) == PLUS
792 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == CONST_INT
)
794 addend
= INTVAL (XEXP (XEXP (op
, 0), 1));
795 op
= XEXP (XEXP (op
, 0), 0);
798 if (any_offset_symbol_operand (op
, GET_MODE (op
))
799 || function_operand (op
, GET_MODE (op
)))
801 if (aligned_offset_symbol_operand (op
, GET_MODE (op
)))
802 return (addend
& 0x3fff) == 0;
809 enum machine_mode mode
= GET_MODE (x
);
811 if (mode
== V2SFmode
)
812 return ia64_extra_constraint (x
, 'Y');
814 return (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
815 && GET_MODE_SIZE (mode
) <= 8);
823 /* Don't allow TLS addresses to get spilled to memory. */
826 ia64_cannot_force_const_mem (rtx x
)
828 return tls_symbolic_operand_type (x
) != 0;
831 /* Expand a symbolic constant load. */
834 ia64_expand_load_address (rtx dest
, rtx src
)
836 gcc_assert (GET_CODE (dest
) == REG
);
838 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
839 having to pointer-extend the value afterward. Other forms of address
840 computation below are also more natural to compute as 64-bit quantities.
841 If we've been given an SImode destination register, change it. */
842 if (GET_MODE (dest
) != Pmode
)
843 dest
= gen_rtx_REG_offset (dest
, Pmode
, REGNO (dest
), 0);
847 if (small_addr_symbolic_operand (src
, VOIDmode
))
851 emit_insn (gen_load_gprel64 (dest
, src
));
852 else if (GET_CODE (src
) == SYMBOL_REF
&& SYMBOL_REF_FUNCTION_P (src
))
853 emit_insn (gen_load_fptr (dest
, src
));
854 else if (sdata_symbolic_operand (src
, VOIDmode
))
855 emit_insn (gen_load_gprel (dest
, src
));
858 HOST_WIDE_INT addend
= 0;
861 /* We did split constant offsets in ia64_expand_move, and we did try
862 to keep them split in move_operand, but we also allowed reload to
863 rematerialize arbitrary constants rather than spill the value to
864 the stack and reload it. So we have to be prepared here to split
866 if (GET_CODE (src
) == CONST
)
868 HOST_WIDE_INT hi
, lo
;
870 hi
= INTVAL (XEXP (XEXP (src
, 0), 1));
871 lo
= ((hi
& 0x3fff) ^ 0x2000) - 0x2000;
877 src
= plus_constant (XEXP (XEXP (src
, 0), 0), hi
);
881 tmp
= gen_rtx_HIGH (Pmode
, src
);
882 tmp
= gen_rtx_PLUS (Pmode
, tmp
, pic_offset_table_rtx
);
883 emit_insn (gen_rtx_SET (VOIDmode
, dest
, tmp
));
885 tmp
= gen_rtx_LO_SUM (Pmode
, dest
, src
);
886 emit_insn (gen_rtx_SET (VOIDmode
, dest
, tmp
));
890 tmp
= gen_rtx_PLUS (Pmode
, dest
, GEN_INT (addend
));
891 emit_insn (gen_rtx_SET (VOIDmode
, dest
, tmp
));
898 static GTY(()) rtx gen_tls_tga
;
900 gen_tls_get_addr (void)
903 gen_tls_tga
= init_one_libfunc ("__tls_get_addr");
907 static GTY(()) rtx thread_pointer_rtx
;
909 gen_thread_pointer (void)
911 if (!thread_pointer_rtx
)
912 thread_pointer_rtx
= gen_rtx_REG (Pmode
, 13);
913 return thread_pointer_rtx
;
917 ia64_expand_tls_address (enum tls_model tls_kind
, rtx op0
, rtx op1
,
918 rtx orig_op1
, HOST_WIDE_INT addend
)
920 rtx tga_op1
, tga_op2
, tga_ret
, tga_eqv
, tmp
, insns
;
922 HOST_WIDE_INT addend_lo
, addend_hi
;
926 case TLS_MODEL_GLOBAL_DYNAMIC
:
929 tga_op1
= gen_reg_rtx (Pmode
);
930 emit_insn (gen_load_dtpmod (tga_op1
, op1
));
932 tga_op2
= gen_reg_rtx (Pmode
);
933 emit_insn (gen_load_dtprel (tga_op2
, op1
));
935 tga_ret
= emit_library_call_value (gen_tls_get_addr (), NULL_RTX
,
936 LCT_CONST
, Pmode
, 2, tga_op1
,
937 Pmode
, tga_op2
, Pmode
);
939 insns
= get_insns ();
942 if (GET_MODE (op0
) != Pmode
)
944 emit_libcall_block (insns
, op0
, tga_ret
, op1
);
947 case TLS_MODEL_LOCAL_DYNAMIC
:
      /* ??? This isn't the completely proper way to do local-dynamic.
	 If the call to __tls_get_addr is used only by a single symbol,
	 then we should (somehow) move the dtprel to the second arg
	 to avoid the extra add.  */
954 tga_op1
= gen_reg_rtx (Pmode
);
955 emit_insn (gen_load_dtpmod (tga_op1
, op1
));
957 tga_op2
= const0_rtx
;
959 tga_ret
= emit_library_call_value (gen_tls_get_addr (), NULL_RTX
,
960 LCT_CONST
, Pmode
, 2, tga_op1
,
961 Pmode
, tga_op2
, Pmode
);
963 insns
= get_insns ();
966 tga_eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
968 tmp
= gen_reg_rtx (Pmode
);
969 emit_libcall_block (insns
, tmp
, tga_ret
, tga_eqv
);
971 if (!register_operand (op0
, Pmode
))
972 op0
= gen_reg_rtx (Pmode
);
975 emit_insn (gen_load_dtprel (op0
, op1
));
976 emit_insn (gen_adddi3 (op0
, tmp
, op0
));
979 emit_insn (gen_add_dtprel (op0
, op1
, tmp
));
982 case TLS_MODEL_INITIAL_EXEC
:
983 addend_lo
= ((addend
& 0x3fff) ^ 0x2000) - 0x2000;
984 addend_hi
= addend
- addend_lo
;
986 op1
= plus_constant (op1
, addend_hi
);
989 tmp
= gen_reg_rtx (Pmode
);
990 emit_insn (gen_load_tprel (tmp
, op1
));
992 if (!register_operand (op0
, Pmode
))
993 op0
= gen_reg_rtx (Pmode
);
994 emit_insn (gen_adddi3 (op0
, tmp
, gen_thread_pointer ()));
997 case TLS_MODEL_LOCAL_EXEC
:
998 if (!register_operand (op0
, Pmode
))
999 op0
= gen_reg_rtx (Pmode
);
1005 emit_insn (gen_load_tprel (op0
, op1
));
1006 emit_insn (gen_adddi3 (op0
, op0
, gen_thread_pointer ()));
1009 emit_insn (gen_add_tprel (op0
, op1
, gen_thread_pointer ()));
1017 op0
= expand_simple_binop (Pmode
, PLUS
, op0
, GEN_INT (addend
),
1018 orig_op0
, 1, OPTAB_DIRECT
);
1019 if (orig_op0
== op0
)
1021 if (GET_MODE (orig_op0
) == Pmode
)
1023 return gen_lowpart (GET_MODE (orig_op0
), op0
);
1027 ia64_expand_move (rtx op0
, rtx op1
)
1029 enum machine_mode mode
= GET_MODE (op0
);
1031 if (!reload_in_progress
&& !reload_completed
&& !ia64_move_ok (op0
, op1
))
1032 op1
= force_reg (mode
, op1
);
1034 if ((mode
== Pmode
|| mode
== ptr_mode
) && symbolic_operand (op1
, VOIDmode
))
1036 HOST_WIDE_INT addend
= 0;
1037 enum tls_model tls_kind
;
1040 if (GET_CODE (op1
) == CONST
1041 && GET_CODE (XEXP (op1
, 0)) == PLUS
1042 && GET_CODE (XEXP (XEXP (op1
, 0), 1)) == CONST_INT
)
1044 addend
= INTVAL (XEXP (XEXP (op1
, 0), 1));
1045 sym
= XEXP (XEXP (op1
, 0), 0);
1048 tls_kind
= tls_symbolic_operand_type (sym
);
1050 return ia64_expand_tls_address (tls_kind
, op0
, sym
, op1
, addend
);
1052 if (any_offset_symbol_operand (sym
, mode
))
1054 else if (aligned_offset_symbol_operand (sym
, mode
))
1056 HOST_WIDE_INT addend_lo
, addend_hi
;
1058 addend_lo
= ((addend
& 0x3fff) ^ 0x2000) - 0x2000;
1059 addend_hi
= addend
- addend_lo
;
1063 op1
= plus_constant (sym
, addend_hi
);
1072 if (reload_completed
)
1074 /* We really should have taken care of this offset earlier. */
1075 gcc_assert (addend
== 0);
1076 if (ia64_expand_load_address (op0
, op1
))
1082 rtx subtarget
= no_new_pseudos
? op0
: gen_reg_rtx (mode
);
1084 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
, op1
));
1086 op1
= expand_simple_binop (mode
, PLUS
, subtarget
,
1087 GEN_INT (addend
), op0
, 1, OPTAB_DIRECT
);
1096 /* Split a move from OP1 to OP0 conditional on COND. */
1099 ia64_emit_cond_move (rtx op0
, rtx op1
, rtx cond
)
1101 rtx insn
, first
= get_last_insn ();
1103 emit_move_insn (op0
, op1
);
1105 for (insn
= get_last_insn (); insn
!= first
; insn
= PREV_INSN (insn
))
1107 PATTERN (insn
) = gen_rtx_COND_EXEC (VOIDmode
, copy_rtx (cond
),
/* Split a post-reload TImode or TFmode reference into two DImode
   components.  This is made extra difficult by the fact that we do
   not get any scratch registers to work with, because reload cannot
   be prevented from giving us a scratch that overlaps the register
   pair involved.  So instead, when addressing memory, we tweak the
   pointer register up and back down with POST_INCs.  Or up and not
   back down when we can get away with it.

   REVERSED is true when the loads must be done in reversed order
   (high word first) for correctness.  DEAD is true when the pointer
   dies with the second insn we generate and therefore the second
   address must not carry a postmodify.

   May return an insn which is to be emitted after the moves.  */
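
/* Illustrative sketch (not from the original source): for a TImode load
   from (mem (reg p)) the two word accesses are typically

       out[0] = (mem:DI (post_inc:DI (reg p)))    low word,  p += 8
       out[1] = (mem:DI (post_dec:DI (reg p)))    high word, p -= 8

   which restores the pointer; when DEAD is true the second access omits
   the postmodify, since the pointer is not needed afterward.  */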
1127 ia64_split_tmode (rtx out
[2], rtx in
, bool reversed
, bool dead
)
1131 switch (GET_CODE (in
))
1134 out
[reversed
] = gen_rtx_REG (DImode
, REGNO (in
));
1135 out
[!reversed
] = gen_rtx_REG (DImode
, REGNO (in
) + 1);
1140 /* Cannot occur reversed. */
1141 gcc_assert (!reversed
);
1143 if (GET_MODE (in
) != TFmode
)
1144 split_double (in
, &out
[0], &out
[1]);
1146 /* split_double does not understand how to split a TFmode
1147 quantity into a pair of DImode constants. */
1150 unsigned HOST_WIDE_INT p
[2];
1151 long l
[4]; /* TFmode is 128 bits */
1153 REAL_VALUE_FROM_CONST_DOUBLE (r
, in
);
1154 real_to_target (l
, &r
, TFmode
);
1156 if (FLOAT_WORDS_BIG_ENDIAN
)
1158 p
[0] = (((unsigned HOST_WIDE_INT
) l
[0]) << 32) + l
[1];
1159 p
[1] = (((unsigned HOST_WIDE_INT
) l
[2]) << 32) + l
[3];
1163 p
[0] = (((unsigned HOST_WIDE_INT
) l
[3]) << 32) + l
[2];
1164 p
[1] = (((unsigned HOST_WIDE_INT
) l
[1]) << 32) + l
[0];
1166 out
[0] = GEN_INT (p
[0]);
1167 out
[1] = GEN_INT (p
[1]);
1173 rtx base
= XEXP (in
, 0);
1176 switch (GET_CODE (base
))
1181 out
[0] = adjust_automodify_address
1182 (in
, DImode
, gen_rtx_POST_INC (Pmode
, base
), 0);
1183 out
[1] = adjust_automodify_address
1184 (in
, DImode
, dead
? 0 : gen_rtx_POST_DEC (Pmode
, base
), 8);
1188 /* Reversal requires a pre-increment, which can only
1189 be done as a separate insn. */
1190 emit_insn (gen_adddi3 (base
, base
, GEN_INT (8)));
1191 out
[0] = adjust_automodify_address
1192 (in
, DImode
, gen_rtx_POST_DEC (Pmode
, base
), 8);
1193 out
[1] = adjust_address (in
, DImode
, 0);
1198 gcc_assert (!reversed
&& !dead
);
1200 /* Just do the increment in two steps. */
1201 out
[0] = adjust_automodify_address (in
, DImode
, 0, 0);
1202 out
[1] = adjust_automodify_address (in
, DImode
, 0, 8);
1206 gcc_assert (!reversed
&& !dead
);
1208 /* Add 8, subtract 24. */
1209 base
= XEXP (base
, 0);
1210 out
[0] = adjust_automodify_address
1211 (in
, DImode
, gen_rtx_POST_INC (Pmode
, base
), 0);
1212 out
[1] = adjust_automodify_address
1214 gen_rtx_POST_MODIFY (Pmode
, base
, plus_constant (base
, -24)),
1219 gcc_assert (!reversed
&& !dead
);
1221 /* Extract and adjust the modification. This case is
1222 trickier than the others, because we might have an
1223 index register, or we might have a combined offset that
1224 doesn't fit a signed 9-bit displacement field. We can
1225 assume the incoming expression is already legitimate. */
1226 offset
= XEXP (base
, 1);
1227 base
= XEXP (base
, 0);
1229 out
[0] = adjust_automodify_address
1230 (in
, DImode
, gen_rtx_POST_INC (Pmode
, base
), 0);
1232 if (GET_CODE (XEXP (offset
, 1)) == REG
)
1234 /* Can't adjust the postmodify to match. Emit the
1235 original, then a separate addition insn. */
1236 out
[1] = adjust_automodify_address (in
, DImode
, 0, 8);
1237 fixup
= gen_adddi3 (base
, base
, GEN_INT (-8));
1241 gcc_assert (GET_CODE (XEXP (offset
, 1)) == CONST_INT
);
1242 if (INTVAL (XEXP (offset
, 1)) < -256 + 8)
1244 /* Again the postmodify cannot be made to match,
1245 but in this case it's more efficient to get rid
1246 of the postmodify entirely and fix up with an
1248 out
[1] = adjust_automodify_address (in
, DImode
, base
, 8);
1250 (base
, base
, GEN_INT (INTVAL (XEXP (offset
, 1)) - 8));
1254 /* Combined offset still fits in the displacement field.
1255 (We cannot overflow it at the high end.) */
1256 out
[1] = adjust_automodify_address
1257 (in
, DImode
, gen_rtx_POST_MODIFY
1258 (Pmode
, base
, gen_rtx_PLUS
1260 GEN_INT (INTVAL (XEXP (offset
, 1)) - 8))),
1279 /* Split a TImode or TFmode move instruction after reload.
1280 This is used by *movtf_internal and *movti_internal. */
1282 ia64_split_tmode_move (rtx operands
[])
1284 rtx in
[2], out
[2], insn
;
1287 bool reversed
= false;
1289 /* It is possible for reload to decide to overwrite a pointer with
1290 the value it points to. In that case we have to do the loads in
1291 the appropriate order so that the pointer is not destroyed too
1292 early. Also we must not generate a postmodify for that second
1293 load, or rws_access_regno will die. */
1294 if (GET_CODE (operands
[1]) == MEM
1295 && reg_overlap_mentioned_p (operands
[0], operands
[1]))
1297 rtx base
= XEXP (operands
[1], 0);
1298 while (GET_CODE (base
) != REG
)
1299 base
= XEXP (base
, 0);
1301 if (REGNO (base
) == REGNO (operands
[0]))
1305 /* Another reason to do the moves in reversed order is if the first
1306 element of the target register pair is also the second element of
1307 the source register pair. */
1308 if (GET_CODE (operands
[0]) == REG
&& GET_CODE (operands
[1]) == REG
1309 && REGNO (operands
[0]) == REGNO (operands
[1]) + 1)
1312 fixup
[0] = ia64_split_tmode (in
, operands
[1], reversed
, dead
);
1313 fixup
[1] = ia64_split_tmode (out
, operands
[0], reversed
, dead
);
1315 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1316 if (GET_CODE (EXP) == MEM \
1317 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1318 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1319 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1320 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1321 XEXP (XEXP (EXP, 0), 0), \
1324 insn
= emit_insn (gen_rtx_SET (VOIDmode
, out
[0], in
[0]));
1325 MAYBE_ADD_REG_INC_NOTE (insn
, in
[0]);
1326 MAYBE_ADD_REG_INC_NOTE (insn
, out
[0]);
1328 insn
= emit_insn (gen_rtx_SET (VOIDmode
, out
[1], in
[1]));
1329 MAYBE_ADD_REG_INC_NOTE (insn
, in
[1]);
1330 MAYBE_ADD_REG_INC_NOTE (insn
, out
[1]);
1333 emit_insn (fixup
[0]);
1335 emit_insn (fixup
[1]);
1337 #undef MAYBE_ADD_REG_INC_NOTE
/* ??? Fixing GR->FR XFmode moves during reload is hard.  You need to go
   through memory plus an extra GR scratch register.  Except that you can
   either get the first from SECONDARY_MEMORY_NEEDED or the second from
   SECONDARY_RELOAD_CLASS, but not both.

   We got into problems in the first place by allowing a construct like
   (subreg:XF (reg:TI)), which we got from a union containing a long double.
   This solution attempts to prevent this situation from occurring.  When
   we see something like the above, we spill the inner register to memory.  */
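
/* Illustrative example (not from the original source) of the construct
   described above:

       union { long double ld; __int128_t ti; } u;

   Writing u.ti and then reading u.ld can leave the long double represented
   as (subreg:XF (reg:TI ...)), which is the shape that
   spill_xfmode_rfmode_operand pushes out to a stack temporary.  */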
1351 spill_xfmode_rfmode_operand (rtx in
, int force
, enum machine_mode mode
)
1353 if (GET_CODE (in
) == SUBREG
1354 && GET_MODE (SUBREG_REG (in
)) == TImode
1355 && GET_CODE (SUBREG_REG (in
)) == REG
)
1357 rtx memt
= assign_stack_temp (TImode
, 16, 0);
1358 emit_move_insn (memt
, SUBREG_REG (in
));
1359 return adjust_address (memt
, mode
, 0);
1361 else if (force
&& GET_CODE (in
) == REG
)
1363 rtx memx
= assign_stack_temp (mode
, 16, 0);
1364 emit_move_insn (memx
, in
);
/* Expand the movxf or movrf pattern (MODE says which) with the given
   OPERANDS, returning true if the pattern should then invoke
   FAIL.  */
1376 ia64_expand_movxf_movrf (enum machine_mode mode
, rtx operands
[])
1378 rtx op0
= operands
[0];
1380 if (GET_CODE (op0
) == SUBREG
)
1381 op0
= SUBREG_REG (op0
);
1383 /* We must support XFmode loads into general registers for stdarg/vararg,
1384 unprototyped calls, and a rare case where a long double is passed as
1385 an argument after a float HFA fills the FP registers. We split them into
1386 DImode loads for convenience. We also need to support XFmode stores
1387 for the last case. This case does not happen for stdarg/vararg routines,
1388 because we do a block store to memory of unnamed arguments. */
1390 if (GET_CODE (op0
) == REG
&& GR_REGNO_P (REGNO (op0
)))
1394 /* We're hoping to transform everything that deals with XFmode
1395 quantities and GR registers early in the compiler. */
1396 gcc_assert (!no_new_pseudos
);
1398 /* Struct to register can just use TImode instead. */
1399 if ((GET_CODE (operands
[1]) == SUBREG
1400 && GET_MODE (SUBREG_REG (operands
[1])) == TImode
)
1401 || (GET_CODE (operands
[1]) == REG
1402 && GR_REGNO_P (REGNO (operands
[1]))))
1404 rtx op1
= operands
[1];
1406 if (GET_CODE (op1
) == SUBREG
)
1407 op1
= SUBREG_REG (op1
);
1409 op1
= gen_rtx_REG (TImode
, REGNO (op1
));
1411 emit_move_insn (gen_rtx_REG (TImode
, REGNO (op0
)), op1
);
1415 if (GET_CODE (operands
[1]) == CONST_DOUBLE
)
1417 /* Don't word-swap when reading in the constant. */
1418 emit_move_insn (gen_rtx_REG (DImode
, REGNO (op0
)),
1419 operand_subword (operands
[1], WORDS_BIG_ENDIAN
,
1421 emit_move_insn (gen_rtx_REG (DImode
, REGNO (op0
) + 1),
1422 operand_subword (operands
[1], !WORDS_BIG_ENDIAN
,
1427 /* If the quantity is in a register not known to be GR, spill it. */
1428 if (register_operand (operands
[1], mode
))
1429 operands
[1] = spill_xfmode_rfmode_operand (operands
[1], 1, mode
);
1431 gcc_assert (GET_CODE (operands
[1]) == MEM
);
1433 /* Don't word-swap when reading in the value. */
1434 out
[0] = gen_rtx_REG (DImode
, REGNO (op0
));
1435 out
[1] = gen_rtx_REG (DImode
, REGNO (op0
) + 1);
1437 emit_move_insn (out
[0], adjust_address (operands
[1], DImode
, 0));
1438 emit_move_insn (out
[1], adjust_address (operands
[1], DImode
, 8));
1442 if (GET_CODE (operands
[1]) == REG
&& GR_REGNO_P (REGNO (operands
[1])))
1444 /* We're hoping to transform everything that deals with XFmode
1445 quantities and GR registers early in the compiler. */
1446 gcc_assert (!no_new_pseudos
);
1448 /* Op0 can't be a GR_REG here, as that case is handled above.
1449 If op0 is a register, then we spill op1, so that we now have a
1450 MEM operand. This requires creating an XFmode subreg of a TImode reg
1451 to force the spill. */
1452 if (register_operand (operands
[0], mode
))
1454 rtx op1
= gen_rtx_REG (TImode
, REGNO (operands
[1]));
1455 op1
= gen_rtx_SUBREG (mode
, op1
, 0);
1456 operands
[1] = spill_xfmode_rfmode_operand (op1
, 0, mode
);
1463 gcc_assert (GET_CODE (operands
[0]) == MEM
);
1465 /* Don't word-swap when writing out the value. */
1466 in
[0] = gen_rtx_REG (DImode
, REGNO (operands
[1]));
1467 in
[1] = gen_rtx_REG (DImode
, REGNO (operands
[1]) + 1);
1469 emit_move_insn (adjust_address (operands
[0], DImode
, 0), in
[0]);
1470 emit_move_insn (adjust_address (operands
[0], DImode
, 8), in
[1]);
1475 if (!reload_in_progress
&& !reload_completed
)
1477 operands
[1] = spill_xfmode_rfmode_operand (operands
[1], 0, mode
);
1479 if (GET_MODE (op0
) == TImode
&& GET_CODE (op0
) == REG
)
1481 rtx memt
, memx
, in
= operands
[1];
1482 if (CONSTANT_P (in
))
1483 in
= validize_mem (force_const_mem (mode
, in
));
1484 if (GET_CODE (in
) == MEM
)
1485 memt
= adjust_address (in
, TImode
, 0);
1488 memt
= assign_stack_temp (TImode
, 16, 0);
1489 memx
= adjust_address (memt
, mode
, 0);
1490 emit_move_insn (memx
, in
);
1492 emit_move_insn (op0
, memt
);
1496 if (!ia64_move_ok (operands
[0], operands
[1]))
1497 operands
[1] = force_reg (mode
, operands
[1]);
1503 /* Emit comparison instruction if necessary, returning the expression
1504 that holds the compare result in the proper mode. */
1506 static GTY(()) rtx cmptf_libfunc
;
1509 ia64_expand_compare (enum rtx_code code
, enum machine_mode mode
)
1511 rtx op0
= ia64_compare_op0
, op1
= ia64_compare_op1
;
1514 /* If we have a BImode input, then we already have a compare result, and
1515 do not need to emit another comparison. */
1516 if (GET_MODE (op0
) == BImode
)
1518 gcc_assert ((code
== NE
|| code
== EQ
) && op1
== const0_rtx
);
  /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
     magic number as its third argument, that indicates what to do.
     The return value is an integer to be compared against zero.  */
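  /* For example (illustrative note, not part of the original source): a LE
     comparison below becomes a call
	 _U_Qfcmp (op0, op1, QCMP_LT | QCMP_EQ | QCMP_INV)
     whose integer result is then tested against zero with NE.  */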
1524 else if (GET_MODE (op0
) == TFmode
)
1527 QCMP_INV
= 1, /* Raise FP_INVALID on SNaN as a side effect. */
1533 enum rtx_code ncode
;
1536 gcc_assert (cmptf_libfunc
&& GET_MODE (op1
) == TFmode
);
1539 /* 1 = equal, 0 = not equal. Equality operators do
1540 not raise FP_INVALID when given an SNaN operand. */
1541 case EQ
: magic
= QCMP_EQ
; ncode
= NE
; break;
1542 case NE
: magic
= QCMP_EQ
; ncode
= EQ
; break;
1543 /* isunordered() from C99. */
1544 case UNORDERED
: magic
= QCMP_UNORD
; ncode
= NE
; break;
1545 case ORDERED
: magic
= QCMP_UNORD
; ncode
= EQ
; break;
1546 /* Relational operators raise FP_INVALID when given
1548 case LT
: magic
= QCMP_LT
|QCMP_INV
; ncode
= NE
; break;
1549 case LE
: magic
= QCMP_LT
|QCMP_EQ
|QCMP_INV
; ncode
= NE
; break;
1550 case GT
: magic
= QCMP_GT
|QCMP_INV
; ncode
= NE
; break;
1551 case GE
: magic
= QCMP_GT
|QCMP_EQ
|QCMP_INV
; ncode
= NE
; break;
	  /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
	     Expanders for buneq etc. would have to be added to ia64.md
	     for this to be useful.  */
1555 default: gcc_unreachable ();
1560 ret
= emit_library_call_value (cmptf_libfunc
, 0, LCT_CONST
, DImode
, 3,
1561 op0
, TFmode
, op1
, TFmode
,
1562 GEN_INT (magic
), DImode
);
1563 cmp
= gen_reg_rtx (BImode
);
1564 emit_insn (gen_rtx_SET (VOIDmode
, cmp
,
1565 gen_rtx_fmt_ee (ncode
, BImode
,
1568 insns
= get_insns ();
1571 emit_libcall_block (insns
, cmp
, cmp
,
1572 gen_rtx_fmt_ee (code
, BImode
, op0
, op1
));
1577 cmp
= gen_reg_rtx (BImode
);
1578 emit_insn (gen_rtx_SET (VOIDmode
, cmp
,
1579 gen_rtx_fmt_ee (code
, BImode
, op0
, op1
)));
1583 return gen_rtx_fmt_ee (code
, mode
, cmp
, const0_rtx
);
1586 /* Generate an integral vector comparison. Return true if the condition has
1587 been reversed, and so the sense of the comparison should be inverted. */
1590 ia64_expand_vecint_compare (enum rtx_code code
, enum machine_mode mode
,
1591 rtx dest
, rtx op0
, rtx op1
)
1593 bool negate
= false;
1596 /* Canonicalize the comparison to EQ, GT, GTU. */
1607 code
= reverse_condition (code
);
1613 code
= reverse_condition (code
);
1619 code
= swap_condition (code
);
1620 x
= op0
, op0
= op1
, op1
= x
;
1627 /* Unsigned parallel compare is not supported by the hardware. Play some
1628 tricks to turn this into a signed comparison against 0. */
1637 /* Perform a parallel modulo subtraction. */
1638 t1
= gen_reg_rtx (V2SImode
);
1639 emit_insn (gen_subv2si3 (t1
, op0
, op1
));
1641 /* Extract the original sign bit of op0. */
1642 mask
= GEN_INT (-0x80000000);
1643 mask
= gen_rtx_CONST_VECTOR (V2SImode
, gen_rtvec (2, mask
, mask
));
1644 mask
= force_reg (V2SImode
, mask
);
1645 t2
= gen_reg_rtx (V2SImode
);
1646 emit_insn (gen_andv2si3 (t2
, op0
, mask
));
1648 /* XOR it back into the result of the subtraction. This results
1649 in the sign bit set iff we saw unsigned underflow. */
1650 x
= gen_reg_rtx (V2SImode
);
1651 emit_insn (gen_xorv2si3 (x
, t1
, t2
));
1655 op1
= CONST0_RTX (mode
);
1661 /* Perform a parallel unsigned saturating subtraction. */
1662 x
= gen_reg_rtx (mode
);
1663 emit_insn (gen_rtx_SET (VOIDmode
, x
,
1664 gen_rtx_US_MINUS (mode
, op0
, op1
)));
1668 op1
= CONST0_RTX (mode
);
1677 x
= gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
1678 emit_insn (gen_rtx_SET (VOIDmode
, dest
, x
));
1683 /* Emit an integral vector conditional move. */
1686 ia64_expand_vecint_cmov (rtx operands
[])
1688 enum machine_mode mode
= GET_MODE (operands
[0]);
1689 enum rtx_code code
= GET_CODE (operands
[3]);
1693 cmp
= gen_reg_rtx (mode
);
1694 negate
= ia64_expand_vecint_compare (code
, mode
, cmp
,
1695 operands
[4], operands
[5]);
1697 ot
= operands
[1+negate
];
1698 of
= operands
[2-negate
];
1700 if (ot
== CONST0_RTX (mode
))
1702 if (of
== CONST0_RTX (mode
))
1704 emit_move_insn (operands
[0], ot
);
1708 x
= gen_rtx_NOT (mode
, cmp
);
1709 x
= gen_rtx_AND (mode
, x
, of
);
1710 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0], x
));
1712 else if (of
== CONST0_RTX (mode
))
1714 x
= gen_rtx_AND (mode
, cmp
, ot
);
1715 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0], x
));
1721 t
= gen_reg_rtx (mode
);
1722 x
= gen_rtx_AND (mode
, cmp
, operands
[1+negate
]);
1723 emit_insn (gen_rtx_SET (VOIDmode
, t
, x
));
1725 f
= gen_reg_rtx (mode
);
1726 x
= gen_rtx_NOT (mode
, cmp
);
1727 x
= gen_rtx_AND (mode
, x
, operands
[2-negate
]);
1728 emit_insn (gen_rtx_SET (VOIDmode
, f
, x
));
1730 x
= gen_rtx_IOR (mode
, t
, f
);
1731 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0], x
));
1735 /* Emit an integral vector min or max operation. Return true if all done. */
1738 ia64_expand_vecint_minmax (enum rtx_code code
, enum machine_mode mode
,
1743 /* These four combinations are supported directly. */
1744 if (mode
== V8QImode
&& (code
== UMIN
|| code
== UMAX
))
1746 if (mode
== V4HImode
&& (code
== SMIN
|| code
== SMAX
))
1749 /* This combination can be implemented with only saturating subtraction. */
1750 if (mode
== V4HImode
&& code
== UMAX
)
1752 rtx x
, tmp
= gen_reg_rtx (mode
);
1754 x
= gen_rtx_US_MINUS (mode
, operands
[1], operands
[2]);
1755 emit_insn (gen_rtx_SET (VOIDmode
, tmp
, x
));
1757 emit_insn (gen_addv4hi3 (operands
[0], tmp
, operands
[2]));
1761 /* Everything else implemented via vector comparisons. */
1762 xops
[0] = operands
[0];
1763 xops
[4] = xops
[1] = operands
[1];
1764 xops
[5] = xops
[2] = operands
[2];
1783 xops
[3] = gen_rtx_fmt_ee (code
, VOIDmode
, operands
[1], operands
[2]);
1785 ia64_expand_vecint_cmov (xops
);
1789 /* Emit an integral vector widening sum operations. */
1792 ia64_expand_widen_sum (rtx operands
[3], bool unsignedp
)
1795 enum machine_mode wmode
, mode
;
1796 rtx (*unpack_l
) (rtx
, rtx
, rtx
);
1797 rtx (*unpack_h
) (rtx
, rtx
, rtx
);
1798 rtx (*plus
) (rtx
, rtx
, rtx
);
1800 wmode
= GET_MODE (operands
[0]);
1801 mode
= GET_MODE (operands
[1]);
1806 unpack_l
= gen_unpack1_l
;
1807 unpack_h
= gen_unpack1_h
;
1808 plus
= gen_addv4hi3
;
1811 unpack_l
= gen_unpack2_l
;
1812 unpack_h
= gen_unpack2_h
;
1813 plus
= gen_addv2si3
;
1819 /* Fill in x with the sign extension of each element in op1. */
1821 x
= CONST0_RTX (mode
);
1826 x
= gen_reg_rtx (mode
);
1828 neg
= ia64_expand_vecint_compare (LT
, mode
, x
, operands
[1],
1833 l
= gen_reg_rtx (wmode
);
1834 h
= gen_reg_rtx (wmode
);
1835 s
= gen_reg_rtx (wmode
);
1837 emit_insn (unpack_l (gen_lowpart (mode
, l
), operands
[1], x
));
1838 emit_insn (unpack_h (gen_lowpart (mode
, h
), operands
[1], x
));
1839 emit_insn (plus (s
, l
, operands
[2]));
1840 emit_insn (plus (operands
[0], h
, s
));
1843 /* Emit a signed or unsigned V8QI dot product operation. */
1846 ia64_expand_dot_prod_v8qi (rtx operands
[4], bool unsignedp
)
1848 rtx l1
, l2
, h1
, h2
, x1
, x2
, p1
, p2
, p3
, p4
, s1
, s2
, s3
;
1850 /* Fill in x1 and x2 with the sign extension of each element. */
1852 x1
= x2
= CONST0_RTX (V8QImode
);
1857 x1
= gen_reg_rtx (V8QImode
);
1858 x2
= gen_reg_rtx (V8QImode
);
1860 neg
= ia64_expand_vecint_compare (LT
, V8QImode
, x1
, operands
[1],
1861 CONST0_RTX (V8QImode
));
1863 neg
= ia64_expand_vecint_compare (LT
, V8QImode
, x2
, operands
[2],
1864 CONST0_RTX (V8QImode
));
1868 l1
= gen_reg_rtx (V4HImode
);
1869 l2
= gen_reg_rtx (V4HImode
);
1870 h1
= gen_reg_rtx (V4HImode
);
1871 h2
= gen_reg_rtx (V4HImode
);
1873 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode
, l1
), operands
[1], x1
));
1874 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode
, l2
), operands
[2], x2
));
1875 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode
, h1
), operands
[1], x1
));
1876 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode
, h2
), operands
[2], x2
));
1878 p1
= gen_reg_rtx (V2SImode
);
1879 p2
= gen_reg_rtx (V2SImode
);
1880 p3
= gen_reg_rtx (V2SImode
);
1881 p4
= gen_reg_rtx (V2SImode
);
1882 emit_insn (gen_pmpy2_r (p1
, l1
, l2
));
1883 emit_insn (gen_pmpy2_l (p2
, l1
, l2
));
1884 emit_insn (gen_pmpy2_r (p3
, h1
, h2
));
1885 emit_insn (gen_pmpy2_l (p4
, h1
, h2
));
1887 s1
= gen_reg_rtx (V2SImode
);
1888 s2
= gen_reg_rtx (V2SImode
);
1889 s3
= gen_reg_rtx (V2SImode
);
1890 emit_insn (gen_addv2si3 (s1
, p1
, p2
));
1891 emit_insn (gen_addv2si3 (s2
, p3
, p4
));
1892 emit_insn (gen_addv2si3 (s3
, s1
, operands
[3]));
1893 emit_insn (gen_addv2si3 (operands
[0], s2
, s3
));
1896 /* Emit the appropriate sequence for a call. */
1899 ia64_expand_call (rtx retval
, rtx addr
, rtx nextarg ATTRIBUTE_UNUSED
,
1904 addr
= XEXP (addr
, 0);
1905 addr
= convert_memory_address (DImode
, addr
);
1906 b0
= gen_rtx_REG (DImode
, R_BR (0));
1908 /* ??? Should do this for functions known to bind local too. */
1909 if (TARGET_NO_PIC
|| TARGET_AUTO_PIC
)
1912 insn
= gen_sibcall_nogp (addr
);
1914 insn
= gen_call_nogp (addr
, b0
);
1916 insn
= gen_call_value_nogp (retval
, addr
, b0
);
1917 insn
= emit_call_insn (insn
);
1922 insn
= gen_sibcall_gp (addr
);
1924 insn
= gen_call_gp (addr
, b0
);
1926 insn
= gen_call_value_gp (retval
, addr
, b0
);
1927 insn
= emit_call_insn (insn
);
1929 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), pic_offset_table_rtx
);
1933 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), b0
);
1937 ia64_reload_gp (void)
1941 if (current_frame_info
.reg_save_gp
)
1942 tmp
= gen_rtx_REG (DImode
, current_frame_info
.reg_save_gp
);
1945 HOST_WIDE_INT offset
;
1947 offset
= (current_frame_info
.spill_cfa_off
1948 + current_frame_info
.spill_size
);
1949 if (frame_pointer_needed
)
1951 tmp
= hard_frame_pointer_rtx
;
1956 tmp
= stack_pointer_rtx
;
1957 offset
= current_frame_info
.total_size
- offset
;
1960 if (CONST_OK_FOR_I (offset
))
1961 emit_insn (gen_adddi3 (pic_offset_table_rtx
,
1962 tmp
, GEN_INT (offset
)));
1965 emit_move_insn (pic_offset_table_rtx
, GEN_INT (offset
));
1966 emit_insn (gen_adddi3 (pic_offset_table_rtx
,
1967 pic_offset_table_rtx
, tmp
));
1970 tmp
= gen_rtx_MEM (DImode
, pic_offset_table_rtx
);
1973 emit_move_insn (pic_offset_table_rtx
, tmp
);
1977 ia64_split_call (rtx retval
, rtx addr
, rtx retaddr
, rtx scratch_r
,
1978 rtx scratch_b
, int noreturn_p
, int sibcall_p
)
1981 bool is_desc
= false;
  /* If we find we're calling through a register, then we're actually
     calling through a descriptor, so load up the values.  */
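  /* Background note (illustrative, not from the original source): an IA-64
     function descriptor is a pair of 64-bit words { entry address, gp },
     so the code below does one load for the branch target and a second
     one for the callee's gp.  */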
1985 if (REG_P (addr
) && GR_REGNO_P (REGNO (addr
)))
1990 /* ??? We are currently constrained to *not* use peep2, because
1991 we can legitimately change the global lifetime of the GP
1992 (in the form of killing where previously live). This is
1993 because a call through a descriptor doesn't use the previous
1994 value of the GP, while a direct call does, and we do not
1995 commit to either form until the split here.
1997 That said, this means that we lack precise life info for
1998 whether ADDR is dead after this call. This is not terribly
1999 important, since we can fix things up essentially for free
2000 with the POST_DEC below, but it's nice to not use it when we
2001 can immediately tell it's not necessary. */
2002 addr_dead_p
= ((noreturn_p
|| sibcall_p
2003 || TEST_HARD_REG_BIT (regs_invalidated_by_call
,
2005 && !FUNCTION_ARG_REGNO_P (REGNO (addr
)));
2007 /* Load the code address into scratch_b. */
2008 tmp
= gen_rtx_POST_INC (Pmode
, addr
);
2009 tmp
= gen_rtx_MEM (Pmode
, tmp
);
2010 emit_move_insn (scratch_r
, tmp
);
2011 emit_move_insn (scratch_b
, scratch_r
);
2013 /* Load the GP address. If ADDR is not dead here, then we must
2014 revert the change made above via the POST_INCREMENT. */
2016 tmp
= gen_rtx_POST_DEC (Pmode
, addr
);
2019 tmp
= gen_rtx_MEM (Pmode
, tmp
);
2020 emit_move_insn (pic_offset_table_rtx
, tmp
);
2027 insn
= gen_sibcall_nogp (addr
);
2029 insn
= gen_call_value_nogp (retval
, addr
, retaddr
);
2031 insn
= gen_call_nogp (addr
, retaddr
);
2032 emit_call_insn (insn
);
2034 if ((!TARGET_CONST_GP
|| is_desc
) && !noreturn_p
&& !sibcall_p
)
/* Expand an atomic operation.  We want to perform MEM <CODE>= VAL atomically.

   This differs from the generic code in that we know about the zero-extending
   properties of cmpxchg, and the zero-extending requirements of ar.ccv.  We
   also know that ld.acq+cmpxchg.rel equals a full barrier.

   The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	new_reg = cmp_reg op val;
	cmp_reg = compare-and-swap(mem, old_reg, new_reg)
	if (cmp_reg != old_reg)
	  goto label;

   Note that we only do the plain load from memory once.  Subsequent
   iterations use the value loaded by the compare-and-swap pattern.  */
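
/* Rough sketch (illustrative only, not emitted verbatim by this function) of
   the IA-64 code that results for a DImode read-modify-write:

	ld8.acq       r_cmp = [mem]
     loop:
	mov           r_old = r_cmp
	<op>          r_new = r_cmp, val
	mov           ar.ccv = r_old
	cmpxchg8.rel  r_cmp = [mem], r_new, ar.ccv
	cmp.ne        p6, p7 = r_cmp, r_old
   (p6) br.cond.spnt  loop  */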
2058 ia64_expand_atomic_op (enum rtx_code code
, rtx mem
, rtx val
,
2059 rtx old_dst
, rtx new_dst
)
2061 enum machine_mode mode
= GET_MODE (mem
);
2062 rtx old_reg
, new_reg
, cmp_reg
, ar_ccv
, label
;
2063 enum insn_code icode
;
2065 /* Special case for using fetchadd. */
2066 if ((mode
== SImode
|| mode
== DImode
)
2067 && (code
== PLUS
|| code
== MINUS
)
2068 && fetchadd_operand (val
, mode
))
2071 val
= GEN_INT (-INTVAL (val
));
2074 old_dst
= gen_reg_rtx (mode
);
2076 emit_insn (gen_memory_barrier ());
2079 icode
= CODE_FOR_fetchadd_acq_si
;
2081 icode
= CODE_FOR_fetchadd_acq_di
;
2082 emit_insn (GEN_FCN (icode
) (old_dst
, mem
, val
));
2086 new_reg
= expand_simple_binop (mode
, PLUS
, old_dst
, val
, new_dst
,
2088 if (new_reg
!= new_dst
)
2089 emit_move_insn (new_dst
, new_reg
);
2094 /* Because of the volatile mem read, we get an ld.acq, which is the
2095 front half of the full barrier. The end half is the cmpxchg.rel. */
2096 gcc_assert (MEM_VOLATILE_P (mem
));
2098 old_reg
= gen_reg_rtx (DImode
);
2099 cmp_reg
= gen_reg_rtx (DImode
);
2100 label
= gen_label_rtx ();
2104 val
= simplify_gen_subreg (DImode
, val
, mode
, 0);
2105 emit_insn (gen_extend_insn (cmp_reg
, mem
, DImode
, mode
, 1));
2108 emit_move_insn (cmp_reg
, mem
);
2112 ar_ccv
= gen_rtx_REG (DImode
, AR_CCV_REGNUM
);
2113 emit_move_insn (old_reg
, cmp_reg
);
2114 emit_move_insn (ar_ccv
, cmp_reg
);
2117 emit_move_insn (old_dst
, gen_lowpart (mode
, cmp_reg
));
2122 new_reg
= expand_simple_unop (DImode
, NOT
, new_reg
, NULL_RTX
, true);
2125 new_reg
= expand_simple_binop (DImode
, code
, new_reg
, val
, NULL_RTX
,
2126 true, OPTAB_DIRECT
);
2129 new_reg
= gen_lowpart (mode
, new_reg
);
2131 emit_move_insn (new_dst
, new_reg
);
2135 case QImode
: icode
= CODE_FOR_cmpxchg_rel_qi
; break;
2136 case HImode
: icode
= CODE_FOR_cmpxchg_rel_hi
; break;
2137 case SImode
: icode
= CODE_FOR_cmpxchg_rel_si
; break;
2138 case DImode
: icode
= CODE_FOR_cmpxchg_rel_di
; break;
2143 emit_insn (GEN_FCN (icode
) (cmp_reg
, mem
, ar_ccv
, new_reg
));
2145 emit_cmp_and_jump_insns (cmp_reg
, old_reg
, NE
, NULL
, DImode
, true, label
);
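
/* For example, a 4-byte atomic add whose operand satisfies
   fetchadd_operand is emitted as a memory barrier followed by a single
   fetchadd4.acq, while every other operation falls into the
   ld.acq / cmpxchg.rel retry loop built above.  (Informal summary of
   the two code paths in ia64_expand_atomic_op.)  */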
/* Begin the assembly file.  */

static void
ia64_file_start (void)
{
  /* Variable tracking should be run after all optimizations which change order
     of insns.  It also needs a valid CFG.  This can't be done in
     ia64_override_options, because flag_var_tracking is finalized after
     that.  */
  ia64_flag_var_tracking = flag_var_tracking;
  flag_var_tracking = 0;

  default_file_start ();
  emit_safe_across_calls ();
}
static void
emit_safe_across_calls (void)
{
  unsigned int rs, re;
  int out_state;

  rs = 1;
  out_state = 0;
  while (1)
    {
      while (rs < 64 && call_used_regs[PR_REG (rs)])
        rs++;
      if (rs >= 64)
        break;
      for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
        continue;
      if (out_state == 0)
        {
          fputs ("\t.pred.safe_across_calls ", asm_out_file);
          out_state = 1;
        }
      else
        fputc (',', asm_out_file);
      if (re == rs + 1)
        fprintf (asm_out_file, "p%u", rs);
      else
        fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
      rs = re + 1;
    }
  if (out_state)
    fputc ('\n', asm_out_file);
}
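
/* The directive built above lists maximal runs of call-preserved
   predicate registers, producing something of the form
   ".pred.safe_across_calls p1-p5,p16-p63" (illustrative output only;
   the exact ranges depend on call_used_regs).  */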
/* Helper function for ia64_compute_frame_size: find an appropriate general
   register to spill some special register to.  SPECIAL_SPILL_MASK contains
   bits in GR0 to GR31 that have already been allocated by this routine.
   TRY_LOCALS is true if we should attempt to locate a local regnum.  */

static int
find_gr_spill (int try_locals)
{
  int regno;

  /* If this is a leaf function, first try an otherwise unused
     call-clobbered register.  */
  if (current_function_is_leaf)
    {
      for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
        if (! regs_ever_live[regno]
            && call_used_regs[regno]
            && ! fixed_regs[regno]
            && ! global_regs[regno]
            && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
          {
            current_frame_info.gr_used_mask |= 1 << regno;
            return regno;
          }
    }

  if (try_locals)
    {
      regno = current_frame_info.n_local_regs;
      /* If there is a frame pointer, then we can't use loc79, because
         that is HARD_FRAME_POINTER_REGNUM.  In particular, see the
         reg_name switching code in ia64_expand_prologue.  */
      if (regno < (80 - frame_pointer_needed))
        {
          current_frame_info.n_local_regs = regno + 1;
          return LOC_REG (0) + regno;
        }
    }

  /* Failed to find a general register to spill to.  Must use stack.  */
  return 0;
}
/* In order to make for nice schedules, we try to allocate every temporary
   to a different register.  We must of course stay away from call-saved,
   fixed, and global registers.  We must also stay away from registers
   allocated in current_frame_info.gr_used_mask, since those include regs
   used all through the prologue.

   Any register allocated here must be used immediately.  The idea is to
   aid scheduling, not to solve data flow problems.  */

static int last_scratch_gr_reg;

static int
next_scratch_gr_reg (void)
{
  int i, regno;

  for (i = 0; i < 32; ++i)
    {
      regno = (last_scratch_gr_reg + i + 1) & 31;
      if (call_used_regs[regno]
          && ! fixed_regs[regno]
          && ! global_regs[regno]
          && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
        {
          last_scratch_gr_reg = regno;
          return regno;
        }
    }

  /* There must be _something_ available.  */
  gcc_unreachable ();
}
/* Helper function for ia64_compute_frame_size, called through
   diddle_return_value.  Mark REG in current_frame_info.gr_used_mask.  */

static void
mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno = REGNO (reg);
  if (regno < 32)
    {
      unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
      for (i = 0; i < n; ++i)
        current_frame_info.gr_used_mask |= 1 << (regno + i);
    }
}
/* Returns the number of bytes offset between the frame pointer and the stack
   pointer for the current function.  SIZE is the number of bytes of space
   needed for local variables.  */

static void
ia64_compute_frame_size (HOST_WIDE_INT size)
{
  HOST_WIDE_INT total_size;
  HOST_WIDE_INT spill_size = 0;
  HOST_WIDE_INT extra_spill_size = 0;
  HOST_WIDE_INT pretend_args_size;
  HARD_REG_SET mask;
  int n_spilled = 0;
  int spilled_gr_p = 0;
  int spilled_fr_p = 0;
  unsigned int regno;
  int i;

  if (current_frame_info.initialized)
    return;

  memset (&current_frame_info, 0, sizeof current_frame_info);
  CLEAR_HARD_REG_SET (mask);

  /* Don't allocate scratches to the return register.  */
  diddle_return_value (mark_reg_gr_used_mask, NULL);

  /* Don't allocate scratches to the EH scratch registers.  */
  if (cfun->machine->ia64_eh_epilogue_sp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
  if (cfun->machine->ia64_eh_epilogue_bsp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);

  /* Find the size of the register stack frame.  We have only 80 local
     registers, because we reserve 8 for the inputs and 8 for the
     outputs.  */

  /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
     since we'll be adjusting that down later.  */
  regno = LOC_REG (78) + ! frame_pointer_needed;
  for (; regno >= LOC_REG (0); regno--)
    if (regs_ever_live[regno])
      break;
  current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;

  /* For functions marked with the syscall_linkage attribute, we must mark
     all eight input registers as in use, so that locals aren't visible to
     the caller.  */
  if (cfun->machine->n_varargs > 0
      || lookup_attribute ("syscall_linkage",
                           TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
    current_frame_info.n_input_regs = 8;
  else
    {
      for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
        if (regs_ever_live[regno])
          break;
      current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
    }

  for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
    if (regs_ever_live[regno])
      break;
  i = regno - OUT_REG (0) + 1;

#ifndef PROFILE_HOOK
  /* When -p profiling, we need one output register for the mcount argument.
     Likewise for -a profiling for the bb_init_func argument.  For -ax
     profiling, we need two output registers for the two bb_init_trace_func
     arguments.  */
  if (current_function_profile)
    i = MAX (i, 1);
#endif
  current_frame_info.n_output_regs = i;

  /* ??? No rotating register support yet.  */
  current_frame_info.n_rotate_regs = 0;

  /* Discover which registers need spilling, and how much room that
     will take.  Begin with floating point and general registers,
     which will always wind up on the stack.  */
  for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
        SET_HARD_REG_BIT (mask, regno);
        spill_size += 16;
        n_spilled += 1;
        spilled_fr_p = 1;
      }

  for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
        SET_HARD_REG_BIT (mask, regno);
        spill_size += 8;
        n_spilled += 1;
        spilled_gr_p = 1;
      }

  for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
        SET_HARD_REG_BIT (mask, regno);
        spill_size += 8;
        n_spilled += 1;
      }

  /* Now come all special registers that might get saved in other
     general registers.  */
  if (frame_pointer_needed)
    {
      current_frame_info.reg_fp = find_gr_spill (1);
      /* If we did not get a register, then we take LOC79.  This is guaranteed
         to be free, even if regs_ever_live is already set, because this is
         HARD_FRAME_POINTER_REGNUM.  This requires incrementing n_local_regs,
         as we don't count loc79 above.  */
      if (current_frame_info.reg_fp == 0)
        {
          current_frame_info.reg_fp = LOC_REG (79);
          current_frame_info.n_local_regs++;
        }
    }

  if (! current_function_is_leaf)
    {
      /* Emit a save of BR0 if we call other functions.  Do this even
         if this function doesn't return, as EH depends on this to be
         able to unwind the stack.  */
      SET_HARD_REG_BIT (mask, BR_REG (0));
      current_frame_info.reg_save_b0 = find_gr_spill (1);
      if (current_frame_info.reg_save_b0 == 0)
        {
          spill_size += 8;
          n_spilled += 1;
        }

      /* Similarly for ar.pfs.  */
      SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
      current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
      if (current_frame_info.reg_save_ar_pfs == 0)
        {
          extra_spill_size += 8;
          n_spilled += 1;
        }

      /* Similarly for gp.  Note that if we're calling setjmp, the stacked
         registers are clobbered, so we fall back to the stack.  */
      current_frame_info.reg_save_gp
        = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
      if (current_frame_info.reg_save_gp == 0)
        {
          SET_HARD_REG_BIT (mask, GR_REG (1));
          spill_size += 8;
          n_spilled += 1;
        }
    }
  else
    {
      if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
        {
          SET_HARD_REG_BIT (mask, BR_REG (0));
          spill_size += 8;
          n_spilled += 1;
        }

      if (regs_ever_live[AR_PFS_REGNUM])
        {
          SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
          current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
          if (current_frame_info.reg_save_ar_pfs == 0)
            {
              extra_spill_size += 8;
              n_spilled += 1;
            }
        }
    }

  /* Unwind descriptor hackery: things are most efficient if we allocate
     consecutive GR save registers for RP, PFS, FP in that order.  However,
     it is absolutely critical that FP get the only hard register that's
     guaranteed to be free, so we allocated it first.  If all three did
     happen to be allocated hard regs, and are consecutive, rearrange them
     into the preferred order now.  */
  if (current_frame_info.reg_fp != 0
      && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
      && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
    {
      current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
      current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
      current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
    }

  /* See if we need to store the predicate register block.  */
  for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      break;
  if (regno <= PR_REG (63))
    {
      SET_HARD_REG_BIT (mask, PR_REG (0));
      current_frame_info.reg_save_pr = find_gr_spill (1);
      if (current_frame_info.reg_save_pr == 0)
        {
          extra_spill_size += 8;
          n_spilled += 1;
        }

      /* ??? Mark them all as used so that register renaming and such
         are free to use them.  */
      for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
        regs_ever_live[regno] = 1;
    }

  /* If we're forced to use st8.spill, we're forced to save and restore
     ar.unat as well.  The check for existing liveness allows inline asm
     to touch ar.unat.  */
  if (spilled_gr_p || cfun->machine->n_varargs
      || regs_ever_live[AR_UNAT_REGNUM])
    {
      regs_ever_live[AR_UNAT_REGNUM] = 1;
      SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
      current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
      if (current_frame_info.reg_save_ar_unat == 0)
        {
          extra_spill_size += 8;
          n_spilled += 1;
        }
    }

  if (regs_ever_live[AR_LC_REGNUM])
    {
      SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
      current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
      if (current_frame_info.reg_save_ar_lc == 0)
        {
          extra_spill_size += 8;
          n_spilled += 1;
        }
    }

  /* If we have an odd number of words of pretend arguments written to
     the stack, then the FR save area will be unaligned.  We round the
     size of this area up to keep things 16 byte aligned.  */
  if (spilled_fr_p)
    pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
  else
    pretend_args_size = current_function_pretend_args_size;

  total_size = (spill_size + extra_spill_size + size + pretend_args_size
                + current_function_outgoing_args_size);
  total_size = IA64_STACK_ALIGN (total_size);

  /* We always use the 16-byte scratch area provided by the caller, but
     if we are a leaf function, there's no one to which we need to provide
     a scratch area.  */
  if (current_function_is_leaf)
    total_size = MAX (0, total_size - 16);

  current_frame_info.total_size = total_size;
  current_frame_info.spill_cfa_off = pretend_args_size - 16;
  current_frame_info.spill_size = spill_size;
  current_frame_info.extra_spill_size = extra_spill_size;
  COPY_HARD_REG_SET (current_frame_info.mask, mask);
  current_frame_info.n_spilled = n_spilled;
  current_frame_info.initialized = reload_completed;
}
/* Compute the initial difference between the specified pair of registers.  */

HOST_WIDE_INT
ia64_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  ia64_compute_frame_size (get_frame_size ());
  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      switch (to)
        {
        case HARD_FRAME_POINTER_REGNUM:
          if (current_function_is_leaf)
            offset = -current_frame_info.total_size;
          else
            offset = -(current_frame_info.total_size
                       - current_function_outgoing_args_size - 16);
          break;

        case STACK_POINTER_REGNUM:
          if (current_function_is_leaf)
            offset = 0;
          else
            offset = 16 + current_function_outgoing_args_size;
          break;

        default:
          gcc_unreachable ();
        }
      break;

    case ARG_POINTER_REGNUM:
      /* Arguments start above the 16 byte save area, unless stdarg
         in which case we store through the 16 byte save area.  */
      switch (to)
        {
        case HARD_FRAME_POINTER_REGNUM:
          offset = 16 - current_function_pretend_args_size;
          break;

        case STACK_POINTER_REGNUM:
          offset = (current_frame_info.total_size
                    + 16 - current_function_pretend_args_size);
          break;

        default:
          gcc_unreachable ();
        }
      break;

    default:
      gcc_unreachable ();
    }

  return offset;
}
/* If there are more than a trivial number of register spills, we use
   two interleaved iterators so that we can get two memory references
   per insn group.

   In order to simplify things in the prologue and epilogue expanders,
   we use helper functions to fix up the memory references after the
   fact with the appropriate offsets to a POST_MODIFY memory mode.
   The following data structure tracks the state of the two iterators
   while insns are being emitted.  */

struct spill_fill_data
{
  rtx init_after;               /* point at which to emit initializations */
  rtx init_reg[2];              /* initial base register */
  rtx iter_reg[2];              /* the iterator registers */
  rtx *prev_addr[2];            /* address of last memory use */
  rtx prev_insn[2];             /* the insn corresponding to prev_addr */
  HOST_WIDE_INT prev_off[2];    /* last offset */
  int n_iter;                   /* number of iterators in use */
  int next_iter;                /* next iterator to use */
  unsigned int save_gr_used_mask;
};

static struct spill_fill_data spill_fill_data;
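
/* With two iterators, successive spills and fills alternate between
   iter_reg[0] and iter_reg[1], so adjacent memory references use
   different base registers; the POST_MODIFY fix-ups applied in
   spill_restore_mem keep each iterator's running offset (prev_off)
   consistent.  (Descriptive summary of how these fields are used.)  */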
static void
setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
{
  int i;

  spill_fill_data.init_after = get_last_insn ();
  spill_fill_data.init_reg[0] = init_reg;
  spill_fill_data.init_reg[1] = init_reg;
  spill_fill_data.prev_addr[0] = NULL;
  spill_fill_data.prev_addr[1] = NULL;
  spill_fill_data.prev_insn[0] = NULL;
  spill_fill_data.prev_insn[1] = NULL;
  spill_fill_data.prev_off[0] = cfa_off;
  spill_fill_data.prev_off[1] = cfa_off;
  spill_fill_data.next_iter = 0;
  spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;

  spill_fill_data.n_iter = 1 + (n_spills > 2);
  for (i = 0; i < spill_fill_data.n_iter; ++i)
    {
      int regno = next_scratch_gr_reg ();
      spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
      current_frame_info.gr_used_mask |= 1 << regno;
    }
}

static void
finish_spill_pointers (void)
{
  current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
}
static rtx
spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
{
  int iter = spill_fill_data.next_iter;
  HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
  rtx disp_rtx = GEN_INT (disp);
  rtx mem;

  if (spill_fill_data.prev_addr[iter])
    {
      if (CONST_OK_FOR_N (disp))
        {
          *spill_fill_data.prev_addr[iter]
            = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
                                   gen_rtx_PLUS (DImode,
                                                 spill_fill_data.iter_reg[iter],
                                                 disp_rtx));
          REG_NOTES (spill_fill_data.prev_insn[iter])
            = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
                                 REG_NOTES (spill_fill_data.prev_insn[iter]));
        }
      else
        {
          /* ??? Could use register post_modify for loads.  */
          if (! CONST_OK_FOR_I (disp))
            {
              rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
              emit_move_insn (tmp, disp_rtx);
              disp_rtx = tmp;
            }
          emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
                                 spill_fill_data.iter_reg[iter], disp_rtx));
        }
    }
  else
    {
      rtx seq, insn;

      /* Micro-optimization: if we've created a frame pointer, it's at
         CFA 0, which may allow the real iterator to be initialized lower,
         slightly increasing parallelism.  Also, if there are few saves
         it may eliminate the iterator entirely.  */
      if (disp == 0
          && spill_fill_data.init_reg[iter] == stack_pointer_rtx
          && frame_pointer_needed)
        {
          mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
          set_mem_alias_set (mem, get_varargs_alias_set ());
          return mem;
        }

      if (disp == 0)
        seq = gen_movdi (spill_fill_data.iter_reg[iter],
                         spill_fill_data.init_reg[iter]);
      else
        {
          start_sequence ();

          if (! CONST_OK_FOR_I (disp))
            {
              rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
              emit_move_insn (tmp, disp_rtx);
              disp_rtx = tmp;
            }

          emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
                                 spill_fill_data.init_reg[iter],
                                 disp_rtx));

          seq = get_insns ();
          end_sequence ();
        }

      /* Careful for being the first insn in a sequence.  */
      if (spill_fill_data.init_after)
        insn = emit_insn_after (seq, spill_fill_data.init_after);
      else
        {
          rtx first = get_insns ();
          if (first)
            insn = emit_insn_before (seq, first);
          else
            insn = emit_insn (seq);
        }
      spill_fill_data.init_after = insn;

      /* If DISP is 0, we may or may not have a further adjustment
         afterward.  If we do, then the load/store insn may be modified
         to be a post-modify.  If we don't, then this copy may be
         eliminated by copyprop_hardreg_forward, which makes this
         insn garbage, which runs afoul of the sanity check in
         propagate_one_insn.  So mark this insn as legal to delete.  */
      if (disp == 0)
        REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
                                             REG_NOTES (insn));
    }

  mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);

  /* ??? Not all of the spills are for varargs, but some of them are.
     The rest of the spills belong in an alias set of their own.  But
     it doesn't actually hurt to include them here.  */
  set_mem_alias_set (mem, get_varargs_alias_set ());

  spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
  spill_fill_data.prev_off[iter] = cfa_off;

  if (++iter >= spill_fill_data.n_iter)
    iter = 0;
  spill_fill_data.next_iter = iter;

  return mem;
}
static void
do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
          rtx frame_reg)
{
  int iter = spill_fill_data.next_iter;
  rtx mem, insn;

  mem = spill_restore_mem (reg, cfa_off);
  insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
  spill_fill_data.prev_insn[iter] = insn;

  if (frame_reg)
    {
      rtx base;
      HOST_WIDE_INT off;

      RTX_FRAME_RELATED_P (insn) = 1;

      /* Don't even pretend that the unwind code can intuit its way
         through a pair of interleaved post_modify iterators.  Just
         provide the correct answer.  */
      if (frame_pointer_needed)
        {
          base = hard_frame_pointer_rtx;
          off = - cfa_off;
        }
      else
        {
          base = stack_pointer_rtx;
          off = current_frame_info.total_size - cfa_off;
        }

      REG_NOTES (insn)
        = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                             gen_rtx_SET (VOIDmode,
                                          gen_rtx_MEM (GET_MODE (reg),
                                                       plus_constant (base, off)),
                                          frame_reg),
                             REG_NOTES (insn));
    }
}

static void
do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
{
  int iter = spill_fill_data.next_iter;
  rtx insn;

  insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
                                GEN_INT (cfa_off)));
  spill_fill_data.prev_insn[iter] = insn;
}
/* Wrapper functions that discard the CONST_INT spill offset.  These
   exist so that we can give gr_spill/gr_fill the offset they need and
   use a consistent function interface.  */

static rtx
gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
{
  return gen_movdi (dest, src);
}

static rtx
gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
{
  return gen_fr_spill (dest, src);
}

static rtx
gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
{
  return gen_fr_restore (dest, src);
}
/* Called after register allocation to add any instructions needed for the
   prologue.  Using a prologue insn is favored compared to putting all of the
   instructions in output_function_prologue(), since it allows the scheduler
   to intermix instructions with the saves of the caller saved registers.  In
   some cases, it might be necessary to emit a barrier instruction as the last
   insn to prevent such scheduling.

   Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
   so that the debug info generation code can handle them properly.

   The register save area is layed out like so:

        [ varargs spill area ]
        [ fr register spill area ]
        [ br register spill area ]
        [ ar register spill area ]
        [ pr register spill area ]
        [ gr register spill area ] */

/* ??? Get inefficient code when the frame size is larger than can fit in an
   adds instruction.  */

void
ia64_expand_prologue (void)
{
  rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
  int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
  rtx reg, alt_reg;

  ia64_compute_frame_size (get_frame_size ());
  last_scratch_gr_reg = 15;

  /* If there is no epilogue, then we don't need some prologue insns.
     We need to avoid emitting the dead prologue insns, because flow
     will complain about them.  */
  if (optimize)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
        if ((e->flags & EDGE_FAKE) == 0
            && (e->flags & EDGE_FALLTHRU) != 0)
          break;
      epilogue_p = (e != NULL);
    }
  else
    epilogue_p = 1;

  /* Set the local, input, and output register names.  We need to do this
     for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
     half.  If we use in/loc/out register names, then we get assembler errors
     in crtn.S because there is no alloc insn or regstk directive in there.  */
  if (! TARGET_REG_NAMES)
    {
      int inputs = current_frame_info.n_input_regs;
      int locals = current_frame_info.n_local_regs;
      int outputs = current_frame_info.n_output_regs;

      for (i = 0; i < inputs; i++)
        reg_names[IN_REG (i)] = ia64_reg_numbers[i];
      for (i = 0; i < locals; i++)
        reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
      for (i = 0; i < outputs; i++)
        reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
    }

  /* Set the frame pointer register name.  The regnum is logically loc79,
     but of course we'll not have allocated that many locals.  Rather than
     worrying about renumbering the existing rtxs, we adjust the name.  */
  /* ??? This code means that we can never use one local register when
     there is a frame pointer.  loc79 gets wasted in this case, as it is
     renamed to a register that will never be used.  See also the try_locals
     code in find_gr_spill.  */
  if (current_frame_info.reg_fp)
    {
      const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
      reg_names[HARD_FRAME_POINTER_REGNUM]
        = reg_names[current_frame_info.reg_fp];
      reg_names[current_frame_info.reg_fp] = tmp;
    }

  /* We don't need an alloc instruction if we've used no outputs or locals.  */
  if (current_frame_info.n_local_regs == 0
      && current_frame_info.n_output_regs == 0
      && current_frame_info.n_input_regs <= current_function_args_info.int_regs
      && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
    {
      /* If there is no alloc, but there are input registers used, then we
         need a .regstk directive.  */
      current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
      ar_pfs_save_reg = NULL_RTX;
    }
  else
    {
      current_frame_info.need_regstk = 0;

      if (current_frame_info.reg_save_ar_pfs)
        regno = current_frame_info.reg_save_ar_pfs;
      else
        regno = next_scratch_gr_reg ();
      ar_pfs_save_reg = gen_rtx_REG (DImode, regno);

      insn = emit_insn (gen_alloc (ar_pfs_save_reg,
                                   GEN_INT (current_frame_info.n_input_regs),
                                   GEN_INT (current_frame_info.n_local_regs),
                                   GEN_INT (current_frame_info.n_output_regs),
                                   GEN_INT (current_frame_info.n_rotate_regs)));
      RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
    }

  /* Set up frame pointer, stack pointer, and spill iterators.  */

  n_varargs = cfun->machine->n_varargs;
  setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
                        stack_pointer_rtx, 0);

  if (frame_pointer_needed)
    {
      insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (current_frame_info.total_size != 0)
    {
      rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
      rtx offset;

      if (CONST_OK_FOR_I (- current_frame_info.total_size))
        offset = frame_size_rtx;
      else
        {
          regno = next_scratch_gr_reg ();
          offset = gen_rtx_REG (DImode, regno);
          emit_move_insn (offset, frame_size_rtx);
        }

      insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
                                    stack_pointer_rtx, offset));

      if (! frame_pointer_needed)
        {
          RTX_FRAME_RELATED_P (insn) = 1;
          if (GET_CODE (offset) != CONST_INT)
            REG_NOTES (insn)
              = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                                   gen_rtx_SET (VOIDmode,
                                                stack_pointer_rtx,
                                                gen_rtx_PLUS (DImode,
                                                              stack_pointer_rtx,
                                                              frame_size_rtx)),
                                   REG_NOTES (insn));
        }

      /* ??? At this point we must generate a magic insn that appears to
         modify the stack pointer, the frame pointer, and all spill
         iterators.  This would allow the most scheduling freedom.  For
         now, just hard stop.  */
      emit_insn (gen_blockage ());
    }

  /* Must copy out ar.unat before doing any integer spills.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
    {
      if (current_frame_info.reg_save_ar_unat)
        ar_unat_save_reg
          = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
      else
        {
          alt_regno = next_scratch_gr_reg ();
          ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
          current_frame_info.gr_used_mask |= 1 << alt_regno;
        }

      reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
      insn = emit_move_insn (ar_unat_save_reg, reg);
      RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);

      /* Even if we're not going to generate an epilogue, we still
         need to save the register so that EH works.  */
      if (! epilogue_p && current_frame_info.reg_save_ar_unat)
        emit_insn (gen_prologue_use (ar_unat_save_reg));
    }
  else
    ar_unat_save_reg = NULL_RTX;

  /* Spill all varargs registers.  Do this before spilling any GR registers,
     since we want the UNAT bits for the GR registers to override the UNAT
     bits from varargs, which we don't care about.  */
  cfa_off = -16;
  for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
    {
      reg = gen_rtx_REG (DImode, regno);
      do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
    }

  /* Locate the bottom of the register save area.  */
  cfa_off = (current_frame_info.spill_cfa_off
             + current_frame_info.spill_size
             + current_frame_info.extra_spill_size);

  /* Save the predicate register block either in a register or in memory.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
    {
      reg = gen_rtx_REG (DImode, PR_REG (0));
      if (current_frame_info.reg_save_pr != 0)
        {
          alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
          insn = emit_move_insn (alt_reg, reg);

          /* ??? Denote pr spill/fill by a DImode move that modifies all
             64 hard registers.  */
          RTX_FRAME_RELATED_P (insn) = 1;
          REG_NOTES (insn)
            = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                                 gen_rtx_SET (VOIDmode, alt_reg, reg),
                                 REG_NOTES (insn));

          /* Even if we're not going to generate an epilogue, we still
             need to save the register so that EH works.  */
          if (! epilogue_p)
            emit_insn (gen_prologue_use (alt_reg));
        }
      else
        {
          alt_regno = next_scratch_gr_reg ();
          alt_reg = gen_rtx_REG (DImode, alt_regno);
          insn = emit_move_insn (alt_reg, reg);
          do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
          cfa_off -= 8;
        }
    }

  /* Handle AR regs in numerical order.  All of them get special handling.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
      && current_frame_info.reg_save_ar_unat == 0)
    {
      reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
      do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
      cfa_off -= 8;
    }

  /* The alloc insn already copied ar.pfs into a general register.  The
     only thing we have to do now is copy that register to a stack slot
     if we'd not allocated a local register for the job.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
      && current_frame_info.reg_save_ar_pfs == 0)
    {
      reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
      do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
      cfa_off -= 8;
    }

  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
    {
      reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
      if (current_frame_info.reg_save_ar_lc != 0)
        {
          alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
          insn = emit_move_insn (alt_reg, reg);
          RTX_FRAME_RELATED_P (insn) = 1;

          /* Even if we're not going to generate an epilogue, we still
             need to save the register so that EH works.  */
          if (! epilogue_p)
            emit_insn (gen_prologue_use (alt_reg));
        }
      else
        {
          alt_regno = next_scratch_gr_reg ();
          alt_reg = gen_rtx_REG (DImode, alt_regno);
          emit_move_insn (alt_reg, reg);
          do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
          cfa_off -= 8;
        }
    }

  if (current_frame_info.reg_save_gp)
    {
      insn = emit_move_insn (gen_rtx_REG (DImode,
                                          current_frame_info.reg_save_gp),
                             pic_offset_table_rtx);
      /* We don't know for sure yet if this is actually needed, since
         we've not split the PIC call patterns.  If all of the calls
         are indirect, and not followed by any uses of the gp, then
         this save is dead.  Allow it to go away.  */
      REG_NOTES (insn)
        = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
    }

  /* We should now be at the base of the gr/br/fr spill area.  */
  gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
                          + current_frame_info.spill_size));

  /* Spill all general registers.  */
  for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
        reg = gen_rtx_REG (DImode, regno);
        do_spill (gen_gr_spill, reg, cfa_off, reg);
        cfa_off -= 8;
      }

  /* Handle BR0 specially -- it may be getting stored permanently in
     some GR register.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
    {
      reg = gen_rtx_REG (DImode, BR_REG (0));
      if (current_frame_info.reg_save_b0 != 0)
        {
          alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
          insn = emit_move_insn (alt_reg, reg);
          RTX_FRAME_RELATED_P (insn) = 1;

          /* Even if we're not going to generate an epilogue, we still
             need to save the register so that EH works.  */
          if (! epilogue_p)
            emit_insn (gen_prologue_use (alt_reg));
        }
      else
        {
          alt_regno = next_scratch_gr_reg ();
          alt_reg = gen_rtx_REG (DImode, alt_regno);
          emit_move_insn (alt_reg, reg);
          do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
          cfa_off -= 8;
        }
    }

  /* Spill the rest of the BR registers.  */
  for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
        alt_regno = next_scratch_gr_reg ();
        alt_reg = gen_rtx_REG (DImode, alt_regno);
        reg = gen_rtx_REG (DImode, regno);
        emit_move_insn (alt_reg, reg);
        do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
        cfa_off -= 8;
      }

  /* Align the frame and spill all FR registers.  */
  for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
        gcc_assert (!(cfa_off & 15));
        reg = gen_rtx_REG (XFmode, regno);
        do_spill (gen_fr_spill_x, reg, cfa_off, reg);
        cfa_off -= 16;
      }

  gcc_assert (cfa_off == current_frame_info.spill_cfa_off);

  finish_spill_pointers ();
}
/* Called after register allocation to add any instructions needed for the
   epilogue.  Using an epilogue insn is favored compared to putting all of the
   instructions in output_function_prologue(), since it allows the scheduler
   to intermix instructions with the saves of the caller saved registers.  In
   some cases, it might be necessary to emit a barrier instruction as the last
   insn to prevent such scheduling.  */

void
ia64_expand_epilogue (int sibcall_p)
{
  rtx insn, reg, alt_reg, ar_unat_save_reg;
  int regno, alt_regno, cfa_off;

  ia64_compute_frame_size (get_frame_size ());

  /* If there is a frame pointer, then we use it instead of the stack
     pointer, so that the stack pointer does not need to be valid when
     the epilogue starts.  See EXIT_IGNORE_STACK.  */
  if (frame_pointer_needed)
    setup_spill_pointers (current_frame_info.n_spilled,
                          hard_frame_pointer_rtx, 0);
  else
    setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
                          current_frame_info.total_size);

  if (current_frame_info.total_size != 0)
    {
      /* ??? At this point we must generate a magic insn that appears to
         modify the spill iterators and the frame pointer.  This would
         allow the most scheduling freedom.  For now, just hard stop.  */
      emit_insn (gen_blockage ());
    }

  /* Locate the bottom of the register save area.  */
  cfa_off = (current_frame_info.spill_cfa_off
             + current_frame_info.spill_size
             + current_frame_info.extra_spill_size);

  /* Restore the predicate registers.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
    {
      if (current_frame_info.reg_save_pr != 0)
        alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
      else
        {
          alt_regno = next_scratch_gr_reg ();
          alt_reg = gen_rtx_REG (DImode, alt_regno);
          do_restore (gen_movdi_x, alt_reg, cfa_off);
          cfa_off -= 8;
        }
      reg = gen_rtx_REG (DImode, PR_REG (0));
      emit_move_insn (reg, alt_reg);
    }

  /* Restore the application registers.  */

  /* Load the saved unat from the stack, but do not restore it until
     after the GRs have been restored.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
    {
      if (current_frame_info.reg_save_ar_unat != 0)
        ar_unat_save_reg
          = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
      else
        {
          alt_regno = next_scratch_gr_reg ();
          ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
          current_frame_info.gr_used_mask |= 1 << alt_regno;
          do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
          cfa_off -= 8;
        }
    }
  else
    ar_unat_save_reg = NULL_RTX;

  if (current_frame_info.reg_save_ar_pfs != 0)
    {
      alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
      reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
      emit_move_insn (reg, alt_reg);
    }
  else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
    {
      alt_regno = next_scratch_gr_reg ();
      alt_reg = gen_rtx_REG (DImode, alt_regno);
      do_restore (gen_movdi_x, alt_reg, cfa_off);
      cfa_off -= 8;
      reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
      emit_move_insn (reg, alt_reg);
    }

  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
    {
      if (current_frame_info.reg_save_ar_lc != 0)
        alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
      else
        {
          alt_regno = next_scratch_gr_reg ();
          alt_reg = gen_rtx_REG (DImode, alt_regno);
          do_restore (gen_movdi_x, alt_reg, cfa_off);
          cfa_off -= 8;
        }
      reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
      emit_move_insn (reg, alt_reg);
    }

  /* We should now be at the base of the gr/br/fr spill area.  */
  gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
                          + current_frame_info.spill_size));

  /* The GP may be stored on the stack in the prologue, but it's
     never restored in the epilogue.  Skip the stack slot.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
    cfa_off -= 8;

  /* Restore all general registers.  */
  for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
        reg = gen_rtx_REG (DImode, regno);
        do_restore (gen_gr_restore, reg, cfa_off);
        cfa_off -= 8;
      }

  /* Restore the branch registers.  Handle B0 specially, as it may
     have gotten stored in some GR register.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
    {
      if (current_frame_info.reg_save_b0 != 0)
        alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
      else
        {
          alt_regno = next_scratch_gr_reg ();
          alt_reg = gen_rtx_REG (DImode, alt_regno);
          do_restore (gen_movdi_x, alt_reg, cfa_off);
          cfa_off -= 8;
        }
      reg = gen_rtx_REG (DImode, BR_REG (0));
      emit_move_insn (reg, alt_reg);
    }

  for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
        alt_regno = next_scratch_gr_reg ();
        alt_reg = gen_rtx_REG (DImode, alt_regno);
        do_restore (gen_movdi_x, alt_reg, cfa_off);
        cfa_off -= 8;
        reg = gen_rtx_REG (DImode, regno);
        emit_move_insn (reg, alt_reg);
      }

  /* Restore floating point registers.  */
  for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
        gcc_assert (!(cfa_off & 15));
        reg = gen_rtx_REG (XFmode, regno);
        do_restore (gen_fr_restore_x, reg, cfa_off);
        cfa_off -= 16;
      }

  /* Restore ar.unat for real.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
    {
      reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
      emit_move_insn (reg, ar_unat_save_reg);
    }

  gcc_assert (cfa_off == current_frame_info.spill_cfa_off);

  finish_spill_pointers ();

  if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
    {
      /* ??? At this point we must generate a magic insn that appears to
         modify the spill iterators, the stack pointer, and the frame
         pointer.  This would allow the most scheduling freedom.  For now,
         just hard stop.  */
      emit_insn (gen_blockage ());
    }

  if (cfun->machine->ia64_eh_epilogue_sp)
    emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
  else if (frame_pointer_needed)
    {
      insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else if (current_frame_info.total_size)
    {
      rtx offset, frame_size_rtx;

      frame_size_rtx = GEN_INT (current_frame_info.total_size);
      if (CONST_OK_FOR_I (current_frame_info.total_size))
        offset = frame_size_rtx;
      else
        {
          regno = next_scratch_gr_reg ();
          offset = gen_rtx_REG (DImode, regno);
          emit_move_insn (offset, frame_size_rtx);
        }

      insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                    offset));

      RTX_FRAME_RELATED_P (insn) = 1;
      if (GET_CODE (offset) != CONST_INT)
        REG_NOTES (insn)
          = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                               gen_rtx_SET (VOIDmode,
                                            stack_pointer_rtx,
                                            gen_rtx_PLUS (DImode,
                                                          stack_pointer_rtx,
                                                          frame_size_rtx)),
                               REG_NOTES (insn));
    }

  if (cfun->machine->ia64_eh_epilogue_bsp)
    emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));

  if (! sibcall_p)
    emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
  else
    {
      int fp = GR_REG (2);
      /* We need a throw away register here, r0 and r1 are reserved, so r2 is the
         first available call clobbered register.  If there was a frame_pointer
         register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
         so we have to make sure we're using the string "r2" when emitting
         the register name for the assembler.  */
      if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
        fp = HARD_FRAME_POINTER_REGNUM;

      /* We must emit an alloc to force the input registers to become output
         registers.  Otherwise, if the callee tries to pass its parameters
         through to another call without an intervening alloc, then these
         values get lost.  */
      /* ??? We don't need to preserve all input registers.  We only need to
         preserve those input registers used as arguments to the sibling call.
         It is unclear how to compute that number here.  */
      if (current_frame_info.n_input_regs != 0)
        {
          rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
          insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
                                       const0_rtx, const0_rtx,
                                       n_inputs, const0_rtx));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
    }
}
/* Return 1 if br.ret can do all the work required to return from a
   function.  */

int
ia64_direct_return (void)
{
  if (reload_completed && ! frame_pointer_needed)
    {
      ia64_compute_frame_size (get_frame_size ());

      return (current_frame_info.total_size == 0
              && current_frame_info.n_spilled == 0
              && current_frame_info.reg_save_b0 == 0
              && current_frame_info.reg_save_pr == 0
              && current_frame_info.reg_save_ar_pfs == 0
              && current_frame_info.reg_save_ar_unat == 0
              && current_frame_info.reg_save_ar_lc == 0);
    }
  return 0;
}
/* Return the magic cookie that we use to hold the return address
   during early compilation.  */

rtx
ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return NULL;
  return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
}
/* Split this value after reload, now that we know where the return
   address is saved.  */

void
ia64_split_return_addr_rtx (rtx dest)
{
  rtx src;

  if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
    {
      if (current_frame_info.reg_save_b0 != 0)
        src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
      else
        {
          HOST_WIDE_INT off;
          unsigned int regno;

          /* Compute offset from CFA for BR0.  */
          /* ??? Must be kept in sync with ia64_expand_prologue.  */
          off = (current_frame_info.spill_cfa_off
                 + current_frame_info.spill_size);
          for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
            if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
              off -= 8;

          /* Convert CFA offset to a register based offset.  */
          if (frame_pointer_needed)
            src = hard_frame_pointer_rtx;
          else
            {
              src = stack_pointer_rtx;
              off += current_frame_info.total_size;
            }

          /* Load address into scratch register.  */
          if (CONST_OK_FOR_I (off))
            emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
          else
            {
              emit_move_insn (dest, GEN_INT (off));
              emit_insn (gen_adddi3 (dest, src, dest));
            }

          src = gen_rtx_MEM (Pmode, dest);
        }
    }
  else
    src = gen_rtx_REG (DImode, BR_REG (0));

  emit_move_insn (dest, src);
}
int
ia64_hard_regno_rename_ok (int from, int to)
{
  /* Don't clobber any of the registers we reserved for the prologue.  */
  if (to == current_frame_info.reg_fp
      || to == current_frame_info.reg_save_b0
      || to == current_frame_info.reg_save_pr
      || to == current_frame_info.reg_save_ar_pfs
      || to == current_frame_info.reg_save_ar_unat
      || to == current_frame_info.reg_save_ar_lc)
    return 0;

  if (from == current_frame_info.reg_fp
      || from == current_frame_info.reg_save_b0
      || from == current_frame_info.reg_save_pr
      || from == current_frame_info.reg_save_ar_pfs
      || from == current_frame_info.reg_save_ar_unat
      || from == current_frame_info.reg_save_ar_lc)
    return 0;

  /* Don't use output registers outside the register frame.  */
  if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
    return 0;

  /* Retain even/oddness on predicate register pairs.  */
  if (PR_REGNO_P (from) && PR_REGNO_P (to))
    return (from & 1) == (to & 1);

  return 1;
}
/* Target hook for assembling integer objects.  Handle word-sized
   aligned objects and detect the cases when @fptr is needed.  */

static bool
ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == POINTER_SIZE / BITS_PER_UNIT
      && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
      && GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_FUNCTION_P (x))
    {
      static const char * const directive[2][2] = {
          /* 64-bit pointer */  /* 32-bit pointer */
        { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("},  /* unaligned */
        { "\tdata8\t@fptr(",    "\tdata4\t@fptr("}      /* aligned */
      };
      fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
      output_addr_const (asm_out_file, x);
      fputs (")\n", asm_out_file);
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}
/* Emit the function prologue.  */

static void
ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  int mask, grsave, grsave_prev;

  if (current_frame_info.need_regstk)
    fprintf (file, "\t.regstk %d, %d, %d, %d\n",
             current_frame_info.n_input_regs,
             current_frame_info.n_local_regs,
             current_frame_info.n_output_regs,
             current_frame_info.n_rotate_regs);

  if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
    return;

  /* Emit the .prologue directive.  */

  mask = 0;
  grsave = grsave_prev = 0;
  if (current_frame_info.reg_save_b0 != 0)
    {
      mask |= 8;
      grsave = grsave_prev = current_frame_info.reg_save_b0;
    }
  if (current_frame_info.reg_save_ar_pfs != 0
      && (grsave_prev == 0
          || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
    {
      mask |= 4;
      if (grsave_prev == 0)
        grsave = current_frame_info.reg_save_ar_pfs;
      grsave_prev = current_frame_info.reg_save_ar_pfs;
    }
  if (current_frame_info.reg_fp != 0
      && (grsave_prev == 0
          || current_frame_info.reg_fp == grsave_prev + 1))
    {
      mask |= 2;
      if (grsave_prev == 0)
        grsave = HARD_FRAME_POINTER_REGNUM;
      grsave_prev = current_frame_info.reg_fp;
    }
  if (current_frame_info.reg_save_pr != 0
      && (grsave_prev == 0
          || current_frame_info.reg_save_pr == grsave_prev + 1))
    {
      mask |= 1;
      if (grsave_prev == 0)
        grsave = current_frame_info.reg_save_pr;
    }

  if (mask && TARGET_GNU_AS)
    fprintf (file, "\t.prologue %d, %d\n", mask,
             ia64_dbx_register_number (grsave));
  else
    fputs ("\t.prologue\n", file);

  /* Emit a .spill directive, if necessary, to relocate the base of
     the register spill area.  */
  if (current_frame_info.spill_cfa_off != -16)
    fprintf (file, "\t.spill %ld\n",
             (long) (current_frame_info.spill_cfa_off
                     + current_frame_info.spill_size));
}
/* Emit the .body directive at the scheduled end of the prologue.  */

static void
ia64_output_function_end_prologue (FILE *file)
{
  if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
    return;

  fputs ("\t.body\n", file);
}
/* Emit the function epilogue.  */

static void
ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
                               HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  int i;

  if (current_frame_info.reg_fp)
    {
      const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
      reg_names[HARD_FRAME_POINTER_REGNUM]
        = reg_names[current_frame_info.reg_fp];
      reg_names[current_frame_info.reg_fp] = tmp;
    }
  if (! TARGET_REG_NAMES)
    {
      for (i = 0; i < current_frame_info.n_input_regs; i++)
        reg_names[IN_REG (i)] = ia64_input_reg_names[i];
      for (i = 0; i < current_frame_info.n_local_regs; i++)
        reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
      for (i = 0; i < current_frame_info.n_output_regs; i++)
        reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
    }

  current_frame_info.initialized = 0;
}
int
ia64_dbx_register_number (int regno)
{
  /* In ia64_expand_prologue we quite literally renamed the frame pointer
     from its home at loc79 to something inside the register frame.  We
     must perform the same renumbering here for the debug info.  */
  if (current_frame_info.reg_fp)
    {
      if (regno == HARD_FRAME_POINTER_REGNUM)
        regno = current_frame_info.reg_fp;
      else if (regno == current_frame_info.reg_fp)
        regno = HARD_FRAME_POINTER_REGNUM;
    }

  if (IN_REGNO_P (regno))
    return 32 + regno - IN_REG (0);
  else if (LOC_REGNO_P (regno))
    return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
  else if (OUT_REGNO_P (regno))
    return (32 + current_frame_info.n_input_regs
            + current_frame_info.n_local_regs + regno - OUT_REG (0));
  else
    return regno;
}
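
/* For example, with 3 input and 2 local registers, in0-in2 map to debug
   register numbers 32-34, loc0-loc1 to 35-36, and out0 to 37; all other
   registers keep their hard register number.  (Worked example of the
   arithmetic above.)  */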
void
ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
{
  rtx addr_reg, eight = GEN_INT (8);

  /* The Intel assembler requires that the global __ia64_trampoline symbol
     be declared explicitly */
  if (!TARGET_GNU_AS)
    {
      static bool declared_ia64_trampoline = false;

      if (!declared_ia64_trampoline)
        {
          declared_ia64_trampoline = true;
          (*targetm.asm_out.globalize_label) (asm_out_file,
                                              "__ia64_trampoline");
        }
    }

  /* Make sure addresses are Pmode even if we are in ILP32 mode. */
  addr = convert_memory_address (Pmode, addr);
  fnaddr = convert_memory_address (Pmode, fnaddr);
  static_chain = convert_memory_address (Pmode, static_chain);

  /* Load up our iterator.  */
  addr_reg = gen_reg_rtx (Pmode);
  emit_move_insn (addr_reg, addr);

  /* The first two words are the fake descriptor:
     __ia64_trampoline, ADDR+16.  */
  emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
                  gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
  emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));

  emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
                  copy_to_reg (plus_constant (addr, 16)));
  emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));

  /* The third word is the target descriptor.  */
  emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
  emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));

  /* The fourth word is the static chain.  */
  emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
}
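
/* The stores above lay out the trampoline as four 8-byte words starting
   at ADDR: word 0 = address of __ia64_trampoline, word 1 = ADDR+16,
   word 2 = the target descriptor, word 3 = the static chain.
   (Summary of the emitted initialization.)  */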
/* Do any needed setup for a variadic function.  CUM has not been updated
   for the last named argument which has type TYPE and mode MODE.

   We generate the actual spill instructions during prologue generation.  */

static void
ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                             tree type, int * pretend_size,
                             int second_time ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS next_cum = *cum;

  /* Skip the current argument.  */
  ia64_function_arg_advance (&next_cum, mode, type, 1);

  if (next_cum.words < MAX_ARGUMENT_SLOTS)
    {
      int n = MAX_ARGUMENT_SLOTS - next_cum.words;
      *pretend_size = n * UNITS_PER_WORD;
      cfun->machine->n_varargs = n;
    }
}
/* Check whether TYPE is a homogeneous floating point aggregate.  If
   it is, return the mode of the floating point type that appears
   in all leafs.  If it is not, return VOIDmode.

   An aggregate is a homogeneous floating point aggregate if all
   fields/elements in it have the same floating point type (e.g.,
   SFmode).  128-bit quad-precision floats are excluded.

   Variable sized aggregates should never arrive here, since we should
   have already decided to pass them by reference.  Top-level zero-sized
   aggregates are excluded because our parallels crash the middle-end.  */

static enum machine_mode
hfa_element_mode (tree type, bool nested)
{
  enum machine_mode element_mode = VOIDmode;
  enum machine_mode mode;
  enum tree_code code = TREE_CODE (type);
  int know_element_mode = 0;
  tree t;

  if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
    return VOIDmode;

  switch (code)
    {
    case VOID_TYPE:     case INTEGER_TYPE:      case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:  case CHAR_TYPE:         case POINTER_TYPE:
    case OFFSET_TYPE:   case REFERENCE_TYPE:    case METHOD_TYPE:
    case LANG_TYPE:     case FUNCTION_TYPE:
      return VOIDmode;

      /* Fortran complex types are supposed to be HFAs, so we need to handle
         gcc's COMPLEX_TYPEs as HFAs.  We need to exclude the integral complex
         types though.  */
    case COMPLEX_TYPE:
      if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
          && TYPE_MODE (type) != TCmode)
        return GET_MODE_INNER (TYPE_MODE (type));
      else
        return VOIDmode;

    case REAL_TYPE:
      /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
         mode if this is contained within an aggregate.  */
      if (nested && TYPE_MODE (type) != TFmode)
        return TYPE_MODE (type);
      else
        return VOIDmode;

    case ARRAY_TYPE:
      return hfa_element_mode (TREE_TYPE (type), 1);

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
        {
          if (TREE_CODE (t) != FIELD_DECL)
            continue;

          mode = hfa_element_mode (TREE_TYPE (t), 1);
          if (know_element_mode)
            {
              if (mode != element_mode)
                return VOIDmode;
            }
          else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
            return VOIDmode;
          else
            {
              know_element_mode = 1;
              element_mode = mode;
            }
        }
      return element_mode;

    default:
      /* If we reach here, we probably have some front-end specific type
         that the backend doesn't know about.  This can happen via the
         aggregate_value_p call in init_function_start.  All we can do is
         ignore unknown tree types.  */
      return VOIDmode;
    }

  return VOIDmode;
}
/* Return the number of words required to hold a quantity of TYPE and MODE
   when passed as an argument.  */

static int
ia64_function_arg_words (tree type, enum machine_mode mode)
{
  int words;

  if (mode == BLKmode)
    words = int_size_in_bytes (type);
  else
    words = GET_MODE_SIZE (mode);

  return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD;  /* round up */
}
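
/* For example, with 8-byte argument slots a 12-byte BLKmode aggregate
   occupies (12 + 8 - 1) / 8 = 2 slots, while any 8-byte scalar occupies
   exactly one.  (Illustrative arithmetic only.)  */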
/* Return the number of registers that should be skipped so the current
   argument (described by TYPE and WORDS) will be properly aligned.

   Integer and float arguments larger than 8 bytes start at the next
   even boundary.  Aggregates larger than 8 bytes start at the next
   even boundary if the aggregate has 16 byte alignment.  Note that
   in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
   but are still to be aligned in registers.

   ??? The ABI does not specify how to handle aggregates with
   alignment from 9 to 15 bytes, or greater than 16.  We handle them
   all as if they had 16 byte alignment.  Such aggregates can occur
   only if gcc extensions are used.  */

static int
ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
{
  if ((cum->words & 1) == 0)
    return 0;

  if (type
      && TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != REAL_TYPE)
    return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
  else
    return words > 1;
}
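
/* For example, if cum->words is odd, a 16-byte-aligned aggregate or any
   scalar wider than one word skips a register so that it starts on an
   even slot; arguments that already start on an even slot never skip.
   (Illustrative application of the test above.)  */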
3945 /* Return rtx for register where argument is passed, or zero if it is passed
3947 /* ??? 128-bit quad-precision floats are always passed in general
3951 ia64_function_arg (CUMULATIVE_ARGS
*cum
, enum machine_mode mode
, tree type
,
3952 int named
, int incoming
)
3954 int basereg
= (incoming
? GR_ARG_FIRST
: AR_ARG_FIRST
);
3955 int words
= ia64_function_arg_words (type
, mode
);
3956 int offset
= ia64_function_arg_offset (cum
, type
, words
);
3957 enum machine_mode hfa_mode
= VOIDmode
;
3959 /* If all argument slots are used, then it must go on the stack. */
3960 if (cum
->words
+ offset
>= MAX_ARGUMENT_SLOTS
)
3963 /* Check for and handle homogeneous FP aggregates. */
3965 hfa_mode
= hfa_element_mode (type
, 0);
3967 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3968 and unprototyped hfas are passed specially. */
3969 if (hfa_mode
!= VOIDmode
&& (! cum
->prototype
|| named
))
3973 int fp_regs
= cum
->fp_regs
;
3974 int int_regs
= cum
->words
+ offset
;
3975 int hfa_size
= GET_MODE_SIZE (hfa_mode
);
3979 /* If prototyped, pass it in FR regs then GR regs.
3980 If not prototyped, pass it in both FR and GR regs.
3982 If this is an SFmode aggregate, then it is possible to run out of
3983 FR regs while GR regs are still left. In that case, we pass the
3984 remaining part in the GR regs. */
3986 /* Fill the FP regs. We do this always. We stop if we reach the end
3987 of the argument, the last FP register, or the last argument slot. */
3989 byte_size
= ((mode
== BLKmode
)
3990 ? int_size_in_bytes (type
) : GET_MODE_SIZE (mode
));
3991 args_byte_size
= int_regs
* UNITS_PER_WORD
;
3993 for (; (offset
< byte_size
&& fp_regs
< MAX_ARGUMENT_SLOTS
3994 && args_byte_size
< (MAX_ARGUMENT_SLOTS
* UNITS_PER_WORD
)); i
++)
3996 loc
[i
] = gen_rtx_EXPR_LIST (VOIDmode
,
3997 gen_rtx_REG (hfa_mode
, (FR_ARG_FIRST
4001 args_byte_size
+= hfa_size
;
4005 /* If no prototype, then the whole thing must go in GR regs. */
4006 if (! cum
->prototype
)
4008 /* If this is an SFmode aggregate, then we might have some left over
4009 that needs to go in GR regs. */
4010 else if (byte_size
!= offset
)
4011 int_regs
+= offset
/ UNITS_PER_WORD
;
4013 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4015 for (; offset
< byte_size
&& int_regs
< MAX_ARGUMENT_SLOTS
; i
++)
4017 enum machine_mode gr_mode
= DImode
;
4018 unsigned int gr_size
;
4020 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4021 then this goes in a GR reg left adjusted/little endian, right
4022 adjusted/big endian. */
4023 /* ??? Currently this is handled wrong, because 4-byte hunks are
4024 always right adjusted/little endian. */
4027 /* If we have an even 4 byte hunk because the aggregate is a
4028 multiple of 4 bytes in size, then this goes in a GR reg right
4029 adjusted/little endian. */
4030 else if (byte_size
- offset
== 4)
4033 loc
[i
] = gen_rtx_EXPR_LIST (VOIDmode
,
4034 gen_rtx_REG (gr_mode
, (basereg
4038 gr_size
= GET_MODE_SIZE (gr_mode
);
4040 if (gr_size
== UNITS_PER_WORD
4041 || (gr_size
< UNITS_PER_WORD
&& offset
% UNITS_PER_WORD
== 0))
4043 else if (gr_size
> UNITS_PER_WORD
)
4044 int_regs
+= gr_size
/ UNITS_PER_WORD
;
4046 return gen_rtx_PARALLEL (mode
, gen_rtvec_v (i
, loc
));
4049 /* Integral and aggregates go in general registers. If we have run out of
4050 FR registers, then FP values must also go in general registers. This can
4051 happen when we have a SFmode HFA. */
4052 else if (mode
== TFmode
|| mode
== TCmode
4053 || (! FLOAT_MODE_P (mode
) || cum
->fp_regs
== MAX_ARGUMENT_SLOTS
))
4055 int byte_size
= ((mode
== BLKmode
)
4056 ? int_size_in_bytes (type
) : GET_MODE_SIZE (mode
));
4057 if (BYTES_BIG_ENDIAN
4058 && (mode
== BLKmode
|| (type
&& AGGREGATE_TYPE_P (type
)))
4059 && byte_size
< UNITS_PER_WORD
4062 rtx gr_reg
= gen_rtx_EXPR_LIST (VOIDmode
,
4063 gen_rtx_REG (DImode
,
4064 (basereg
+ cum
->words
4067 return gen_rtx_PARALLEL (mode
, gen_rtvec (1, gr_reg
));
4070 return gen_rtx_REG (mode
, basereg
+ cum
->words
+ offset
);
4074 /* If there is a prototype, then FP values go in a FR register when
4075 named, and in a GR register when unnamed. */
4076 else if (cum
->prototype
)
4079 return gen_rtx_REG (mode
, FR_ARG_FIRST
+ cum
->fp_regs
);
4080 /* In big-endian mode, an anonymous SFmode value must be represented
4081 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4082 the value into the high half of the general register. */
4083 else if (BYTES_BIG_ENDIAN
&& mode
== SFmode
)
4084 return gen_rtx_PARALLEL (mode
,
4086 gen_rtx_EXPR_LIST (VOIDmode
,
4087 gen_rtx_REG (DImode
, basereg
+ cum
->words
+ offset
),
4090 return gen_rtx_REG (mode
, basereg
+ cum
->words
+ offset
);
4092 /* If there is no prototype, then FP values go in both FR and GR
4096 /* See comment above. */
4097 enum machine_mode inner_mode
=
4098 (BYTES_BIG_ENDIAN
&& mode
== SFmode
) ? DImode
: mode
;
4100 rtx fp_reg
= gen_rtx_EXPR_LIST (VOIDmode
,
4101 gen_rtx_REG (mode
, (FR_ARG_FIRST
4104 rtx gr_reg
= gen_rtx_EXPR_LIST (VOIDmode
,
4105 gen_rtx_REG (inner_mode
,
4106 (basereg
+ cum
->words
4110 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, fp_reg
, gr_reg
));
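
/* A rough sketch, kept out of the build, of how many FR argument registers a
   homogeneous FP aggregate consumes under the loop above, assuming ia64's
   8-byte slots and 8 argument slots.  For instance, an SFmode HFA of 12
   floats starting at slot 0 fills all 8 FR argument registers and leaves
   16 bytes for the GR registers, which is the "left over" case handled
   above.  */
#if 0
static int
example_hfa_fp_regs (int byte_size, int elem_size, int fp_used, int gr_used)
{
  int n = 0;
  int offset = 0;
  int args_byte_size = gr_used * 8;

  /* One FR register per element, until the aggregate, the FR argument
     registers, or the 64 bytes of argument slots run out.  */
  while (offset < byte_size && fp_used + n < 8 && args_byte_size < 8 * 8)
    {
      n++;
      offset += elem_size;
      args_byte_size += elem_size;
    }
  return n;
}
#endif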
/* Return the number of bytes, at the beginning of the argument, that must be
   put in registers.  0 if the argument is entirely in registers or entirely
   in memory.  */
4119 ia64_arg_partial_bytes (CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
4120 tree type
, bool named ATTRIBUTE_UNUSED
)
4122 int words
= ia64_function_arg_words (type
, mode
);
4123 int offset
= ia64_function_arg_offset (cum
, type
, words
);
4125 /* If all argument slots are used, then it must go on the stack. */
4126 if (cum
->words
+ offset
>= MAX_ARGUMENT_SLOTS
)
  /* It doesn't matter whether the argument goes in FR or GR regs.  If
     it fits within the 8 argument slots, then it goes entirely in
     registers.  If it extends past the last argument slot, then the rest
     goes on the stack.  */
4134 if (words
+ cum
->words
+ offset
<= MAX_ARGUMENT_SLOTS
)
4137 return (MAX_ARGUMENT_SLOTS
- cum
->words
- offset
) * UNITS_PER_WORD
;
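
/* An illustrative sketch of the rule above, assuming ia64's 8 argument slots
   of 8 bytes each; not part of the port.  A 3-slot argument whose first free
   slot is slot 6 has 2 slots (16 bytes) in registers and 1 slot on the
   stack.  */
#if 0
static int
example_partial_bytes (int slots_used, int offset, int words)
{
  if (slots_used + offset >= 8)
    return 0;                            /* Entirely on the stack.  */
  if (words + slots_used + offset <= 8)
    return 0;                            /* Entirely in registers.  */
  return (8 - slots_used - offset) * 8;  /* Bytes passed in registers.  */
}
#endif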
/* Update CUM to point after this argument.  This is patterned after
   ia64_function_arg.  */
4144 ia64_function_arg_advance (CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
4145 tree type
, int named
)
4147 int words
= ia64_function_arg_words (type
, mode
);
4148 int offset
= ia64_function_arg_offset (cum
, type
, words
);
4149 enum machine_mode hfa_mode
= VOIDmode
;
4151 /* If all arg slots are already full, then there is nothing to do. */
4152 if (cum
->words
>= MAX_ARGUMENT_SLOTS
)
4155 cum
->words
+= words
+ offset
;
4157 /* Check for and handle homogeneous FP aggregates. */
4159 hfa_mode
= hfa_element_mode (type
, 0);
4161 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4162 and unprototyped hfas are passed specially. */
4163 if (hfa_mode
!= VOIDmode
&& (! cum
->prototype
|| named
))
4165 int fp_regs
= cum
->fp_regs
;
4166 /* This is the original value of cum->words + offset. */
4167 int int_regs
= cum
->words
- words
;
4168 int hfa_size
= GET_MODE_SIZE (hfa_mode
);
4172 /* If prototyped, pass it in FR regs then GR regs.
4173 If not prototyped, pass it in both FR and GR regs.
4175 If this is an SFmode aggregate, then it is possible to run out of
4176 FR regs while GR regs are still left. In that case, we pass the
4177 remaining part in the GR regs. */
4179 /* Fill the FP regs. We do this always. We stop if we reach the end
4180 of the argument, the last FP register, or the last argument slot. */
4182 byte_size
= ((mode
== BLKmode
)
4183 ? int_size_in_bytes (type
) : GET_MODE_SIZE (mode
));
4184 args_byte_size
= int_regs
* UNITS_PER_WORD
;
4186 for (; (offset
< byte_size
&& fp_regs
< MAX_ARGUMENT_SLOTS
4187 && args_byte_size
< (MAX_ARGUMENT_SLOTS
* UNITS_PER_WORD
));)
4190 args_byte_size
+= hfa_size
;
4194 cum
->fp_regs
= fp_regs
;
4197 /* Integral and aggregates go in general registers. So do TFmode FP values.
4198 If we have run out of FR registers, then other FP values must also go in
4199 general registers. This can happen when we have a SFmode HFA. */
4200 else if (mode
== TFmode
|| mode
== TCmode
4201 || (! FLOAT_MODE_P (mode
) || cum
->fp_regs
== MAX_ARGUMENT_SLOTS
))
4202 cum
->int_regs
= cum
->words
;
4204 /* If there is a prototype, then FP values go in a FR register when
4205 named, and in a GR register when unnamed. */
4206 else if (cum
->prototype
)
4209 cum
->int_regs
= cum
->words
;
4211 /* ??? Complex types should not reach here. */
4212 cum
->fp_regs
+= (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
? 2 : 1);
4214 /* If there is no prototype, then FP values go in both FR and GR
4218 /* ??? Complex types should not reach here. */
4219 cum
->fp_regs
+= (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
? 2 : 1);
4220 cum
->int_regs
= cum
->words
;
/* Arguments with alignment larger than 8 bytes start at the next even
   boundary.  On ILP32 HPUX, TFmode arguments start on the next even boundary
   even though their normal alignment is 8 bytes.  See ia64_function_arg.  */
4229 ia64_function_arg_boundary (enum machine_mode mode
, tree type
)
4232 if (mode
== TFmode
&& TARGET_HPUX
&& TARGET_ILP32
)
4233 return PARM_BOUNDARY
* 2;
4237 if (TYPE_ALIGN (type
) > PARM_BOUNDARY
)
4238 return PARM_BOUNDARY
* 2;
4240 return PARM_BOUNDARY
;
4243 if (GET_MODE_BITSIZE (mode
) > PARM_BOUNDARY
)
4244 return PARM_BOUNDARY
* 2;
4246 return PARM_BOUNDARY
;
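
/* A compact restatement of the boundary rule above, as a sketch outside the
   port; PARM_BOUNDARY is 64 bits on ia64, so "doubled" means a 128-bit
   boundary.  A 16-byte-aligned aggregate, an argument mode wider than 64
   bits, or an ILP32 HP-UX TFmode argument gets the doubled boundary,
   everything else the normal one.  */
#if 0
static int
example_arg_boundary_bits (int is_hpux_ilp32_tfmode, int align_bits,
                           int size_bits)
{
  const int parm_boundary = 64;  /* Stand-in for PARM_BOUNDARY.  */

  if (is_hpux_ilp32_tfmode)
    return parm_boundary * 2;
  /* Typed arguments use their alignment, bare modes their bit size.  */
  if ((align_bits ? align_bits : size_bits) > parm_boundary)
    return parm_boundary * 2;
  return parm_boundary;
}
#endif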
/* True if it is OK to do sibling call optimization for the specified
   call expression EXP.  DECL will be the called function, or NULL if
   this is an indirect call.  */
4253 ia64_function_ok_for_sibcall (tree decl
, tree exp ATTRIBUTE_UNUSED
)
  /* We can't perform a sibcall if the current function has the syscall_linkage
     attribute.  */
4257 if (lookup_attribute ("syscall_linkage",
4258 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl
))))
4261 /* We must always return with our current GP. This means we can
4262 only sibcall to functions defined in the current module. */
4263 return decl
&& (*targetm
.binds_local_p
) (decl
);
4267 /* Implement va_arg. */
4270 ia64_gimplify_va_arg (tree valist
, tree type
, tree
*pre_p
, tree
*post_p
)
4272 /* Variable sized types are passed by reference. */
4273 if (pass_by_reference (NULL
, TYPE_MODE (type
), type
, false))
4275 tree ptrtype
= build_pointer_type (type
);
4276 tree addr
= std_gimplify_va_arg_expr (valist
, ptrtype
, pre_p
, post_p
);
4277 return build_va_arg_indirect_ref (addr
);
  /* Aggregate arguments with alignment larger than 8 bytes start at
     the next even boundary.  Integer and floating point arguments
     do so if they are larger than 8 bytes, whether or not they are
     also aligned larger than 8 bytes.  */
4284 if ((TREE_CODE (type
) == REAL_TYPE
|| TREE_CODE (type
) == INTEGER_TYPE
)
4285 ? int_size_in_bytes (type
) > 8 : TYPE_ALIGN (type
) > 8 * BITS_PER_UNIT
)
4287 tree t
= build (PLUS_EXPR
, TREE_TYPE (valist
), valist
,
4288 build_int_cst (NULL_TREE
, 2 * UNITS_PER_WORD
- 1));
4289 t
= build (BIT_AND_EXPR
, TREE_TYPE (t
), t
,
4290 build_int_cst (NULL_TREE
, -2 * UNITS_PER_WORD
));
4291 t
= build (MODIFY_EXPR
, TREE_TYPE (valist
), valist
, t
);
4292 gimplify_and_add (t
, pre_p
);
4295 return std_gimplify_va_arg_expr (valist
, type
, pre_p
, post_p
);
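
/* A minimal sketch of the va_list rounding emitted above, in plain C rather
   than GIMPLE; it assumes 8-byte words, so 2 * UNITS_PER_WORD - 1 == 15.
   For example, a va_list pointer of 0x1008 is bumped to 0x1010 before such
   an argument is fetched.  */
#if 0
static unsigned long
example_round_va_list (unsigned long valist)
{
  /* Equivalent of the PLUS_EXPR/BIT_AND_EXPR pair built above.  */
  return (valist + 2 * 8 - 1) & -(2 * 8UL);
}
#endif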
/* Return 1 if the function return value is returned in memory.  Return 0 if
   it is in a register.  */
4302 ia64_return_in_memory (tree valtype
, tree fntype ATTRIBUTE_UNUSED
)
4304 enum machine_mode mode
;
4305 enum machine_mode hfa_mode
;
4306 HOST_WIDE_INT byte_size
;
4308 mode
= TYPE_MODE (valtype
);
4309 byte_size
= GET_MODE_SIZE (mode
);
4310 if (mode
== BLKmode
)
4312 byte_size
= int_size_in_bytes (valtype
);
4317 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
4319 hfa_mode
= hfa_element_mode (valtype
, 0);
4320 if (hfa_mode
!= VOIDmode
)
4322 int hfa_size
= GET_MODE_SIZE (hfa_mode
);
4324 if (byte_size
/ hfa_size
> MAX_ARGUMENT_SLOTS
)
4329 else if (byte_size
> UNITS_PER_WORD
* MAX_INT_RETURN_SLOTS
)
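
/* A sketch of the two size checks above, outside the port itself.  It
   assumes ia64's 8 FP argument slots and a small number of integer return
   slots (MAX_INT_RETURN_SLOTS); the value 4 used here is an assumption for
   illustration only.  Under it, an HFA of 10 doubles (80 bytes) and a plain
   40-byte struct are both returned in memory.  */
#if 0
static int
example_return_in_memory (int byte_size, int hfa_elem_size)
{
  if (hfa_elem_size > 0)
    /* HFAs with more than 8 elements do not fit the FP return regs.  */
    return byte_size / hfa_elem_size > 8;
  /* Otherwise the limit is the integer return registers.  */
  return byte_size > 8 * 4;
}
#endif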
4335 /* Return rtx for register that holds the function return value. */
4338 ia64_function_value (tree valtype
, tree func ATTRIBUTE_UNUSED
)
4340 enum machine_mode mode
;
4341 enum machine_mode hfa_mode
;
4343 mode
= TYPE_MODE (valtype
);
4344 hfa_mode
= hfa_element_mode (valtype
, 0);
4346 if (hfa_mode
!= VOIDmode
)
4354 hfa_size
= GET_MODE_SIZE (hfa_mode
);
4355 byte_size
= ((mode
== BLKmode
)
4356 ? int_size_in_bytes (valtype
) : GET_MODE_SIZE (mode
));
4358 for (i
= 0; offset
< byte_size
; i
++)
4360 loc
[i
] = gen_rtx_EXPR_LIST (VOIDmode
,
4361 gen_rtx_REG (hfa_mode
, FR_ARG_FIRST
+ i
),
4365 return gen_rtx_PARALLEL (mode
, gen_rtvec_v (i
, loc
));
4367 else if (FLOAT_TYPE_P (valtype
) && mode
!= TFmode
&& mode
!= TCmode
)
4368 return gen_rtx_REG (mode
, FR_ARG_FIRST
);
4371 bool need_parallel
= false;
      /* In big-endian mode, we need to manage the layout of aggregates
         in the registers so that we get the bits properly aligned in
         the highpart of the registers.  */
4376 if (BYTES_BIG_ENDIAN
4377 && (mode
== BLKmode
|| (valtype
&& AGGREGATE_TYPE_P (valtype
))))
4378 need_parallel
= true;
      /* Something like struct S { long double x; char a[0] } is not an
         HFA structure, and therefore doesn't go in fp registers.  But
         the middle-end will give it XFmode anyway, and XFmode values
         don't normally fit in integer registers.  So we need to smuggle
         the value inside a parallel.  */
4385 else if (mode
== XFmode
|| mode
== XCmode
|| mode
== RFmode
)
4386 need_parallel
= true;
4396 bytesize
= int_size_in_bytes (valtype
);
4397 /* An empty PARALLEL is invalid here, but the return value
4398 doesn't matter for empty structs. */
4400 return gen_rtx_REG (mode
, GR_RET_FIRST
);
4401 for (i
= 0; offset
< bytesize
; i
++)
4403 loc
[i
] = gen_rtx_EXPR_LIST (VOIDmode
,
4404 gen_rtx_REG (DImode
,
4407 offset
+= UNITS_PER_WORD
;
4409 return gen_rtx_PARALLEL (mode
, gen_rtvec_v (i
, loc
));
4412 return gen_rtx_REG (mode
, GR_RET_FIRST
);
4416 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4417 We need to emit DTP-relative relocations. */
4420 ia64_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
4422 gcc_assert (size
== 4 || size
== 8);
4424 fputs ("\tdata4.ua\t@dtprel(", file
);
4426 fputs ("\tdata8.ua\t@dtprel(", file
);
4427 output_addr_const (file
, x
);
4431 /* Print a memory address as an operand to reference that memory location. */
4433 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4434 also call this from ia64_print_operand for memory addresses. */
4437 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED
,
4438 rtx address ATTRIBUTE_UNUSED
)
/* Print an operand to an assembler instruction.
   C    Swap and print a comparison operator.
   D    Print an FP comparison operator.
   E    Print 32 - constant, for SImode shifts as extract.
   e    Print 64 - constant, for DImode rotates.
   F    A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
        a floating point register emitted normally.
   I    Invert a predicate register by adding 1.
   J    Select the proper predicate register for a condition.
   j    Select the inverse predicate register for a condition.
   O    Append .acq for volatile load.
   P    Postincrement of a MEM.
   Q    Append .rel for volatile store.
   S    Shift amount for shladd instruction.
   T    Print an 8-bit sign extended number (K) as a 32-bit unsigned number
        for Intel assembler.
   U    Print an 8-bit sign extended number (K) as a 64-bit unsigned number
        for Intel assembler.
   X    A pair of floating point registers.
   r    Print register name, or constant 0 as r0.  HP compatibility for
        Linux kernel.
   v    Print vector constant value as an 8-byte integer value.  */
4466 ia64_print_operand (FILE * file
, rtx x
, int code
)
4473 /* Handled below. */
4478 enum rtx_code c
= swap_condition (GET_CODE (x
));
4479 fputs (GET_RTX_NAME (c
), file
);
4484 switch (GET_CODE (x
))
4496 str
= GET_RTX_NAME (GET_CODE (x
));
4503 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, 32 - INTVAL (x
));
4507 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, 64 - INTVAL (x
));
4511 if (x
== CONST0_RTX (GET_MODE (x
)))
4512 str
= reg_names
[FR_REG (0)];
4513 else if (x
== CONST1_RTX (GET_MODE (x
)))
4514 str
= reg_names
[FR_REG (1)];
4517 gcc_assert (GET_CODE (x
) == REG
);
4518 str
= reg_names
[REGNO (x
)];
4524 fputs (reg_names
[REGNO (x
) + 1], file
);
4530 unsigned int regno
= REGNO (XEXP (x
, 0));
4531 if (GET_CODE (x
) == EQ
)
4535 fputs (reg_names
[regno
], file
);
4540 if (MEM_VOLATILE_P (x
))
4541 fputs(".acq", file
);
4546 HOST_WIDE_INT value
;
4548 switch (GET_CODE (XEXP (x
, 0)))
4554 x
= XEXP (XEXP (XEXP (x
, 0), 1), 1);
4555 if (GET_CODE (x
) == CONST_INT
)
4559 gcc_assert (GET_CODE (x
) == REG
);
4560 fprintf (file
, ", %s", reg_names
[REGNO (x
)]);
4566 value
= GET_MODE_SIZE (GET_MODE (x
));
4570 value
= - (HOST_WIDE_INT
) GET_MODE_SIZE (GET_MODE (x
));
4574 fprintf (file
, ", " HOST_WIDE_INT_PRINT_DEC
, value
);
4579 if (MEM_VOLATILE_P (x
))
4580 fputs(".rel", file
);
4584 fprintf (file
, "%d", exact_log2 (INTVAL (x
)));
4588 if (! TARGET_GNU_AS
&& GET_CODE (x
) == CONST_INT
)
4590 fprintf (file
, "0x%x", (int) INTVAL (x
) & 0xffffffff);
4596 if (! TARGET_GNU_AS
&& GET_CODE (x
) == CONST_INT
)
4598 const char *prefix
= "0x";
4599 if (INTVAL (x
) & 0x80000000)
4601 fprintf (file
, "0xffffffff");
4604 fprintf (file
, "%s%x", prefix
, (int) INTVAL (x
) & 0xffffffff);
4611 unsigned int regno
= REGNO (x
);
4612 fprintf (file
, "%s, %s", reg_names
[regno
], reg_names
[regno
+ 1]);
4617 /* If this operand is the constant zero, write it as register zero.
4618 Any register, zero, or CONST_INT value is OK here. */
4619 if (GET_CODE (x
) == REG
)
4620 fputs (reg_names
[REGNO (x
)], file
);
4621 else if (x
== CONST0_RTX (GET_MODE (x
)))
4623 else if (GET_CODE (x
) == CONST_INT
)
4624 output_addr_const (file
, x
);
4626 output_operand_lossage ("invalid %%r value");
4630 gcc_assert (GET_CODE (x
) == CONST_VECTOR
);
4631 x
= simplify_subreg (DImode
, x
, GET_MODE (x
), 0);
4638 /* For conditional branches, returns or calls, substitute
4639 sptk, dptk, dpnt, or spnt for %s. */
4640 x
= find_reg_note (current_output_insn
, REG_BR_PROB
, 0);
4643 int pred_val
= INTVAL (XEXP (x
, 0));
4645 /* Guess top and bottom 10% statically predicted. */
4646 if (pred_val
< REG_BR_PROB_BASE
/ 50)
4648 else if (pred_val
< REG_BR_PROB_BASE
/ 2)
4650 else if (pred_val
< REG_BR_PROB_BASE
/ 100 * 98)
4655 else if (GET_CODE (current_output_insn
) == CALL_INSN
)
4660 fputs (which
, file
);
4665 x
= current_insn_predicate
;
4668 unsigned int regno
= REGNO (XEXP (x
, 0));
4669 if (GET_CODE (x
) == EQ
)
4671 fprintf (file
, "(%s) ", reg_names
[regno
]);
4676 output_operand_lossage ("ia64_print_operand: unknown code");
4680 switch (GET_CODE (x
))
4682 /* This happens for the spill/restore instructions. */
4687 /* ... fall through ... */
4690 fputs (reg_names
[REGNO (x
)], file
);
4695 rtx addr
= XEXP (x
, 0);
4696 if (GET_RTX_CLASS (GET_CODE (addr
)) == RTX_AUTOINC
)
4697 addr
= XEXP (addr
, 0);
4698 fprintf (file
, "[%s]", reg_names
[REGNO (addr
)]);
4703 output_addr_const (file
, x
);
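
/* A small sketch of the static branch-hint selection used for the %s code
   above, with the probability expressed in units of REG_BR_PROB_BASE
   (assumed here to be 10000, its usual value); the four hint strings are the
   ones listed in the comment above.  A predicted probability of 1% yields
   ".spnt", 30% ".dpnt", 90% ".dptk" and 99% ".sptk".  */
#if 0
static const char *
example_branch_hint (int pred_val)
{
  const int base = 10000;               /* Stand-in for REG_BR_PROB_BASE.  */

  if (pred_val < base / 50)             /* Below 2%: static not-taken.  */
    return ".spnt";
  else if (pred_val < base / 2)         /* Below 50%: dynamic not-taken.  */
    return ".dpnt";
  else if (pred_val < base / 100 * 98)  /* Below 98%: dynamic taken.  */
    return ".dptk";
  else                                  /* 98% and up: static taken.  */
    return ".sptk";
}
#endif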
4710 /* Compute a (partial) cost for rtx X. Return true if the complete
4711 cost has been computed, and false if subexpressions should be
4712 scanned. In either case, *TOTAL contains the cost result. */
4713 /* ??? This is incomplete. */
4716 ia64_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
4724 *total
= CONST_OK_FOR_J (INTVAL (x
)) ? 0 : COSTS_N_INSNS (1);
4727 if (CONST_OK_FOR_I (INTVAL (x
)))
4729 else if (CONST_OK_FOR_J (INTVAL (x
)))
4732 *total
= COSTS_N_INSNS (1);
4735 if (CONST_OK_FOR_K (INTVAL (x
)) || CONST_OK_FOR_L (INTVAL (x
)))
4738 *total
= COSTS_N_INSNS (1);
4743 *total
= COSTS_N_INSNS (1);
4749 *total
= COSTS_N_INSNS (3);
      /* For multiplies wider than HImode, we have to go to the FPU,
         which normally involves copies.  Plus there's the latency
         of the multiply itself, and the latency of the instructions to
         transfer integer regs to FP regs.  */
      /* ??? Check for FP mode.  */
4758 if (GET_MODE_SIZE (GET_MODE (x
)) > 2)
4759 *total
= COSTS_N_INSNS (10);
4761 *total
= COSTS_N_INSNS (2);
4769 *total
= COSTS_N_INSNS (1);
      /* We make divide expensive, so that divide-by-constant will be
         optimized to a multiply.  */
4778 *total
= COSTS_N_INSNS (60);
4786 /* Calculate the cost of moving data from a register in class FROM to
4787 one in class TO, using MODE. */
4790 ia64_register_move_cost (enum machine_mode mode
, enum reg_class from
,
4793 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4794 if (to
== ADDL_REGS
)
4796 if (from
== ADDL_REGS
)
4799 /* All costs are symmetric, so reduce cases by putting the
4800 lower number class as the destination. */
4803 enum reg_class tmp
= to
;
4804 to
= from
, from
= tmp
;
  /* Moving from FR<->GR in XFmode must be more expensive than 2,
     so that we get secondary memory reloads.  Between FR_REGS,
     we have to make this at least as expensive as MEMORY_MOVE_COST
     to avoid spectacularly poor register class preferencing.  */
4811 if (mode
== XFmode
|| mode
== RFmode
)
4813 if (to
!= GR_REGS
|| from
!= GR_REGS
)
4814 return MEMORY_MOVE_COST (mode
, to
, 0);
4822 /* Moving between PR registers takes two insns. */
4823 if (from
== PR_REGS
)
4825 /* Moving between PR and anything but GR is impossible. */
4826 if (from
!= GR_REGS
)
4827 return MEMORY_MOVE_COST (mode
, to
, 0);
4831 /* Moving between BR and anything but GR is impossible. */
4832 if (from
!= GR_REGS
&& from
!= GR_AND_BR_REGS
)
4833 return MEMORY_MOVE_COST (mode
, to
, 0);
4838 /* Moving between AR and anything but GR is impossible. */
4839 if (from
!= GR_REGS
)
4840 return MEMORY_MOVE_COST (mode
, to
, 0);
4846 case GR_AND_FR_REGS
:
4847 case GR_AND_BR_REGS
:
4858 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
4859 to use when copying X into that class. */
4862 ia64_preferred_reload_class (rtx x
, enum reg_class
class)
4868 /* Don't allow volatile mem reloads into floating point registers.
4869 This is defined to force reload to choose the r/m case instead
4870 of the f/f case when reloading (set (reg fX) (mem/v)). */
4871 if (MEM_P (x
) && MEM_VOLATILE_P (x
))
4874 /* Force all unrecognized constants into the constant pool. */
/* This function returns the register class required for a secondary
   register when copying between one of the registers in CLASS, and X,
   using MODE.  A return value of NO_REGS means that no secondary register
   is required.  */
4898 ia64_secondary_reload_class (enum reg_class
class,
4899 enum machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
4903 if (GET_CODE (x
) == REG
|| GET_CODE (x
) == SUBREG
)
4904 regno
= true_regnum (x
);
      /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
         interaction.  We end up with two pseudos with overlapping lifetimes
         both of which are equiv to the same constant, and both of which need
         to be in BR_REGS.  This seems to be a cse bug.  cse_basic_block_end
         changes depending on the path length, which means the qty_first_reg
         check in make_regs_eqv can give different answers at different times.
         At some point I'll probably need a reload_indi pattern to handle
         this.

         We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
         wound up with a FP register from GR_AND_FR_REGS.  Extend that to all
         non-general registers for good measure.  */
4923 if (regno
>= 0 && ! GENERAL_REGNO_P (regno
))
4926 /* This is needed if a pseudo used as a call_operand gets spilled to a
4928 if (GET_CODE (x
) == MEM
)
4934 /* Need to go through general registers to get to other class regs. */
4935 if (regno
>= 0 && ! (FR_REGNO_P (regno
) || GENERAL_REGNO_P (regno
)))
      /* This can happen when a paradoxical subreg is an operand to the
         muldi3 pattern.  */
      /* ??? This shouldn't be necessary after instruction scheduling is
         enabled, because paradoxical subregs are not accepted by
         register_operand when INSN_SCHEDULING is defined.  Or alternatively,
         stop the paradoxical subreg stupidity in the *_operand functions
         in recog.c.  */
4945 if (GET_CODE (x
) == MEM
4946 && (GET_MODE (x
) == SImode
|| GET_MODE (x
) == HImode
4947 || GET_MODE (x
) == QImode
))
4950 /* This can happen because of the ior/and/etc patterns that accept FP
4951 registers as operands. If the third operand is a constant, then it
4952 needs to be reloaded into a FP register. */
4953 if (GET_CODE (x
) == CONST_INT
)
4956 /* This can happen because of register elimination in a muldi3 insn.
4957 E.g. `26107 * (unsigned long)&u'. */
4958 if (GET_CODE (x
) == PLUS
)
4963 /* ??? This happens if we cse/gcse a BImode value across a call,
4964 and the function has a nonlocal goto. This is because global
4965 does not allocate call crossing pseudos to hard registers when
4966 current_function_has_nonlocal_goto is true. This is relatively
4967 common for C++ programs that use exceptions. To reproduce,
4968 return NO_REGS and compile libstdc++. */
4969 if (GET_CODE (x
) == MEM
)
4972 /* This can happen when we take a BImode subreg of a DImode value,
4973 and that DImode value winds up in some non-GR register. */
4974 if (regno
>= 0 && ! GENERAL_REGNO_P (regno
) && ! PR_REGNO_P (regno
))
4986 /* Emit text to declare externally defined variables and functions, because
4987 the Intel assembler does not support undefined externals. */
4990 ia64_asm_output_external (FILE *file
, tree decl
, const char *name
)
4992 int save_referenced
;
4994 /* GNU as does not need anything here, but the HP linker does need
4995 something for external functions. */
4999 || TREE_CODE (decl
) != FUNCTION_DECL
5000 || strstr (name
, "__builtin_") == name
))
  /* ??? The Intel assembler creates a reference that needs to be satisfied by
     the linker when we do this, so we need to be careful not to do this for
     builtin functions which have no library equivalent.  Unfortunately, we
     can't tell here whether or not a function will actually be called by
     expand_expr, so we pull in library functions even if we may not need
     them.  */
5009 if (! strcmp (name
, "__builtin_next_arg")
5010 || ! strcmp (name
, "alloca")
5011 || ! strcmp (name
, "__builtin_constant_p")
5012 || ! strcmp (name
, "__builtin_args_info"))
5016 ia64_hpux_add_extern_decl (decl
);
  /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
     restore it.  */
5021 save_referenced
= TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl
));
5022 if (TREE_CODE (decl
) == FUNCTION_DECL
)
5023 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
5024 (*targetm
.asm_out
.globalize_label
) (file
, name
);
5025 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl
)) = save_referenced
;
5029 /* Parse the -mfixed-range= option string. */
5032 fix_range (const char *const_str
)
5035 char *str
, *dash
, *comma
;
  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use f32-f127.  */
5043 i
= strlen (const_str
);
5044 str
= (char *) alloca (i
+ 1);
5045 memcpy (str
, const_str
, i
+ 1);
5049 dash
= strchr (str
, '-');
5052 warning (0, "value of -mfixed-range must have form REG1-REG2");
5057 comma
= strchr (dash
+ 1, ',');
5061 first
= decode_reg_name (str
);
5064 warning (0, "unknown register name: %s", str
);
5068 last
= decode_reg_name (dash
+ 1);
5071 warning (0, "unknown register name: %s", dash
+ 1);
5079 warning (0, "%s-%s is an empty range", str
, dash
+ 1);
5083 for (i
= first
; i
<= last
; ++i
)
5084 fixed_regs
[i
] = call_used_regs
[i
] = 1;
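
/* A standalone sketch of the option syntax handled above; it only splits the
   string into ranges and does not map names to register numbers.  For
   example, "f32-f127,r8-r9" yields the ranges f32..f127 and r8..r9.  */
#if 0
#include <stdio.h>
#include <string.h>

static void
example_split_ranges (char *str)
{
  while (str)
    {
      char *comma = strchr (str, ',');
      char *dash;

      if (comma)
        *comma++ = '\0';
      dash = strchr (str, '-');
      if (dash)
        {
          *dash = '\0';
          printf ("range %s .. %s\n", str, dash + 1);
        }
      str = comma;
    }
}
#endif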
5094 /* Implement TARGET_HANDLE_OPTION. */
5097 ia64_handle_option (size_t code
, const char *arg
, int value
)
5101 case OPT_mfixed_range_
:
5105 case OPT_mtls_size_
:
5106 if (value
!= 14 && value
!= 22 && value
!= 64)
5107 error ("bad value %<%s%> for -mtls-size= switch", arg
);
5114 const char *name
; /* processor name or nickname. */
5115 enum processor_type processor
;
5117 const processor_alias_table
[] =
5119 {"itanium", PROCESSOR_ITANIUM
},
5120 {"itanium1", PROCESSOR_ITANIUM
},
5121 {"merced", PROCESSOR_ITANIUM
},
5122 {"itanium2", PROCESSOR_ITANIUM2
},
5123 {"mckinley", PROCESSOR_ITANIUM2
},
5125 int const pta_size
= ARRAY_SIZE (processor_alias_table
);
5128 for (i
= 0; i
< pta_size
; i
++)
5129 if (!strcmp (arg
, processor_alias_table
[i
].name
))
5131 ia64_tune
= processor_alias_table
[i
].processor
;
5135 error ("bad value %<%s%> for -mtune= switch", arg
);
5144 /* Implement OVERRIDE_OPTIONS. */
5147 ia64_override_options (void)
5149 if (TARGET_AUTO_PIC
)
5150 target_flags
|= MASK_CONST_GP
;
5152 if (TARGET_INLINE_SQRT
== INL_MIN_LAT
)
5154 warning (0, "not yet implemented: latency-optimized inline square root");
5155 TARGET_INLINE_SQRT
= INL_MAX_THR
;
5158 ia64_flag_schedule_insns2
= flag_schedule_insns_after_reload
;
5159 flag_schedule_insns_after_reload
= 0;
5161 ia64_section_threshold
= g_switch_set
? g_switch_value
: IA64_DEFAULT_GVALUE
;
5163 init_machine_status
= ia64_init_machine_status
;
5166 static struct machine_function
*
5167 ia64_init_machine_status (void)
5169 return ggc_alloc_cleared (sizeof (struct machine_function
));
5172 static enum attr_itanium_class
ia64_safe_itanium_class (rtx
);
5173 static enum attr_type
ia64_safe_type (rtx
);
5175 static enum attr_itanium_class
5176 ia64_safe_itanium_class (rtx insn
)
5178 if (recog_memoized (insn
) >= 0)
5179 return get_attr_itanium_class (insn
);
5181 return ITANIUM_CLASS_UNKNOWN
;
5184 static enum attr_type
5185 ia64_safe_type (rtx insn
)
5187 if (recog_memoized (insn
) >= 0)
5188 return get_attr_type (insn
);
5190 return TYPE_UNKNOWN
;
5193 /* The following collection of routines emit instruction group stop bits as
5194 necessary to avoid dependencies. */
/* Need to track some additional registers as far as serialization is
   concerned so we can properly handle br.call and br.ret.  We could
   make these registers visible to gcc, but since these registers are
   never explicitly used in gcc generated code, it seems wasteful to
   do so (plus it would make the call and return patterns needlessly
   complex).  */
5202 #define REG_RP (BR_REG (0))
5203 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5204 /* This is used for volatile asms which may require a stop bit immediately
5205 before and after them. */
5206 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5207 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5208 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
/* For each register, we keep track of how it has been written in the
   current instruction group.

   If a register is written unconditionally (no qualifying predicate),
   WRITE_COUNT is set to 2 and FIRST_PRED is ignored.

   If a register is written if its qualifying predicate P is true, we
   set WRITE_COUNT to 1 and FIRST_PRED to P.  Later on, the same register
   may be written again by the complement of P (P^1) and when this happens,
   WRITE_COUNT gets set to 2.

   The result of this is that whenever an insn attempts to write a register
   whose WRITE_COUNT is two, we need to issue an insn group barrier first.

   If a predicate register is written by a floating-point insn, we set
   WRITTEN_BY_FP to true.

   If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
   to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true.  */
5230 struct reg_write_state
5232 unsigned int write_count
: 2;
5233 unsigned int first_pred
: 16;
5234 unsigned int written_by_fp
: 1;
5235 unsigned int written_by_and
: 1;
5236 unsigned int written_by_or
: 1;
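
/* A condensed sketch of the WRITE_COUNT bookkeeping described above; it is
   illustrative only and ignores the and/or and FP refinements tracked by the
   real code.  An unpredicated write jumps straight to 2; a write under
   predicate P goes to 1, and a later write under the complementary predicate
   (P ^ 1, with P even) completes the pair and also reaches 2.  */
#if 0
struct example_write_state
{
  int write_count;   /* 0, 1 or 2, as described above.  */
  int first_pred;    /* Predicate of the first write, if any.  */
};

/* Record a write under predicate PRED (0 means unpredicated); return 1 if a
   stop bit would be needed first.  */
static int
example_record_write (struct example_write_state *ws, int pred)
{
  if (ws->write_count == 2)
    return 1;
  if (ws->write_count == 1 && (ws->first_pred ^ 1) != pred)
    return 1;
  ws->first_pred = pred;
  ws->write_count = pred ? ws->write_count + 1 : 2;
  return 0;
}
#endif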
5239 /* Cumulative info for the current instruction group. */
5240 struct reg_write_state rws_sum
[NUM_REGS
];
5241 /* Info for the current instruction. This gets copied to rws_sum after a
5242 stop bit is emitted. */
5243 struct reg_write_state rws_insn
[NUM_REGS
];
5245 /* Indicates whether this is the first instruction after a stop bit,
5246 in which case we don't need another stop bit. Without this,
5247 ia64_variable_issue will die when scheduling an alloc. */
5248 static int first_instruction
;
5250 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5251 RTL for one instruction. */
5254 unsigned int is_write
: 1; /* Is register being written? */
5255 unsigned int is_fp
: 1; /* Is register used as part of an fp op? */
5256 unsigned int is_branch
: 1; /* Is register used as part of a branch? */
5257 unsigned int is_and
: 1; /* Is register used as part of and.orcm? */
5258 unsigned int is_or
: 1; /* Is register used as part of or.andcm? */
5259 unsigned int is_sibcall
: 1; /* Is this a sibling or normal call? */
5262 static void rws_update (struct reg_write_state
*, int, struct reg_flags
, int);
5263 static int rws_access_regno (int, struct reg_flags
, int);
5264 static int rws_access_reg (rtx
, struct reg_flags
, int);
5265 static void update_set_flags (rtx
, struct reg_flags
*);
5266 static int set_src_needs_barrier (rtx
, struct reg_flags
, int);
5267 static int rtx_needs_barrier (rtx
, struct reg_flags
, int);
5268 static void init_insn_group_barriers (void);
5269 static int group_barrier_needed (rtx
);
5270 static int safe_group_barrier_needed (rtx
);
5272 /* Update *RWS for REGNO, which is being written by the current instruction,
5273 with predicate PRED, and associated register flags in FLAGS. */
5276 rws_update (struct reg_write_state
*rws
, int regno
, struct reg_flags flags
, int pred
)
5279 rws
[regno
].write_count
++;
5281 rws
[regno
].write_count
= 2;
5282 rws
[regno
].written_by_fp
|= flags
.is_fp
;
5283 /* ??? Not tracking and/or across differing predicates. */
5284 rws
[regno
].written_by_and
= flags
.is_and
;
5285 rws
[regno
].written_by_or
= flags
.is_or
;
5286 rws
[regno
].first_pred
= pred
;
5289 /* Handle an access to register REGNO of type FLAGS using predicate register
5290 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
5291 a dependency with an earlier instruction in the same group. */
5294 rws_access_regno (int regno
, struct reg_flags flags
, int pred
)
5296 int need_barrier
= 0;
5298 gcc_assert (regno
< NUM_REGS
);
5300 if (! PR_REGNO_P (regno
))
5301 flags
.is_and
= flags
.is_or
= 0;
5307 /* One insn writes same reg multiple times? */
5308 gcc_assert (!rws_insn
[regno
].write_count
);
5310 /* Update info for current instruction. */
5311 rws_update (rws_insn
, regno
, flags
, pred
);
5312 write_count
= rws_sum
[regno
].write_count
;
5314 switch (write_count
)
5317 /* The register has not been written yet. */
5318 rws_update (rws_sum
, regno
, flags
, pred
);
5322 /* The register has been written via a predicate. If this is
5323 not a complementary predicate, then we need a barrier. */
5324 /* ??? This assumes that P and P+1 are always complementary
5325 predicates for P even. */
5326 if (flags
.is_and
&& rws_sum
[regno
].written_by_and
)
5328 else if (flags
.is_or
&& rws_sum
[regno
].written_by_or
)
5330 else if ((rws_sum
[regno
].first_pred
^ 1) != pred
)
5332 rws_update (rws_sum
, regno
, flags
, pred
);
        /* The register has been unconditionally written already.  We
           need a barrier.  */
5338 if (flags
.is_and
&& rws_sum
[regno
].written_by_and
)
5340 else if (flags
.is_or
&& rws_sum
[regno
].written_by_or
)
5344 rws_sum
[regno
].written_by_and
= flags
.is_and
;
5345 rws_sum
[regno
].written_by_or
= flags
.is_or
;
5354 if (flags
.is_branch
)
      /* Branches have several RAW exceptions that allow us to avoid
         barriers.  */
5359 if (REGNO_REG_CLASS (regno
) == BR_REGS
|| regno
== AR_PFS_REGNUM
)
5360 /* RAW dependencies on branch regs are permissible as long
5361 as the writer is a non-branch instruction. Since we
5362 never generate code that uses a branch register written
5363 by a branch instruction, handling this case is
5367 if (REGNO_REG_CLASS (regno
) == PR_REGS
5368 && ! rws_sum
[regno
].written_by_fp
)
5369 /* The predicates of a branch are available within the
5370 same insn group as long as the predicate was written by
5371 something other than a floating-point instruction. */
5375 if (flags
.is_and
&& rws_sum
[regno
].written_by_and
)
5377 if (flags
.is_or
&& rws_sum
[regno
].written_by_or
)
5380 switch (rws_sum
[regno
].write_count
)
5383 /* The register has not been written yet. */
5387 /* The register has been written via a predicate. If this is
5388 not a complementary predicate, then we need a barrier. */
5389 /* ??? This assumes that P and P+1 are always complementary
5390 predicates for P even. */
5391 if ((rws_sum
[regno
].first_pred
^ 1) != pred
)
        /* The register has been unconditionally written already.  We
           need a barrier.  */
5406 return need_barrier
;
5410 rws_access_reg (rtx reg
, struct reg_flags flags
, int pred
)
5412 int regno
= REGNO (reg
);
5413 int n
= HARD_REGNO_NREGS (REGNO (reg
), GET_MODE (reg
));
5416 return rws_access_regno (regno
, flags
, pred
);
5419 int need_barrier
= 0;
5421 need_barrier
|= rws_access_regno (regno
+ n
, flags
, pred
);
5422 return need_barrier
;
5426 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
5427 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
5430 update_set_flags (rtx x
, struct reg_flags
*pflags
)
5432 rtx src
= SET_SRC (x
);
5434 switch (GET_CODE (src
))
      /* There are three cases here:
         (1) The destination is (pc), in which case this is a branch,
             nothing here applies.
         (2) The destination is ar.lc, in which case this is a
             doloop_end_internal,
         (3) The destination is an fp register, in which case this is
             an fselect instruction.
         In all cases, nothing we do in this function applies.  */
5451 if (COMPARISON_P (src
)
5452 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src
, 0))))
5453 /* Set pflags->is_fp to 1 so that we know we're dealing
5454 with a floating point comparison when processing the
5455 destination of the SET. */
5458 /* Discover if this is a parallel comparison. We only handle
5459 and.orcm and or.andcm at present, since we must retain a
5460 strict inverse on the predicate pair. */
5461 else if (GET_CODE (src
) == AND
)
5463 else if (GET_CODE (src
) == IOR
)
/* Subroutine of rtx_needs_barrier; this function determines whether the
   source of a given SET rtx found in X needs a barrier.  FLAGS and PRED
   are as in rtx_needs_barrier.  COND is an rtx that holds the condition
   for X.  */
5476 set_src_needs_barrier (rtx x
, struct reg_flags flags
, int pred
)
5478 int need_barrier
= 0;
5480 rtx src
= SET_SRC (x
);
5482 if (GET_CODE (src
) == CALL
)
5483 /* We don't need to worry about the result registers that
5484 get written by subroutine call. */
5485 return rtx_needs_barrier (src
, flags
, pred
);
5486 else if (SET_DEST (x
) == pc_rtx
)
      /* X is a conditional branch.  */
      /* ??? This seems redundant, as the caller sets this bit for
         all JUMP_INSNs.  */
5491 flags
.is_branch
= 1;
5492 return rtx_needs_barrier (src
, flags
, pred
);
5495 need_barrier
= rtx_needs_barrier (src
, flags
, pred
);
5498 if (GET_CODE (dst
) == ZERO_EXTRACT
)
5500 need_barrier
|= rtx_needs_barrier (XEXP (dst
, 1), flags
, pred
);
5501 need_barrier
|= rtx_needs_barrier (XEXP (dst
, 2), flags
, pred
);
5502 dst
= XEXP (dst
, 0);
5504 return need_barrier
;
5507 /* Handle an access to rtx X of type FLAGS using predicate register
5508 PRED. Return 1 if this access creates a dependency with an earlier
5509 instruction in the same group. */
5512 rtx_needs_barrier (rtx x
, struct reg_flags flags
, int pred
)
5515 int is_complemented
= 0;
5516 int need_barrier
= 0;
5517 const char *format_ptr
;
5518 struct reg_flags new_flags
;
5526 switch (GET_CODE (x
))
5529 update_set_flags (x
, &new_flags
);
5530 need_barrier
= set_src_needs_barrier (x
, new_flags
, pred
);
5531 if (GET_CODE (SET_SRC (x
)) != CALL
)
5533 new_flags
.is_write
= 1;
5534 need_barrier
|= rtx_needs_barrier (SET_DEST (x
), new_flags
, pred
);
5539 new_flags
.is_write
= 0;
5540 need_barrier
|= rws_access_regno (AR_EC_REGNUM
, new_flags
, pred
);
5542 /* Avoid multiple register writes, in case this is a pattern with
5543 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5544 if (! flags
.is_sibcall
&& ! rws_insn
[REG_AR_CFM
].write_count
)
5546 new_flags
.is_write
= 1;
5547 need_barrier
|= rws_access_regno (REG_RP
, new_flags
, pred
);
5548 need_barrier
|= rws_access_regno (AR_PFS_REGNUM
, new_flags
, pred
);
5549 need_barrier
|= rws_access_regno (REG_AR_CFM
, new_flags
, pred
);
5554 /* X is a predicated instruction. */
5556 cond
= COND_EXEC_TEST (x
);
5558 need_barrier
= rtx_needs_barrier (cond
, flags
, 0);
5560 if (GET_CODE (cond
) == EQ
)
5561 is_complemented
= 1;
5562 cond
= XEXP (cond
, 0);
5563 gcc_assert (GET_CODE (cond
) == REG
5564 && REGNO_REG_CLASS (REGNO (cond
)) == PR_REGS
);
5565 pred
= REGNO (cond
);
5566 if (is_complemented
)
5569 need_barrier
|= rtx_needs_barrier (COND_EXEC_CODE (x
), flags
, pred
);
5570 return need_barrier
;
5574 /* Clobber & use are for earlier compiler-phases only. */
5579 /* We always emit stop bits for traditional asms. We emit stop bits
5580 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5581 if (GET_CODE (x
) != ASM_OPERANDS
5582 || (MEM_VOLATILE_P (x
) && TARGET_VOL_ASM_STOP
))
5584 /* Avoid writing the register multiple times if we have multiple
5585 asm outputs. This avoids a failure in rws_access_reg. */
5586 if (! rws_insn
[REG_VOLATILE
].write_count
)
5588 new_flags
.is_write
= 1;
5589 rws_access_regno (REG_VOLATILE
, new_flags
, pred
);
      /* For all ASM_OPERANDS, we must traverse the vector of input operands.
         We cannot just fall through here since then we would be confused
         by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
         traditional asms unlike their normal usage.  */
5599 for (i
= ASM_OPERANDS_INPUT_LENGTH (x
) - 1; i
>= 0; --i
)
5600 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x
, i
), flags
, pred
))
5605 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; --i
)
5607 rtx pat
= XVECEXP (x
, 0, i
);
5608 switch (GET_CODE (pat
))
5611 update_set_flags (pat
, &new_flags
);
5612 need_barrier
|= set_src_needs_barrier (pat
, new_flags
, pred
);
5618 need_barrier
|= rtx_needs_barrier (pat
, flags
, pred
);
5629 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; --i
)
5631 rtx pat
= XVECEXP (x
, 0, i
);
5632 if (GET_CODE (pat
) == SET
)
5634 if (GET_CODE (SET_SRC (pat
)) != CALL
)
5636 new_flags
.is_write
= 1;
5637 need_barrier
|= rtx_needs_barrier (SET_DEST (pat
), new_flags
,
5641 else if (GET_CODE (pat
) == CLOBBER
|| GET_CODE (pat
) == RETURN
)
5642 need_barrier
|= rtx_needs_barrier (pat
, flags
, pred
);
5647 need_barrier
|= rtx_needs_barrier (SUBREG_REG (x
), flags
, pred
);
5650 if (REGNO (x
) == AR_UNAT_REGNUM
)
5652 for (i
= 0; i
< 64; ++i
)
5653 need_barrier
|= rws_access_regno (AR_UNAT_BIT_0
+ i
, flags
, pred
);
5656 need_barrier
= rws_access_reg (x
, flags
, pred
);
5660 /* Find the regs used in memory address computation. */
5661 new_flags
.is_write
= 0;
5662 need_barrier
= rtx_needs_barrier (XEXP (x
, 0), new_flags
, pred
);
5665 case CONST_INT
: case CONST_DOUBLE
: case CONST_VECTOR
:
5666 case SYMBOL_REF
: case LABEL_REF
: case CONST
:
5669 /* Operators with side-effects. */
5670 case POST_INC
: case POST_DEC
:
5671 gcc_assert (GET_CODE (XEXP (x
, 0)) == REG
);
5673 new_flags
.is_write
= 0;
5674 need_barrier
= rws_access_reg (XEXP (x
, 0), new_flags
, pred
);
5675 new_flags
.is_write
= 1;
5676 need_barrier
|= rws_access_reg (XEXP (x
, 0), new_flags
, pred
);
5680 gcc_assert (GET_CODE (XEXP (x
, 0)) == REG
);
5682 new_flags
.is_write
= 0;
5683 need_barrier
= rws_access_reg (XEXP (x
, 0), new_flags
, pred
);
5684 need_barrier
|= rtx_needs_barrier (XEXP (x
, 1), new_flags
, pred
);
5685 new_flags
.is_write
= 1;
5686 need_barrier
|= rws_access_reg (XEXP (x
, 0), new_flags
, pred
);
5689 /* Handle common unary and binary ops for efficiency. */
5690 case COMPARE
: case PLUS
: case MINUS
: case MULT
: case DIV
:
5691 case MOD
: case UDIV
: case UMOD
: case AND
: case IOR
:
5692 case XOR
: case ASHIFT
: case ROTATE
: case ASHIFTRT
: case LSHIFTRT
:
5693 case ROTATERT
: case SMIN
: case SMAX
: case UMIN
: case UMAX
:
5694 case NE
: case EQ
: case GE
: case GT
: case LE
:
5695 case LT
: case GEU
: case GTU
: case LEU
: case LTU
:
5696 need_barrier
= rtx_needs_barrier (XEXP (x
, 0), new_flags
, pred
);
5697 need_barrier
|= rtx_needs_barrier (XEXP (x
, 1), new_flags
, pred
);
5700 case NEG
: case NOT
: case SIGN_EXTEND
: case ZERO_EXTEND
:
5701 case TRUNCATE
: case FLOAT_EXTEND
: case FLOAT_TRUNCATE
: case FLOAT
:
5702 case FIX
: case UNSIGNED_FLOAT
: case UNSIGNED_FIX
: case ABS
:
5703 case SQRT
: case FFS
: case POPCOUNT
:
5704 need_barrier
= rtx_needs_barrier (XEXP (x
, 0), flags
, pred
);
5708 /* VEC_SELECT's second argument is a PARALLEL with integers that
5709 describe the elements selected. On ia64, those integers are
5710 always constants. Avoid walking the PARALLEL so that we don't
5711 get confused with "normal" parallels and then die. */
5712 need_barrier
= rtx_needs_barrier (XEXP (x
, 0), flags
, pred
);
5716 switch (XINT (x
, 1))
5718 case UNSPEC_LTOFF_DTPMOD
:
5719 case UNSPEC_LTOFF_DTPREL
:
5721 case UNSPEC_LTOFF_TPREL
:
5723 case UNSPEC_PRED_REL_MUTEX
:
5724 case UNSPEC_PIC_CALL
:
5726 case UNSPEC_FETCHADD_ACQ
:
5727 case UNSPEC_BSP_VALUE
:
5728 case UNSPEC_FLUSHRS
:
5729 case UNSPEC_BUNDLE_SELECTOR
:
5732 case UNSPEC_GR_SPILL
:
5733 case UNSPEC_GR_RESTORE
:
5735 HOST_WIDE_INT offset
= INTVAL (XVECEXP (x
, 0, 1));
5736 HOST_WIDE_INT bit
= (offset
>> 3) & 63;
5738 need_barrier
= rtx_needs_barrier (XVECEXP (x
, 0, 0), flags
, pred
);
5739 new_flags
.is_write
= (XINT (x
, 1) == UNSPEC_GR_SPILL
);
5740 need_barrier
|= rws_access_regno (AR_UNAT_BIT_0
+ bit
,
5745 case UNSPEC_FR_SPILL
:
5746 case UNSPEC_FR_RESTORE
:
5747 case UNSPEC_GETF_EXP
:
5748 case UNSPEC_SETF_EXP
:
5750 case UNSPEC_FR_SQRT_RECIP_APPROX
:
5751 need_barrier
= rtx_needs_barrier (XVECEXP (x
, 0, 0), flags
, pred
);
5754 case UNSPEC_FR_RECIP_APPROX
:
5756 case UNSPEC_COPYSIGN
:
5757 need_barrier
= rtx_needs_barrier (XVECEXP (x
, 0, 0), flags
, pred
);
5758 need_barrier
|= rtx_needs_barrier (XVECEXP (x
, 0, 1), flags
, pred
);
5761 case UNSPEC_CMPXCHG_ACQ
:
5762 need_barrier
= rtx_needs_barrier (XVECEXP (x
, 0, 1), flags
, pred
);
5763 need_barrier
|= rtx_needs_barrier (XVECEXP (x
, 0, 2), flags
, pred
);
5771 case UNSPEC_VOLATILE
:
5772 switch (XINT (x
, 1))
        /* Alloc must always be the first instruction of a group.
           We force this by always returning true.  */
        /* ??? We might get better scheduling if we explicitly check for
           input/local/output register dependencies, and modify the
           scheduler so that alloc is always reordered to the start of
           the current group.  We could then eliminate all of the
           first_instruction code.  */
5782 rws_access_regno (AR_PFS_REGNUM
, flags
, pred
);
5784 new_flags
.is_write
= 1;
5785 rws_access_regno (REG_AR_CFM
, new_flags
, pred
);
5788 case UNSPECV_SET_BSP
:
5792 case UNSPECV_BLOCKAGE
:
5793 case UNSPECV_INSN_GROUP_BARRIER
:
5795 case UNSPECV_PSAC_ALL
:
5796 case UNSPECV_PSAC_NORMAL
:
5805 new_flags
.is_write
= 0;
5806 need_barrier
= rws_access_regno (REG_RP
, flags
, pred
);
5807 need_barrier
|= rws_access_regno (AR_PFS_REGNUM
, flags
, pred
);
5809 new_flags
.is_write
= 1;
5810 need_barrier
|= rws_access_regno (AR_EC_REGNUM
, new_flags
, pred
);
5811 need_barrier
|= rws_access_regno (REG_AR_CFM
, new_flags
, pred
);
5815 format_ptr
= GET_RTX_FORMAT (GET_CODE (x
));
5816 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
5817 switch (format_ptr
[i
])
5819 case '0': /* unused field */
5820 case 'i': /* integer */
5821 case 'n': /* note */
5822 case 'w': /* wide integer */
5823 case 's': /* pointer to string */
5824 case 'S': /* optional pointer to string */
5828 if (rtx_needs_barrier (XEXP (x
, i
), flags
, pred
))
5833 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; --j
)
5834 if (rtx_needs_barrier (XVECEXP (x
, i
, j
), flags
, pred
))
5843 return need_barrier
;
5846 /* Clear out the state for group_barrier_needed at the start of a
5847 sequence of insns. */
5850 init_insn_group_barriers (void)
5852 memset (rws_sum
, 0, sizeof (rws_sum
));
5853 first_instruction
= 1;
5856 /* Given the current state, determine whether a group barrier (a stop bit) is
5857 necessary before INSN. Return nonzero if so. This modifies the state to
5858 include the effects of INSN as a side-effect. */
5861 group_barrier_needed (rtx insn
)
5864 int need_barrier
= 0;
5865 struct reg_flags flags
;
5867 memset (&flags
, 0, sizeof (flags
));
5868 switch (GET_CODE (insn
))
5874 /* A barrier doesn't imply an instruction group boundary. */
5878 memset (rws_insn
, 0, sizeof (rws_insn
));
5882 flags
.is_branch
= 1;
5883 flags
.is_sibcall
= SIBLING_CALL_P (insn
);
5884 memset (rws_insn
, 0, sizeof (rws_insn
));
5886 /* Don't bundle a call following another call. */
5887 if ((pat
= prev_active_insn (insn
))
5888 && GET_CODE (pat
) == CALL_INSN
)
5894 need_barrier
= rtx_needs_barrier (PATTERN (insn
), flags
, 0);
5898 flags
.is_branch
= 1;
5900 /* Don't bundle a jump following a call. */
5901 if ((pat
= prev_active_insn (insn
))
5902 && GET_CODE (pat
) == CALL_INSN
)
5910 if (GET_CODE (PATTERN (insn
)) == USE
5911 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
5912 /* Don't care about USE and CLOBBER "insns"---those are used to
5913 indicate to the optimizer that it shouldn't get rid of
5914 certain operations. */
5917 pat
= PATTERN (insn
);
5919 /* Ug. Hack hacks hacked elsewhere. */
5920 switch (recog_memoized (insn
))
5922 /* We play dependency tricks with the epilogue in order
5923 to get proper schedules. Undo this for dv analysis. */
5924 case CODE_FOR_epilogue_deallocate_stack
:
5925 case CODE_FOR_prologue_allocate_stack
:
5926 pat
= XVECEXP (pat
, 0, 0);
5929 /* The pattern we use for br.cloop confuses the code above.
5930 The second element of the vector is representative. */
5931 case CODE_FOR_doloop_end_internal
:
5932 pat
= XVECEXP (pat
, 0, 1);
5935 /* Doesn't generate code. */
5936 case CODE_FOR_pred_rel_mutex
:
5937 case CODE_FOR_prologue_use
:
5944 memset (rws_insn
, 0, sizeof (rws_insn
));
5945 need_barrier
= rtx_needs_barrier (pat
, flags
, 0);
      /* Check to see if the previous instruction was a volatile
         asm.  */
5950 need_barrier
= rws_access_regno (REG_VOLATILE
, flags
, 0);
5957 if (first_instruction
&& INSN_P (insn
)
5958 && ia64_safe_itanium_class (insn
) != ITANIUM_CLASS_IGNORE
5959 && GET_CODE (PATTERN (insn
)) != USE
5960 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
5963 first_instruction
= 0;
5966 return need_barrier
;
5969 /* Like group_barrier_needed, but do not clobber the current state. */
5972 safe_group_barrier_needed (rtx insn
)
5974 struct reg_write_state rws_saved
[NUM_REGS
];
5975 int saved_first_instruction
;
5978 memcpy (rws_saved
, rws_sum
, NUM_REGS
* sizeof *rws_saved
);
5979 saved_first_instruction
= first_instruction
;
5981 t
= group_barrier_needed (insn
);
5983 memcpy (rws_sum
, rws_saved
, NUM_REGS
* sizeof *rws_saved
);
5984 first_instruction
= saved_first_instruction
;
/* Scan the current function and insert stop bits as necessary to
   eliminate dependencies.  This function assumes that a final
   instruction scheduling pass has been run which has already
   inserted most of the necessary stop bits.  This function only
   inserts new ones at basic block boundaries, since these are
   invisible to the scheduler.  */
5997 emit_insn_group_barriers (FILE *dump
)
6001 int insns_since_last_label
= 0;
6003 init_insn_group_barriers ();
6005 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
6007 if (GET_CODE (insn
) == CODE_LABEL
)
6009 if (insns_since_last_label
)
6011 insns_since_last_label
= 0;
6013 else if (GET_CODE (insn
) == NOTE
6014 && NOTE_LINE_NUMBER (insn
) == NOTE_INSN_BASIC_BLOCK
)
6016 if (insns_since_last_label
)
6018 insns_since_last_label
= 0;
6020 else if (GET_CODE (insn
) == INSN
6021 && GET_CODE (PATTERN (insn
)) == UNSPEC_VOLATILE
6022 && XINT (PATTERN (insn
), 1) == UNSPECV_INSN_GROUP_BARRIER
)
6024 init_insn_group_barriers ();
6027 else if (INSN_P (insn
))
6029 insns_since_last_label
= 1;
6031 if (group_barrier_needed (insn
))
6036 fprintf (dump
, "Emitting stop before label %d\n",
6037 INSN_UID (last_label
));
6038 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label
);
6041 init_insn_group_barriers ();
6049 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6050 This function has to emit all necessary group barriers. */
6053 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED
)
6057 init_insn_group_barriers ();
6059 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
6061 if (GET_CODE (insn
) == BARRIER
)
6063 rtx last
= prev_active_insn (insn
);
6067 if (GET_CODE (last
) == JUMP_INSN
6068 && GET_CODE (PATTERN (last
)) == ADDR_DIFF_VEC
)
6069 last
= prev_active_insn (last
);
6070 if (recog_memoized (last
) != CODE_FOR_insn_group_barrier
)
6071 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last
);
6073 init_insn_group_barriers ();
6075 else if (INSN_P (insn
))
6077 if (recog_memoized (insn
) == CODE_FOR_insn_group_barrier
)
6078 init_insn_group_barriers ();
6079 else if (group_barrier_needed (insn
))
6081 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn
);
6082 init_insn_group_barriers ();
6083 group_barrier_needed (insn
);
6091 /* Instruction scheduling support. */
6093 #define NR_BUNDLES 10
6095 /* A list of names of all available bundles. */
6097 static const char *bundle_name
[NR_BUNDLES
] =
6103 #if NR_BUNDLES == 10
6113 /* Nonzero if we should insert stop bits into the schedule. */
6115 int ia64_final_schedule
= 0;
6117 /* Codes of the corresponding queried units: */
6119 static int _0mii_
, _0mmi_
, _0mfi_
, _0mmf_
;
6120 static int _0bbb_
, _0mbb_
, _0mib_
, _0mmb_
, _0mfb_
, _0mlx_
;
6122 static int _1mii_
, _1mmi_
, _1mfi_
, _1mmf_
;
6123 static int _1bbb_
, _1mbb_
, _1mib_
, _1mmb_
, _1mfb_
, _1mlx_
;
6125 static int pos_1
, pos_2
, pos_3
, pos_4
, pos_5
, pos_6
;
6127 /* The following variable value is an insn group barrier. */
6129 static rtx dfa_stop_insn
;
6131 /* The following variable value is the last issued insn. */
6133 static rtx last_scheduled_insn
;
6135 /* The following variable value is size of the DFA state. */
6137 static size_t dfa_state_size
;
6139 /* The following variable value is pointer to a DFA state used as
6140 temporary variable. */
6142 static state_t temp_dfa_state
= NULL
;
6144 /* The following variable value is DFA state after issuing the last
6147 static state_t prev_cycle_state
= NULL
;
6149 /* The following array element values are TRUE if the corresponding
6150 insn requires to add stop bits before it. */
6152 static char *stops_p
;
6154 /* The following variable is used to set up the mentioned above array. */
6156 static int stop_before_p
= 0;
6158 /* The following variable value is length of the arrays `clocks' and
6161 static int clocks_length
;
6163 /* The following array element values are cycles on which the
6164 corresponding insn will be issued. The array is used only for
6169 /* The following array element values are numbers of cycles should be
6170 added to improve insn scheduling for MM_insns for Itanium1. */
6172 static int *add_cycles
;
6174 static rtx
ia64_single_set (rtx
);
6175 static void ia64_emit_insn_before (rtx
, rtx
);
6177 /* Map a bundle number to its pseudo-op. */
6180 get_bundle_name (int b
)
6182 return bundle_name
[b
];
6186 /* Return the maximum number of instructions a cpu can issue. */
6189 ia64_issue_rate (void)
6194 /* Helper function - like single_set, but look inside COND_EXEC. */
6197 ia64_single_set (rtx insn
)
6199 rtx x
= PATTERN (insn
), ret
;
6200 if (GET_CODE (x
) == COND_EXEC
)
6201 x
= COND_EXEC_CODE (x
);
6202 if (GET_CODE (x
) == SET
)
6205 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6206 Although they are not classical single set, the second set is there just
6207 to protect it from moving past FP-relative stack accesses. */
6208 switch (recog_memoized (insn
))
6210 case CODE_FOR_prologue_allocate_stack
:
6211 case CODE_FOR_epilogue_deallocate_stack
:
6212 ret
= XVECEXP (x
, 0, 0);
6216 ret
= single_set_2 (insn
, x
);
6223 /* Adjust the cost of a scheduling dependency. Return the new cost of
6224 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6227 ia64_adjust_cost (rtx insn
, rtx link
, rtx dep_insn
, int cost
)
6229 enum attr_itanium_class dep_class
;
6230 enum attr_itanium_class insn_class
;
6232 if (REG_NOTE_KIND (link
) != REG_DEP_OUTPUT
)
6235 insn_class
= ia64_safe_itanium_class (insn
);
6236 dep_class
= ia64_safe_itanium_class (dep_insn
);
6237 if (dep_class
== ITANIUM_CLASS_ST
|| dep_class
== ITANIUM_CLASS_STF
6238 || insn_class
== ITANIUM_CLASS_ST
|| insn_class
== ITANIUM_CLASS_STF
)
6244 /* Like emit_insn_before, but skip cycle_display notes.
6245 ??? When cycle display notes are implemented, update this. */
6248 ia64_emit_insn_before (rtx insn
, rtx before
)
6250 emit_insn_before (insn
, before
);
/* The following function marks insns that produce addresses for load
   and store insns.  Such insns will be placed into M slots because that
   decreases the latency time for Itanium1 (see function
   `ia64_produce_address_p' and the DFA descriptions).  */
6259 ia64_dependencies_evaluation_hook (rtx head
, rtx tail
)
6261 rtx insn
, link
, next
, next_tail
;
6263 /* Before reload, which_alternative is not set, which means that
6264 ia64_safe_itanium_class will produce wrong results for (at least)
6265 move instructions. */
6266 if (!reload_completed
)
6269 next_tail
= NEXT_INSN (tail
);
6270 for (insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
6273 for (insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
6275 && ia64_safe_itanium_class (insn
) == ITANIUM_CLASS_IALU
)
6277 for (link
= INSN_DEPEND (insn
); link
!= 0; link
= XEXP (link
, 1))
6279 enum attr_itanium_class c
;
6281 if (REG_NOTE_KIND (link
) != REG_DEP_TRUE
)
6283 next
= XEXP (link
, 0);
6284 c
= ia64_safe_itanium_class (next
);
6285 if ((c
== ITANIUM_CLASS_ST
6286 || c
== ITANIUM_CLASS_STF
)
6287 && ia64_st_address_bypass_p (insn
, next
))
6289 else if ((c
== ITANIUM_CLASS_LD
6290 || c
== ITANIUM_CLASS_FLD
6291 || c
== ITANIUM_CLASS_FLDP
)
6292 && ia64_ld_address_bypass_p (insn
, next
))
6295 insn
->call
= link
!= 0;
6299 /* We're beginning a new block. Initialize data structures as necessary. */
6302 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED
,
6303 int sched_verbose ATTRIBUTE_UNUSED
,
6304 int max_ready ATTRIBUTE_UNUSED
)
6306 #ifdef ENABLE_CHECKING
6309 if (reload_completed
)
6310 for (insn
= NEXT_INSN (current_sched_info
->prev_head
);
6311 insn
!= current_sched_info
->next_tail
;
6312 insn
= NEXT_INSN (insn
))
6313 gcc_assert (!SCHED_GROUP_P (insn
));
6315 last_scheduled_insn
= NULL_RTX
;
6316 init_insn_group_barriers ();
6319 /* We are about to being issuing insns for this clock cycle.
6320 Override the default sort algorithm to better slot instructions. */
6323 ia64_dfa_sched_reorder (FILE *dump
, int sched_verbose
, rtx
*ready
,
6324 int *pn_ready
, int clock_var ATTRIBUTE_UNUSED
,
6328 int n_ready
= *pn_ready
;
6329 rtx
*e_ready
= ready
+ n_ready
;
6333 fprintf (dump
, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type
);
6335 if (reorder_type
== 0)
6337 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6339 for (insnp
= ready
; insnp
< e_ready
; insnp
++)
6340 if (insnp
< e_ready
)
6343 enum attr_type t
= ia64_safe_type (insn
);
6344 if (t
== TYPE_UNKNOWN
)
6346 if (GET_CODE (PATTERN (insn
)) == ASM_INPUT
6347 || asm_noperands (PATTERN (insn
)) >= 0)
6349 rtx lowest
= ready
[n_asms
];
6350 ready
[n_asms
] = insn
;
6356 rtx highest
= ready
[n_ready
- 1];
6357 ready
[n_ready
- 1] = insn
;
6364 if (n_asms
< n_ready
)
6366 /* Some normal insns to process. Skip the asms. */
6370 else if (n_ready
> 0)
6374 if (ia64_final_schedule
)
6377 int nr_need_stop
= 0;
6379 for (insnp
= ready
; insnp
< e_ready
; insnp
++)
6380 if (safe_group_barrier_needed (*insnp
))
6383 if (reorder_type
== 1 && n_ready
== nr_need_stop
)
6385 if (reorder_type
== 0)
6388 /* Move down everything that needs a stop bit, preserving
6390 while (insnp
-- > ready
+ deleted
)
6391 while (insnp
>= ready
+ deleted
)
6394 if (! safe_group_barrier_needed (insn
))
6396 memmove (ready
+ 1, ready
, (insnp
- ready
) * sizeof (rtx
));
/* We are about to begin issuing insns for this clock cycle.  Override
   the default sort algorithm to better slot instructions.  */

static int
ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
		    int clock_var)
{
  return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
				 pn_ready, clock_var, 0);
}
/* Like ia64_sched_reorder, but called after issuing each insn.
   Override the default sort algorithm to better slot instructions.  */

static int
ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
		     int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
		     int *pn_ready, int clock_var)
{
  if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
    clocks [INSN_UID (last_scheduled_insn)] = clock_var;
  return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
				 clock_var, 1);
}
/* We are about to issue INSN.  Return the number of insns left on the
   ready queue that can be issued this cycle.  */

static int
ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
		     int sched_verbose ATTRIBUTE_UNUSED,
		     rtx insn ATTRIBUTE_UNUSED,
		     int can_issue_more ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = insn;
  memcpy (prev_cycle_state, curr_state, dfa_state_size);
  if (reload_completed)
    {
      int needed = group_barrier_needed (insn);

      gcc_assert (!needed);
      if (GET_CODE (insn) == CALL_INSN)
	init_insn_group_barriers ();
      stops_p [INSN_UID (insn)] = stop_before_p;
      stop_before_p = 0;
    }
  return 1;
}
/* We are choosing insn from the ready queue.  Return nonzero if INSN
   can be chosen.  */

static int
ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
{
  gcc_assert (insn && INSN_P (insn));
  return (!reload_completed
	  || !safe_group_barrier_needed (insn));
}
/* The following variable value is a pseudo-insn used by the DFA insn
   scheduler to change the DFA state when the simulated clock is
   increased.  */

static rtx dfa_pre_cycle_insn;
/* We are about to begin issuing INSN.  Return nonzero if we cannot
   issue it on the given cycle CLOCK, and return zero if we should not
   sort the ready queue on the next clock start.  */
6478 ia64_dfa_new_cycle (FILE *dump
, int verbose
, rtx insn
, int last_clock
,
6479 int clock
, int *sort_p
)
6481 int setup_clocks_p
= FALSE
;
6483 gcc_assert (insn
&& INSN_P (insn
));
6484 if ((reload_completed
&& safe_group_barrier_needed (insn
))
6485 || (last_scheduled_insn
6486 && (GET_CODE (last_scheduled_insn
) == CALL_INSN
6487 || GET_CODE (PATTERN (last_scheduled_insn
)) == ASM_INPUT
6488 || asm_noperands (PATTERN (last_scheduled_insn
)) >= 0)))
6490 init_insn_group_barriers ();
6491 if (verbose
&& dump
)
6492 fprintf (dump
, "// Stop should be before %d%s\n", INSN_UID (insn
),
6493 last_clock
== clock
? " + cycle advance" : "");
6495 if (last_clock
== clock
)
6497 state_transition (curr_state
, dfa_stop_insn
);
6498 if (TARGET_EARLY_STOP_BITS
)
6499 *sort_p
= (last_scheduled_insn
== NULL_RTX
6500 || GET_CODE (last_scheduled_insn
) != CALL_INSN
);
6505 else if (reload_completed
)
6506 setup_clocks_p
= TRUE
;
6507 if (GET_CODE (PATTERN (last_scheduled_insn
)) == ASM_INPUT
6508 || asm_noperands (PATTERN (last_scheduled_insn
)) >= 0)
6509 state_reset (curr_state
);
6512 memcpy (curr_state
, prev_cycle_state
, dfa_state_size
);
6513 state_transition (curr_state
, dfa_stop_insn
);
6514 state_transition (curr_state
, dfa_pre_cycle_insn
);
6515 state_transition (curr_state
, NULL
);
6518 else if (reload_completed
)
6519 setup_clocks_p
= TRUE
;
6520 if (setup_clocks_p
&& ia64_tune
== PROCESSOR_ITANIUM
6521 && GET_CODE (PATTERN (insn
)) != ASM_INPUT
6522 && asm_noperands (PATTERN (insn
)) < 0)
6524 enum attr_itanium_class c
= ia64_safe_itanium_class (insn
);
6526 if (c
!= ITANIUM_CLASS_MMMUL
&& c
!= ITANIUM_CLASS_MMSHF
)
6531 for (link
= LOG_LINKS (insn
); link
; link
= XEXP (link
, 1))
6532 if (REG_NOTE_KIND (link
) == 0)
6534 enum attr_itanium_class dep_class
;
6535 rtx dep_insn
= XEXP (link
, 0);
6537 dep_class
= ia64_safe_itanium_class (dep_insn
);
6538 if ((dep_class
== ITANIUM_CLASS_MMMUL
6539 || dep_class
== ITANIUM_CLASS_MMSHF
)
6540 && last_clock
- clocks
[INSN_UID (dep_insn
)] < 4
6542 || last_clock
- clocks
[INSN_UID (dep_insn
)] < d
))
6543 d
= last_clock
- clocks
[INSN_UID (dep_insn
)];
6546 add_cycles
[INSN_UID (insn
)] = 3 - d
;
/* The following page contains abstract data `bundle states' which are
   used for bundling insns (inserting nops and template generation).  */

/* The following describes the state of insn bundling.  */

struct bundle_state
{
  /* Unique bundle state number to identify them in the debugging
     output.  */
  int unique_num;
  rtx insn;     /* corresponding insn, NULL for the 1st and the last state */
  /* number of nops before and after the insn */
  short before_nops_num, after_nops_num;
  int insn_num; /* insn number (0 for the initial state, 1 for the 1st
		   insn) */
  int cost;     /* cost of the state in cycles */
  int accumulated_insns_num; /* number of all previous insns including
				nops.  L is considered as 2 insns */
  int branch_deviation; /* deviation of previous branches from 3rd slots */
  struct bundle_state *next;  /* next state with the same insn_num */
  struct bundle_state *originator; /* originator (previous insn state) */
  /* All bundle states are in the following chain.  */
  struct bundle_state *allocated_states_chain;
  /* The DFA State after issuing the insn and the nops.  */
  state_t dfa_state;
};

/* The following maps an insn number to the corresponding bundle state.  */

static struct bundle_state **index_to_bundle_states;

/* The unique number of the next bundle state.  */

static int bundle_states_num;

/* All allocated bundle states are in the following chain.  */

static struct bundle_state *allocated_bundle_states_chain;

/* All allocated but not used bundle states are in the following
   chain.  */

static struct bundle_state *free_bundle_state_chain;
/* The following function returns a free bundle state.  */

static struct bundle_state *
get_free_bundle_state (void)
{
  struct bundle_state *result;

  if (free_bundle_state_chain != NULL)
    {
      result = free_bundle_state_chain;
      free_bundle_state_chain = result->next;
    }
  else
    {
      result = xmalloc (sizeof (struct bundle_state));
      result->dfa_state = xmalloc (dfa_state_size);
      result->allocated_states_chain = allocated_bundle_states_chain;
      allocated_bundle_states_chain = result;
    }
  result->unique_num = bundle_states_num++;
  return result;
}

/* The following function frees the given bundle state.  */

static void
free_bundle_state (struct bundle_state *state)
{
  state->next = free_bundle_state_chain;
  free_bundle_state_chain = state;
}

/* Start work with abstract data `bundle states'.  */

static void
initiate_bundle_states (void)
{
  bundle_states_num = 0;
  free_bundle_state_chain = NULL;
  allocated_bundle_states_chain = NULL;
}
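
/* Illustrative sketch (not part of the original sources): the functions
   above implement a classic free-list allocator -- freed states are
   pushed onto `free_bundle_state_chain' and reused before any new
   xmalloc, while `allocated_bundle_states_chain' remembers every
   allocation so that finish_bundle_states can release everything in one
   sweep.  The stand-alone model below shows the same idea with a
   hypothetical `toy_node' type and plain malloc; all names are invented
   for illustration and error handling is omitted.  */
#if 0
#include <stdlib.h>

struct toy_node
{
  struct toy_node *next;            /* free-list link */
  struct toy_node *allocated_chain; /* chain of every allocation */
  int payload;
};

static struct toy_node *toy_free_chain;
static struct toy_node *toy_allocated_chain;

static struct toy_node *
toy_get_free_node (void)
{
  struct toy_node *result;

  if (toy_free_chain != NULL)
    {
      /* Reuse a previously freed node.  */
      result = toy_free_chain;
      toy_free_chain = result->next;
    }
  else
    {
      /* Allocate a fresh node and remember it for bulk release.  */
      result = malloc (sizeof (struct toy_node));
      result->allocated_chain = toy_allocated_chain;
      toy_allocated_chain = result;
    }
  return result;
}

static void
toy_free_node (struct toy_node *n)
{
  /* O(1): just push the node onto the free list.  */
  n->next = toy_free_chain;
  toy_free_chain = n;
}
#endif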
/* Finish work with abstract data `bundle states'.  */

static void
finish_bundle_states (void)
{
  struct bundle_state *curr_state, *next_state;

  for (curr_state = allocated_bundle_states_chain;
       curr_state != NULL;
       curr_state = next_state)
    {
      next_state = curr_state->allocated_states_chain;
      free (curr_state->dfa_state);
      free (curr_state);
    }
}

/* Hash table of the bundle states.  The key is dfa_state and insn_num
   of the bundle states.  */

static htab_t bundle_state_table;
/* The function returns hash of BUNDLE_STATE.  */

static unsigned
bundle_state_hash (const void *bundle_state)
{
  const struct bundle_state *state = (struct bundle_state *) bundle_state;
  unsigned result, i;

  for (result = i = 0; i < dfa_state_size; i++)
    result += (((unsigned char *) state->dfa_state) [i]
	       << ((i % CHAR_BIT) * 3 + CHAR_BIT));
  return result + state->insn_num;
}
/* The function returns nonzero if the bundle state keys are equal.  */

static int
bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
{
  const struct bundle_state *state1 = (struct bundle_state *) bundle_state_1;
  const struct bundle_state *state2 = (struct bundle_state *) bundle_state_2;

  return (state1->insn_num == state2->insn_num
	  && memcmp (state1->dfa_state, state2->dfa_state,
		     dfa_state_size) == 0);
}
/* The function inserts BUNDLE_STATE into the hash table.  The
   function returns nonzero if the bundle has been inserted into the
   table.  The table contains the best bundle state with the given key.  */

static int
insert_bundle_state (struct bundle_state *bundle_state)
{
  void **entry_ptr;

  entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
  if (*entry_ptr == NULL)
    {
      bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
      index_to_bundle_states [bundle_state->insn_num] = bundle_state;
      *entry_ptr = (void *) bundle_state;
      return TRUE;
    }
  else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
	   || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
	       && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
		   > bundle_state->accumulated_insns_num
		   || (((struct bundle_state *)
			*entry_ptr)->accumulated_insns_num
		       == bundle_state->accumulated_insns_num
		       && ((struct bundle_state *)
			   *entry_ptr)->branch_deviation
		       > bundle_state->branch_deviation))))
    {
      struct bundle_state temp;

      temp = *(struct bundle_state *) *entry_ptr;
      *(struct bundle_state *) *entry_ptr = *bundle_state;
      ((struct bundle_state *) *entry_ptr)->next = temp.next;
      *bundle_state = temp;
    }
  return FALSE;
}
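
/* Illustrative sketch (not part of the original sources): the routine
   above is the dynamic-programming "relax" step -- for every key
   (dfa_state, insn_num) at most one bundle_state survives, namely the
   one with minimal cost, then minimal accumulated_insns_num, then
   minimal branch_deviation.  The toy code below shows the same
   lexicographic comparison and keep-the-best insertion, using a
   hypothetical fixed-size table in place of libiberty's htab; all names
   are invented and table overflow is not handled.  */
#if 0
struct toy_state
{
  unsigned key;                 /* stands in for (dfa_state, insn_num) */
  int cost, insns, deviation;
};

/* Return nonzero if A is strictly better than B under the same
   ordering that insert_bundle_state uses.  */
static int
toy_better_p (const struct toy_state *a, const struct toy_state *b)
{
  if (a->cost != b->cost)
    return a->cost < b->cost;
  if (a->insns != b->insns)
    return a->insns < b->insns;
  return a->deviation < b->deviation;
}

static struct toy_state toy_table[64];
static int toy_table_len;

/* Insert S, keeping only the best state per key.  Return nonzero if S
   became the representative for its key.  */
static int
toy_insert (const struct toy_state *s)
{
  int i;

  for (i = 0; i < toy_table_len; i++)
    if (toy_table[i].key == s->key)
      {
	if (toy_better_p (s, &toy_table[i]))
	  {
	    toy_table[i] = *s;
	    return 1;
	  }
	return 0;
      }
  toy_table[toy_table_len++] = *s;
  return 1;
}
#endif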
/* Start work with the hash table.  */

static void
initiate_bundle_state_table (void)
{
  bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
				    (htab_del) 0);
}

/* Finish work with the hash table.  */

static void
finish_bundle_state_table (void)
{
  htab_delete (bundle_state_table);
}

/* The following variable is an insn `nop' used to check bundle states
   with different numbers of inserted nops.  */

static rtx ia64_nop;
/* The following function tries to issue NOPS_NUM nops for the current
   state without advancing the processor cycle.  If it fails, the
   function returns FALSE and frees the current state.  */

static int
try_issue_nops (struct bundle_state *curr_state, int nops_num)
{
  int i;

  for (i = 0; i < nops_num; i++)
    if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
      {
	free_bundle_state (curr_state);
	return FALSE;
      }
  return TRUE;
}

/* The following function tries to issue INSN for the current
   state without advancing the processor cycle.  If it fails, the
   function returns FALSE and frees the current state.  */

static int
try_issue_insn (struct bundle_state *curr_state, rtx insn)
{
  if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
    {
      free_bundle_state (curr_state);
      return FALSE;
    }
  return TRUE;
}
/* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
   starting from ORIGINATOR without advancing the processor cycle.  If
   TRY_BUNDLE_END_P is TRUE, the function also (or only, when
   ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole
   bundle.  If it is successful, the function creates a new bundle state
   and inserts it into the hash table and into `index_to_bundle_states'.  */
6795 issue_nops_and_insn (struct bundle_state
*originator
, int before_nops_num
,
6796 rtx insn
, int try_bundle_end_p
, int only_bundle_end_p
)
6798 struct bundle_state
*curr_state
;
6800 curr_state
= get_free_bundle_state ();
6801 memcpy (curr_state
->dfa_state
, originator
->dfa_state
, dfa_state_size
);
6802 curr_state
->insn
= insn
;
6803 curr_state
->insn_num
= originator
->insn_num
+ 1;
6804 curr_state
->cost
= originator
->cost
;
6805 curr_state
->originator
= originator
;
6806 curr_state
->before_nops_num
= before_nops_num
;
6807 curr_state
->after_nops_num
= 0;
6808 curr_state
->accumulated_insns_num
6809 = originator
->accumulated_insns_num
+ before_nops_num
;
6810 curr_state
->branch_deviation
= originator
->branch_deviation
;
6812 if (INSN_CODE (insn
) == CODE_FOR_insn_group_barrier
)
6814 gcc_assert (GET_MODE (insn
) != TImode
);
6815 if (!try_issue_nops (curr_state
, before_nops_num
))
6817 if (!try_issue_insn (curr_state
, insn
))
6819 memcpy (temp_dfa_state
, curr_state
->dfa_state
, dfa_state_size
);
6820 if (state_transition (temp_dfa_state
, dfa_pre_cycle_insn
) >= 0
6821 && curr_state
->accumulated_insns_num
% 3 != 0)
6823 free_bundle_state (curr_state
);
6827 else if (GET_MODE (insn
) != TImode
)
6829 if (!try_issue_nops (curr_state
, before_nops_num
))
6831 if (!try_issue_insn (curr_state
, insn
))
6833 curr_state
->accumulated_insns_num
++;
6834 gcc_assert (GET_CODE (PATTERN (insn
)) != ASM_INPUT
6835 && asm_noperands (PATTERN (insn
)) < 0);
6837 if (ia64_safe_type (insn
) == TYPE_L
)
6838 curr_state
->accumulated_insns_num
++;
6842 /* If this is an insn that must be first in a group, then don't allow
6843 nops to be emitted before it. Currently, alloc is the only such
6844 supported instruction. */
6845 /* ??? The bundling automatons should handle this for us, but they do
6846 not yet have support for the first_insn attribute. */
6847 if (before_nops_num
> 0 && get_attr_first_insn (insn
) == FIRST_INSN_YES
)
6849 free_bundle_state (curr_state
);
6853 state_transition (curr_state
->dfa_state
, dfa_pre_cycle_insn
);
6854 state_transition (curr_state
->dfa_state
, NULL
);
6856 if (!try_issue_nops (curr_state
, before_nops_num
))
6858 if (!try_issue_insn (curr_state
, insn
))
6860 curr_state
->accumulated_insns_num
++;
6861 if (GET_CODE (PATTERN (insn
)) == ASM_INPUT
6862 || asm_noperands (PATTERN (insn
)) >= 0)
6864 /* Finish bundle containing asm insn. */
6865 curr_state
->after_nops_num
6866 = 3 - curr_state
->accumulated_insns_num
% 3;
6867 curr_state
->accumulated_insns_num
6868 += 3 - curr_state
->accumulated_insns_num
% 3;
6870 else if (ia64_safe_type (insn
) == TYPE_L
)
6871 curr_state
->accumulated_insns_num
++;
6873 if (ia64_safe_type (insn
) == TYPE_B
)
6874 curr_state
->branch_deviation
6875 += 2 - (curr_state
->accumulated_insns_num
- 1) % 3;
6876 if (try_bundle_end_p
&& curr_state
->accumulated_insns_num
% 3 != 0)
6878 if (!only_bundle_end_p
&& insert_bundle_state (curr_state
))
6881 struct bundle_state
*curr_state1
;
6882 struct bundle_state
*allocated_states_chain
;
6884 curr_state1
= get_free_bundle_state ();
6885 dfa_state
= curr_state1
->dfa_state
;
6886 allocated_states_chain
= curr_state1
->allocated_states_chain
;
6887 *curr_state1
= *curr_state
;
6888 curr_state1
->dfa_state
= dfa_state
;
6889 curr_state1
->allocated_states_chain
= allocated_states_chain
;
6890 memcpy (curr_state1
->dfa_state
, curr_state
->dfa_state
,
6892 curr_state
= curr_state1
;
6894 if (!try_issue_nops (curr_state
,
6895 3 - curr_state
->accumulated_insns_num
% 3))
6897 curr_state
->after_nops_num
6898 = 3 - curr_state
->accumulated_insns_num
% 3;
6899 curr_state
->accumulated_insns_num
6900 += 3 - curr_state
->accumulated_insns_num
% 3;
6902 if (!insert_bundle_state (curr_state
))
6903 free_bundle_state (curr_state
);
6907 /* The following function returns position in the two window bundle
6911 get_max_pos (state_t state
)
6913 if (cpu_unit_reservation_p (state
, pos_6
))
6915 else if (cpu_unit_reservation_p (state
, pos_5
))
6917 else if (cpu_unit_reservation_p (state
, pos_4
))
6919 else if (cpu_unit_reservation_p (state
, pos_3
))
6921 else if (cpu_unit_reservation_p (state
, pos_2
))
6923 else if (cpu_unit_reservation_p (state
, pos_1
))
/* The function returns the code of a possible template for the given
   position and state.  The function should be called only with 2 values
   of position equal to 3 or 6.  We avoid generating F NOPs by putting
   templates containing F insns at the end of the template search,
   because of an undocumented anomaly in McKinley-derived cores which can
   cause stalls if an F-unit insn (including a NOP) is issued within a
   six-cycle window after reading certain application registers (such
   as ar.bsp).  Furthermore, power considerations also argue against
   the use of F-unit instructions unless they're really needed.  */
6940 get_template (state_t state
, int pos
)
6945 if (cpu_unit_reservation_p (state
, _0mmi_
))
6947 else if (cpu_unit_reservation_p (state
, _0mii_
))
6949 else if (cpu_unit_reservation_p (state
, _0mmb_
))
6951 else if (cpu_unit_reservation_p (state
, _0mib_
))
6953 else if (cpu_unit_reservation_p (state
, _0mbb_
))
6955 else if (cpu_unit_reservation_p (state
, _0bbb_
))
6957 else if (cpu_unit_reservation_p (state
, _0mmf_
))
6959 else if (cpu_unit_reservation_p (state
, _0mfi_
))
6961 else if (cpu_unit_reservation_p (state
, _0mfb_
))
6963 else if (cpu_unit_reservation_p (state
, _0mlx_
))
6968 if (cpu_unit_reservation_p (state
, _1mmi_
))
6970 else if (cpu_unit_reservation_p (state
, _1mii_
))
6972 else if (cpu_unit_reservation_p (state
, _1mmb_
))
6974 else if (cpu_unit_reservation_p (state
, _1mib_
))
6976 else if (cpu_unit_reservation_p (state
, _1mbb_
))
6978 else if (cpu_unit_reservation_p (state
, _1bbb_
))
6980 else if (_1mmf_
>= 0 && cpu_unit_reservation_p (state
, _1mmf_
))
6982 else if (cpu_unit_reservation_p (state
, _1mfi_
))
6984 else if (cpu_unit_reservation_p (state
, _1mfb_
))
6986 else if (cpu_unit_reservation_p (state
, _1mlx_
))
6995 /* The following function returns an insn important for insn bundling
6996 followed by INSN and before TAIL. */
6999 get_next_important_insn (rtx insn
, rtx tail
)
7001 for (; insn
&& insn
!= tail
; insn
= NEXT_INSN (insn
))
7003 && ia64_safe_itanium_class (insn
) != ITANIUM_CLASS_IGNORE
7004 && GET_CODE (PATTERN (insn
)) != USE
7005 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
7010 /* Add a bundle selector TEMPLATE0 before INSN. */
7013 ia64_add_bundle_selector_before (int template0
, rtx insn
)
7015 rtx b
= gen_bundle_selector (GEN_INT (template0
));
7017 ia64_emit_insn_before (b
, insn
);
7018 #if NR_BUNDLES == 10
7019 if ((template0
== 4 || template0
== 5)
7020 && (flag_unwind_tables
|| (flag_exceptions
&& !USING_SJLJ_EXCEPTIONS
)))
7023 rtx note
= NULL_RTX
;
7025 /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
7026 first or second slot. If it is and has REG_EH_NOTE set, copy it
7027 to following nops, as br.call sets rp to the address of following
7028 bundle and therefore an EH region end must be on a bundle
7030 insn
= PREV_INSN (insn
);
7031 for (i
= 0; i
< 3; i
++)
7034 insn
= next_active_insn (insn
);
7035 while (GET_CODE (insn
) == INSN
7036 && get_attr_empty (insn
) == EMPTY_YES
);
7037 if (GET_CODE (insn
) == CALL_INSN
)
7038 note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
7043 gcc_assert ((code
= recog_memoized (insn
)) == CODE_FOR_nop
7044 || code
== CODE_FOR_nop_b
);
7045 if (find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
))
7049 = gen_rtx_EXPR_LIST (REG_EH_REGION
, XEXP (note
, 0),
/* The following function does insn bundling.  Bundling means
   inserting templates and nop insns to fit insn groups into permitted
   templates.  Instruction scheduling uses an NDFA (non-deterministic
   finite automaton) encoding information about the templates and the
   inserted nops.  Nondeterminism of the automaton permits following
   all possible insn sequences very quickly.

   Unfortunately it is not possible to get information about inserted
   nop insns and used templates from the automaton states.  The
   automaton only says that we can issue an insn, possibly inserting
   some nops before it and using some template.  Therefore insn
   bundling in this function is implemented by using a DFA
   (deterministic finite automaton).  We follow all possible insn
   sequences by inserting 0-2 nops (that is what the NDFA describes for
   insn scheduling) before/after each insn being bundled.  We know the
   start of a simulated processor cycle from insn scheduling (an insn
   starting a new cycle has TImode).

   A simple implementation of insn bundling would create an enormous
   number of possible insn sequences satisfying the information about
   new cycle ticks taken from the insn scheduling.  To make the
   algorithm practical we use dynamic programming.  Each decision
   (about inserting nops and implicitly about previous decisions) is
   described by the structure bundle_state (see above).  If we generate
   the same bundle state (the key is the automaton state after issuing
   the insn and nops for it), we reuse the already generated one.  As a
   consequence we reject some decisions which cannot improve the
   solution and reduce the memory needed by the algorithm.

   When we reach the end of an EBB (extended basic block), we choose
   the best insn sequence and then, moving back through the EBB, insert
   templates for the best alternative.  The templates are taken from
   querying the automaton state for each insn in the chosen bundle
   states.

   So the algorithm makes two (forward and backward) passes through the
   EBB.  There is an additional forward pass through the EBB for the
   Itanium1 processor.  This pass inserts more nops to make the
   dependency between a producer insn and MMMUL/MMSHF at least 4 cycles
   long.  */
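
/* Illustrative sketch (not part of the original sources): the forward
   pass described above is a dynamic programming over pairs of
   (insn index, automaton state).  The toy model below uses a made-up
   three-state automaton (`toy_issue') in place of the real DFA: the
   state is the number of slots already filled in the current bundle,
   "odd" insn codes stand in for insns that only fit the last slot, and
   a nop fits anywhere.  For every reachable state after insn i-1 it
   tries issuing 0, 1 or 2 nops followed by insn i and keeps only the
   cheapest way of reaching each resulting state, which is the role
   insert_bundle_state plays with the real DFA state.  All names here
   are hypothetical.  */
#if 0
#include <string.h>

#define TOY_NOP (-1)
#define TOY_NUM_STATES 3

/* Hypothetical deterministic transition: return the new state, or -1
   if INSN cannot be issued from STATE.  */
static int
toy_issue (int state, int insn)
{
  if (insn != TOY_NOP && (insn & 1) && state != TOY_NUM_STATES - 1)
    return -1;
  return (state + 1) % TOY_NUM_STATES;
}

/* Return the minimal number of nops needed to issue INSNS[0..N-1], or
   -1 if it cannot be done with at most two nops before each insn.  */
static int
toy_min_nops (const int *insns, int n)
{
  int best[TOY_NUM_STATES], next[TOY_NUM_STATES];
  int i, s, nops, answer;

  for (s = 0; s < TOY_NUM_STATES; s++)
    best[s] = (s == 0 ? 0 : -1);

  for (i = 0; i < n; i++)
    {
      for (s = 0; s < TOY_NUM_STATES; s++)
	next[s] = -1;
      for (s = 0; s < TOY_NUM_STATES; s++)
	if (best[s] >= 0)
	  for (nops = 0; nops <= 2; nops++)
	    {
	      int t = s, k;

	      for (k = 0; k < nops && t >= 0; k++)
		t = toy_issue (t, TOY_NOP);
	      if (t >= 0)
		t = toy_issue (t, insns[i]);
	      /* Keep only the cheapest way of reaching state T.  */
	      if (t >= 0 && (next[t] < 0 || best[s] + nops < next[t]))
		next[t] = best[s] + nops;
	    }
      memcpy (best, next, sizeof best);
    }

  answer = -1;
  for (s = 0; s < TOY_NUM_STATES; s++)
    if (best[s] >= 0 && (answer < 0 || best[s] < answer))
      answer = best[s];
  return answer;
}
#endif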
7097 bundling (FILE *dump
, int verbose
, rtx prev_head_insn
, rtx tail
)
7099 struct bundle_state
*curr_state
, *next_state
, *best_state
;
7100 rtx insn
, next_insn
;
7102 int i
, bundle_end_p
, only_bundle_end_p
, asm_p
;
7103 int pos
= 0, max_pos
, template0
, template1
;
7106 enum attr_type type
;
7109 /* Count insns in the EBB. */
7110 for (insn
= NEXT_INSN (prev_head_insn
);
7111 insn
&& insn
!= tail
;
7112 insn
= NEXT_INSN (insn
))
7118 dfa_clean_insn_cache ();
7119 initiate_bundle_state_table ();
7120 index_to_bundle_states
= xmalloc ((insn_num
+ 2)
7121 * sizeof (struct bundle_state
*));
7122 /* First (forward) pass -- generation of bundle states. */
7123 curr_state
= get_free_bundle_state ();
7124 curr_state
->insn
= NULL
;
7125 curr_state
->before_nops_num
= 0;
7126 curr_state
->after_nops_num
= 0;
7127 curr_state
->insn_num
= 0;
7128 curr_state
->cost
= 0;
7129 curr_state
->accumulated_insns_num
= 0;
7130 curr_state
->branch_deviation
= 0;
7131 curr_state
->next
= NULL
;
7132 curr_state
->originator
= NULL
;
7133 state_reset (curr_state
->dfa_state
);
7134 index_to_bundle_states
[0] = curr_state
;
7136 /* Shift cycle mark if it is put on insn which could be ignored. */
7137 for (insn
= NEXT_INSN (prev_head_insn
);
7139 insn
= NEXT_INSN (insn
))
7141 && (ia64_safe_itanium_class (insn
) == ITANIUM_CLASS_IGNORE
7142 || GET_CODE (PATTERN (insn
)) == USE
7143 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
7144 && GET_MODE (insn
) == TImode
)
7146 PUT_MODE (insn
, VOIDmode
);
7147 for (next_insn
= NEXT_INSN (insn
);
7149 next_insn
= NEXT_INSN (next_insn
))
7150 if (INSN_P (next_insn
)
7151 && ia64_safe_itanium_class (next_insn
) != ITANIUM_CLASS_IGNORE
7152 && GET_CODE (PATTERN (next_insn
)) != USE
7153 && GET_CODE (PATTERN (next_insn
)) != CLOBBER
)
7155 PUT_MODE (next_insn
, TImode
);
  /* Forward pass: generation of bundle states.  */
7160 for (insn
= get_next_important_insn (NEXT_INSN (prev_head_insn
), tail
);
7164 gcc_assert (INSN_P (insn
)
7165 && ia64_safe_itanium_class (insn
) != ITANIUM_CLASS_IGNORE
7166 && GET_CODE (PATTERN (insn
)) != USE
7167 && GET_CODE (PATTERN (insn
)) != CLOBBER
);
7168 type
= ia64_safe_type (insn
);
7169 next_insn
= get_next_important_insn (NEXT_INSN (insn
), tail
);
7171 index_to_bundle_states
[insn_num
] = NULL
;
7172 for (curr_state
= index_to_bundle_states
[insn_num
- 1];
7174 curr_state
= next_state
)
7176 pos
= curr_state
->accumulated_insns_num
% 3;
7177 next_state
= curr_state
->next
;
7178 /* We must fill up the current bundle in order to start a
7179 subsequent asm insn in a new bundle. Asm insn is always
7180 placed in a separate bundle. */
7182 = (next_insn
!= NULL_RTX
7183 && INSN_CODE (insn
) == CODE_FOR_insn_group_barrier
7184 && ia64_safe_type (next_insn
) == TYPE_UNKNOWN
);
7185 /* We may fill up the current bundle if it is the cycle end
7186 without a group barrier. */
7188 = (only_bundle_end_p
|| next_insn
== NULL_RTX
7189 || (GET_MODE (next_insn
) == TImode
7190 && INSN_CODE (insn
) != CODE_FOR_insn_group_barrier
));
7191 if (type
== TYPE_F
|| type
== TYPE_B
|| type
== TYPE_L
7193 /* We need to insert 2 nops for cases like M_MII. To
7194 guarantee issuing all insns on the same cycle for
7195 Itanium 1, we need to issue 2 nops after the first M
7196 insn (MnnMII where n is a nop insn). */
7197 || ((type
== TYPE_M
|| type
== TYPE_A
)
7198 && ia64_tune
== PROCESSOR_ITANIUM
7199 && !bundle_end_p
&& pos
== 1))
7200 issue_nops_and_insn (curr_state
, 2, insn
, bundle_end_p
,
7202 issue_nops_and_insn (curr_state
, 1, insn
, bundle_end_p
,
7204 issue_nops_and_insn (curr_state
, 0, insn
, bundle_end_p
,
7207 gcc_assert (index_to_bundle_states
[insn_num
]);
7208 for (curr_state
= index_to_bundle_states
[insn_num
];
7210 curr_state
= curr_state
->next
)
7211 if (verbose
>= 2 && dump
)
7213 /* This structure is taken from generated code of the
7214 pipeline hazard recognizer (see file insn-attrtab.c).
7215 Please don't forget to change the structure if a new
7216 automaton is added to .md file. */
7219 unsigned short one_automaton_state
;
7220 unsigned short oneb_automaton_state
;
7221 unsigned short two_automaton_state
;
7222 unsigned short twob_automaton_state
;
7227 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7228 curr_state
->unique_num
,
7229 (curr_state
->originator
== NULL
7230 ? -1 : curr_state
->originator
->unique_num
),
7232 curr_state
->before_nops_num
, curr_state
->after_nops_num
,
7233 curr_state
->accumulated_insns_num
, curr_state
->branch_deviation
,
7234 (ia64_tune
== PROCESSOR_ITANIUM
7235 ? ((struct DFA_chip
*) curr_state
->dfa_state
)->oneb_automaton_state
7236 : ((struct DFA_chip
*) curr_state
->dfa_state
)->twob_automaton_state
),
7241 /* We should find a solution because the 2nd insn scheduling has
7243 gcc_assert (index_to_bundle_states
[insn_num
]);
7244 /* Find a state corresponding to the best insn sequence. */
7246 for (curr_state
= index_to_bundle_states
[insn_num
];
7248 curr_state
= curr_state
->next
)
7249 /* We are just looking at the states with fully filled up last
7250 bundle. The first we prefer insn sequences with minimal cost
7251 then with minimal inserted nops and finally with branch insns
7252 placed in the 3rd slots. */
7253 if (curr_state
->accumulated_insns_num
% 3 == 0
7254 && (best_state
== NULL
|| best_state
->cost
> curr_state
->cost
7255 || (best_state
->cost
== curr_state
->cost
7256 && (curr_state
->accumulated_insns_num
7257 < best_state
->accumulated_insns_num
7258 || (curr_state
->accumulated_insns_num
7259 == best_state
->accumulated_insns_num
7260 && curr_state
->branch_deviation
7261 < best_state
->branch_deviation
)))))
7262 best_state
= curr_state
;
7263 /* Second (backward) pass: adding nops and templates. */
7264 insn_num
= best_state
->before_nops_num
;
7265 template0
= template1
= -1;
7266 for (curr_state
= best_state
;
7267 curr_state
->originator
!= NULL
;
7268 curr_state
= curr_state
->originator
)
7270 insn
= curr_state
->insn
;
7271 asm_p
= (GET_CODE (PATTERN (insn
)) == ASM_INPUT
7272 || asm_noperands (PATTERN (insn
)) >= 0);
7274 if (verbose
>= 2 && dump
)
7278 unsigned short one_automaton_state
;
7279 unsigned short oneb_automaton_state
;
7280 unsigned short two_automaton_state
;
7281 unsigned short twob_automaton_state
;
7286 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7287 curr_state
->unique_num
,
7288 (curr_state
->originator
== NULL
7289 ? -1 : curr_state
->originator
->unique_num
),
7291 curr_state
->before_nops_num
, curr_state
->after_nops_num
,
7292 curr_state
->accumulated_insns_num
, curr_state
->branch_deviation
,
7293 (ia64_tune
== PROCESSOR_ITANIUM
7294 ? ((struct DFA_chip
*) curr_state
->dfa_state
)->oneb_automaton_state
7295 : ((struct DFA_chip
*) curr_state
->dfa_state
)->twob_automaton_state
),
7298 /* Find the position in the current bundle window. The window can
7299 contain at most two bundles. Two bundle window means that
7300 the processor will make two bundle rotation. */
7301 max_pos
= get_max_pos (curr_state
->dfa_state
);
7303 /* The following (negative template number) means that the
7304 processor did one bundle rotation. */
7305 || (max_pos
== 3 && template0
< 0))
7307 /* We are at the end of the window -- find template(s) for
7311 template0
= get_template (curr_state
->dfa_state
, 3);
7314 template1
= get_template (curr_state
->dfa_state
, 3);
7315 template0
= get_template (curr_state
->dfa_state
, 6);
7318 if (max_pos
> 3 && template1
< 0)
7319 /* It may happen when we have the stop inside a bundle. */
7321 gcc_assert (pos
<= 3);
7322 template1
= get_template (curr_state
->dfa_state
, 3);
7326 /* Emit nops after the current insn. */
7327 for (i
= 0; i
< curr_state
->after_nops_num
; i
++)
7330 emit_insn_after (nop
, insn
);
7332 gcc_assert (pos
>= 0);
7335 /* We are at the start of a bundle: emit the template
7336 (it should be defined). */
7337 gcc_assert (template0
>= 0);
7338 ia64_add_bundle_selector_before (template0
, nop
);
7339 /* If we have two bundle window, we make one bundle
7340 rotation. Otherwise template0 will be undefined
7341 (negative value). */
7342 template0
= template1
;
7346 /* Move the position backward in the window. Group barrier has
7347 no slot. Asm insn takes all bundle. */
7348 if (INSN_CODE (insn
) != CODE_FOR_insn_group_barrier
7349 && GET_CODE (PATTERN (insn
)) != ASM_INPUT
7350 && asm_noperands (PATTERN (insn
)) < 0)
7352 /* Long insn takes 2 slots. */
7353 if (ia64_safe_type (insn
) == TYPE_L
)
7355 gcc_assert (pos
>= 0);
7357 && INSN_CODE (insn
) != CODE_FOR_insn_group_barrier
7358 && GET_CODE (PATTERN (insn
)) != ASM_INPUT
7359 && asm_noperands (PATTERN (insn
)) < 0)
7361 /* The current insn is at the bundle start: emit the
7363 gcc_assert (template0
>= 0);
7364 ia64_add_bundle_selector_before (template0
, insn
);
7365 b
= PREV_INSN (insn
);
7367 /* See comment above in analogous place for emitting nops
7369 template0
= template1
;
7372 /* Emit nops after the current insn. */
7373 for (i
= 0; i
< curr_state
->before_nops_num
; i
++)
7376 ia64_emit_insn_before (nop
, insn
);
7377 nop
= PREV_INSN (insn
);
7380 gcc_assert (pos
>= 0);
7383 /* See comment above in analogous place for emitting nops
7385 gcc_assert (template0
>= 0);
7386 ia64_add_bundle_selector_before (template0
, insn
);
7387 b
= PREV_INSN (insn
);
7389 template0
= template1
;
7394 if (ia64_tune
== PROCESSOR_ITANIUM
)
    /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
       Itanium1 has a strange design: if the distance between an insn
       and a dependent MM-insn is less than 4 cycles, we get an
       additional 6-cycle stall.  So we make the distance equal to 4
       cycles if it is less.  */
7400 for (insn
= get_next_important_insn (NEXT_INSN (prev_head_insn
), tail
);
7404 gcc_assert (INSN_P (insn
)
7405 && ia64_safe_itanium_class (insn
) != ITANIUM_CLASS_IGNORE
7406 && GET_CODE (PATTERN (insn
)) != USE
7407 && GET_CODE (PATTERN (insn
)) != CLOBBER
);
7408 next_insn
= get_next_important_insn (NEXT_INSN (insn
), tail
);
7409 if (INSN_UID (insn
) < clocks_length
&& add_cycles
[INSN_UID (insn
)])
7410 /* We found a MM-insn which needs additional cycles. */
7416 /* Now we are searching for a template of the bundle in
7417 which the MM-insn is placed and the position of the
7418 insn in the bundle (0, 1, 2). Also we are searching
7419 for that there is a stop before the insn. */
7420 last
= prev_active_insn (insn
);
7421 pred_stop_p
= recog_memoized (last
) == CODE_FOR_insn_group_barrier
;
7423 last
= prev_active_insn (last
);
7425 for (;; last
= prev_active_insn (last
))
7426 if (recog_memoized (last
) == CODE_FOR_bundle_selector
)
7428 template0
= XINT (XVECEXP (PATTERN (last
), 0, 0), 0);
7430 /* The insn is in MLX bundle. Change the template
7431 onto MFI because we will add nops before the
7432 insn. It simplifies subsequent code a lot. */
7434 = gen_bundle_selector (const2_rtx
); /* -> MFI */
7437 else if (recog_memoized (last
) != CODE_FOR_insn_group_barrier
7438 && (ia64_safe_itanium_class (last
)
7439 != ITANIUM_CLASS_IGNORE
))
7441 /* Some check of correctness: the stop is not at the
7442 bundle start, there are no more 3 insns in the bundle,
7443 and the MM-insn is not at the start of bundle with
7445 gcc_assert ((!pred_stop_p
|| n
)
7447 && (template0
!= 9 || !n
));
7448 /* Put nops after the insn in the bundle. */
7449 for (j
= 3 - n
; j
> 0; j
--)
7450 ia64_emit_insn_before (gen_nop (), insn
);
7451 /* It takes into account that we will add more N nops
7452 before the insn lately -- please see code below. */
7453 add_cycles
[INSN_UID (insn
)]--;
7454 if (!pred_stop_p
|| add_cycles
[INSN_UID (insn
)])
7455 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7458 add_cycles
[INSN_UID (insn
)]--;
7459 for (i
= add_cycles
[INSN_UID (insn
)]; i
> 0; i
--)
7461 /* Insert "MII;" template. */
7462 ia64_emit_insn_before (gen_bundle_selector (const0_rtx
),
7464 ia64_emit_insn_before (gen_nop (), insn
);
7465 ia64_emit_insn_before (gen_nop (), insn
);
7468 /* To decrease code size, we use "MI;I;"
7470 ia64_emit_insn_before
7471 (gen_insn_group_barrier (GEN_INT (3)), insn
);
7474 ia64_emit_insn_before (gen_nop (), insn
);
7475 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7478 /* Put the MM-insn in the same slot of a bundle with the
7479 same template as the original one. */
7480 ia64_add_bundle_selector_before (template0
, insn
);
7481 /* To put the insn in the same slot, add necessary number
7483 for (j
= n
; j
> 0; j
--)
7484 ia64_emit_insn_before (gen_nop (), insn
);
7485 /* Put the stop if the original bundle had it. */
7487 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7491 free (index_to_bundle_states
);
7492 finish_bundle_state_table ();
7494 dfa_clean_insn_cache ();
/* The following function is called at the end of scheduling a BB or
   EBB.  After reload, it inserts stop bits and does insn bundling.  */

static void
ia64_sched_finish (FILE *dump, int sched_verbose)
{
  if (sched_verbose)
    fprintf (dump, "// Finishing schedule.\n");
  if (!reload_completed)
    return;
  if (reload_completed)
    {
      final_emit_insn_group_barriers (dump);
      bundling (dump, sched_verbose, current_sched_info->prev_head,
		current_sched_info->next_tail);
      if (sched_verbose && dump)
	fprintf (dump, "// finishing %d-%d\n",
		 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
		 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
    }
}
7521 /* The following function inserts stop bits in scheduled BB or EBB. */
7524 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED
)
7527 int need_barrier_p
= 0;
7528 rtx prev_insn
= NULL_RTX
;
7530 init_insn_group_barriers ();
7532 for (insn
= NEXT_INSN (current_sched_info
->prev_head
);
7533 insn
!= current_sched_info
->next_tail
;
7534 insn
= NEXT_INSN (insn
))
7536 if (GET_CODE (insn
) == BARRIER
)
7538 rtx last
= prev_active_insn (insn
);
7542 if (GET_CODE (last
) == JUMP_INSN
7543 && GET_CODE (PATTERN (last
)) == ADDR_DIFF_VEC
)
7544 last
= prev_active_insn (last
);
7545 if (recog_memoized (last
) != CODE_FOR_insn_group_barrier
)
7546 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last
);
7548 init_insn_group_barriers ();
7550 prev_insn
= NULL_RTX
;
7552 else if (INSN_P (insn
))
7554 if (recog_memoized (insn
) == CODE_FOR_insn_group_barrier
)
7556 init_insn_group_barriers ();
7558 prev_insn
= NULL_RTX
;
7560 else if (need_barrier_p
|| group_barrier_needed (insn
))
7562 if (TARGET_EARLY_STOP_BITS
)
7567 last
!= current_sched_info
->prev_head
;
7568 last
= PREV_INSN (last
))
7569 if (INSN_P (last
) && GET_MODE (last
) == TImode
7570 && stops_p
[INSN_UID (last
)])
7572 if (last
== current_sched_info
->prev_head
)
7574 last
= prev_active_insn (last
);
7576 && recog_memoized (last
) != CODE_FOR_insn_group_barrier
)
7577 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
7579 init_insn_group_barriers ();
7580 for (last
= NEXT_INSN (last
);
7582 last
= NEXT_INSN (last
))
7584 group_barrier_needed (last
);
7588 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7590 init_insn_group_barriers ();
7592 group_barrier_needed (insn
);
7593 prev_insn
= NULL_RTX
;
7595 else if (recog_memoized (insn
) >= 0)
7597 need_barrier_p
= (GET_CODE (insn
) == CALL_INSN
7598 || GET_CODE (PATTERN (insn
)) == ASM_INPUT
7599 || asm_noperands (PATTERN (insn
)) >= 0);
/* If the following function returns TRUE, we will use the DFA
   insn scheduler.  */

static int
ia64_first_cycle_multipass_dfa_lookahead (void)
{
  return (reload_completed ? 6 : 4);
}
/* The following function initiates the variable `dfa_pre_cycle_insn'.  */

static void
ia64_init_dfa_pre_cycle_insn (void)
{
  if (temp_dfa_state == NULL)
    {
      dfa_state_size = state_size ();
      temp_dfa_state = xmalloc (dfa_state_size);
      prev_cycle_state = xmalloc (dfa_state_size);
    }
  dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
  PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
  recog_memoized (dfa_pre_cycle_insn);
  dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
  PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
  recog_memoized (dfa_stop_insn);
}
/* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
   used by the DFA insn scheduler.  */

static rtx
ia64_dfa_pre_cycle_insn (void)
{
  return dfa_pre_cycle_insn;
}
/* The following function returns TRUE if PRODUCER (of type ilog or
   ld) produces an address for CONSUMER (of type st or stf).  */

int
ia64_st_address_bypass_p (rtx producer, rtx consumer)
{
  rtx dest, reg, mem;

  gcc_assert (producer && consumer);
  dest = ia64_single_set (producer);
  gcc_assert (dest);
  reg = SET_DEST (dest);
  gcc_assert (reg);
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  gcc_assert (GET_CODE (reg) == REG);

  dest = ia64_single_set (consumer);
  gcc_assert (dest);
  mem = SET_DEST (dest);
  gcc_assert (mem && GET_CODE (mem) == MEM);
  return reg_mentioned_p (reg, mem);
}
/* The following function returns TRUE if PRODUCER (of type ilog or
   ld) produces an address for CONSUMER (of type ld or fld).  */

int
ia64_ld_address_bypass_p (rtx producer, rtx consumer)
{
  rtx dest, src, reg, mem;

  gcc_assert (producer && consumer);
  dest = ia64_single_set (producer);
  gcc_assert (dest);
  reg = SET_DEST (dest);
  gcc_assert (reg);
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  gcc_assert (GET_CODE (reg) == REG);

  src = ia64_single_set (consumer);
  gcc_assert (src);
  mem = SET_SRC (src);
  gcc_assert (mem);

  if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
    mem = XVECEXP (mem, 0, 0);
  while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  /* Note that LO_SUM is used for GOT loads.  */
  gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);

  return reg_mentioned_p (reg, mem);
}
7699 /* The following function returns TRUE if INSN produces address for a
7700 load/store insn. We will place such insns into M slot because it
7701 decreases its latency time. */
7704 ia64_produce_address_p (rtx insn
)
7710 /* Emit pseudo-ops for the assembler to describe predicate relations.
7711 At present this assumes that we only consider predicate pairs to
7712 be mutex, and that the assembler can deduce proper values from
7713 straight-line code. */
7716 emit_predicate_relation_info (void)
7720 FOR_EACH_BB_REVERSE (bb
)
7723 rtx head
= BB_HEAD (bb
);
7725 /* We only need such notes at code labels. */
7726 if (GET_CODE (head
) != CODE_LABEL
)
7728 if (GET_CODE (NEXT_INSN (head
)) == NOTE
7729 && NOTE_LINE_NUMBER (NEXT_INSN (head
)) == NOTE_INSN_BASIC_BLOCK
)
7730 head
= NEXT_INSN (head
);
7732 /* Skip p0, which may be thought to be live due to (reg:DI p0)
7733 grabbing the entire block of predicate registers. */
7734 for (r
= PR_REG (2); r
< PR_REG (64); r
+= 2)
7735 if (REGNO_REG_SET_P (bb
->il
.rtl
->global_live_at_start
, r
))
7737 rtx p
= gen_rtx_REG (BImode
, r
);
7738 rtx n
= emit_insn_after (gen_pred_rel_mutex (p
), head
);
7739 if (head
== BB_END (bb
))
7745 /* Look for conditional calls that do not return, and protect predicate
7746 relations around them. Otherwise the assembler will assume the call
7747 returns, and complain about uses of call-clobbered predicates after
7749 FOR_EACH_BB_REVERSE (bb
)
7751 rtx insn
= BB_HEAD (bb
);
7755 if (GET_CODE (insn
) == CALL_INSN
7756 && GET_CODE (PATTERN (insn
)) == COND_EXEC
7757 && find_reg_note (insn
, REG_NORETURN
, NULL_RTX
))
7759 rtx b
= emit_insn_before (gen_safe_across_calls_all (), insn
);
7760 rtx a
= emit_insn_after (gen_safe_across_calls_normal (), insn
);
7761 if (BB_HEAD (bb
) == insn
)
7763 if (BB_END (bb
) == insn
)
7767 if (insn
== BB_END (bb
))
7769 insn
= NEXT_INSN (insn
);
7774 /* Perform machine dependent operations on the rtl chain INSNS. */
7779 /* We are freeing block_for_insn in the toplev to keep compatibility
7780 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7781 compute_bb_for_insn ();
7783 /* If optimizing, we'll have split before scheduling. */
7785 split_all_insns (0);
7787 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7788 non-optimizing bootstrap. */
7789 update_life_info (NULL
, UPDATE_LIFE_GLOBAL_RM_NOTES
, PROP_DEATH_NOTES
);
7791 if (optimize
&& ia64_flag_schedule_insns2
)
7793 timevar_push (TV_SCHED2
);
7794 ia64_final_schedule
= 1;
7796 initiate_bundle_states ();
7797 ia64_nop
= make_insn_raw (gen_nop ());
7798 PREV_INSN (ia64_nop
) = NEXT_INSN (ia64_nop
) = NULL_RTX
;
7799 recog_memoized (ia64_nop
);
7800 clocks_length
= get_max_uid () + 1;
7801 stops_p
= xcalloc (1, clocks_length
);
7802 if (ia64_tune
== PROCESSOR_ITANIUM
)
7804 clocks
= xcalloc (clocks_length
, sizeof (int));
7805 add_cycles
= xcalloc (clocks_length
, sizeof (int));
7807 if (ia64_tune
== PROCESSOR_ITANIUM2
)
7809 pos_1
= get_cpu_unit_code ("2_1");
7810 pos_2
= get_cpu_unit_code ("2_2");
7811 pos_3
= get_cpu_unit_code ("2_3");
7812 pos_4
= get_cpu_unit_code ("2_4");
7813 pos_5
= get_cpu_unit_code ("2_5");
7814 pos_6
= get_cpu_unit_code ("2_6");
7815 _0mii_
= get_cpu_unit_code ("2b_0mii.");
7816 _0mmi_
= get_cpu_unit_code ("2b_0mmi.");
7817 _0mfi_
= get_cpu_unit_code ("2b_0mfi.");
7818 _0mmf_
= get_cpu_unit_code ("2b_0mmf.");
7819 _0bbb_
= get_cpu_unit_code ("2b_0bbb.");
7820 _0mbb_
= get_cpu_unit_code ("2b_0mbb.");
7821 _0mib_
= get_cpu_unit_code ("2b_0mib.");
7822 _0mmb_
= get_cpu_unit_code ("2b_0mmb.");
7823 _0mfb_
= get_cpu_unit_code ("2b_0mfb.");
7824 _0mlx_
= get_cpu_unit_code ("2b_0mlx.");
7825 _1mii_
= get_cpu_unit_code ("2b_1mii.");
7826 _1mmi_
= get_cpu_unit_code ("2b_1mmi.");
7827 _1mfi_
= get_cpu_unit_code ("2b_1mfi.");
7828 _1mmf_
= get_cpu_unit_code ("2b_1mmf.");
7829 _1bbb_
= get_cpu_unit_code ("2b_1bbb.");
7830 _1mbb_
= get_cpu_unit_code ("2b_1mbb.");
7831 _1mib_
= get_cpu_unit_code ("2b_1mib.");
7832 _1mmb_
= get_cpu_unit_code ("2b_1mmb.");
7833 _1mfb_
= get_cpu_unit_code ("2b_1mfb.");
7834 _1mlx_
= get_cpu_unit_code ("2b_1mlx.");
7838 pos_1
= get_cpu_unit_code ("1_1");
7839 pos_2
= get_cpu_unit_code ("1_2");
7840 pos_3
= get_cpu_unit_code ("1_3");
7841 pos_4
= get_cpu_unit_code ("1_4");
7842 pos_5
= get_cpu_unit_code ("1_5");
7843 pos_6
= get_cpu_unit_code ("1_6");
7844 _0mii_
= get_cpu_unit_code ("1b_0mii.");
7845 _0mmi_
= get_cpu_unit_code ("1b_0mmi.");
7846 _0mfi_
= get_cpu_unit_code ("1b_0mfi.");
7847 _0mmf_
= get_cpu_unit_code ("1b_0mmf.");
7848 _0bbb_
= get_cpu_unit_code ("1b_0bbb.");
7849 _0mbb_
= get_cpu_unit_code ("1b_0mbb.");
7850 _0mib_
= get_cpu_unit_code ("1b_0mib.");
7851 _0mmb_
= get_cpu_unit_code ("1b_0mmb.");
7852 _0mfb_
= get_cpu_unit_code ("1b_0mfb.");
7853 _0mlx_
= get_cpu_unit_code ("1b_0mlx.");
7854 _1mii_
= get_cpu_unit_code ("1b_1mii.");
7855 _1mmi_
= get_cpu_unit_code ("1b_1mmi.");
7856 _1mfi_
= get_cpu_unit_code ("1b_1mfi.");
7857 _1mmf_
= get_cpu_unit_code ("1b_1mmf.");
7858 _1bbb_
= get_cpu_unit_code ("1b_1bbb.");
7859 _1mbb_
= get_cpu_unit_code ("1b_1mbb.");
7860 _1mib_
= get_cpu_unit_code ("1b_1mib.");
7861 _1mmb_
= get_cpu_unit_code ("1b_1mmb.");
7862 _1mfb_
= get_cpu_unit_code ("1b_1mfb.");
7863 _1mlx_
= get_cpu_unit_code ("1b_1mlx.");
7865 schedule_ebbs (dump_file
);
7866 finish_bundle_states ();
7867 if (ia64_tune
== PROCESSOR_ITANIUM
)
7873 emit_insn_group_barriers (dump_file
);
7875 ia64_final_schedule
= 0;
7876 timevar_pop (TV_SCHED2
);
7879 emit_all_insn_group_barriers (dump_file
);
7881 /* A call must not be the last instruction in a function, so that the
7882 return address is still within the function, so that unwinding works
7883 properly. Note that IA-64 differs from dwarf2 on this point. */
7884 if (flag_unwind_tables
|| (flag_exceptions
&& !USING_SJLJ_EXCEPTIONS
))
7889 insn
= get_last_insn ();
7890 if (! INSN_P (insn
))
7891 insn
= prev_active_insn (insn
);
7892 /* Skip over insns that expand to nothing. */
7893 while (GET_CODE (insn
) == INSN
&& get_attr_empty (insn
) == EMPTY_YES
)
7895 if (GET_CODE (PATTERN (insn
)) == UNSPEC_VOLATILE
7896 && XINT (PATTERN (insn
), 1) == UNSPECV_INSN_GROUP_BARRIER
)
7898 insn
= prev_active_insn (insn
);
7900 if (GET_CODE (insn
) == CALL_INSN
)
7903 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7904 emit_insn (gen_break_f ());
7905 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7909 emit_predicate_relation_info ();
7911 if (ia64_flag_var_tracking
)
7913 timevar_push (TV_VAR_TRACKING
);
7914 variable_tracking_main ();
7915 timevar_pop (TV_VAR_TRACKING
);
7919 /* Return true if REGNO is used by the epilogue. */
7922 ia64_epilogue_uses (int regno
)
7927 /* With a call to a function in another module, we will write a new
7928 value to "gp". After returning from such a call, we need to make
7929 sure the function restores the original gp-value, even if the
7930 function itself does not use the gp anymore. */
7931 return !(TARGET_AUTO_PIC
|| TARGET_NO_PIC
);
7933 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7934 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7935 /* For functions defined with the syscall_linkage attribute, all
7936 input registers are marked as live at all function exits. This
7937 prevents the register allocator from using the input registers,
7938 which in turn makes it possible to restart a system call after
7939 an interrupt without having to save/restore the input registers.
7940 This also prevents kernel data from leaking to application code. */
7941 return lookup_attribute ("syscall_linkage",
7942 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl
))) != NULL
;
7945 /* Conditional return patterns can't represent the use of `b0' as
7946 the return address, so we force the value live this way. */
7950 /* Likewise for ar.pfs, which is used by br.ret. */
7958 /* Return true if REGNO is used by the frame unwinder. */
7961 ia64_eh_uses (int regno
)
7963 if (! reload_completed
)
7966 if (current_frame_info
.reg_save_b0
7967 && regno
== current_frame_info
.reg_save_b0
)
7969 if (current_frame_info
.reg_save_pr
7970 && regno
== current_frame_info
.reg_save_pr
)
7972 if (current_frame_info
.reg_save_ar_pfs
7973 && regno
== current_frame_info
.reg_save_ar_pfs
)
7975 if (current_frame_info
.reg_save_ar_unat
7976 && regno
== current_frame_info
.reg_save_ar_unat
)
7978 if (current_frame_info
.reg_save_ar_lc
7979 && regno
== current_frame_info
.reg_save_ar_lc
)
/* Return true if this goes in small data/bss.  */

/* ??? We could also support our own long data here.  Generating
   movl/add/ld8 instead of addl,ld8/ld8 makes the code bigger, but should
   make the code faster because there is one less load.  This also
   includes incomplete types which can't go in sdata/sbss.  */

static bool
ia64_in_small_data_p (tree exp)
{
  if (TARGET_NO_SDATA)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never small data.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));

      if (strcmp (section, ".sdata") == 0
	  || strncmp (section, ".sdata.", 7) == 0
	  || strncmp (section, ".gnu.linkonce.s.", 16) == 0
	  || strcmp (section, ".sbss") == 0
	  || strncmp (section, ".sbss.", 6) == 0
	  || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && size <= ia64_section_threshold)
	return true;
    }

  return false;
}
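
/* Illustrative sketch (not part of the original sources): the predicate
   above accepts a variable either because it was placed by name in one
   of the small-data sections, or because its size is known and does not
   exceed the small-data threshold (ia64_section_threshold).  The
   stand-alone helper below shows just the section-name test on a plain
   string; the exact-match and prefix lists mirror the strcmp/strncmp
   chain above, and the helper name is invented for illustration.  */
#if 0
#include <string.h>

static int
small_section_name_p (const char *section)
{
  static const char *const exact[] = { ".sdata", ".sbss" };
  static const char *const prefixes[] =
    { ".sdata.", ".sbss.", ".gnu.linkonce.s.", ".gnu.linkonce.sb." };
  size_t i;

  for (i = 0; i < sizeof exact / sizeof exact[0]; i++)
    if (strcmp (section, exact[i]) == 0)
      return 1;
  for (i = 0; i < sizeof prefixes / sizeof prefixes[0]; i++)
    if (strncmp (section, prefixes[i], strlen (prefixes[i])) == 0)
      return 1;
  return 0;
}
#endif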
8031 /* Output assembly directives for prologue regions. */
8033 /* The current basic block number. */
8035 static bool last_block
;
8037 /* True if we need a copy_state command at the start of the next block. */
8039 static bool need_copy_state
;
8041 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
8042 # define MAX_ARTIFICIAL_LABEL_BYTES 30
8045 /* Emit a debugging label after a call-frame-related insn. We'd
8046 rather output the label right away, but we'd have to output it
8047 after, not before, the instruction, and the instruction has not
8048 been output yet. So we emit the label after the insn, delete it to
8049 avoid introducing basic blocks, and mark it as preserved, such that
8050 it is still output, given that it is referenced in debug info. */
8053 ia64_emit_deleted_label_after_insn (rtx insn
)
8055 char label
[MAX_ARTIFICIAL_LABEL_BYTES
];
8056 rtx lb
= gen_label_rtx ();
8057 rtx label_insn
= emit_label_after (lb
, insn
);
8059 LABEL_PRESERVE_P (lb
) = 1;
8061 delete_insn (label_insn
);
8063 ASM_GENERATE_INTERNAL_LABEL (label
, "L", CODE_LABEL_NUMBER (label_insn
));
8065 return xstrdup (label
);
8068 /* Define the CFA after INSN with the steady-state definition. */
8071 ia64_dwarf2out_def_steady_cfa (rtx insn
)
8073 rtx fp
= frame_pointer_needed
8074 ? hard_frame_pointer_rtx
8075 : stack_pointer_rtx
;
8078 (ia64_emit_deleted_label_after_insn (insn
),
8080 ia64_initial_elimination_offset
8081 (REGNO (arg_pointer_rtx
), REGNO (fp
))
8082 + ARG_POINTER_CFA_OFFSET (current_function_decl
));
8085 /* The generic dwarf2 frame debug info generator does not define a
8086 separate region for the very end of the epilogue, so refrain from
8087 doing so in the IA64-specific code as well. */
8089 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
8091 /* The function emits unwind directives for the start of an epilogue. */
8094 process_epilogue (FILE *asm_out_file
, rtx insn
, bool unwind
, bool frame
)
8096 /* If this isn't the last block of the function, then we need to label the
8097 current state, and copy it back in at the start of the next block. */
8102 fprintf (asm_out_file
, "\t.label_state %d\n",
8103 ++cfun
->machine
->state_num
);
8104 need_copy_state
= true;
8108 fprintf (asm_out_file
, "\t.restore sp\n");
8109 if (IA64_CHANGE_CFA_IN_EPILOGUE
&& frame
)
8110 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn
),
8111 STACK_POINTER_REGNUM
, INCOMING_FRAME_SP_OFFSET
);
8114 /* This function processes a SET pattern looking for specific patterns
8115 which result in emitting an assembly directive required for unwinding. */
8118 process_set (FILE *asm_out_file
, rtx pat
, rtx insn
, bool unwind
, bool frame
)
8120 rtx src
= SET_SRC (pat
);
8121 rtx dest
= SET_DEST (pat
);
8122 int src_regno
, dest_regno
;
8124 /* Look for the ALLOC insn. */
8125 if (GET_CODE (src
) == UNSPEC_VOLATILE
8126 && XINT (src
, 1) == UNSPECV_ALLOC
8127 && GET_CODE (dest
) == REG
)
8129 dest_regno
= REGNO (dest
);
8131 /* If this is the final destination for ar.pfs, then this must
8132 be the alloc in the prologue. */
8133 if (dest_regno
== current_frame_info
.reg_save_ar_pfs
)
8136 fprintf (asm_out_file
, "\t.save ar.pfs, r%d\n",
8137 ia64_dbx_register_number (dest_regno
));
8141 /* This must be an alloc before a sibcall. We must drop the
8142 old frame info. The easiest way to drop the old frame
8143 info is to ensure we had a ".restore sp" directive
8144 followed by a new prologue. If the procedure doesn't
8145 have a memory-stack frame, we'll issue a dummy ".restore
8147 if (current_frame_info
.total_size
== 0 && !frame_pointer_needed
)
8148 /* if haven't done process_epilogue() yet, do it now */
8149 process_epilogue (asm_out_file
, insn
, unwind
, frame
);
8151 fprintf (asm_out_file
, "\t.prologue\n");
8156 /* Look for SP = .... */
8157 if (GET_CODE (dest
) == REG
&& REGNO (dest
) == STACK_POINTER_REGNUM
)
8159 if (GET_CODE (src
) == PLUS
)
8161 rtx op0
= XEXP (src
, 0);
8162 rtx op1
= XEXP (src
, 1);
8164 gcc_assert (op0
== dest
&& GET_CODE (op1
) == CONST_INT
);
8166 if (INTVAL (op1
) < 0)
8168 gcc_assert (!frame_pointer_needed
);
8170 fprintf (asm_out_file
, "\t.fframe "HOST_WIDE_INT_PRINT_DEC
"\n",
8173 ia64_dwarf2out_def_steady_cfa (insn
);
8176 process_epilogue (asm_out_file
, insn
, unwind
, frame
);
8180 gcc_assert (GET_CODE (src
) == REG
8181 && REGNO (src
) == HARD_FRAME_POINTER_REGNUM
);
8182 process_epilogue (asm_out_file
, insn
, unwind
, frame
);
8188 /* Register move we need to look at. */
8189 if (GET_CODE (dest
) == REG
&& GET_CODE (src
) == REG
)
8191 src_regno
= REGNO (src
);
8192 dest_regno
= REGNO (dest
);
8197 /* Saving return address pointer. */
8198 gcc_assert (dest_regno
== current_frame_info
.reg_save_b0
);
8200 fprintf (asm_out_file
, "\t.save rp, r%d\n",
8201 ia64_dbx_register_number (dest_regno
));
8205 gcc_assert (dest_regno
== current_frame_info
.reg_save_pr
);
8207 fprintf (asm_out_file
, "\t.save pr, r%d\n",
8208 ia64_dbx_register_number (dest_regno
));
8211 case AR_UNAT_REGNUM
:
8212 gcc_assert (dest_regno
== current_frame_info
.reg_save_ar_unat
);
8214 fprintf (asm_out_file
, "\t.save ar.unat, r%d\n",
8215 ia64_dbx_register_number (dest_regno
));
8219 gcc_assert (dest_regno
== current_frame_info
.reg_save_ar_lc
);
8221 fprintf (asm_out_file
, "\t.save ar.lc, r%d\n",
8222 ia64_dbx_register_number (dest_regno
));
8225 case STACK_POINTER_REGNUM
:
8226 gcc_assert (dest_regno
== HARD_FRAME_POINTER_REGNUM
8227 && frame_pointer_needed
);
8229 fprintf (asm_out_file
, "\t.vframe r%d\n",
8230 ia64_dbx_register_number (dest_regno
));
8232 ia64_dwarf2out_def_steady_cfa (insn
);
8236 /* Everything else should indicate being stored to memory. */
  /* Memory store we need to look at.  */
  if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
    {
      rtx base;
      long off;
      const char *saveop;

      if (GET_CODE (XEXP (dest, 0)) == REG)
        {
          base = XEXP (dest, 0);
          off = 0;
        }
      else
        {
          gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
                      && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
          base = XEXP (XEXP (dest, 0), 0);
          off = INTVAL (XEXP (XEXP (dest, 0), 1));
        }

      if (base == hard_frame_pointer_rtx)
        {
          saveop = ".savepsp";
          off = - off;
        }
      else
        {
          gcc_assert (base == stack_pointer_rtx);
          saveop = ".savesp";
        }

      src_regno = REGNO (src);
      switch (src_regno)
        {
        case BR_REG (0):
          gcc_assert (!current_frame_info.reg_save_b0);
          if (unwind)
            fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
          return 1;

        case PR_REG (0):
          gcc_assert (!current_frame_info.reg_save_pr);
          if (unwind)
            fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
          return 1;

        case AR_LC_REGNUM:
          gcc_assert (!current_frame_info.reg_save_ar_lc);
          if (unwind)
            fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
          return 1;

        case AR_PFS_REGNUM:
          gcc_assert (!current_frame_info.reg_save_ar_pfs);
          if (unwind)
            fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
          return 1;

        case AR_UNAT_REGNUM:
          gcc_assert (!current_frame_info.reg_save_ar_unat);
          if (unwind)
            fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
          return 1;

        case GR_REG (4):
        case GR_REG (5):
        case GR_REG (6):
        case GR_REG (7):
          if (unwind)
            fprintf (asm_out_file, "\t.save.g 0x%x\n",
                     1 << (src_regno - GR_REG (4)));
          return 1;

        case BR_REG (1):
        case BR_REG (2):
        case BR_REG (3):
        case BR_REG (4):
        case BR_REG (5):
          if (unwind)
            fprintf (asm_out_file, "\t.save.b 0x%x\n",
                     1 << (src_regno - BR_REG (1)));
          return 1;

        case FR_REG (2):
        case FR_REG (3):
        case FR_REG (4):
        case FR_REG (5):
          if (unwind)
            fprintf (asm_out_file, "\t.save.f 0x%x\n",
                     1 << (src_regno - FR_REG (2)));
          return 1;

        case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
        case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
        case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
        case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
          if (unwind)
            fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
                     1 << (src_regno - FR_REG (12)));
          return 1;

        default:
          return 0;
        }
    }

  return 0;
}
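/* For illustration: a prologue that copies b0 into a stacked register,
   grows the stack by 32 bytes and spills r4 to memory produces unwind
   directives along the lines of

	.fframe 32
	.save rp, r33
	.save.g 0x1

   (the register numbers and frame size here are only an example); the
   matching .restore is emitted by process_epilogue.  */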
/* This function looks at a single insn and emits any directives
   required to unwind this insn.  */

void
process_for_unwind_directive (FILE *asm_out_file, rtx insn)
{
  bool unwind = (flag_unwind_tables
                 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS));
  bool frame = dwarf2out_do_frame ();

  if (unwind || frame)
    {
      rtx pat;

      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
        {
          last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;

          /* Restore unwind state from immediately before the epilogue.  */
          if (need_copy_state)
            {
              if (unwind)
                {
                  fprintf (asm_out_file, "\t.body\n");
                  fprintf (asm_out_file, "\t.copy_state %d\n",
                           cfun->machine->state_num);
                }
              if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
                ia64_dwarf2out_def_steady_cfa (insn);
              need_copy_state = false;
            }
        }

      if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
        return;

      pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
      if (pat)
        pat = XEXP (pat, 0);
      else
        pat = PATTERN (insn);

      switch (GET_CODE (pat))
        {
        case SET:
          process_set (asm_out_file, pat, insn, unwind, frame);
          break;

        case PARALLEL:
          {
            int par_index;
            int limit = XVECLEN (pat, 0);
            for (par_index = 0; par_index < limit; par_index++)
              {
                rtx x = XVECEXP (pat, 0, par_index);
                if (GET_CODE (x) == SET)
                  process_set (asm_out_file, x, insn, unwind, frame);
              }
            break;
          }

        default:
          gcc_unreachable ();
        }
    }
}
enum ia64_builtins
{
  IA64_BUILTIN_BSP,
  IA64_BUILTIN_FLUSHRS
};

static void
ia64_init_builtins (void)
{
  tree fpreg_type;
  tree float80_type;

  /* The __fpreg type.  */
  fpreg_type = make_node (REAL_TYPE);
  TYPE_PRECISION (fpreg_type) = 82;
  layout_type (fpreg_type);
  (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");

  /* The __float80 type.  */
  float80_type = make_node (REAL_TYPE);
  TYPE_PRECISION (float80_type) = 80;
  layout_type (float80_type);
  (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");

  /* The __float128 type.  */
  if (!TARGET_HPUX)
    {
      tree float128_type = make_node (REAL_TYPE);
      TYPE_PRECISION (float128_type) = 128;
      layout_type (float128_type);
      (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
    }
  else
    /* Under HPUX, this is a synonym for "long double".  */
    (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                               "__float128");
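  /* For illustration (hypothetical user code, not part of this file):

	__float80  x = 1.0;		80-bit extended precision, XFmode
	__fpreg    r;			82-bit register format, RFmode
	__float128 q = 2.0;		128-bit quad; a synonym for
					"long double" on HP-UX

     Little more than copying is intended for __fpreg values; most
     conversions and all arithmetic on them are rejected by the
     ia64_invalid_* hooks below.  */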
#define def_builtin(name, type, code)					\
  lang_hooks.builtin_function ((name), (type), (code), BUILT_IN_MD,	\
			       NULL, NULL_TREE)

  def_builtin ("__builtin_ia64_bsp",
               build_function_type (ptr_type_node, void_list_node),
               IA64_BUILTIN_BSP);

  def_builtin ("__builtin_ia64_flushrs",
               build_function_type (void_type_node, void_list_node),
               IA64_BUILTIN_FLUSHRS);

#undef def_builtin
}
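/* For illustration (hypothetical user code): the builtins registered
   above are called like ordinary functions,

	void *bsp = __builtin_ia64_bsp ();
	__builtin_ia64_flushrs ();

   __builtin_ia64_bsp returns the current register backing store pointer
   and __builtin_ia64_flushrs flushes the register stack into the backing
   store; both are expanded by ia64_expand_builtin below.  */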
rtx
ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		     enum machine_mode mode ATTRIBUTE_UNUSED,
		     int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case IA64_BUILTIN_BSP:
      if (! target || ! register_operand (target, DImode))
        target = gen_reg_rtx (DImode);
      emit_insn (gen_bsp_value (target));
#ifdef POINTERS_EXTEND_UNSIGNED
      target = convert_memory_address (ptr_mode, target);
#endif
      return target;

    case IA64_BUILTIN_FLUSHRS:
      emit_insn (gen_flushrs ());
      return const0_rtx;

    default:
      break;
    }

  return NULL_RTX;
}
/* On HP-UX IA64, aggregate parameters are passed in the most significant
   bits of their stack slot.  */

enum direction
ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
{
  /* Exception to the normal case for structures/unions/etc.  */
  if (type && AGGREGATE_TYPE_P (type)
      && int_size_in_bytes (type) < UNITS_PER_WORD)
    return upward;

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
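/* For illustration: with the check above, a small aggregate argument such
   as "struct s { char c[3]; }" (a hypothetical example) is padded upward,
   which on big-endian HP-UX places its bytes in the most significant part
   of the 8-byte slot; other arguments keep the direction chosen by
   DEFAULT_FUNCTION_ARG_PADDING.  */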
/* Linked list of all external functions that are to be emitted by GCC.
   We output the name if and only if TREE_SYMBOL_REFERENCED is set in
   order to avoid putting out names that are never really used.  */

struct extern_func_list GTY(())
{
  struct extern_func_list *next;
  tree decl;
};

static GTY(()) struct extern_func_list *extern_func_head;

void
ia64_hpux_add_extern_decl (tree decl)
{
  struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));

  p->decl = decl;
  p->next = extern_func_head;
  extern_func_head = p;
}
/* Print out the list of used global functions.  */

static void
ia64_hpux_file_end (void)
{
  struct extern_func_list *p;

  for (p = extern_func_head; p; p = p->next)
    {
      tree decl = p->decl;
      tree id = DECL_ASSEMBLER_NAME (decl);

      gcc_assert (id);

      if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
        {
          const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);

          TREE_ASM_WRITTEN (decl) = 1;
          (*targetm.asm_out.globalize_label) (asm_out_file, name);
          fputs (TYPE_ASM_OP, asm_out_file);
          assemble_name (asm_out_file, name);
          fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
        }
    }

  extern_func_head = 0;
}
/* Set SImode div/mod functions; init_integral_libfuncs only initializes
   modes of word_mode and larger.  Rename the TFmode libfuncs using the
   HPUX conventions.  __divtf3 is used for XFmode; we need to keep it for
   backward compatibility.  */

static void
ia64_init_libfuncs (void)
{
  set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
  set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
  set_optab_libfunc (smod_optab, SImode, "__modsi3");
  set_optab_libfunc (umod_optab, SImode, "__umodsi3");

  set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
  set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
  set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
  set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
  set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");

  set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
  set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
  set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
  set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
  set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
  set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");

  set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
  set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
  set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
  set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");

  set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
  set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
}
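/* For illustration (hypothetical user code): with the tables above, a
   TFmode multiply such as

	long double f (long double a, long double b) { return a * b; }

   compiled for HP-UX becomes a call to _U_Qfmpy, and widening a double
   to that long double goes through _U_Qfcnvff_dbl_to_quad.  */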
/* Rename all the TFmode libfuncs using the HPUX conventions.  */

static void
ia64_hpux_init_libfuncs (void)
{
  ia64_init_libfuncs ();

  set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
  set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
  set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");

  /* ia64_expand_compare uses this.  */
  cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");

  /* These should never be used.  */
  set_optab_libfunc (eq_optab, TFmode, 0);
  set_optab_libfunc (ne_optab, TFmode, 0);
  set_optab_libfunc (gt_optab, TFmode, 0);
  set_optab_libfunc (ge_optab, TFmode, 0);
  set_optab_libfunc (lt_optab, TFmode, 0);
  set_optab_libfunc (le_optab, TFmode, 0);
}
/* Rename the division and modulus functions in VMS.  */

static void
ia64_vms_init_libfuncs (void)
{
  set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
  set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
  set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
  set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
  set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
  set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
  set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
  set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
}
/* Rename the TFmode libfuncs available from soft-fp in glibc using
   the HPUX conventions.  */

static void
ia64_sysv4_init_libfuncs (void)
{
  ia64_init_libfuncs ();

  /* These functions are not part of the HPUX TFmode interface.  We
     use them instead of _U_Qfcmp, which doesn't work the way we
     expect.  */
  set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
  set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
  set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
  set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
  set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
  set_optab_libfunc (le_optab, TFmode, "_U_Qfle");

  /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
     glibc doesn't have them.  */
}
/* Switch to the section to which we should output X.  The only thing
   special we do here is to honor small data.  */

static void
ia64_select_rtx_section (enum machine_mode mode, rtx x,
			 unsigned HOST_WIDE_INT align)
{
  if (GET_MODE_SIZE (mode) > 0
      && GET_MODE_SIZE (mode) <= ia64_section_threshold)
    sdata_section ();
  else
    default_elf_select_rtx_section (mode, x, align);
}
/* It is illegal to have relocations in shared segments on AIX and HPUX.
   Pretend flag_pic is always set.  */

static void
ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
{
  default_elf_select_section_1 (exp, reloc, align, true);
}

static void
ia64_rwreloc_unique_section (tree decl, int reloc)
{
  default_unique_section_1 (decl, reloc, true);
}

static void
ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
				 unsigned HOST_WIDE_INT align)
{
  int save_pic = flag_pic;
  flag_pic = 1;
  ia64_select_rtx_section (mode, x, align);
  flag_pic = save_pic;
}
#ifndef TARGET_RWRELOC
#define TARGET_RWRELOC flag_pic
#endif

static unsigned int
ia64_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = 0;

  if (strcmp (name, ".sdata") == 0
      || strncmp (name, ".sdata.", 7) == 0
      || strncmp (name, ".gnu.linkonce.s.", 16) == 0
      || strncmp (name, ".sdata2.", 8) == 0
      || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
      || strcmp (name, ".sbss") == 0
      || strncmp (name, ".sbss.", 6) == 0
      || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
    flags = SECTION_SMALL;

  flags |= default_section_type_flags_1 (decl, name, reloc, TARGET_RWRELOC);
  return flags;
}
/* Return true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
   structure type whose address should be passed in out0, rather than
   in r8.  */

static bool
ia64_struct_retval_addr_is_first_parm_p (tree fntype)
{
  tree ret_type = TREE_TYPE (fntype);

  /* The Itanium C++ ABI requires that out0, rather than r8, be used
     as the structure return address parameter, if the return value
     type has a non-trivial copy constructor or destructor.  It is not
     clear if this same convention should be used for other
     programming languages.  Until G++ 3.4, we incorrectly used r8 for
     these return values.  */
  return (abi_version_at_least (2)
	  && ret_type
	  && TYPE_MODE (ret_type) == BLKmode
	  && TREE_ADDRESSABLE (ret_type)
	  && strcmp (lang_hooks.name, "GNU C++") == 0);
}
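/* For illustration (hypothetical C++): given

	struct T { T (const T &); ~T (); };
	T f ();

   the return type has a non-trivial copy constructor, so the predicate
   above is true and the return-slot address is passed in out0; for a
   plain POD return type it stays in r8.  */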
/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.  */

static void
ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx this, insn, funexp;
  unsigned int this_parmno;
  unsigned int this_regno;

  reload_completed = 1;
  epilogue_completed = 1;
  reset_block_changes ();

  /* Set things up as ia64_expand_prologue might.  */
  last_scratch_gr_reg = 15;

  memset (&current_frame_info, 0, sizeof (current_frame_info));
  current_frame_info.spill_cfa_off = -16;
  current_frame_info.n_input_regs = 1;
  current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Figure out whether "this" will be the first parameter (the
     typical case) or the second parameter (as happens when the
     virtual function returns certain class objects).  */
  this_parmno
    = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
       ? 1 : 0);
  this_regno = IN_REG (this_parmno);
  if (!TARGET_REG_NAMES)
    reg_names[this_regno] = ia64_reg_numbers[this_parmno];

  this = gen_rtx_REG (Pmode, this_regno);
  if (TARGET_ILP32)
    {
      rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
      REG_POINTER (tmp) = 1;
      if (delta && CONST_OK_FOR_I (delta))
        {
          emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
          delta = 0;
        }
      else
        emit_insn (gen_ptr_extend (this, tmp));
    }

  /* Apply the constant offset, if required.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (!CONST_OK_FOR_I (delta))
        {
          rtx tmp = gen_rtx_REG (Pmode, 2);
          emit_move_insn (tmp, delta_rtx);
          delta_rtx = tmp;
        }
      emit_insn (gen_adddi3 (this, this, delta_rtx));
    }

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 2);

      if (TARGET_ILP32)
        {
          rtx t = gen_rtx_REG (ptr_mode, 2);
          REG_POINTER (t) = 1;
          emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
          if (CONST_OK_FOR_I (vcall_offset))
            {
              emit_insn (gen_ptr_extend_plus_imm (tmp, t,
                                                  vcall_offset_rtx));
              vcall_offset = 0;
            }
          else
            emit_insn (gen_ptr_extend (tmp, t));
        }
      else
        emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));

      if (vcall_offset)
        {
          if (!CONST_OK_FOR_J (vcall_offset))
            {
              rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
              emit_move_insn (tmp2, vcall_offset_rtx);
              vcall_offset_rtx = tmp2;
            }
          emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
        }

      if (TARGET_ILP32)
        emit_move_insn (gen_rtx_REG (ptr_mode, 2),
                        gen_rtx_MEM (ptr_mode, tmp));
      else
        emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
  insn = get_last_insn ();
  SIBLING_CALL_P (insn) = 1;

  /* Code generation for calls relies on splitting.  */
  reload_completed = 1;
  epilogue_completed = 1;
  try_split (PATTERN (insn), insn, 0);

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */

  insn_locators_initialize ();
  emit_all_insn_group_barriers (NULL);
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
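/* For illustration (hypothetical C++): a thunk with a nonzero DELTA is
   needed for an overrider reached through a non-primary base, e.g.

	struct A { virtual void f (); };
	struct B { virtual void g (); };
	struct C : A, B { void g (); };

   Calling g through a B* adjusts "this" by the offset of B within C
   before tail-calling C::g.  A nonzero VCALL_OFFSET additionally loads
   the adjustment from the vtable, as happens with virtual bases.  */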
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
ia64_struct_value_rtx (tree fntype,
		       int incoming ATTRIBUTE_UNUSED)
{
  if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
    return NULL_RTX;
  return gen_rtx_REG (Pmode, GR_REG (8));
}
static bool
ia64_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode:
      return true;

    case SFmode:
    case DFmode:
    case XFmode:
    case RFmode:
      return true;

    case TFmode:
      return TARGET_HPUX;

    default:
      return false;
    }
}

static bool
ia64_vector_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case V8QImode:
    case V4HImode:
    case V2SImode:
    case V2SFmode:
      return true;

    default:
      return false;
    }
}
/* Implement the FUNCTION_PROFILER macro.  */

void
ia64_output_function_profiler (FILE *file, int labelno)
{
  bool indirect_call;

  /* If the function needs a static chain and the static chain
     register is r15, we use an indirect call so as to bypass
     the PLT stub in case the executable is dynamically linked,
     because the stub clobbers r15 as per 5.3.6 of the psABI.
     We don't need to do that in non-canonical PIC mode.  */

  if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
    {
      gcc_assert (STATIC_CHAIN_REGNUM == 15);
      indirect_call = true;
    }
  else
    indirect_call = false;

  if (TARGET_GNU_AS)
    fputs ("\t.prologue 4, r40\n", file);
  else
    fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
  fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);

  if (NO_PROFILE_COUNTERS)
    fputs ("\tmov out3 = r0\n", file);
  else
    {
      char buf[20];
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);

      if (TARGET_AUTO_PIC)
        fputs ("\tmovl out3 = @gprel(", file);
      else
        fputs ("\taddl out3 = @ltoff(", file);
      assemble_name (file, buf);
      if (TARGET_AUTO_PIC)
        fputs (")\n", file);
      else
        fputs ("), r1\n", file);
    }

  if (indirect_call)
    fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
  fputs ("\t;;\n", file);

  fputs ("\t.save rp, r42\n", file);
  fputs ("\tmov out2 = b0\n", file);
  if (indirect_call)
    fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
  fputs ("\t.body\n", file);
  fputs ("\tmov out1 = r1\n", file);
  if (indirect_call)
    {
      fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
      fputs ("\tmov b6 = r16\n", file);
      fputs ("\tld8 r1 = [r14]\n", file);
      fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
    }
  else
    fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
}
static GTY(()) rtx mcount_func_rtx;

static rtx
gen_mcount_func_rtx (void)
{
  if (!mcount_func_rtx)
    mcount_func_rtx = init_one_libfunc ("_mcount");
  return mcount_func_rtx;
}

void
ia64_profile_hook (int labelno)
{
  rtx label, ip;

  if (NO_PROFILE_COUNTERS)
    label = NULL_RTX;
  else
    {
      char buf[30];
      const char *label_name;
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
      label = gen_rtx_SYMBOL_REF (Pmode, label_name);
      SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
    }
  ip = gen_reg_rtx (Pmode);
  emit_insn (gen_ip_value (ip));
  emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
                     VOIDmode, 3,
                     gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
                     ip, Pmode,
                     label, Pmode);
}
/* Return the mangling of TYPE if it is an extended fundamental type.  */

static const char *
ia64_mangle_fundamental_type (tree type)
{
  /* On HP-UX, "long double" is mangled as "e" so __float128 is
     mangled as "u9__float128".  */
  if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
    return "g";
  /* On HP-UX, "e" is not available as a mangling of __float80 so use
     an extended mangling.  Elsewhere, "e" is available since long
     double is 80 bits.  */
  if (TYPE_MODE (type) == XFmode)
    return TARGET_HPUX ? "u9__float80" : "e";
  if (TYPE_MODE (type) == RFmode)
    return "u7__fpreg";
  return NULL;
}
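/* For illustration: with the mangling above, a hypothetical declaration
   "void f (__float80);" is mangled as _Z1fe on GNU/Linux (where long
   double is the same 80-bit type) and as _Z1fu9__float80 on HP-UX, while
   "void g (__fpreg);" becomes _Z1gu7__fpreg.  */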
/* Return the diagnostic message string if conversion from FROMTYPE to
   TOTYPE is not allowed, NULL otherwise.  */
static const char *
ia64_invalid_conversion (tree fromtype, tree totype)
{
  /* Reject nontrivial conversions to or from __fpreg.  */
  if (TYPE_MODE (fromtype) == RFmode
      && TYPE_MODE (totype) != RFmode
      && TYPE_MODE (totype) != VOIDmode)
    return N_("invalid conversion from %<__fpreg%>");
  if (TYPE_MODE (totype) == RFmode
      && TYPE_MODE (fromtype) != RFmode)
    return N_("invalid conversion to %<__fpreg%>");
  return NULL;
}
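/* For illustration (hypothetical user code): under the checks above,

	__fpreg r, s;
	s = r;			OK, a plain copy
	double d = r;		error: invalid conversion from __fpreg
	r = 1.0;		error: invalid conversion to __fpreg
   */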
/* Return the diagnostic message string if the unary operation OP is
   not permitted on TYPE, NULL otherwise.  */
static const char *
ia64_invalid_unary_op (int op, tree type)
{
  /* Reject operations on __fpreg other than unary + or &.  */
  if (TYPE_MODE (type) == RFmode
      && op != CONVERT_EXPR
      && op != ADDR_EXPR)
    return N_("invalid operation on %<__fpreg%>");
  return NULL;
}

/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */
static const char *
ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, tree type1, tree type2)
{
  /* Reject operations on __fpreg.  */
  if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
    return N_("invalid operation on %<__fpreg%>");
  return NULL;
}

#include "gt-ia64.h"