/* Default target hook functions.
   Copyright (C) 2003-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* The migration of target macros to target hooks works as follows:

   1. Create a target hook that uses the existing target macros to
      implement the same functionality.

   2. Convert all the MI files to use the hook instead of the macro.

   3. Repeat for a majority of the remaining target macros.  This will
      take some time.

   4. Tell target maintainers to start migrating.

   5. Eventually convert the backends to override the hook instead of
      defining the macros.  This will take some time too.

   6. TBD when, poison the macros.  Unmigrated targets will break at
      this point.

   Note that we expect steps 1-3 to be done by the people that
   understand what the MI does with each macro, and step 5 to be done
   by the target maintainers for their respective targets.

   Note that steps 1 and 2 don't have to be done together, but no
   target can override the new hook until step 2 is complete for it.

   Once the macros are poisoned, we will revert to the old migration
   rules - migrate the macro, callers, and targets all at once.  This
   comment can thus be removed at that point.  */
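/* As a sketch of step 1 (FOO_P, default_foo_p, and targetm.foo_p below
   are hypothetical names, not part of this file): a target macro FOO_P
   would typically get a default hook of the form

     bool
     default_foo_p (machine_mode mode)
     {
     #ifdef FOO_P
       return FOO_P (mode);
     #else
       return false;
     #endif
     }

   after which machine-independent code calls targetm.foo_p (mode)
   instead of testing FOO_P directly, matching the pattern used by many
   of the hooks in this file.  */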
#include "config.h"
#define INCLUDE_MEMORY
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "rtl.h"
#include "tree.h"
#include "tree-ssa-alias.h"
#include "gimple-expr.h"
#include "memmodel.h"
#include "backend.h"
#include "emit-rtl.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "profile-count.h"
#include "optabs.h"
#include "regs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "flags.h"
#include "explow.h"
#include "expmed.h"
#include "calls.h"
#include "expr.h"
#include "output.h"
#include "common/common-target.h"
#include "reload.h"
#include "intl.h"
#include "opts.h"
#include "gimplify.h"
#include "predict.h"
#include "real.h"
#include "langhooks.h"
#include "sbitmap.h"
#include "function-abi.h"
#include "attribs.h"
#include "asan.h"
#include "emit-rtl.h"
#include "gimple.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "options.h"
#include "case-cfn-macros.h"
bool
default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
			      rtx addr ATTRIBUTE_UNUSED,
			      bool strict ATTRIBUTE_UNUSED,
			      code_helper ATTRIBUTE_UNUSED)
{
#ifdef GO_IF_LEGITIMATE_ADDRESS
  /* Defer to the old implementation using a goto.  */
  if (strict)
    return strict_memory_address_p (mode, addr);
  else
    return memory_address_p (mode, addr);
#else
  gcc_unreachable ();
#endif
}
void
default_external_libcall (rtx fun ATTRIBUTE_UNUSED)
{
#ifdef ASM_OUTPUT_EXTERNAL_LIBCALL
  ASM_OUTPUT_EXTERNAL_LIBCALL (asm_out_file, fun);
#endif
}
int
default_unspec_may_trap_p (const_rtx x, unsigned flags)
{
  int i;

  /* Any floating arithmetic may trap.  */
  if ((SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math))
    return 1;

  for (i = 0; i < XVECLEN (x, 0); ++i)
    {
      if (may_trap_p_1 (XVECEXP (x, 0, i), flags))
	return 1;
    }

  return 0;
}
machine_mode
default_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			       machine_mode mode,
			       int *punsignedp ATTRIBUTE_UNUSED,
			       const_tree funtype ATTRIBUTE_UNUSED,
			       int for_return ATTRIBUTE_UNUSED)
{
  if (type != NULL_TREE && for_return == 2)
    return promote_mode (type, mode, punsignedp);
  return mode;
}

machine_mode
default_promote_function_mode_always_promote (const_tree type,
					      machine_mode mode,
					      int *punsignedp,
					      const_tree funtype ATTRIBUTE_UNUSED,
					      int for_return ATTRIBUTE_UNUSED)
{
  return promote_mode (type, mode, punsignedp);
}

machine_mode
default_cc_modes_compatible (machine_mode m1, machine_mode m2)
{
  if (m1 == m2)
    return m1;
  return VOIDmode;
}
bool
default_return_in_memory (const_tree type,
			  const_tree fntype ATTRIBUTE_UNUSED)
{
  return (TYPE_MODE (type) == BLKmode);
}

rtx
default_legitimize_address (rtx x, rtx orig_x ATTRIBUTE_UNUSED,
			    machine_mode mode ATTRIBUTE_UNUSED)
{
  return x;
}

bool
default_legitimize_address_displacement (rtx *, rtx *, poly_int64,
					 machine_mode)
{
  return false;
}
bool
default_const_not_ok_for_debug_p (rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    return true;
  return false;
}

rtx
default_expand_builtin_saveregs (void)
{
  error ("%<__builtin_saveregs%> not supported by this target");
  return const0_rtx;
}

void
default_setup_incoming_varargs (cumulative_args_t,
				const function_arg_info &, int *, int)
{
}

/* The default implementation of TARGET_BUILTIN_SETJMP_FRAME_VALUE.  */

rtx
default_builtin_setjmp_frame_value (void)
{
  return virtual_stack_vars_rtx;
}
/* Generic hook that takes a CUMULATIVE_ARGS pointer and returns false.  */

bool
hook_bool_CUMULATIVE_ARGS_false (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return false;
}

bool
default_pretend_outgoing_varargs_named (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return (targetm.calls.setup_incoming_varargs
	  != default_setup_incoming_varargs);
}

scalar_int_mode
default_eh_return_filter_mode (void)
{
  return targetm.unwind_word_mode ();
}

scalar_int_mode
default_libgcc_cmp_return_mode (void)
{
  return word_mode;
}

scalar_int_mode
default_libgcc_shift_count_mode (void)
{
  return word_mode;
}

scalar_int_mode
default_unwind_word_mode (void)
{
  return word_mode;
}
/* The default implementation of TARGET_SHIFT_TRUNCATION_MASK.  */

unsigned HOST_WIDE_INT
default_shift_truncation_mask (machine_mode mode)
{
  return SHIFT_COUNT_TRUNCATED ? GET_MODE_UNIT_BITSIZE (mode) - 1 : 0;
}
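/* A quick worked example of the formula above (the values follow
   directly from it; no particular target is being described): with
   SHIFT_COUNT_TRUNCATED defined, SImode yields a mask of
   GET_MODE_UNIT_BITSIZE (SImode) - 1 == 31, so shift counts are known
   to be interpreted modulo 32; otherwise the mask is 0 and callers may
   not assume any truncation.  */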
/* The default implementation of TARGET_MIN_DIVISIONS_FOR_RECIP_MUL.  */

unsigned int
default_min_divisions_for_recip_mul (machine_mode mode ATTRIBUTE_UNUSED)
{
  return have_insn_for (DIV, mode) ? 3 : 2;
}

/* The default implementation of TARGET_MODE_REP_EXTENDED.  */

int
default_mode_rep_extended (scalar_int_mode, scalar_int_mode)
{
  return UNKNOWN;
}

/* Generic hook that takes a CUMULATIVE_ARGS pointer and returns true.  */

bool
hook_bool_CUMULATIVE_ARGS_true (cumulative_args_t a ATTRIBUTE_UNUSED)
{
  return true;
}

/* Return machine mode for non-standard suffix
   or VOIDmode if non-standard suffixes are unsupported.  */
machine_mode
default_mode_for_suffix (char suffix ATTRIBUTE_UNUSED)
{
  return VOIDmode;
}
/* Return machine mode for a floating type which is indicated
   by the given enum tree_index.  */

machine_mode
default_mode_for_floating_type (enum tree_index ti)
{
  if (ti == TI_FLOAT_TYPE)
    return SFmode;
  gcc_assert (ti == TI_DOUBLE_TYPE || ti == TI_LONG_DOUBLE_TYPE);
  return DFmode;
}

/* The generic C++ ABI specifies this is a 64-bit value.  */
tree
default_cxx_guard_type (void)
{
  return long_long_integer_type_node;
}
/* Returns the size of the cookie to use when allocating an array
   whose elements have the indicated TYPE.  Assumes that it is already
   known that a cookie is needed.  */

tree
default_cxx_get_cookie_size (tree type)
{
  tree cookie_size;

  /* We need to allocate an additional max (sizeof (size_t), alignof
     (true_type)) bytes.  */
  tree sizetype_size;
  tree type_align;

  sizetype_size = size_in_bytes (sizetype);
  type_align = size_int (TYPE_ALIGN_UNIT (type));
  if (tree_int_cst_lt (type_align, sizetype_size))
    cookie_size = sizetype_size;
  else
    cookie_size = type_align;

  return cookie_size;
}
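/* A worked example of the computation above (assuming size_in_bytes
   (sizetype) is 8, as on a typical 64-bit target): elements aligned to
   4 bytes get a cookie of max (8, 4) == 8 bytes, while elements aligned
   to 16 bytes get max (8, 16) == 16 bytes, keeping the array that
   follows the cookie correctly aligned.  */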
/* Returns modified FUNCTION_TYPE for cdtor callabi.  */

tree
default_cxx_adjust_cdtor_callabi_fntype (tree fntype)
{
  return fntype;
}

/* Return true if a parameter must be passed by reference.  This version
   of the TARGET_PASS_BY_REFERENCE hook uses just MUST_PASS_IN_STACK.  */

bool
hook_pass_by_reference_must_pass_in_stack (cumulative_args_t,
					   const function_arg_info &arg)
{
  return targetm.calls.must_pass_in_stack (arg);
}

/* Return true if a parameter follows callee copies conventions.  This
   version of the hook is true for all named arguments.  */

bool
hook_callee_copies_named (cumulative_args_t, const function_arg_info &arg)
{
  return arg.named;
}
/* Emit to STREAM the assembler syntax for insn operand X.  */

void
default_print_operand (FILE *stream ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
		       int code ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND
  PRINT_OPERAND (stream, x, code);
#else
  gcc_unreachable ();
#endif
}

/* Emit to STREAM the assembler syntax for an insn operand whose memory
   address is X.  */

void
default_print_operand_address (FILE *stream ATTRIBUTE_UNUSED,
			       machine_mode /*mode*/,
			       rtx x ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND_ADDRESS
  PRINT_OPERAND_ADDRESS (stream, x);
#else
  gcc_unreachable ();
#endif
}

/* Return true if CODE is a valid punctuation character for the
   `print_operand' hook.  */

bool
default_print_operand_punct_valid_p (unsigned char code ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND_PUNCT_VALID_P
  return PRINT_OPERAND_PUNCT_VALID_P (code);
#else
  return false;
#endif
}
/* The default implementation of TARGET_MANGLE_ASSEMBLER_NAME.  */
tree
default_mangle_assembler_name (const char *name ATTRIBUTE_UNUSED)
{
  const char *skipped = name + (*name == '*' ? 1 : 0);
  const char *stripped = targetm.strip_name_encoding (skipped);
  if (*name != '*' && user_label_prefix[0])
    stripped = ACONCAT ((user_label_prefix, stripped, NULL));
  return get_identifier (stripped);
}
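/* For illustration (the prefix value is an assumption for the example,
   not a claim about any particular target): with user_label_prefix "_",
   the source-level name "foo" mangles to the identifier "_foo", while a
   name already marked "*foo" has its encoding stripped to "foo" and
   gains no prefix.  */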
/* The default implementation of TARGET_TRANSLATE_MODE_ATTRIBUTE.  */

machine_mode
default_translate_mode_attribute (machine_mode mode)
{
  return mode;
}

/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.

   By default we guess this means that any C type is supported.  If
   we can't map the mode back to a type that would be available in C,
   then reject it.  Special case, here, is the double-word arithmetic
   supported by optabs.cc.  */

bool
default_scalar_mode_supported_p (scalar_mode mode)
{
  int precision = GET_MODE_PRECISION (mode);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_PARTIAL_INT:
    case MODE_INT:
      if (precision == CHAR_TYPE_SIZE)
	return true;
      if (precision == SHORT_TYPE_SIZE)
	return true;
      if (precision == INT_TYPE_SIZE)
	return true;
      if (precision == LONG_TYPE_SIZE)
	return true;
      if (precision == LONG_LONG_TYPE_SIZE)
	return true;
      if (precision == 2 * BITS_PER_WORD)
	return true;
      return false;

    case MODE_FLOAT:
      if (mode == targetm.c.mode_for_floating_type (TI_FLOAT_TYPE))
	return true;
      if (mode == targetm.c.mode_for_floating_type (TI_DOUBLE_TYPE))
	return true;
      if (mode == targetm.c.mode_for_floating_type (TI_LONG_DOUBLE_TYPE))
	return true;
      return false;

    case MODE_DECIMAL_FLOAT:
    case MODE_FRACT:
    case MODE_UFRACT:
    case MODE_ACCUM:
    case MODE_UACCUM:
      return false;

    default:
      gcc_unreachable ();
    }
}
/* Return true if libgcc supports floating-point mode MODE (known to
   be supported as a scalar mode).  */

bool
default_libgcc_floating_mode_supported_p (scalar_float_mode mode)
{
  switch (mode)
    {
#ifdef HAVE_SFmode
    case E_SFmode:
#endif
#ifdef HAVE_DFmode
    case E_DFmode:
#endif
#ifdef HAVE_XFmode
    case E_XFmode:
#endif
#ifdef HAVE_TFmode
    case E_TFmode:
#endif
      return true;

    default:
      return false;
    }
}
/* Return the machine mode to use for the type _FloatN, if EXTENDED is
   false, or _FloatNx, if EXTENDED is true, or VOIDmode if not
   supported.  */
opt_scalar_float_mode
default_floatn_mode (int n, bool extended)
{
  if (extended)
    {
      opt_scalar_float_mode cand1, cand2;
      scalar_float_mode mode;
      switch (n)
	{
	case 32:
#ifdef HAVE_DFmode
	  cand1 = DFmode;
#endif
	  break;

	case 64:
#ifdef HAVE_XFmode
	  cand1 = XFmode;
#endif
#ifdef HAVE_TFmode
	  cand2 = TFmode;
#endif
	  break;

	case 128:
	  break;

	default:
	  /* Those are the only valid _FloatNx types.  */
	  gcc_unreachable ();
	}
      if (cand1.exists (&mode)
	  && REAL_MODE_FORMAT (mode)->ieee_bits > n
	  && targetm.scalar_mode_supported_p (mode)
	  && targetm.libgcc_floating_mode_supported_p (mode))
	return cand1;
      if (cand2.exists (&mode)
	  && REAL_MODE_FORMAT (mode)->ieee_bits > n
	  && targetm.scalar_mode_supported_p (mode)
	  && targetm.libgcc_floating_mode_supported_p (mode))
	return cand2;
    }
  else
    {
      opt_scalar_float_mode cand;
      scalar_float_mode mode;
      switch (n)
	{
	case 16:
	  /* Always enable _Float16 if we have basic support for the mode.
	     Targets can control the range and precision of operations on
	     the _Float16 type using TARGET_C_EXCESS_PRECISION.  */
#ifdef HAVE_HFmode
	  cand = HFmode;
#endif
	  break;

	case 32:
#ifdef HAVE_SFmode
	  cand = SFmode;
#endif
	  break;

	case 64:
#ifdef HAVE_DFmode
	  cand = DFmode;
#endif
	  break;

	case 128:
#ifdef HAVE_TFmode
	  cand = TFmode;
#endif
	  break;

	default:
	  break;
	}
      if (cand.exists (&mode)
	  && REAL_MODE_FORMAT (mode)->ieee_bits == n
	  && targetm.scalar_mode_supported_p (mode)
	  && targetm.libgcc_floating_mode_supported_p (mode))
	return cand;
    }
  return opt_scalar_float_mode ();
}
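/* To make the selection above concrete (a sketch; which of these modes
   exist and are supported is target-dependent): for _Float64 the
   candidate is DFmode, accepted only when
   REAL_MODE_FORMAT (DFmode)->ieee_bits == 64, while for _Float32x the
   candidate is also DFmode, now accepted because its ieee_bits of 64 is
   strictly greater than 32; the _FloatNx types only require more range
   and precision than the corresponding _FloatN.  */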
/* Define this to return true if the _Floatn and _Floatnx built-in functions
   should implicitly enable the built-in function without the __builtin_ prefix
   in addition to the normal built-in function with the __builtin_ prefix.  The
   default is to only enable built-in functions without the __builtin_ prefix
   for the GNU C language.  The argument FUNC is the enum built_in_function
   id of the function to be enabled.  */

bool
default_floatn_builtin_p (int func ATTRIBUTE_UNUSED)
{
  static bool first_time_p = true;
  static bool c_or_objective_c;

  if (first_time_p)
    {
      first_time_p = false;
      c_or_objective_c = lang_GNU_C () || lang_GNU_OBJC ();
    }

  return c_or_objective_c;
}
/* Make some target macros usable by target-independent code.  */
bool
targhook_words_big_endian (void)
{
  return !!WORDS_BIG_ENDIAN;
}

bool
targhook_float_words_big_endian (void)
{
  return !!FLOAT_WORDS_BIG_ENDIAN;
}

/* True if the target supports floating-point exceptions and rounding
   modes.  */

bool
default_float_exceptions_rounding_supported_p (void)
{
#ifdef HAVE_adddf3
  return HAVE_adddf3;
#else
  return false;
#endif
}
/* True if the target supports decimal floating point.  */

bool
default_decimal_float_supported_p (void)
{
  return ENABLE_DECIMAL_FLOAT;
}

/* True if the target supports fixed-point arithmetic.  */

bool
default_fixed_point_supported_p (void)
{
  return ENABLE_FIXED_POINT;
}

/* True if the target supports GNU indirect functions.  */

bool
default_has_ifunc_p (void)
{
  return HAVE_GNU_INDIRECT_FUNCTION;
}
/* Return true if we predict the loop LOOP will be transformed to a
   low-overhead loop, otherwise return false.

   By default, false is returned, as this hook's applicability should be
   verified for each target.  Target maintainers should re-define the hook
   if the target can take advantage of it.  */

bool
default_predict_doloop_p (class loop *loop ATTRIBUTE_UNUSED)
{
  return false;
}

/* By default, just use the input MODE itself.  */

machine_mode
default_preferred_doloop_mode (machine_mode mode)
{
  return mode;
}
/* Return NULL if INSN is valid within a low-overhead loop, otherwise return
   an error message.

   This function checks whether a given INSN is valid within a low-overhead
   loop.  If INSN is invalid it returns the reason for that, otherwise it
   returns NULL.  A called function may clobber any special registers required
   for low-overhead looping.  Additionally, some targets (eg, PPC) use the count
   register for branch on table instructions.  We reject the doloop pattern in
   these cases.  */

const char *
default_invalid_within_doloop (const rtx_insn *insn)
{
  if (CALL_P (insn))
    return "Function call in loop.";

  if (tablejump_p (insn, NULL, NULL) || computed_jump_p (insn))
    return "Computed branch in the loop.";

  return NULL;
}
/* Mapping of builtin functions to vectorized variants.  */

tree
default_builtin_vectorized_function (unsigned int, tree, tree)
{
  return NULL_TREE;
}

/* Mapping of target builtin functions to vectorized variants.  */

tree
default_builtin_md_vectorized_function (tree, tree, tree)
{
  return NULL_TREE;
}
/* Default vectorizer cost model values.  */

int
default_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
				    tree vectype,
				    int misalign ATTRIBUTE_UNUSED)
{
  switch (type_of_cost)
    {
    case scalar_stmt:
    case scalar_load:
    case scalar_store:
    case vector_stmt:
    case vector_load:
    case vector_store:
    case vec_to_scalar:
    case scalar_to_vec:
    case cond_branch_not_taken:
    case vec_perm:
    case vec_promote_demote:
      return 1;

    case unaligned_load:
    case unaligned_store:
      return 2;

    case cond_branch_taken:
      return 3;

    case vec_construct:
      return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vectype)) - 1;

    default:
      gcc_unreachable ();
    }
}
/* Reciprocal.  */

tree
default_builtin_reciprocal (tree)
{
  return NULL_TREE;
}

void
default_emit_support_tinfos (emit_support_tinfos_callback)
{
}

bool
hook_bool_CUMULATIVE_ARGS_arg_info_false (cumulative_args_t,
					  const function_arg_info &)
{
  return false;
}

bool
hook_bool_CUMULATIVE_ARGS_arg_info_true (cumulative_args_t,
					 const function_arg_info &)
{
  return true;
}

int
hook_int_CUMULATIVE_ARGS_arg_info_0 (cumulative_args_t,
				     const function_arg_info &)
{
  return 0;
}
void
hook_void_CUMULATIVE_ARGS (cumulative_args_t)
{
}

void
hook_void_CUMULATIVE_ARGS_tree (cumulative_args_t ca ATTRIBUTE_UNUSED,
				tree ATTRIBUTE_UNUSED)
{
}

void
hook_void_CUMULATIVE_ARGS_rtx_tree (cumulative_args_t, rtx, tree)
{
}

/* Default implementation of TARGET_PUSH_ARGUMENT.  */

bool
default_push_argument (unsigned int)
{
#ifdef PUSH_ROUNDING
  return !ACCUMULATE_OUTGOING_ARGS;
#else
  return false;
#endif
}
void
default_function_arg_advance (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

/* Default implementation of TARGET_FUNCTION_ARG_OFFSET.  */

HOST_WIDE_INT
default_function_arg_offset (machine_mode, const_tree)
{
  return 0;
}
/* Default implementation of TARGET_FUNCTION_ARG_PADDING: usually pad
   upward, but pad short args downward on big-endian machines.  */

pad_direction
default_function_arg_padding (machine_mode mode, const_tree type)
{
  if (!BYTES_BIG_ENDIAN)
    return PAD_UPWARD;

  unsigned HOST_WIDE_INT size;
  if (mode == BLKmode)
    {
      if (!type || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	return PAD_UPWARD;
      size = int_size_in_bytes (type);
    }
  else
    /* Targets with variable-sized modes must override this hook
       and handle variable-sized modes explicitly.  */
    size = GET_MODE_SIZE (mode).to_constant ();

  if (size < (PARM_BOUNDARY / BITS_PER_UNIT))
    return PAD_DOWNWARD;

  return PAD_UPWARD;
}
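/* Concretely (the numbers are illustrative assumptions, not facts about
   a specific port): with BYTES_BIG_ENDIAN set and a 32-bit
   PARM_BOUNDARY, a 2-byte HImode argument has size 2 < 4 and is padded
   downward, so the value sits in the most significant bytes of its
   slot, whereas a full 4-byte argument fills its slot and stays
   PAD_UPWARD.  */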
rtx
default_function_arg (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

rtx
default_function_incoming_arg (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

unsigned int
default_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
			       const_tree type ATTRIBUTE_UNUSED)
{
  return PARM_BOUNDARY;
}

unsigned int
default_function_arg_round_boundary (machine_mode mode ATTRIBUTE_UNUSED,
				     const_tree type ATTRIBUTE_UNUSED)
{
  return PARM_BOUNDARY;
}
void
hook_void_bitmap (bitmap regs ATTRIBUTE_UNUSED)
{
}

const char *
hook_invalid_arg_for_unprototyped_fn (
  const_tree typelist ATTRIBUTE_UNUSED,
  const_tree funcdecl ATTRIBUTE_UNUSED,
  const_tree val ATTRIBUTE_UNUSED)
{
  return NULL;
}
/* Initialize the stack protection decls.  */

/* Stack protection related decls living in libgcc.  */
static GTY(()) tree stack_chk_guard_decl;

tree
default_stack_protect_guard (void)
{
  tree t = stack_chk_guard_decl;

  if (t == NULL)
    {
      rtx x;

      t = build_decl (UNKNOWN_LOCATION,
		      VAR_DECL, get_identifier ("__stack_chk_guard"),
		      ptr_type_node);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;

      /* Do not share RTL as the declaration is visible outside of
	 current function.  */
      x = DECL_RTL (t);
      RTX_FLAG (x, used) = 1;

      stack_chk_guard_decl = t;
    }

  return t;
}
static GTY(()) tree stack_chk_fail_decl;

tree
default_external_stack_protect_fail (void)
{
  tree t = stack_chk_fail_decl;

  if (t == NULL_TREE)
    {
      t = build_function_type_list (void_type_node, NULL_TREE);
      t = build_decl (UNKNOWN_LOCATION,
		      FUNCTION_DECL, get_identifier ("__stack_chk_fail"), t);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      TREE_NOTHROW (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;
      DECL_VISIBILITY (t) = VISIBILITY_DEFAULT;
      DECL_VISIBILITY_SPECIFIED (t) = 1;

      stack_chk_fail_decl = t;
    }

  return build_call_expr (t, 0);
}
tree
default_hidden_stack_protect_fail (void)
{
#ifndef HAVE_GAS_HIDDEN
  return default_external_stack_protect_fail ();
#else
  tree t = stack_chk_fail_decl;

  if (!flag_pic)
    return default_external_stack_protect_fail ();

  if (t == NULL_TREE)
    {
      t = build_function_type_list (void_type_node, NULL_TREE);
      t = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
		      get_identifier ("__stack_chk_fail_local"), t);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      TREE_NOTHROW (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;
      DECL_VISIBILITY_SPECIFIED (t) = 1;
      DECL_VISIBILITY (t) = VISIBILITY_HIDDEN;

      stack_chk_fail_decl = t;
    }

  return build_call_expr (t, 0);
#endif
}
bool
hook_bool_const_rtx_commutative_p (const_rtx x,
				   int outer_code ATTRIBUTE_UNUSED)
{
  return COMMUTATIVE_P (x);
}

rtx
default_function_value (const_tree ret_type ATTRIBUTE_UNUSED,
			const_tree fn_decl_or_type,
			bool outgoing ATTRIBUTE_UNUSED)
{
  /* The old interface doesn't handle receiving the function type.  */
  if (fn_decl_or_type
      && !DECL_P (fn_decl_or_type))
    fn_decl_or_type = NULL;

#ifdef FUNCTION_VALUE
  return FUNCTION_VALUE (ret_type, fn_decl_or_type);
#else
  gcc_unreachable ();
#endif
}
rtx
default_libcall_value (machine_mode mode ATTRIBUTE_UNUSED,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
#ifdef LIBCALL_VALUE
  return LIBCALL_VALUE (MACRO_MODE (mode));
#else
  gcc_unreachable ();
#endif
}

/* The default hook for TARGET_FUNCTION_VALUE_REGNO_P.  */

bool
default_function_value_regno_p (const unsigned int regno ATTRIBUTE_UNUSED)
{
#ifdef FUNCTION_VALUE_REGNO_P
  return FUNCTION_VALUE_REGNO_P (regno);
#else
  gcc_unreachable ();
#endif
}
/* Choose the mode and rtx to use to zero REGNO, storing them in PMODE and
   PREGNO_RTX and returning TRUE if successful, otherwise returning FALSE.  If
   the natural mode for REGNO doesn't work, attempt to group it with subsequent
   adjacent registers set in TOZERO.  */

static inline bool
zcur_select_mode_rtx (unsigned int regno, machine_mode *pmode,
		      rtx *pregno_rtx, HARD_REG_SET tozero)
{
  rtx regno_rtx = regno_reg_rtx[regno];
  machine_mode mode = GET_MODE (regno_rtx);

  /* If the natural mode doesn't work, try some wider mode.  */
  if (!targetm.hard_regno_mode_ok (regno, mode))
    {
      bool found = false;
      for (int nregs = 2;
	   !found && nregs <= hard_regno_max_nregs
	     && regno + nregs <= FIRST_PSEUDO_REGISTER
	     && TEST_HARD_REG_BIT (tozero,
				   regno + nregs - 1);
	   nregs++)
	{
	  mode = choose_hard_reg_mode (regno, nregs, 0);
	  if (mode == E_VOIDmode)
	    continue;
	  gcc_checking_assert (targetm.hard_regno_mode_ok (regno, mode));
	  regno_rtx = gen_rtx_REG (mode, regno);
	  found = true;
	}
      if (!found)
	return false;
    }

  *pmode = mode;
  *pregno_rtx = regno_rtx;
  return true;
}
/* The default hook for TARGET_ZERO_CALL_USED_REGS.  */

HARD_REG_SET
default_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
{
  gcc_assert (!hard_reg_set_empty_p (need_zeroed_hardregs));

  HARD_REG_SET failed;
  CLEAR_HARD_REG_SET (failed);
  bool progress = false;

  /* First, try to zero each register in need_zeroed_hardregs by
     loading a zero into it, taking note of any failures in
     FAILED.  */
  for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
      {
	rtx_insn *last_insn = get_last_insn ();
	rtx regno_rtx;
	machine_mode mode;

	if (!zcur_select_mode_rtx (regno, &mode, &regno_rtx,
				   need_zeroed_hardregs))
	  {
	    SET_HARD_REG_BIT (failed, regno);
	    continue;
	  }

	rtx zero = CONST0_RTX (mode);
	rtx_insn *insn = emit_move_insn (regno_rtx, zero);
	if (!valid_insn_p (insn))
	  {
	    SET_HARD_REG_BIT (failed, regno);
	    delete_insns_since (last_insn);
	  }
	else
	  {
	    progress = true;
	    regno += hard_regno_nregs (regno, mode) - 1;
	  }
      }

  /* Now retry with copies from zeroed registers, as long as we've
     made some PROGRESS, and registers remain to be zeroed in
     FAILED.  */
  while (progress && !hard_reg_set_empty_p (failed))
    {
      HARD_REG_SET retrying = failed;

      CLEAR_HARD_REG_SET (failed);
      progress = false;

      for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	if (TEST_HARD_REG_BIT (retrying, regno))
	  {
	    rtx regno_rtx;
	    machine_mode mode;

	    /* This might select registers we've already zeroed.  If grouping
	       with them is what it takes to get regno zeroed, so be it.  */
	    if (!zcur_select_mode_rtx (regno, &mode, &regno_rtx,
				       need_zeroed_hardregs))
	      {
		SET_HARD_REG_BIT (failed, regno);
		continue;
	      }

	    bool success = false;
	    /* Look for a source.  */
	    for (unsigned int src = 0; src < FIRST_PSEUDO_REGISTER; src++)
	      {
		/* If SRC hasn't been zeroed (yet?), skip it.  */
		if (! TEST_HARD_REG_BIT (need_zeroed_hardregs, src))
		  continue;
		if (TEST_HARD_REG_BIT (retrying, src))
		  continue;

		/* Check that SRC can hold MODE, and that any other
		   registers needed to hold MODE in SRC have also been
		   zeroed.  */
		if (!targetm.hard_regno_mode_ok (src, mode))
		  continue;
		unsigned n = targetm.hard_regno_nregs (src, mode);
		bool ok = true;
		for (unsigned i = 1; ok && i < n; i++)
		  ok = (TEST_HARD_REG_BIT (need_zeroed_hardregs, src + i)
			&& !TEST_HARD_REG_BIT (retrying, src + i));
		if (!ok)
		  continue;

		/* SRC is usable, try to copy from it.  */
		rtx_insn *last_insn = get_last_insn ();
		rtx src_rtx = gen_rtx_REG (mode, src);
		rtx_insn *insn = emit_move_insn (regno_rtx, src_rtx);
		if (!valid_insn_p (insn))
		  /* It didn't work, remove any inserts.  We'll look
		     for another SRC.  */
		  delete_insns_since (last_insn);
		else
		  {
		    /* We're done for REGNO.  */
		    success = true;
		    break;
		  }
	      }

	    /* If nothing worked for REGNO this round, mark it to be
	       retried if we get another round.  */
	    if (!success)
	      SET_HARD_REG_BIT (failed, regno);
	    else
	      {
		/* Take note so as to enable another round if needed.  */
		progress = true;
		regno += hard_regno_nregs (regno, mode) - 1;
	      }
	  }
    }

  /* If any register remained, report it.  */
  if (!progress)
    {
      static bool issued_error;
      if (!issued_error)
	{
	  const char *name = NULL;
	  for (unsigned int i = 0; zero_call_used_regs_opts[i].name != NULL;
	       ++i)
	    if (flag_zero_call_used_regs == zero_call_used_regs_opts[i].flag)
	      {
		name = zero_call_used_regs_opts[i].name;
		break;
	      }

	  if (!name)
	    name = "";

	  issued_error = true;
	  sorry ("argument %qs is not supported for %qs on this target",
		 name, "-fzero-call-used-regs");
	}
    }

  return need_zeroed_hardregs;
}
rtx
default_internal_arg_pointer (void)
{
  /* If the reg that the virtual arg pointer will be translated into is
     not a fixed reg or is the stack pointer, make a copy of the virtual
     arg pointer, and address parms via the copy.  The frame pointer is
     considered fixed even though it is not marked as such.  */
  if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM
       || ! (fixed_regs[ARG_POINTER_REGNUM]
	     || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM)))
    return copy_to_reg (virtual_incoming_args_rtx);
  else
    return virtual_incoming_args_rtx;
}
rtx
default_static_chain (const_tree ARG_UNUSED (fndecl_or_type), bool incoming_p)
{
  if (incoming_p)
    {
#ifdef STATIC_CHAIN_INCOMING_REGNUM
      return gen_rtx_REG (Pmode, STATIC_CHAIN_INCOMING_REGNUM);
#endif
    }

#ifdef STATIC_CHAIN_REGNUM
  return gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
#endif

  {
    static bool issued_error;
    if (!issued_error)
      {
	issued_error = true;
	sorry ("nested functions not supported on this target");
      }

    /* It really doesn't matter what we return here, so long as it
       doesn't cause the rest of the compiler to crash.  */
    return gen_rtx_MEM (Pmode, stack_pointer_rtx);
  }
}

void
default_trampoline_init (rtx ARG_UNUSED (m_tramp), tree ARG_UNUSED (t_func),
			 rtx ARG_UNUSED (r_chain))
{
  sorry ("nested function trampolines not supported on this target");
}
poly_int64
default_return_pops_args (tree, tree, poly_int64)
{
  return 0;
}

reg_class_t
default_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
					 reg_class_t cl,
					 reg_class_t best_cl ATTRIBUTE_UNUSED)
{
  return cl;
}

extern bool
default_lra_p (void)
{
  return true;
}

int
default_register_priority (int hard_regno ATTRIBUTE_UNUSED)
{
  return 0;
}

extern bool
default_register_usage_leveling_p (void)
{
  return false;
}

extern bool
default_different_addr_displacement_p (void)
{
  return false;
}
reg_class_t
default_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
			  reg_class_t reload_class_i ATTRIBUTE_UNUSED,
			  machine_mode reload_mode ATTRIBUTE_UNUSED,
			  secondary_reload_info *sri)
{
  enum reg_class rclass = NO_REGS;
  enum reg_class reload_class = (enum reg_class) reload_class_i;

  if (sri->prev_sri && sri->prev_sri->t_icode != CODE_FOR_nothing)
    {
      sri->icode = sri->prev_sri->t_icode;
      return NO_REGS;
    }
#ifdef SECONDARY_INPUT_RELOAD_CLASS
  if (in_p)
    rclass = SECONDARY_INPUT_RELOAD_CLASS (reload_class,
					   MACRO_MODE (reload_mode), x);
#endif
#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
  if (! in_p)
    rclass = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class,
					    MACRO_MODE (reload_mode), x);
#endif
  if (rclass != NO_REGS)
    {
      enum insn_code icode
	= direct_optab_handler (in_p ? reload_in_optab : reload_out_optab,
				reload_mode);

      if (icode != CODE_FOR_nothing
	  && !insn_operand_matches (icode, in_p, x))
	icode = CODE_FOR_nothing;
      else if (icode != CODE_FOR_nothing)
	{
	  const char *insn_constraint, *scratch_constraint;
	  enum reg_class insn_class, scratch_class;

	  gcc_assert (insn_data[(int) icode].n_operands == 3);
	  insn_constraint = insn_data[(int) icode].operand[!in_p].constraint;
	  if (!*insn_constraint)
	    insn_class = ALL_REGS;
	  else
	    {
	      if (in_p)
		{
		  gcc_assert (*insn_constraint == '=');
		  insn_constraint++;
		}
	      insn_class = (reg_class_for_constraint
			    (lookup_constraint (insn_constraint)));
	      gcc_assert (insn_class != NO_REGS);
	    }

	  scratch_constraint = insn_data[(int) icode].operand[2].constraint;
	  /* The scratch register's constraint must start with "=&",
	     except for an input reload, where only "=" is necessary,
	     and where it might be beneficial to re-use registers from
	     the input.  */
	  gcc_assert (scratch_constraint[0] == '='
		      && (in_p || scratch_constraint[1] == '&'));
	  scratch_constraint++;
	  if (*scratch_constraint == '&')
	    scratch_constraint++;
	  scratch_class = (reg_class_for_constraint
			   (lookup_constraint (scratch_constraint)));

	  if (reg_class_subset_p (reload_class, insn_class))
	    {
	      gcc_assert (scratch_class == rclass);
	      rclass = NO_REGS;
	    }
	  else
	    rclass = insn_class;

	}
      if (rclass == NO_REGS)
	sri->icode = icode;
      else
	sri->t_icode = icode;
    }
  return rclass;
}
/* The default implementation of TARGET_SECONDARY_MEMORY_NEEDED_MODE.  */

machine_mode
default_secondary_memory_needed_mode (machine_mode mode)
{
  if (!targetm.lra_p ()
      && known_lt (GET_MODE_BITSIZE (mode), BITS_PER_WORD)
      && INTEGRAL_MODE_P (mode))
    return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
  return mode;
}
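/* An illustrative consequence (a sketch of one reload scenario, not a
   statement about any particular port): on a 32-bit target still using
   the old reload pass, a QImode value pushed through a secondary memory
   slot is widened here to the word-sized SImode, so the stack slot is
   read and written a full word at a time.  */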
/* By default, if flag_pic is true, then neither local nor global relocs
   should be placed in readonly memory.  */

int
default_reloc_rw_mask (void)
{
  return flag_pic ? 3 : 0;
}

/* By default, address diff vectors are generated
   for jump tables when flag_pic is true.  */

bool
default_generate_pic_addr_diff_vec (void)
{
  return flag_pic;
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.  */

void
default_asm_out_constructor (rtx symbol ATTRIBUTE_UNUSED,
			     int priority ATTRIBUTE_UNUSED)
{
  sorry ("global constructors not supported on this target");
}

/* Likewise for global destructors.  */

void
default_asm_out_destructor (rtx symbol ATTRIBUTE_UNUSED,
			    int priority ATTRIBUTE_UNUSED)
{
  sorry ("global destructors not supported on this target");
}

/* By default, do no modification.  */
tree default_mangle_decl_assembler_name (tree decl ATTRIBUTE_UNUSED,
					 tree id)
{
  return id;
}
/* The default implementation of TARGET_STATIC_RTX_ALIGNMENT.  */

HOST_WIDE_INT
default_static_rtx_alignment (machine_mode mode)
{
  return GET_MODE_ALIGNMENT (mode);
}

/* The default implementation of TARGET_CONSTANT_ALIGNMENT.  */

HOST_WIDE_INT
default_constant_alignment (const_tree, HOST_WIDE_INT align)
{
  return align;
}

/* An implementation of TARGET_CONSTANT_ALIGNMENT that aligns strings
   to at least BITS_PER_WORD but otherwise makes no changes.  */

HOST_WIDE_INT
constant_alignment_word_strings (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST)
    return MAX (align, BITS_PER_WORD);
  return align;
}
/* Default to natural alignment for vector types, bounded by
   MAX_OFILE_ALIGNMENT.  */

HOST_WIDE_INT
default_vector_alignment (const_tree type)
{
  unsigned HOST_WIDE_INT align = MAX_OFILE_ALIGNMENT;
  tree size = TYPE_SIZE (type);
  if (tree_fits_uhwi_p (size))
    align = tree_to_uhwi (size);
  if (align >= MAX_OFILE_ALIGNMENT)
    return MAX_OFILE_ALIGNMENT;
  return MAX (align, GET_MODE_ALIGNMENT (TYPE_MODE (type)));
}
/* The default implementation of
   TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT.  */

poly_uint64
default_preferred_vector_alignment (const_tree type)
{
  return TYPE_ALIGN (type);
}

/* The default implementation of
   TARGET_VECTORIZE_PREFERRED_DIV_AS_SHIFTS_OVER_MULT.  */

bool
default_preferred_div_as_shifts_over_mult (const_tree type)
{
  return !can_mult_highpart_p (TYPE_MODE (type), TYPE_UNSIGNED (type));
}

/* By default assume vectors of element TYPE require a multiple of the natural
   alignment of TYPE.  TYPE is naturally aligned if IS_PACKED is false.  */
bool
default_builtin_vector_alignment_reachable (const_tree /*type*/, bool is_packed)
{
  return ! is_packed;
}
/* By default, assume that a target supports misaligned memory accesses of
   any factor if it supports the movmisalign pattern.  IS_PACKED is true if
   the memory access is defined in a packed struct.  */
bool
default_builtin_support_vector_misalignment (machine_mode mode,
					     const_tree type ATTRIBUTE_UNUSED,
					     int misalignment ATTRIBUTE_UNUSED,
					     bool is_packed ATTRIBUTE_UNUSED)
{
  if (optab_handler (movmisalign_optab, mode) != CODE_FOR_nothing)
    return true;
  return false;
}
/* By default, only attempt to parallelize bitwise operations, and
   possibly adds/subtracts using bit-twiddling.  */

machine_mode
default_preferred_simd_mode (scalar_mode)
{
  return word_mode;
}

/* By default do not split reductions further.  */

machine_mode
default_split_reduction (machine_mode mode)
{
  return mode;
}

/* By default only the preferred vector mode is tried.  */

unsigned int
default_autovectorize_vector_modes (vector_modes *, bool)
{
  return 0;
}
/* The default implementation of TARGET_VECTORIZE_RELATED_MODE.  */

opt_machine_mode
default_vectorize_related_mode (machine_mode vector_mode,
				scalar_mode element_mode,
				poly_uint64 nunits)
{
  machine_mode result_mode;
  if ((maybe_ne (nunits, 0U)
       || multiple_p (GET_MODE_SIZE (vector_mode),
		      GET_MODE_SIZE (element_mode), &nunits))
      && mode_for_vector (element_mode, nunits).exists (&result_mode)
      && VECTOR_MODE_P (result_mode)
      && targetm.vector_mode_supported_p (result_mode))
    return result_mode;

  return opt_machine_mode ();
}
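/* An example of the arithmetic above (assuming the named vector modes
   exist and are supported by the target): requesting the HImode
   relative of V4SImode with NUNITS of 0 keeps the 16-byte vector size,
   computing 16 / 2 == 8 units and yielding V8HImode, while explicitly
   passing NUNITS of 4 would yield V4HImode instead.  */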
/* By default a vector of integers is used as a mask.  */

opt_machine_mode
default_get_mask_mode (machine_mode mode)
{
  return related_int_vector_mode (mode);
}

/* By default consider a masked conditional operation expensive only if
   it is a masked store.  */

bool
default_conditional_operation_is_expensive (unsigned ifn)
{
  return ifn == IFN_MASK_STORE;
}

/* By default consider masked stores to be expensive.  */

bool
default_empty_mask_is_expensive (unsigned ifn)
{
  return ifn == IFN_MASK_STORE;
}
/* By default, the cost model accumulates three separate costs (prologue,
   loop body, and epilogue) for a vectorized loop or block.  Create a
   vector_costs structure to hold them and return it.  */

vector_costs *
default_vectorize_create_costs (vec_info *vinfo, bool costing_for_scalar)
{
  return new vector_costs (vinfo, costing_for_scalar);
}
/* Determine whether or not a pointer mode is valid.  Assume defaults
   of ptr_mode or Pmode - can be overridden.  */
bool
default_valid_pointer_mode (scalar_int_mode mode)
{
  return (mode == ptr_mode || mode == Pmode);
}

/* Determine whether the memory reference specified by REF may alias
   the C library's errno location.  */
bool
default_ref_may_alias_errno (ao_ref *ref)
{
  tree base = ao_ref_base (ref);
  /* The default implementation assumes the errno location is
     a declaration of type int or is always accessed via a
     pointer to int.  We assume that accesses to errno are
     not deliberately obfuscated (even in conforming ways).  */
  if (TYPE_UNSIGNED (TREE_TYPE (base))
      || TYPE_MODE (TREE_TYPE (base)) != TYPE_MODE (integer_type_node))
    return false;
  /* The default implementation assumes an errno location declaration
     is never defined in the current compilation unit and may not be
     aliased by a local variable.  */
  if (DECL_P (base)
      && DECL_EXTERNAL (base)
      && !TREE_STATIC (base))
    return true;
  else if (TREE_CODE (base) == MEM_REF
	   && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
    {
      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0));
      return !pi || pi->pt.anything || pi->pt.nonlocal;
    }
  return false;
}
/* Return the mode for a pointer to a given ADDRSPACE,
   defaulting to ptr_mode for all address spaces.  */

scalar_int_mode
default_addr_space_pointer_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  return ptr_mode;
}

/* Return the mode for an address in a given ADDRSPACE,
   defaulting to Pmode for all address spaces.  */

scalar_int_mode
default_addr_space_address_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  return Pmode;
}

/* Named address space version of valid_pointer_mode.
   To match the above, the same modes apply to all address spaces.  */

bool
default_addr_space_valid_pointer_mode (scalar_int_mode mode,
				       addr_space_t as ATTRIBUTE_UNUSED)
{
  return targetm.valid_pointer_mode (mode);
}

/* Some places still assume that all pointer or address modes are the
   standard Pmode and ptr_mode.  These optimizations become invalid if
   the target actually supports multiple different modes.  For now,
   we disable such optimizations on such targets, using this function.  */

bool
target_default_pointer_address_modes_p (void)
{
  if (targetm.addr_space.address_mode != default_addr_space_address_mode)
    return false;
  if (targetm.addr_space.pointer_mode != default_addr_space_pointer_mode)
    return false;

  return true;
}
/* Named address space version of legitimate_address_p.
   By default, all address spaces have the same form.  */

bool
default_addr_space_legitimate_address_p (machine_mode mode, rtx mem,
					 bool strict,
					 addr_space_t as ATTRIBUTE_UNUSED,
					 code_helper code)
{
  return targetm.legitimate_address_p (mode, mem, strict, code);
}

/* Named address space version of LEGITIMIZE_ADDRESS.
   By default, all address spaces have the same form.  */

rtx
default_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
				       addr_space_t as ATTRIBUTE_UNUSED)
{
  return targetm.legitimize_address (x, oldx, mode);
}

/* The default hook for determining if one named address space is a subset of
   another and to return which address space to use as the common address
   space.  */

bool
default_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
{
  return (subset == superset);
}
/* The default hook for determining if 0 within a named address
   space is a valid address.  */

bool
default_addr_space_zero_address_valid (addr_space_t as ATTRIBUTE_UNUSED)
{
  return false;
}

/* The default hook for debugging the address space is to return the
   address space number to indicate DW_AT_address_class.  */

int
default_addr_space_debug (addr_space_t as)
{
  return as;
}

/* The default hook implementation for TARGET_ADDR_SPACE_DIAGNOSE_USAGE.
   Don't complain about any address space.  */

void
default_addr_space_diagnose_usage (addr_space_t, location_t)
{
}

/* The default hook for TARGET_ADDR_SPACE_CONVERT.  This hook should never be
   called for targets with only a generic address space.  */

rtx
default_addr_space_convert (rtx op ATTRIBUTE_UNUSED,
			    tree from_type ATTRIBUTE_UNUSED,
			    tree to_type ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
/* The default implementation of TARGET_HARD_REGNO_NREGS.  */

unsigned int
default_hard_regno_nregs (unsigned int, machine_mode mode)
{
  /* Targets with variable-sized modes must provide their own definition
     of this hook.  */
  return CEIL (GET_MODE_SIZE (mode).to_constant (), UNITS_PER_WORD);
}

bool
default_hard_regno_scratch_ok (unsigned int regno ATTRIBUTE_UNUSED)
{
  return true;
}

/* The default implementation of TARGET_MODE_DEPENDENT_ADDRESS_P.  */

bool
default_mode_dependent_address_p (const_rtx addr ATTRIBUTE_UNUSED,
				  addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  return false;
}
extern bool default_new_address_profitable_p (rtx, rtx_insn *, rtx);

/* The default implementation of TARGET_NEW_ADDRESS_PROFITABLE_P.  */

bool
default_new_address_profitable_p (rtx memref ATTRIBUTE_UNUSED,
				  rtx_insn *insn ATTRIBUTE_UNUSED,
				  rtx new_addr ATTRIBUTE_UNUSED)
{
  return true;
}
bool
default_target_option_valid_attribute_p (tree ARG_UNUSED (fndecl),
					 tree ARG_UNUSED (name),
					 tree ARG_UNUSED (args),
					 int ARG_UNUSED (flags))
{
  warning (OPT_Wattributes,
	   "%<target%> attribute is not supported on this machine");

  return false;
}

bool
default_target_option_valid_version_attribute_p (tree ARG_UNUSED (fndecl),
						 tree ARG_UNUSED (name),
						 tree ARG_UNUSED (args),
						 int ARG_UNUSED (flags))
{
  warning (OPT_Wattributes,
	   "%<target_version%> attribute is not supported on this machine");

  return false;
}
bool
default_target_option_pragma_parse (tree ARG_UNUSED (args),
				    tree ARG_UNUSED (pop_target))
{
  /* If args is NULL the caller is handle_pragma_pop_options ().  In that case,
     emit no warning because "#pragma GCC pop_options" is valid on targets that
     do not have the "target" pragma.  */
  if (args)
    warning (OPT_Wpragmas,
	     "%<#pragma GCC target%> is not supported for this machine");

  return false;
}
bool
default_target_can_inline_p (tree caller, tree callee)
{
  tree callee_opts = DECL_FUNCTION_SPECIFIC_TARGET (callee);
  tree caller_opts = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  if (! callee_opts)
    callee_opts = target_option_default_node;
  if (! caller_opts)
    caller_opts = target_option_default_node;

  /* If both caller and callee have attributes, assume that if the
     pointer is different, the two functions have different target
     options since build_target_option_node uses a hash table for the
     options.  */
  return callee_opts == caller_opts;
}
/* By default, return false: no target information needs to be collected
   for inlining.  Target maintainers should re-define the hook if their
   target wants to take advantage of it.  */

bool
default_need_ipa_fn_target_info (const_tree, unsigned int &)
{
  return false;
}

bool
default_update_ipa_fn_target_info (unsigned int &, const gimple *)
{
  return false;
}
/* If the machine does not have a case insn that compares the bounds,
   this means extra overhead for dispatch tables, which raises the
   threshold for using them.  */

unsigned int
default_case_values_threshold (void)
{
  return (targetm.have_casesi () ? 4 : 5);
}

bool
default_have_conditional_execution (void)
{
  return HAVE_conditional_execution;
}

bool
default_have_ccmp (void)
{
  return targetm.gen_ccmp_first != NULL;
}
/* By default we assume that c99 functions are present at run time,
   but sincos is not.  */
bool
default_libc_has_function (enum function_class fn_class,
			   tree type ATTRIBUTE_UNUSED)
{
  if (fn_class == function_c94
      || fn_class == function_c99_misc
      || fn_class == function_c99_math_complex)
    return true;

  return false;
}

/* By default assume that libc does not have a fast implementation.  */

bool
default_libc_has_fast_function (int fcode ATTRIBUTE_UNUSED)
{
  return false;
}
bool
gnu_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED,
		       tree type ATTRIBUTE_UNUSED)
{
  return true;
}

bool
no_c99_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED,
			  tree type ATTRIBUTE_UNUSED)
{
  return false;
}

/* Assume some c99 functions are present at run time, including sincos.  */
bool
bsd_libc_has_function (enum function_class fn_class,
		       tree type ATTRIBUTE_UNUSED)
{
  if (fn_class == function_c94
      || fn_class == function_c99_misc
      || fn_class == function_sincos)
    return true;

  return false;
}

/* By default, -fhardened will add -D_FORTIFY_SOURCE=2.  */

unsigned
default_fortify_source_default_level ()
{
  return 2;
}
unsigned
default_libm_function_max_error (unsigned, machine_mode, bool)
{
  return ~0U;
}

unsigned
glibc_linux_libm_function_max_error (unsigned cfn, machine_mode mode,
				     bool boundary_p)
{
  /* Let's use
     https://www.gnu.org/software/libc/manual/2.22/html_node/Errors-in-Math-Functions.html
     https://www.gnu.org/software/libc/manual/html_node/Errors-in-Math-Functions.html
     with usual values recorded here and significant outliers handled in
     target CPU specific overriders.  The tables only record default
     rounding to nearest, for -frounding-math let's add some extra ulps.
     For boundary_p values (say finite results outside of [-1.,1.] for
     sin/cos, or [-0.,+Inf] for sqrt etc.) let's use custom random testers.  */
  int rnd = flag_rounding_math ? 4 : 0;
  bool sf = (REAL_MODE_FORMAT (mode) == &ieee_single_format
	     || REAL_MODE_FORMAT (mode) == &mips_single_format
	     || REAL_MODE_FORMAT (mode) == &motorola_single_format);
  bool df = (REAL_MODE_FORMAT (mode) == &ieee_double_format
	     || REAL_MODE_FORMAT (mode) == &mips_double_format
	     || REAL_MODE_FORMAT (mode) == &motorola_double_format);
  bool xf = (REAL_MODE_FORMAT (mode) == &ieee_extended_intel_96_format
	     || REAL_MODE_FORMAT (mode) == &ieee_extended_intel_128_format
	     || REAL_MODE_FORMAT (mode) == &ieee_extended_motorola_format);
  bool tf = (REAL_MODE_FORMAT (mode) == &ieee_quad_format
	     || REAL_MODE_FORMAT (mode) == &mips_quad_format);

  switch (cfn)
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      if (boundary_p)
	/* https://gcc.gnu.org/pipermail/gcc-patches/2023-April/616595.html */
	return 0;
      if (sf || df || xf || tf)
	return 0 + rnd;
      break;
    CASE_CFN_COS:
    CASE_CFN_COS_FN:
      /* cos generally has errors like sin, but far more arches have 2ulps
	 for double.  */
      if (!boundary_p && df)
	return 2 + rnd;
      gcc_fallthrough ();
    CASE_CFN_SIN:
    CASE_CFN_SIN_FN:
      if (boundary_p)
	/* According to
	   https://sourceware.org/pipermail/gcc-patches/2023-April/616315.html
	   it seems default rounding sin/cos stay strictly in the [-1.,1.]
	   range, while with rounding to infinity the result can be 1ulp
	   larger/smaller.  */
	return flag_rounding_math ? 1 : 0;
      if (sf || df)
	return 1 + rnd;
      if (xf || tf)
	return 2 + rnd;
      break;
    default:
      break;
    }

  return default_libm_function_max_error (cfn, mode, boundary_p);
}
tree
default_builtin_tm_load_store (tree ARG_UNUSED (type))
{
  return NULL_TREE;
}

/* Compute cost of moving registers to/from memory.  */

int
default_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			  reg_class_t rclass ATTRIBUTE_UNUSED,
			  bool in ATTRIBUTE_UNUSED)
{
#ifndef MEMORY_MOVE_COST
  return (4 + memory_move_secondary_cost (mode, (enum reg_class) rclass, in));
#else
  return MEMORY_MOVE_COST (MACRO_MODE (mode), (enum reg_class) rclass, in);
#endif
}

/* Compute cost of moving data from a register of class FROM to one of
   TO, using MODE.  */

int
default_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t from ATTRIBUTE_UNUSED,
			    reg_class_t to ATTRIBUTE_UNUSED)
{
#ifndef REGISTER_MOVE_COST
  return 2;
#else
  return REGISTER_MOVE_COST (MACRO_MODE (mode),
			     (enum reg_class) from, (enum reg_class) to);
#endif
}
/* The default implementation of TARGET_SLOW_UNALIGNED_ACCESS.  */

bool
default_slow_unaligned_access (machine_mode, unsigned int)
{
  return STRICT_ALIGNMENT;
}

/* The default implementation of TARGET_ESTIMATED_POLY_VALUE.  */

HOST_WIDE_INT
default_estimated_poly_value (poly_int64 x, poly_value_estimate_kind)
{
  return x.coeffs[0];
}
/* For hooks which use the MOVE_RATIO macro, this gives the legacy default
   behavior.  SPEED_P is true if we are compiling for speed.  */

unsigned int
get_move_ratio (bool speed_p ATTRIBUTE_UNUSED)
{
  unsigned int move_ratio;
#ifdef MOVE_RATIO
  move_ratio = (unsigned int) MOVE_RATIO (speed_p);
#else
#if defined (HAVE_cpymemqi) || defined (HAVE_cpymemhi) || defined (HAVE_cpymemsi) || defined (HAVE_cpymemdi) || defined (HAVE_cpymemti)
  move_ratio = 2;
#else /* No cpymem patterns, pick a default.  */
  move_ratio = ((speed_p) ? 15 : 3);
#endif
#endif
  return move_ratio;
}
/* Return TRUE if the move_by_pieces/set_by_pieces infrastructure should be
   used; return FALSE if the cpymem/setmem optab should be expanded, or
   a call to memcpy emitted.  */

bool
default_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
					unsigned int alignment,
					enum by_pieces_operation op,
					bool speed_p)
{
  unsigned int max_size = 0;
  unsigned int ratio = 0;

  switch (op)
    {
    case CLEAR_BY_PIECES:
      max_size = STORE_MAX_PIECES;
      ratio = CLEAR_RATIO (speed_p);
      break;
    case MOVE_BY_PIECES:
      max_size = MOVE_MAX_PIECES;
      ratio = get_move_ratio (speed_p);
      break;
    case SET_BY_PIECES:
      max_size = STORE_MAX_PIECES;
      ratio = SET_RATIO (speed_p);
      break;
    case STORE_BY_PIECES:
      max_size = STORE_MAX_PIECES;
      ratio = get_move_ratio (speed_p);
      break;
    case COMPARE_BY_PIECES:
      max_size = COMPARE_MAX_PIECES;
      /* Pick a likely default, just as in get_move_ratio.  */
      ratio = speed_p ? 15 : 3;
      break;
    }

  return by_pieces_ninsns (size, alignment, max_size + 1, op) < ratio;
}
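/* A sketch of the resulting decision (the instruction count here is
   illustrative, not computed for a real target): a 16-byte
   MOVE_BY_PIECES copy done in 8-byte pieces costs by_pieces_ninsns == 2
   insns, so the by-pieces path is taken whenever the applicable ratio
   exceeds 2, which it does under the speed-optimizing legacy default of
   15 in get_move_ratio.  */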
/* This hook controls code generation for expanding a memcmp operation by
   pieces.  Return 1 for the normal pattern of compare/jump after each pair
   of loads, or a higher number to reduce the number of branches.  */

int
default_compare_by_pieces_branch_ratio (machine_mode)
{
  return 1;
}
2164 /* Write PATCH_AREA_SIZE NOPs into the asm outfile FILE around a function
2165 entry. If RECORD_P is true and the target supports named sections,
2166 the location of the NOPs will be recorded in a special object section
2167 called "__patchable_function_entries". This routine may be called
2168 twice per function to put NOPs before and after the function
2169 entry. */
2171 void
2172 default_print_patchable_function_entry (FILE *file,
2173 unsigned HOST_WIDE_INT patch_area_size,
2174 bool record_p)
2176 const char *nop_templ = 0;
2177 int code_num;
2178 rtx_insn *my_nop = make_insn_raw (gen_nop ());
2180 /* We use the template alone, relying on the (currently sane) assumption
2181 that the NOP template does not have variable operands. */
2182 code_num = recog_memoized (my_nop);
2183 nop_templ = get_insn_template (code_num, my_nop);
2185 if (record_p && targetm_common.have_named_sections)
2187 char buf[256];
2188 section *previous_section = in_section;
2189 const char *asm_op = integer_asm_op (POINTER_SIZE_UNITS, false);
2191 gcc_assert (asm_op != NULL);
2192 /* If SECTION_LINK_ORDER is supported, this internal label will
2193 be filled as the symbol for linked_to section. */
2194 ASM_GENERATE_INTERNAL_LABEL (buf, "LPFE", current_function_funcdef_no);
2196 unsigned int flags = SECTION_WRITE | SECTION_RELRO;
2197 if (HAVE_GAS_SECTION_LINK_ORDER)
2198 flags |= SECTION_LINK_ORDER;
2200 section *sect = get_section ("__patchable_function_entries",
2201 flags, current_function_decl);
2202 if (HAVE_COMDAT_GROUP && DECL_COMDAT_GROUP (current_function_decl))
2203 switch_to_comdat_section (sect, current_function_decl);
2204 else
2205 switch_to_section (sect);
2206 assemble_align (POINTER_SIZE);
2207 fputs (asm_op, file);
2208 assemble_name_raw (file, buf);
2209 fputc ('\n', file);
2211 switch_to_section (previous_section);
2212 ASM_OUTPUT_LABEL (file, buf);
2215 unsigned i;
2216 for (i = 0; i < patch_area_size; ++i)
2217 output_asm_insn (nop_templ, NULL);
2218 }
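/* Purely as an illustration (the exact directives, section flags and NOP
   count vary by target, assembler and -fpatchable-function-entry= value),
   the code above typically produces output shaped like:

	.section	__patchable_function_entries,"awo",@progbits,.LPFE0
	.p2align	3
	.quad	.LPFE0
	.text
   .LPFE0:
	nop
	nop
*/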
2220 bool
2221 default_profile_before_prologue (void)
2222 {
2223 #ifdef PROFILE_BEFORE_PROLOGUE
2224 return true;
2225 #else
2226 return false;
2227 #endif
2228 }
2230 /* The default implementation of TARGET_PREFERRED_RELOAD_CLASS. */
2232 reg_class_t
2233 default_preferred_reload_class (rtx x ATTRIBUTE_UNUSED,
2234 reg_class_t rclass)
2235 {
2236 #ifdef PREFERRED_RELOAD_CLASS
2237 return (reg_class_t) PREFERRED_RELOAD_CLASS (x, (enum reg_class) rclass);
2238 #else
2239 return rclass;
2240 #endif
2241 }
2243 /* The default implementation of TARGET_OUTPUT_PREFERRED_RELOAD_CLASS. */
2245 reg_class_t
2246 default_preferred_output_reload_class (rtx x ATTRIBUTE_UNUSED,
2247 reg_class_t rclass)
2248 {
2249 return rclass;
2250 }
2252 /* The default implementation of TARGET_PREFERRED_RENAME_CLASS. */
2253 reg_class_t
2254 default_preferred_rename_class (reg_class_t rclass ATTRIBUTE_UNUSED)
2255 {
2256 return NO_REGS;
2257 }
2259 /* The default implementation of TARGET_CLASS_LIKELY_SPILLED_P. */
2261 bool
2262 default_class_likely_spilled_p (reg_class_t rclass)
2263 {
2264 return (reg_class_size[(int) rclass] == 1);
2265 }
2267 /* The default implementation of TARGET_CLASS_MAX_NREGS. */
2269 unsigned char
2270 default_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
2271 machine_mode mode ATTRIBUTE_UNUSED)
2272 {
2273 #ifdef CLASS_MAX_NREGS
2274 return (unsigned char) CLASS_MAX_NREGS ((enum reg_class) rclass,
2275 MACRO_MODE (mode));
2276 #else
2277 /* Targets with variable-sized modes must provide their own definition
2278 of this hook. */
2279 unsigned int size = GET_MODE_SIZE (mode).to_constant ();
2280 return (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2281 #endif
2282 }
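/* For example, under the word-count fallback above, a 16-byte mode on a
   target with 8-byte words needs (16 + 8 - 1) / 8 = 2 registers.  */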
2284 /* Determine the debugging unwind mechanism for the target. */
2286 enum unwind_info_type
2287 default_debug_unwind_info (void)
2288 {
2289 /* If the target wants to force the use of dwarf2 unwind info, let it. */
2290 /* ??? Change all users to the hook, then poison this. */
2291 #ifdef DWARF2_FRAME_INFO
2292 if (DWARF2_FRAME_INFO)
2293 return UI_DWARF2;
2294 #endif
2296 /* Otherwise, only turn it on if dwarf2 debugging is enabled. */
2297 #ifdef DWARF2_DEBUGGING_INFO
2298 if (dwarf_debuginfo_p ())
2299 return UI_DWARF2;
2300 #endif
2302 return UI_NONE;
2303 }
2305 /* Targets that set NUM_POLY_INT_COEFFS to something greater than 1
2306 must define this hook. */
2308 unsigned int
2309 default_dwarf_poly_indeterminate_value (unsigned int, unsigned int *, int *)
2310 {
2311 gcc_unreachable ();
2312 }
2314 /* Determine the correct mode for a Dwarf frame register that represents
2315 register REGNO. */
2317 machine_mode
2318 default_dwarf_frame_reg_mode (int regno)
2319 {
2320 machine_mode save_mode = reg_raw_mode[regno];
2322 if (targetm.hard_regno_call_part_clobbered (eh_edge_abi.id (),
2323 regno, save_mode))
2324 save_mode = choose_hard_reg_mode (regno, 1, &eh_edge_abi);
2325 return save_mode;
2326 }
2328 /* To be used by targets where reg_raw_mode doesn't return the right
2329 mode for registers used in apply_builtin_return and apply_builtin_arg. */
2331 fixed_size_mode
2332 default_get_reg_raw_mode (int regno)
2333 {
2334 /* Targets must override this hook if the underlying register is
2335 variable-sized. */
2336 return as_a <fixed_size_mode> (reg_raw_mode[regno]);
2337 }
2339 /* Return true if a leaf function should stay leaf even with profiling
2340 enabled. */
2342 bool
2343 default_keep_leaf_when_profiled ()
2344 {
2345 return false;
2346 }
2348 /* Return true if the state of option OPTION should be stored in PCH files
2349 and checked by default_pch_valid_p. Store the option's current state
2350 in STATE if so. */
2352 static inline bool
2353 option_affects_pch_p (int option, struct cl_option_state *state)
2354 {
2355 if ((cl_options[option].flags & CL_TARGET) == 0)
2356 return false;
2357 if ((cl_options[option].flags & CL_PCH_IGNORE) != 0)
2358 return false;
2359 if (option_flag_var (option, &global_options) == &target_flags)
2360 if (targetm.check_pch_target_flags)
2361 return false;
2362 return get_option_state (&global_options, option, state);
2363 }
2365 /* Default version of get_pch_validity.
2366 By default, every flag difference is fatal; that will be mostly right for
2367 most targets, but completely right for very few. */
2369 void *
2370 default_get_pch_validity (size_t *sz)
2371 {
2372 struct cl_option_state state;
2373 size_t i;
2374 char *result, *r;
2376 *sz = 2;
2377 if (targetm.check_pch_target_flags)
2378 *sz += sizeof (target_flags);
2379 for (i = 0; i < cl_options_count; i++)
2380 if (option_affects_pch_p (i, &state))
2381 *sz += state.size;
2383 result = r = XNEWVEC (char, *sz);
2384 r[0] = flag_pic;
2385 r[1] = flag_pie;
2386 r += 2;
2387 if (targetm.check_pch_target_flags)
2388 {
2389 memcpy (r, &target_flags, sizeof (target_flags));
2390 r += sizeof (target_flags);
2391 }
2393 for (i = 0; i < cl_options_count; i++)
2394 if (option_affects_pch_p (i, &state))
2395 {
2396 memcpy (r, state.data, state.size);
2397 r += state.size;
2398 }
2400 return result;
2401 }
2403 /* Return a message which says that a PCH file was created with a different
2404 setting of OPTION. */
2406 static const char *
2407 pch_option_mismatch (const char *option)
2408 {
2409 return xasprintf (_("created and used with differing settings of '%s'"),
2410 option);
2411 }
2413 /* Default version of pch_valid_p. */
2415 const char *
2416 default_pch_valid_p (const void *data_p, size_t len ATTRIBUTE_UNUSED)
2417 {
2418 struct cl_option_state state;
2419 const char *data = (const char *)data_p;
2420 size_t i;
2422 /* -fpic and -fpie also usually make a PCH invalid. */
2423 if (data[0] != flag_pic)
2424 return _("created and used with different settings of %<-fpic%>");
2425 if (data[1] != flag_pie)
2426 return _("created and used with different settings of %<-fpie%>");
2427 data += 2;
2429 /* Check target_flags. */
2430 if (targetm.check_pch_target_flags)
2431 {
2432 int tf;
2433 const char *r;
2435 memcpy (&tf, data, sizeof (target_flags));
2436 data += sizeof (target_flags);
2437 r = targetm.check_pch_target_flags (tf);
2438 if (r != NULL)
2439 return r;
2440 }
2442 for (i = 0; i < cl_options_count; i++)
2443 if (option_affects_pch_p (i, &state))
2444 {
2445 if (memcmp (data, state.data, state.size) != 0)
2446 return pch_option_mismatch (cl_options[i].opt_text);
2447 data += state.size;
2448 }
2450 return NULL;
2451 }
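/* A sketch of the validity blob that default_get_pch_validity produces and
   default_pch_valid_p consumes (layout inferred from the two functions
   above):

	offset 0:  one byte holding flag_pic
	offset 1:  one byte holding flag_pie
	then:      sizeof (target_flags) bytes holding target_flags, present
		   only if the target defines check_pch_target_flags
	then:      the raw cl_option_state bytes of every option for which
		   option_affects_pch_p returns true, in option order.  */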
2453 /* Default version of cstore_mode. */
2455 scalar_int_mode
2456 default_cstore_mode (enum insn_code icode)
2457 {
2458 return as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
2459 }
2461 /* Default version of member_type_forces_blk. */
2463 bool
2464 default_member_type_forces_blk (const_tree, machine_mode)
2465 {
2466 return false;
2467 }
2469 /* Default version of canonicalize_comparison. */
2471 void
2472 default_canonicalize_comparison (int *, rtx *, rtx *, bool)
2473 {
2474 }
2476 /* Default implementation of TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */
2478 void
2479 default_atomic_assign_expand_fenv (tree *, tree *, tree *)
2480 {
2481 }
2483 #ifndef PAD_VARARGS_DOWN
2484 #define PAD_VARARGS_DOWN BYTES_BIG_ENDIAN
2485 #endif
2487 /* Build an indirect-ref expression over the given TREE, which represents a
2488 piece of a va_arg() expansion. */
2489 tree
2490 build_va_arg_indirect_ref (tree addr)
2491 {
2492 addr = build_simple_mem_ref_loc (EXPR_LOCATION (addr), addr);
2493 return addr;
2494 }
2496 /* The "standard" implementation of va_arg: read the value from the
2497 current (padded) address and increment by the (padded) size. */
2499 tree
2500 std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
2501 gimple_seq *post_p)
2502 {
2503 tree addr, t, type_size, rounded_size, valist_tmp;
2504 unsigned HOST_WIDE_INT align, boundary;
2505 bool indirect;
2507 /* All of the alignment and movement below is for args-grow-up machines.
2508 As of 2004, there are only 3 ARGS_GROW_DOWNWARD targets, and they all
2509 implement their own specialized gimplify_va_arg_expr routines. */
2510 if (ARGS_GROW_DOWNWARD)
2511 gcc_unreachable ();
2513 indirect = pass_va_arg_by_reference (type);
2514 if (indirect)
2515 type = build_pointer_type (type);
2517 if (targetm.calls.split_complex_arg
2518 && TREE_CODE (type) == COMPLEX_TYPE
2519 && targetm.calls.split_complex_arg (type))
2520 {
2521 tree real_part, imag_part;
2523 real_part = std_gimplify_va_arg_expr (valist,
2524 TREE_TYPE (type), pre_p, NULL);
2525 real_part = get_initialized_tmp_var (real_part, pre_p);
2527 imag_part = std_gimplify_va_arg_expr (unshare_expr (valist),
2528 TREE_TYPE (type), pre_p, NULL);
2529 imag_part = get_initialized_tmp_var (imag_part, pre_p);
2531 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
2532 }
2534 align = PARM_BOUNDARY / BITS_PER_UNIT;
2535 boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type);
2537 /* When the caller aligns a parameter on the stack, an alignment beyond
2538 MAX_SUPPORTED_STACK_ALIGNMENT is clamped to MAX_SUPPORTED_STACK_ALIGNMENT.
2539 Apply the same clamping here so that the callee agrees with the
2540 caller. */
2541 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
2542 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
2544 boundary /= BITS_PER_UNIT;
2546 /* Hoist the valist value into a temporary for the moment. */
2547 valist_tmp = get_initialized_tmp_var (valist, pre_p);
2549 /* va_list pointer is aligned to PARM_BOUNDARY. If argument actually
2550 requires greater alignment, we must perform dynamic alignment. */
2551 if (boundary > align
2552 && !TYPE_EMPTY_P (type)
2553 && !integer_zerop (TYPE_SIZE (type)))
2554 {
2555 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
2556 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
2557 gimplify_and_add (t, pre_p);
2559 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
2560 fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist),
2561 valist_tmp,
2562 build_int_cst (TREE_TYPE (valist), -boundary)));
2563 gimplify_and_add (t, pre_p);
2564 }
2565 else
2566 boundary = align;
2568 /* If the actual alignment is less than the alignment of the type,
2569 adjust the type accordingly so that we don't assume strict alignment
2570 when dereferencing the pointer. */
2571 boundary *= BITS_PER_UNIT;
2572 if (boundary < TYPE_ALIGN (type))
2573 {
2574 type = build_variant_type_copy (type);
2575 SET_TYPE_ALIGN (type, boundary);
2576 }
2578 /* Compute the rounded size of the type. */
2579 type_size = arg_size_in_bytes (type);
2580 rounded_size = round_up (type_size, align);
2582 /* Reduce rounded_size so it's sharable with the postqueue. */
2583 gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue);
2585 /* Get AP. */
2586 addr = valist_tmp;
2587 if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size))
2588 {
2589 /* Small args are padded downward. */
2590 t = fold_build2_loc (input_location, GT_EXPR, sizetype,
2591 rounded_size, size_int (align));
2592 t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node,
2593 size_binop (MINUS_EXPR, rounded_size, type_size));
2594 addr = fold_build_pointer_plus (addr, t);
2595 }
2597 /* Compute new value for AP. */
2598 t = fold_build_pointer_plus (valist_tmp, rounded_size);
2599 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
2600 gimplify_and_add (t, pre_p);
2602 addr = fold_convert (build_pointer_type (type), addr);
2604 if (indirect)
2605 addr = build_va_arg_indirect_ref (addr);
2607 return build_va_arg_indirect_ref (addr);
2608 }
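/* Schematically, for a by-value type that needs no alignment beyond
   PARM_BOUNDARY, the gimple built above behaves like the following C sketch
   (the padding-down of small arguments and the indirect case are omitted;
   ALIGN stands for PARM_BOUNDARY / BITS_PER_UNIT):  */
#if 0
void *
std_va_arg_sketch (char **ap, size_t type_size, size_t align)
{
  char *addr = *ap;				/* Current argument address.  */
  *ap += (type_size + align - 1) & -align;	/* Advance by rounded size.  */
  return addr;					/* Value is read from ADDR.  */
}
#endif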
2610 /* An implementation of TARGET_CAN_USE_DOLOOP_P for targets that do
2611 not support nested low-overhead loops. */
2613 bool
2614 can_use_doloop_if_innermost (const widest_int &, const widest_int &,
2615 unsigned int loop_depth, bool)
2616 {
2617 return loop_depth == 1;
2618 }
2620 /* Default implementation of TARGET_OPTAB_SUPPORTED_P. */
2622 bool
2623 default_optab_supported_p (int, machine_mode, machine_mode, optimization_type)
2624 {
2625 return true;
2626 }
2628 /* Default implementation of TARGET_MAX_NOCE_IFCVT_SEQ_COST. */
2630 unsigned int
2631 default_max_noce_ifcvt_seq_cost (edge e)
2632 {
2633 bool predictable_p = predictable_edge_p (e);
2635 if (predictable_p)
2636 {
2637 if (OPTION_SET_P (param_max_rtl_if_conversion_predictable_cost))
2638 return param_max_rtl_if_conversion_predictable_cost;
2639 }
2640 else
2641 {
2642 if (OPTION_SET_P (param_max_rtl_if_conversion_unpredictable_cost))
2643 return param_max_rtl_if_conversion_unpredictable_cost;
2644 }
2646 return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
2647 }
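/* For example, a port whose BRANCH_COST (true, false) is 2 (a hypothetical
   value; the generic fallback is 1) would allow conversion sequences on an
   unpredictable edge costing up to 2 * COSTS_N_INSNS (3).  */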
2649 /* Default implementation of TARGET_MIN_ARITHMETIC_PRECISION. */
2651 unsigned int
2652 default_min_arithmetic_precision (void)
2653 {
2654 return WORD_REGISTER_OPERATIONS ? BITS_PER_WORD : BITS_PER_UNIT;
2655 }
2657 /* Default implementation of TARGET_C_EXCESS_PRECISION. */
2659 enum flt_eval_method
2660 default_excess_precision (enum excess_precision_type ATTRIBUTE_UNUSED)
2661 {
2662 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
2663 }
2665 /* Return true if _BitInt(N) is supported and fill details about it into
2666 *INFO. */
2667 bool
2668 default_bitint_type_info (int, struct bitint_info *)
2669 {
2670 return false;
2671 }
2673 /* Default implementation for
2674 TARGET_STACK_CLASH_PROTECTION_ALLOCA_PROBE_RANGE. */
2675 HOST_WIDE_INT
2676 default_stack_clash_protection_alloca_probe_range (void)
2677 {
2678 return 0;
2679 }
2681 /* The default implementation of TARGET_EARLY_REMAT_MODES. */
2683 void
2684 default_select_early_remat_modes (sbitmap)
2685 {
2686 }
2688 /* The default implementation of TARGET_PREFERRED_ELSE_VALUE. */
2690 tree
2691 default_preferred_else_value (unsigned, tree type, unsigned, tree *)
2692 {
2693 return build_zero_cst (type);
2694 }
2696 /* Default implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE. */
2697 bool
2698 default_have_speculation_safe_value (bool active ATTRIBUTE_UNUSED)
2699 {
2700 #ifdef HAVE_speculation_barrier
2701 return active ? HAVE_speculation_barrier : true;
2702 #else
2703 return false;
2704 #endif
2705 }
2706 /* Alternative implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE
2707 that can be used on targets that never have speculative execution. */
2708 bool
2709 speculation_safe_value_not_needed (bool active)
2710 {
2711 return !active;
2712 }
2714 /* Default implementation of the speculation-safe-load builtin. This
2715 implementation simply copies val to result and generates a
2716 speculation_barrier insn, if such a pattern is defined. */
2717 rtx
2718 default_speculation_safe_value (machine_mode mode ATTRIBUTE_UNUSED,
2719 rtx result, rtx val,
2720 rtx failval ATTRIBUTE_UNUSED)
2721 {
2722 emit_move_insn (result, val);
2724 #ifdef HAVE_speculation_barrier
2725 /* Assume the target knows what it is doing: if it defines a
2726 speculation barrier, but it is not enabled, then assume that one
2727 isn't needed. */
2728 if (HAVE_speculation_barrier)
2729 emit_insn (gen_speculation_barrier ());
2730 #endif
2732 return result;
2733 }
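/* For reference, this hook backs __builtin_speculation_safe_value.  An
   illustrative use, hardening a bounds-checked load against Spectre-style
   misspeculation (sketch only):  */
#if 0
int
load_checked (int *array, unsigned long idx, unsigned long bound)
{
  if (idx < bound)
    /* Under misspeculation past the bounds check, the result is forced
       to the failval argument (0 here) instead of the loaded value.  */
    return __builtin_speculation_safe_value (array[idx], 0);
  return 0;
}
#endif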
2735 /* How many bits to shift in order to access the tag bits.
2736 The default is to store the tag in the top 8 bits of a 64 bit pointer, hence
2737 shifting 56 bits will leave just the tag. */
2738 #define HWASAN_SHIFT (GET_MODE_PRECISION (Pmode) - 8)
2739 #define HWASAN_SHIFT_RTX GEN_INT (HWASAN_SHIFT)
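/* On a 64-bit Pmode target the defaults above give HWASAN_SHIFT == 56,
   i.e. the layout

	bits 63..56  tag
	bits 55..0   address bits

   so tagging the (made-up) address 0x0000ffffabcd0000 with tag 0x2a
   yields 0x2a00ffffabcd0000.  */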
2741 bool
2742 default_memtag_can_tag_addresses ()
2743 {
2744 return false;
2745 }
2747 uint8_t
2748 default_memtag_tag_size ()
2749 {
2750 return 8;
2751 }
2753 uint8_t
2754 default_memtag_granule_size ()
2755 {
2756 return 16;
2757 }
2759 /* The default implementation of TARGET_MEMTAG_INSERT_RANDOM_TAG. */
2760 rtx
2761 default_memtag_insert_random_tag (rtx untagged, rtx target)
2762 {
2763 gcc_assert (param_hwasan_instrument_stack);
2764 if (param_hwasan_random_frame_tag)
2765 {
2766 rtx fn = init_one_libfunc ("__hwasan_generate_tag");
2767 rtx new_tag = emit_library_call_value (fn, NULL_RTX, LCT_NORMAL, QImode);
2768 return targetm.memtag.set_tag (untagged, new_tag, target);
2769 }
2770 else
2771 {
2772 /* NOTE: The kernel API does not have __hwasan_generate_tag exposed.
2773 In the future we may add the option to emit random tags with inline
2774 instrumentation instead of function calls. This would be the same
2775 between the kernel and userland. */
2776 return untagged;
2777 }
2778 }
2780 /* The default implementation of TARGET_MEMTAG_ADD_TAG. */
2781 rtx
2782 default_memtag_add_tag (rtx base, poly_int64 offset, uint8_t tag_offset)
2783 {
2784 /* Need to look into what the most efficient code sequence is.
2785 This is a code sequence that would be emitted *many* times, so we
2786 want it as small as possible.
2788 There are two places where tag overflow is a question:
2789 - Tagging the shadow stack.
2790 (both tagging and untagging).
2791 - Tagging addressable pointers.
2793 We need to ensure both behaviors are the same (i.e. that the tag that
2794 ends up in a pointer after "overflowing" the tag bits with a tag addition
2795 is the same that ends up in the shadow space).
2797 The aim is that the behavior of tag addition should follow modulo
2798 wrapping in both instances.
2800 The libhwasan code doesn't have any path that increments a pointer's tag,
2801 which means it has no opinion on what happens when a tag increment
2802 overflows (and hence we can choose our own behavior). */
2804 offset += ((uint64_t)tag_offset << HWASAN_SHIFT);
2805 return plus_constant (Pmode, base, offset);
2806 }
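/* Concretely: with the 8-bit tags used here, adding TAG_OFFSET 3 to a
   pointer whose current tag is 0xfe makes the addition carry out of the
   top of the register, leaving tag 0x01.  That is the modulo wrapping
   described above, and the same result the shadow-space tagging gives.  */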
2808 /* The default implementation of TARGET_MEMTAG_SET_TAG. */
2809 rtx
2810 default_memtag_set_tag (rtx untagged, rtx tag, rtx target)
2811 {
2812 gcc_assert (GET_MODE (untagged) == Pmode && GET_MODE (tag) == QImode);
2813 tag = expand_simple_binop (Pmode, ASHIFT, tag, HWASAN_SHIFT_RTX, NULL_RTX,
2814 /* unsignedp = */1, OPTAB_WIDEN);
2815 rtx ret = expand_simple_binop (Pmode, IOR, untagged, tag, target,
2816 /* unsignedp = */1, OPTAB_DIRECT);
2817 gcc_assert (ret);
2818 return ret;
2819 }
2821 /* The default implementation of TARGET_MEMTAG_EXTRACT_TAG. */
2822 rtx
2823 default_memtag_extract_tag (rtx tagged_pointer, rtx target)
2824 {
2825 rtx tag = expand_simple_binop (Pmode, LSHIFTRT, tagged_pointer,
2826 HWASAN_SHIFT_RTX, target,
2827 /* unsignedp = */0,
2828 OPTAB_DIRECT);
2829 rtx ret = gen_lowpart (QImode, tag);
2830 gcc_assert (ret);
2831 return ret;
2832 }
2834 /* The default implementation of TARGET_MEMTAG_UNTAGGED_POINTER. */
2835 rtx
2836 default_memtag_untagged_pointer (rtx tagged_pointer, rtx target)
2837 {
2838 rtx tag_mask = gen_int_mode ((HOST_WIDE_INT_1U << HWASAN_SHIFT) - 1, Pmode);
2839 rtx untagged_base = expand_simple_binop (Pmode, AND, tagged_pointer,
2840 tag_mask, target, true,
2841 OPTAB_DIRECT);
2842 gcc_assert (untagged_base);
2843 return untagged_base;
2844 }
2846 #include "gt-targhooks.h"