1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2025 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
24 #include "basic-block.h"
28 #include "dominance.h"
33 #include "tree-pass.h"
35 #include "gimple-pretty-print.h"
36 #include "fold-const.h"
38 #include "gimple-iterator.h"
40 #include "tree-ssa-loop-manip.h"
41 #include "tree-ssa-loop-niter.h"
42 #include "tree-into-ssa.h"
44 #include "tree-scalar-evolution.h"
45 #include "tree-ssa-propagate.h"
47 #include "vr-values.h"
48 #include "gimple-array-bounds.h"
49 #include "gimple-range.h"
50 #include "gimple-range-path.h"
51 #include "value-pointer-equiv.h"
52 #include "gimple-fold.h"
54 #include "tree-ssa-dce.h"
55 #include "alloc-pool.h"
57 #include "symbol-summary.h"
58 #include "ipa-utils.h"
63 #include "diagnostic-core.h"
65 // This class is utilized by VRP and ranger to remove __builtin_unreachable
66 // calls, and reflect any resulting global ranges.
68 // maybe_register() is called on condition statements , and if that
69 // matches the pattern of one branch being a builtin_unreachable, either check
70 // for early removal or register the resulting executable edge in a list.
72 // During early/non-final processing, we check to see if ALL exports from the
73 // block can be safely updated with a new global value. If they can, then
74 // we rewrite the condition and update those values immediately. Otherwise
75 // the unreachable condition is left in the IL until the final pass.
77 // During final processing, after all blocks have been registered,
78 // remove_and_update_globals() will
79 // - check all exports from registered blocks
80 // - ensure the cache entry of each export is set with the appropriate range
81 // - rewrite the conditions to take the executable edge
82 // - perform DCE on any feeding instructions to those rewritten conditions
84 // Then each of the immediate use chain of each export is walked, and a new
85 // global range created by unioning the ranges at all remaining use locations.
// Tracks conditions guarding __builtin_unreachable calls so they can be
// removed and their implied ranges folded into SSA_NAME global ranges.
// NOTE(review): this extract is missing original source lines (numbering
// gaps, absent braces/access specifiers); code kept byte-identical.
87 class remove_unreachable
{
// Constructor: remember the range query and whether this is the final
// VRP pass (final_p), and pre-allocate the edge list.
89 remove_unreachable (range_query
&r
, bool all
) : m_ranger (r
), final_p (all
)
90 { m_list
.create (30); }
91 ~remove_unreachable () { m_list
.release (); }
// Early-pass handling of an unreachable-guarding condition S on edge E.
92 void handle_early (gimple
*s
, edge e
);
// Examine condition S; record its executable edge if one arm is unreachable.
93 void maybe_register (gimple
*s
);
95 bool remove_and_update_globals ();
// Registered edges stored as (src block index, dest block index) pairs,
// since edge pointers may be invalidated before final processing.
96 vec
<std::pair
<int, int> > m_list
;
97 range_query
&m_ranger
;
101 // Check if block BB has a __builtin_unreachable () call on one arm, and
102 // register the executable edge if so.
// NOTE(review): lines missing from this extract (return type, early exits,
// the un0/un1 disambiguation and the handle_early dispatch); surviving
// code kept byte-identical.
105 remove_unreachable::maybe_register (gimple
*s
)
// Only GIMPLE_COND statements are registered.
107 gcc_checking_assert (gimple_code (s
) == GIMPLE_COND
);
108 basic_block bb
= gimple_bb (s
);
// Classify each successor: an arm is "unreachable" when its destination
// block has no successors and consists of a __builtin_unreachable call.
110 edge e0
= EDGE_SUCC (bb
, 0);
111 basic_block bb0
= e0
->dest
;
112 bool un0
= EDGE_COUNT (bb0
->succs
) == 0
113 && gimple_seq_unreachable_p (bb_seq (bb0
));
114 edge e1
= EDGE_SUCC (bb
, 1);
115 basic_block bb1
= e1
->dest
;
116 bool un1
= EDGE_COUNT (bb1
->succs
) == 0
117 && gimple_seq_unreachable_p (bb_seq (bb1
));
119 // If the 2 blocks are not different, ignore.
123 // Constant expressions are ignored.
124 if (TREE_CODE (gimple_cond_lhs (s
)) != SSA_NAME
125 && TREE_CODE (gimple_cond_rhs (s
)) != SSA_NAME
)
// E is the edge that remains executable (the non-unreachable arm).
128 edge e
= un0
? e1
: e0
;
// Record by block indices; edges themselves may not survive until the
// final pass.
133 m_list
.safe_push (std::make_pair (e
->src
->index
, e
->dest
->index
));
136 // Return true if all uses of NAME are dominated by block BB. 1 use
137 // is allowed in block BB, This is one we hope to remove.
141 // goto <bb 3>; [0.00%]
142 // Any additional use of _1 or _2 in this block invalidates early replacement.
// NOTE(review): extract is missing the function's return type, the
// early-return bodies and the final return; code kept byte-identical.
145 fully_replaceable (tree name
, basic_block bb
)
148 imm_use_iterator iter
;
// Tracks whether we have already seen the single permitted use in BB.
149 bool saw_in_bb
= false;
151 // If a name loads from memory, we may lose information used in
152 // commoning opportunities later. See tree-ssa/ssa-pre-34.c.
153 gimple
*def_stmt
= SSA_NAME_DEF_STMT (name
);
// A VUSE on the defining statement means NAME loads from memory;
// presumably this rejects the name — TODO confirm against full source.
154 if (gimple_vuse (def_stmt
))
// Walk every immediate use of NAME and verify domination by BB.
157 FOR_EACH_IMM_USE_FAST (use_p
, iter
, name
)
159 gimple
*use_stmt
= USE_STMT (use_p
);
160 // Ignore debug stmts and the branch.
161 if (is_gimple_debug (use_stmt
))
163 basic_block use_bb
= gimple_bb (use_stmt
);
164 // Only one use in the block allowed to avoid complicated cases.
// Any use not dominated by BB makes early replacement unsafe.
172 else if (!dominated_by_p (CDI_DOMINATORS
, use_bb
, bb
))
178 // This routine is called to check builtin_unreachable calls during any
179 // time before final removal. The only way we can be sure it does not
180 // provide any additional information is to expect that we can update the
181 // global values of all exports from a block. This means the branch
182 // to the unreachable call must dominate all uses of those ssa-names, with
183 // the exception that there can be a single use in the block containing
184 // the branch. IF the name used in the branch is defined in the block, it may
185 // contain the name of something else that will be an export. And likewise
186 // that may also use another name that is an export etc.
188 // As long as there is only a single use, we can be sure that there are no other
189 // side effects (like being passed to a call, or stored to a global, etc.
190 // This means we will miss cases where there are 2 or more uses that have
191 // no interveneing statements that may had side effects, but it catches most
192 // of the caes we care about, and prevents expensive in depth analysis.
194 // Ranger will still reflect the proper ranges at other places in these missed
195 // cases, we simply will not remove/set globals early.
// NOTE(review): extract is missing the return type, several early returns,
// loop braces and declarations (e.g. of `name` and `dce`); code kept
// byte-identical.
198 remove_unreachable::handle_early (gimple
*s
, edge e
)
200 // If there is no gori_ssa, there is no early processsing.
201 if (!m_ranger
.gori_ssa ())
203 bool lhs_p
= TREE_CODE (gimple_cond_lhs (s
)) == SSA_NAME
;
204 bool rhs_p
= TREE_CODE (gimple_cond_rhs (s
)) == SSA_NAME
;
205 // Do not remove __builtin_unreachable if it confers a relation, or
206 // that relation may be lost in subsequent passes.
209 // Do not remove addresses early. ie if (x == &y)
210 if (lhs_p
&& TREE_CODE (gimple_cond_rhs (s
)) == ADDR_EXPR
)
// Sanity: S must be the sole range-producing statement of its block,
// and this path is only taken before the final pass.
213 gcc_checking_assert (gimple_outgoing_range_stmt_p (e
->src
) == s
);
214 gcc_checking_assert (!final_p
);
216 // Check if every export use is dominated by this branch.
218 FOR_EACH_GORI_EXPORT_NAME (m_ranger
.gori_ssa (), e
->src
, name
)
220 if (!fully_replaceable (name
, e
->src
))
224 // Set the global value for each.
225 FOR_EACH_GORI_EXPORT_NAME (m_ranger
.gori_ssa (), e
->src
, name
)
// Compute the range on entry to the surviving destination; that is the
// range implied by taking the executable edge.
227 value_range
r (TREE_TYPE (name
));
228 m_ranger
.range_on_entry (r
, e
->dest
, name
);
229 // Nothing at this late stage we can do if the write fails.
230 if (!set_range_info (name
, r
))
234 tree ssa
= lhs_p
? gimple_cond_lhs (s
) : gimple_cond_rhs (s
);
236 // Rewrite the condition.
// Fold the branch so it always takes the executable edge E.
237 if (e
->flags
& EDGE_TRUE_VALUE
)
238 gimple_cond_make_true (as_a
<gcond
*> (s
));
240 gimple_cond_make_false (as_a
<gcond
*> (s
));
243 // If the name on S is defined in this block, see if there is DCE work to do.
244 if (gimple_bb (SSA_NAME_DEF_STMT (ssa
)) == e
->src
)
247 bitmap_set_bit (dce
, SSA_NAME_VERSION (ssa
));
248 simple_dce_from_worklist (dce
);
252 // Process the edges in the list, change the conditions and removing any
253 // dead code feeding those conditions. This removes the unreachables, but
254 // makes no attempt to set globals values.
// NOTE(review): extract is missing the return type, loop body braces,
// declarations of `i`/`eb`, and the else-branch of the name lookup; code
// kept byte-identical.
257 remove_unreachable::remove ()
// Only the final pass does blanket removal, and only if edges were recorded.
259 if (!final_p
|| m_list
.length () == 0)
264 for (i
= 0; i
< m_list
.length (); i
++)
// Reconstitute the edge from the stored (src, dest) block indices.
267 basic_block src
= BASIC_BLOCK_FOR_FN (cfun
, eb
.first
);
268 basic_block dest
= BASIC_BLOCK_FOR_FN (cfun
, eb
.second
);
271 edge e
= find_edge (src
, dest
);
272 gimple
*s
= gimple_outgoing_range_stmt_p (e
->src
);
273 gcc_checking_assert (gimple_code (s
) == GIMPLE_COND
);
// Prefer the LHS name; presumably falls back to the RHS when the LHS is
// not a suitable SSA name — TODO confirm against full source.
275 tree name
= gimple_range_ssa_p (gimple_cond_lhs (s
));
277 name
= gimple_range_ssa_p (gimple_cond_rhs (s
));
278 // Check if global value can be set for NAME.
279 if (name
&& fully_replaceable (name
, src
))
281 value_range
r (TREE_TYPE (name
));
282 if (gori_name_on_edge (r
, name
, e
, &m_ranger
))
283 set_range_info (name
, r
);
287 // Rewrite the condition.
288 if (e
->flags
& EDGE_TRUE_VALUE
)
289 gimple_cond_make_true (as_a
<gcond
*> (s
));
291 gimple_cond_make_false (as_a
<gcond
*> (s
));
299 // Process the edges in the list, change the conditions and removing any
300 // dead code feeding those conditions. Calculate the range of any
301 // names that may have been exported from those blocks, and determine if
302 // there is any updates to their global ranges..
303 // Return true if any builtin_unreachables/globals eliminated/updated.
// NOTE(review): extract is missing the return type, several declarations
// (`i`, `eb`, `bi`, `dce`, `name`, `change`), loop braces and returns;
// code kept byte-identical.
306 remove_unreachable::remove_and_update_globals ()
308 if (m_list
.length () == 0)
311 // If there is no import/export info, Do basic removal.
312 if (!m_ranger
.gori_ssa ())
// Accumulates every SSA name exported from any registered block.
319 auto_bitmap all_exports
;
320 for (i
= 0; i
< m_list
.length (); i
++)
323 basic_block src
= BASIC_BLOCK_FOR_FN (cfun
, eb
.first
);
324 basic_block dest
= BASIC_BLOCK_FOR_FN (cfun
, eb
.second
);
327 edge e
= find_edge (src
, dest
);
328 gimple
*s
= gimple_outgoing_range_stmt_p (e
->src
);
329 gcc_checking_assert (gimple_code (s
) == GIMPLE_COND
);
331 bool dominate_exit_p
= true;
332 FOR_EACH_GORI_EXPORT_NAME (m_ranger
.gori_ssa (), e
->src
, name
)
334 // Ensure the cache is set for NAME in the succ block.
335 value_range
r(TREE_TYPE (name
));
336 value_range
ex(TREE_TYPE (name
));
337 m_ranger
.range_on_entry (r
, e
->dest
, name
);
338 m_ranger
.range_on_entry (ex
, EXIT_BLOCK_PTR_FOR_FN (cfun
), name
);
339 // If the range produced by this __builtin_unreachacble expression
340 // is not fully reflected in the range at exit, then it does not
341 // dominate the exit of the function.
// intersect () returns true when it changed EX, i.e. R refined it.
342 if (ex
.intersect (r
))
343 dominate_exit_p
= false;
346 // If the exit is dominated, add to the export list. Otherwise if this
347 // isn't the final VRP pass, leave the call in the IL.
349 bitmap_ior_into (all_exports
,
350 m_ranger
.gori_ssa ()->exports (e
->src
));
355 // Rewrite the condition.
356 if (e
->flags
& EDGE_TRUE_VALUE
)
357 gimple_cond_make_true (as_a
<gcond
*> (s
));
359 gimple_cond_make_false (as_a
<gcond
*> (s
));
363 if (bitmap_empty_p (all_exports
))
365 // Invoke DCE on all exported names to eliminate dead feeding defs.
367 bitmap_copy (dce
, all_exports
);
368 // Don't attempt to DCE parameters.
369 EXECUTE_IF_SET_IN_BITMAP (all_exports
, 0, i
, bi
)
370 if (!ssa_name (i
) || SSA_NAME_IS_DEFAULT_DEF (ssa_name (i
)))
371 bitmap_clear_bit (dce
, i
);
372 simple_dce_from_worklist (dce
);
374 // Loop over all uses of each name and find maximal range. This is the
// new global range for the name after unreachable removal.
377 imm_use_iterator iter
;
378 EXECUTE_IF_SET_IN_BITMAP (all_exports
, 0, i
, bi
)
// Skip names DCE just freed.
381 if (!name
|| SSA_NAME_IN_FREE_LIST (name
))
383 value_range
r (TREE_TYPE (name
));
384 value_range
exp_range (TREE_TYPE (name
));
// Union the range of NAME at every remaining (non-debug) use site.
386 FOR_EACH_IMM_USE_FAST (use_p
, iter
, name
)
388 gimple
*use_stmt
= USE_STMT (use_p
);
389 if (is_gimple_debug (use_stmt
))
391 if (!m_ranger
.range_of_expr (exp_range
, name
, use_stmt
))
392 exp_range
.set_varying (TREE_TYPE (name
));
393 r
.union_ (exp_range
);
397 // Include the on-exit range to ensure non-dominated unreachables
398 // don't incorrectly impact the global range.
399 m_ranger
.range_on_entry (exp_range
, EXIT_BLOCK_PTR_FOR_FN (cfun
), name
);
400 r
.union_ (exp_range
);
// A varying or undefined union carries no information worth storing.
401 if (r
.varying_p () || r
.undefined_p ())
403 if (!set_range_info (name
, r
))
410 /* VR_TYPE describes a range with minimum value *MIN and maximum
411 value *MAX. Restrict the range to the set of values that have
412 no bits set outside NONZERO_BITS. Update *MIN and *MAX and
413 return the new range type.
415 SGN gives the sign of the values described by the range. */
// NOTE(review): extract is missing braces, several returns and the bound
// updates between the assertions; code kept byte-identical.
417 enum value_range_kind
418 intersect_range_with_nonzero_bits (enum value_range_kind vr_type
,
419 wide_int
*min
, wide_int
*max
,
420 const wide_int
&nonzero_bits
,
423 if (vr_type
== VR_ANTI_RANGE
)
425 /* The VR_ANTI_RANGE is equivalent to the union of the ranges
426 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS
427 to create an inclusive upper bound for A and an inclusive lower
429 wide_int a_max
= wi::round_down_for_mask (*min
- 1, nonzero_bits
);
430 wide_int b_min
= wi::round_up_for_mask (*max
+ 1, nonzero_bits
);
432 /* If the calculation of A_MAX wrapped, A is effectively empty
433 and A_MAX is the highest value that satisfies NONZERO_BITS.
434 Likewise if the calculation of B_MIN wrapped, B is effectively
435 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */
436 bool a_empty
= wi::ge_p (a_max
, *min
, sgn
);
437 bool b_empty
= wi::le_p (b_min
, *max
, sgn
);
439 /* If both A and B are empty, there are no valid values. */
440 if (a_empty
&& b_empty
)
443 /* If exactly one of A or B is empty, return a VR_RANGE for the
445 if (a_empty
|| b_empty
)
449 gcc_checking_assert (wi::le_p (*min
, *max
, sgn
));
453 /* Update the VR_ANTI_RANGE bounds. */
456 gcc_checking_assert (wi::le_p (*min
, *max
, sgn
));
458 /* Now check whether the excluded range includes any values that
459 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */
460 if (wi::round_up_for_mask (*min
, nonzero_bits
) == b_min
)
// Widen to the full representable range of the precision/sign.
462 unsigned int precision
= min
->get_precision ();
463 *min
= wi::min_value (precision
, sgn
);
464 *max
= wi::max_value (precision
, sgn
);
468 if (vr_type
== VR_RANGE
|| vr_type
== VR_VARYING
)
// Tighten both bounds to the nearest values allowed by the mask.
470 *max
= wi::round_down_for_mask (*max
, nonzero_bits
);
472 /* Check that the range contains at least one valid value. */
473 if (wi::gt_p (*min
, *max
, sgn
))
476 *min
= wi::round_up_for_mask (*min
, nonzero_bits
);
477 gcc_checking_assert (wi::le_p (*min
, *max
, sgn
));
482 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
483 otherwise. We only handle additive operations and set NEG to true if the
484 symbol is negated and INV to the invariant part, if any. */
// NOTE(review): extract is missing the return type, the neg_/inv_
// declarations and initializations, and the out-parameter stores/return;
// code kept byte-identical.
487 get_single_symbol (tree t
, bool *neg
, tree
*inv
)
// Peel one additive layer: CST +/- NAME or NAME +/- CST.
495 if (TREE_CODE (t
) == PLUS_EXPR
496 || TREE_CODE (t
) == POINTER_PLUS_EXPR
497 || TREE_CODE (t
) == MINUS_EXPR
)
// Invariant on the left: CST - NAME marks the symbol as negated.
499 if (is_gimple_min_invariant (TREE_OPERAND (t
, 0)))
501 neg_
= (TREE_CODE (t
) == MINUS_EXPR
);
502 inv_
= TREE_OPERAND (t
, 0);
503 t
= TREE_OPERAND (t
, 1);
// Invariant on the right: NAME op CST.
505 else if (is_gimple_min_invariant (TREE_OPERAND (t
, 1)))
508 inv_
= TREE_OPERAND (t
, 1);
509 t
= TREE_OPERAND (t
, 0);
// A top-level negation also flips the sign of the symbol.
520 if (TREE_CODE (t
) == NEGATE_EXPR
)
522 t
= TREE_OPERAND (t
, 0);
// After peeling, only a bare SSA_NAME qualifies.
526 if (TREE_CODE (t
) != SSA_NAME
)
// Strip any overflow flag from the invariant before returning it.
529 if (inv_
&& TREE_OVERFLOW_P (inv_
))
530 inv_
= drop_tree_overflow (inv_
);
537 /* Compare two values VAL1 and VAL2. Return
539 -2 if VAL1 and VAL2 cannot be compared at compile-time,
542 +1 if VAL1 > VAL2, and
545 This is similar to tree_int_cst_compare but supports pointer values
546 and values that cannot be compared at compile time.
548 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
549 true if the return value is only valid if we assume that signed
550 overflow is undefined. */
// NOTE(review): extract is missing the return type, many return
// statements, braces and the neg1/inv1/neg2/inv2 declarations; code kept
// byte-identical.
553 compare_values_warnv (tree val1
, tree val2
, bool *strict_overflow_p
)
558 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
560 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1
))
561 == POINTER_TYPE_P (TREE_TYPE (val2
)));
563 /* Convert the two values into the same type. This is needed because
564 sizetype causes sign extension even for unsigned types. */
565 if (!useless_type_conversion_p (TREE_TYPE (val1
), TREE_TYPE (val2
)))
566 val2
= fold_convert (TREE_TYPE (val1
), val2
);
568 const bool overflow_undefined
569 = INTEGRAL_TYPE_P (TREE_TYPE (val1
))
570 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1
));
// Decompose each value into [-]SYM + INV form, if possible.
573 tree sym1
= get_single_symbol (val1
, &neg1
, &inv1
);
574 tree sym2
= get_single_symbol (val2
, &neg2
, &inv2
);
576 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
577 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
580 /* Both values must use the same name with the same sign. */
581 if (sym1
!= sym2
|| neg1
!= neg2
)
584 /* [-]NAME + CST == [-]NAME + CST. */
588 /* If overflow is defined we cannot simplify more. */
589 if (!overflow_undefined
)
// Flag that this comparison is only valid under -fstrict-overflow.
592 if (strict_overflow_p
!= NULL
593 /* Symbolic range building sets the no-warning bit to declare
594 that overflow doesn't happen. */
595 && (!inv1
|| !warning_suppressed_p (val1
, OPT_Woverflow
))
596 && (!inv2
|| !warning_suppressed_p (val2
, OPT_Woverflow
)))
597 *strict_overflow_p
= true;
// Treat a missing invariant part as zero so the wide-int compare works.
600 inv1
= build_int_cst (TREE_TYPE (val1
), 0);
602 inv2
= build_int_cst (TREE_TYPE (val2
), 0);
604 return wi::cmp (wi::to_wide (inv1
), wi::to_wide (inv2
),
605 TYPE_SIGN (TREE_TYPE (val1
)));
608 const bool cst1
= is_gimple_min_invariant (val1
);
609 const bool cst2
= is_gimple_min_invariant (val2
);
611 /* If one is of the form '[-]NAME + CST' and the other is constant, then
612 it might be possible to say something depending on the constants. */
613 if ((sym1
&& inv1
&& cst2
) || (sym2
&& inv2
&& cst1
))
615 if (!overflow_undefined
)
618 if (strict_overflow_p
!= NULL
619 /* Symbolic range building sets the no-warning bit to declare
620 that overflow doesn't happen. */
621 && (!sym1
|| !warning_suppressed_p (val1
, OPT_Woverflow
))
622 && (!sym2
|| !warning_suppressed_p (val2
, OPT_Woverflow
)))
623 *strict_overflow_p
= true;
625 const signop sgn
= TYPE_SIGN (TREE_TYPE (val1
));
626 tree cst
= cst1
? val1
: val2
;
627 tree inv
= cst1
? inv2
: inv1
;
629 /* Compute the difference between the constants. If it overflows or
630 underflows, this means that we can trivially compare the NAME with
631 it and, consequently, the two values with each other. */
632 wide_int diff
= wi::to_wide (cst
) - wi::to_wide (inv
);
633 if (wi::cmp (0, wi::to_wide (inv
), sgn
)
634 != wi::cmp (diff
, wi::to_wide (cst
), sgn
))
// Negate the result when the constant was the second operand.
636 const int res
= wi::cmp (wi::to_wide (cst
), wi::to_wide (inv
), sgn
);
637 return cst1
? res
: -res
;
643 /* We cannot say anything more for non-constants. */
// Integral (non-pointer) constant comparison path.
647 if (!POINTER_TYPE_P (TREE_TYPE (val1
)))
649 /* We cannot compare overflowed values. */
650 if (TREE_OVERFLOW (val1
) || TREE_OVERFLOW (val2
))
653 if (TREE_CODE (val1
) == INTEGER_CST
654 && TREE_CODE (val2
) == INTEGER_CST
)
655 return tree_int_cst_compare (val1
, val2
);
// POLY_INT constants compare only when the relation holds for all
// possible runtime values (known_eq/lt/gt).
657 if (poly_int_tree_p (val1
) && poly_int_tree_p (val2
))
659 if (known_eq (wi::to_poly_widest (val1
),
660 wi::to_poly_widest (val2
)))
662 if (known_lt (wi::to_poly_widest (val1
),
663 wi::to_poly_widest (val2
)))
665 if (known_gt (wi::to_poly_widest (val1
),
666 wi::to_poly_widest (val2
)))
// Pointer comparison path below.
674 if (TREE_CODE (val1
) == INTEGER_CST
&& TREE_CODE (val2
) == INTEGER_CST
)
676 /* We cannot compare overflowed values. */
677 if (TREE_OVERFLOW (val1
) || TREE_OVERFLOW (val2
))
680 return tree_int_cst_compare (val1
, val2
);
683 /* First see if VAL1 and VAL2 are not the same. */
684 if (operand_equal_p (val1
, val2
, 0))
// Suppress overflow warnings while speculatively folding comparisons.
687 fold_defer_overflow_warnings ();
689 /* If VAL1 is a lower address than VAL2, return -1. */
690 tree t
= fold_binary_to_constant (LT_EXPR
, boolean_type_node
, val1
, val2
);
691 if (t
&& integer_onep (t
))
693 fold_undefer_and_ignore_overflow_warnings ();
697 /* If VAL1 is a higher address than VAL2, return +1. */
698 t
= fold_binary_to_constant (LT_EXPR
, boolean_type_node
, val2
, val1
);
699 if (t
&& integer_onep (t
))
701 fold_undefer_and_ignore_overflow_warnings ();
705 /* If VAL1 is different than VAL2, return +2. */
706 t
= fold_binary_to_constant (NE_EXPR
, boolean_type_node
, val1
, val2
);
707 fold_undefer_and_ignore_overflow_warnings ();
708 if (t
&& integer_onep (t
))
715 /* Compare values like compare_values_warnv. */
// Convenience wrapper that discards the strict-overflow flag.
// NOTE(review): extract is missing the return type and the `sop`
// declaration; code kept byte-identical.
718 compare_values (tree val1
, tree val2
)
721 return compare_values_warnv (val1
, val2
, &sop
);
724 /* Helper for overflow_comparison_p
726 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
727 OP1's defining statement to see if it ultimately has the form
728 OP0 CODE (OP0 PLUS INTEGER_CST)
730 If so, return TRUE indicating this is an overflow test and store into
731 *NEW_CST an updated constant that can be used in a narrowed range test.
733 REVERSED indicates if the comparison was originally:
737 This affects how we build the updated constant. */
// NOTE(review): extract is missing the return type, braces, the
// target-match check and the final returns; code kept byte-identical.
740 overflow_comparison_p_1 (enum tree_code code
, tree op0
, tree op1
,
741 bool reversed
, tree
*new_cst
)
743 /* See if this is a relational operation between two SSA_NAMES with
744 unsigned, overflow wrapping values. If so, check it more deeply. */
745 if ((code
== LT_EXPR
|| code
== LE_EXPR
746 || code
== GE_EXPR
|| code
== GT_EXPR
)
747 && TREE_CODE (op0
) == SSA_NAME
748 && TREE_CODE (op1
) == SSA_NAME
749 && INTEGRAL_TYPE_P (TREE_TYPE (op0
))
750 && TYPE_UNSIGNED (TREE_TYPE (op0
))
751 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0
)))
753 gimple
*op1_def
= SSA_NAME_DEF_STMT (op1
);
755 /* Now look at the defining statement of OP1 to see if it adds
756 or subtracts a nonzero constant from another operand. */
758 && is_gimple_assign (op1_def
)
759 && gimple_assign_rhs_code (op1_def
) == PLUS_EXPR
760 && TREE_CODE (gimple_assign_rhs2 (op1_def
)) == INTEGER_CST
761 && !integer_zerop (gimple_assign_rhs2 (op1_def
)))
763 tree target
= gimple_assign_rhs1 (op1_def
);
765 /* If we did not find our target SSA_NAME, then this is not
// an overflow test (rest of comment lost in this extract).
// Build the narrowed-range constant: MAX +/- INC depending on the
// direction of the original comparison.
770 tree type
= TREE_TYPE (op0
);
771 wide_int max
= wi::max_value (TYPE_PRECISION (type
), UNSIGNED
);
772 tree inc
= gimple_assign_rhs2 (op1_def
);
774 *new_cst
= wide_int_to_tree (type
, max
+ wi::to_wide (inc
));
776 *new_cst
= wide_int_to_tree (type
, max
- wi::to_wide (inc
));
783 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
784 OP1's defining statement to see if it ultimately has the form
785 OP0 CODE (OP0 PLUS INTEGER_CST)
787 If so, return TRUE indicating this is an overflow test and store into
788 *NEW_CST an updated constant that can be used in a narrowed range test.
790 These statements are left as-is in the IL to facilitate discovery of
791 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
792 the alternate range representation is often useful within VRP. */
// Try the comparison both ways: as written, then with operands swapped
// and the comparison code reversed.
// NOTE(review): extract is missing the return type and the tail of the
// second call's argument list; code kept byte-identical.
795 overflow_comparison_p (tree_code code
, tree name
, tree val
, tree
*new_cst
)
797 if (overflow_comparison_p_1 (code
, name
, val
, false, new_cst
))
799 return overflow_comparison_p_1 (swap_tree_comparison (code
), val
, name
,
803 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
804 that includes the value VAL. The search is restricted to the range
805 [START_IDX, n - 1] where n is the size of VEC.
807 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
810 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
811 it is placed in IDX and false is returned.
813 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
// returned (comment tail lost in this extract). */
// NOTE(review): extract is missing the return type, the low/high/t/cmp
// declarations, the binary-search narrowing steps and the returns; code
// kept byte-identical.
817 find_case_label_index (gswitch
*stmt
, size_t start_idx
, tree val
, size_t *idx
)
819 size_t n
= gimple_switch_num_labels (stmt
);
822 /* Find case label for minimum of the value range or the next one.
823 At each iteration we are searching in [low, high - 1]. */
// Standard binary search over the sorted case-label vector.
825 for (low
= start_idx
, high
= n
; high
!= low
; )
829 /* Note that i != high, so we never ask for n. */
830 size_t i
= (high
+ low
) / 2;
831 t
= gimple_switch_label (stmt
, i
);
833 /* Cache the result of comparing CASE_LOW and val. */
834 cmp
= tree_int_cst_compare (CASE_LOW (t
), val
);
838 /* Ranges cannot be empty. */
// VAL falls inside a range label when CASE_HIGH exists and bounds it.
847 if (CASE_HIGH (t
) != NULL
848 && tree_int_cst_compare (CASE_HIGH (t
), val
) >= 0)
860 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
861 for values between MIN and MAX. The first index is placed in MIN_IDX. The
862 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
863 then MAX_IDX < MIN_IDX.
864 Returns true if the default label is not needed. */
// NOTE(review): extract is missing the return type, the max_idx
// parameter line, i/j/k/low/high declarations, index adjustments and the
// out-parameter stores; code kept byte-identical.
867 find_case_label_range (gswitch
*stmt
, tree min
, tree max
, size_t *min_idx
,
// Locate the first label at/after MIN and the last at/before MAX.
871 bool min_take_default
= !find_case_label_index (stmt
, 1, min
, &i
);
872 bool max_take_default
= !find_case_label_index (stmt
, i
, max
, &j
);
878 /* Only the default case label reached.
879 Return an empty range. */
886 bool take_default
= min_take_default
|| max_take_default
;
890 if (max_take_default
)
893 /* If the case label range is continuous, we do not need
894 the default case label. Verify that. */
895 high
= CASE_LOW (gimple_switch_label (stmt
, i
));
896 if (CASE_HIGH (gimple_switch_label (stmt
, i
)))
897 high
= CASE_HIGH (gimple_switch_label (stmt
, i
));
// Walk the labels checking each starts exactly one past the previous end.
898 for (k
= i
+ 1; k
<= j
; ++k
)
900 low
= CASE_LOW (gimple_switch_label (stmt
, k
));
901 if (!integer_onep (int_const_binop (MINUS_EXPR
, low
, high
)))
907 if (CASE_HIGH (gimple_switch_label (stmt
, k
)))
908 high
= CASE_HIGH (gimple_switch_label (stmt
, k
));
913 return !take_default
;
917 /* Given a SWITCH_STMT, return the case label that encompasses the
918 known possible values for the switch operand. RANGE_OF_OP is a
919 range for the known values of the switch operand. */
// NOTE(review): extract is missing the return type, i/j declarations,
// several early returns and braces; code kept byte-identical.
922 find_case_label_range (gswitch
*switch_stmt
, const irange
*range_of_op
)
// No usable range information: cannot single out a label.
924 if (range_of_op
->undefined_p ()
925 || range_of_op
->varying_p ())
929 tree op
= gimple_switch_index (switch_stmt
);
930 tree type
= TREE_TYPE (op
);
// Convert the operand range bounds to trees for label-range lookup.
931 tree tmin
= wide_int_to_tree (type
, range_of_op
->lower_bound ());
932 tree tmax
= wide_int_to_tree (type
, range_of_op
->upper_bound ());
933 find_case_label_range (switch_stmt
, tmin
, tmax
, &i
, &j
);
936 /* Look for exactly one label that encompasses the range of
938 tree label
= gimple_switch_label (switch_stmt
, i
);
// A label's high bound is CASE_HIGH when it is a range, else CASE_LOW.
940 = CASE_HIGH (label
) ? CASE_HIGH (label
) : CASE_LOW (label
);
941 wide_int wlow
= wi::to_wide (CASE_LOW (label
));
942 wide_int whigh
= wi::to_wide (case_high
);
943 int_range_max
label_range (TREE_TYPE (case_high
), wlow
, whigh
);
944 if (!types_compatible_p (label_range
.type (), range_of_op
->type ()))
945 range_cast (label_range
, range_of_op
->type ());
// The label covers the operand iff intersecting changes nothing.
946 label_range
.intersect (*range_of_op
);
947 if (label_range
== *range_of_op
)
952 /* If there are no labels at all, take the default. */
953 return gimple_switch_label (switch_stmt
, 0);
957 /* Otherwise, there are various labels that can encompass
958 the range of operand. In which case, see if the range of
959 the operand is entirely *outside* the bounds of all the
960 (non-default) case labels. If so, take the default. */
961 unsigned n
= gimple_switch_num_labels (switch_stmt
);
962 tree min_label
= gimple_switch_label (switch_stmt
, 1);
963 tree max_label
= gimple_switch_label (switch_stmt
, n
- 1);
964 tree case_high
= CASE_HIGH (max_label
);
966 case_high
= CASE_LOW (max_label
);
967 int_range_max
label_range (TREE_TYPE (CASE_LOW (min_label
)),
968 wi::to_wide (CASE_LOW (min_label
)),
969 wi::to_wide (case_high
));
970 if (!types_compatible_p (label_range
.type (), range_of_op
->type ()))
971 range_cast (label_range
, range_of_op
->type ());
972 label_range
.intersect (*range_of_op
);
// Empty intersection: operand can never hit a non-default label.
973 if (label_range
.undefined_p ())
974 return gimple_switch_label (switch_stmt
, 0);
985 // This is a ranger based folder which continues to use the dominator
986 // walk to access the substitute and fold machinery. Ranges are calculated
// on demand through the ranger (comment tail lost in this extract).
// NOTE(review): extract is missing access specifiers, the destructor,
// braces and several member initializers; code kept byte-identical.
989 class rvrp_folder
: public substitute_and_fold_engine
993 rvrp_folder (gimple_ranger
*r
, bool all
)
994 : substitute_and_fold_engine (),
995 m_unreachable (*r
, all
),
996 m_simplifier (r
, r
->non_executable_edge_flag
)
// Pointer-equivalence tracking supplements ranger for pointer ssa-names.
999 m_pta
= new pointer_equiv_analyzer (m_ranger
);
1000 m_last_bb_stmt
= NULL
;
// substitute_and_fold_engine callback: constant/equivalence for NAME at S.
1008 tree
value_of_expr (tree name
, gimple
*s
= NULL
) override
1010 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
1011 if (TREE_CODE (name
) == SSA_NAME
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name
))
1013 tree ret
= m_ranger
->value_of_expr (name
, s
);
// Fall back to pointer-equivalence when ranger found no constant.
1014 if (!ret
&& supported_pointer_equiv_p (name
))
1015 ret
= m_pta
->get_equiv (name
);
// Callback: value of NAME as seen on edge E.
1019 tree
value_on_edge (edge e
, tree name
) override
1021 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
1022 if (TREE_CODE (name
) == SSA_NAME
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name
))
1024 tree ret
= m_ranger
->value_on_edge (e
, name
);
1025 if (!ret
&& supported_pointer_equiv_p (name
))
1026 ret
= m_pta
->get_equiv (name
);
// Callback: value produced by statement S (for NAME, if given).
1030 tree
value_of_stmt (gimple
*s
, tree name
= NULL
) override
1032 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
1033 if (TREE_CODE (name
) == SSA_NAME
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name
))
1035 return m_ranger
->value_of_stmt (s
, name
);
// Called by the engine when entering BB in the dominator walk.
1038 void pre_fold_bb (basic_block bb
) override
// Register inferred ranges from each PHI up front.
1041 for (gphi_iterator gsi
= gsi_start_phis (bb
); !gsi_end_p (gsi
);
1043 m_ranger
->register_inferred_ranges (gsi
.phi ());
// Remember the last real stmt so pre_fold_stmt can detect end-of-block.
1044 m_last_bb_stmt
= last_nondebug_stmt (bb
);
1047 void post_fold_bb (basic_block bb
) override
// Called before folding each statement.
1052 void pre_fold_stmt (gimple
*stmt
) override
1054 m_pta
->visit_stmt (stmt
);
1055 // If this is the last stmt and there are inferred ranges, reparse the
1056 // block for transitive inferred ranges that occur earlier in the block.
1057 if (stmt
== m_last_bb_stmt
)
1059 m_ranger
->register_transitive_inferred_ranges (gimple_bb (stmt
));
1060 // Also check for builtin_unreachable calls.
// Unreachable-removal only applies after inlining, on GIMPLE_COND stmts.
1061 if (cfun
->after_inlining
&& gimple_code (stmt
) == GIMPLE_COND
)
1062 m_unreachable
.maybe_register (stmt
);
// Fold the statement at GSI: try the simplifier first, then ranger.
1066 bool fold_stmt (gimple_stmt_iterator
*gsi
) override
1068 bool ret
= m_simplifier
.simplify (gsi
);
1070 ret
= m_ranger
->fold_stmt (gsi
, follow_single_use_edges
);
1071 m_ranger
->register_inferred_ranges (gsi_stmt (*gsi
));
// Public so execute_ranger_vrp can run final unreachable processing.
1075 remove_unreachable m_unreachable
;
1077 DISABLE_COPY_AND_ASSIGN (rvrp_folder
);
1078 gimple_ranger
*m_ranger
;
1079 simplify_using_ranges m_simplifier
;
1080 pointer_equiv_analyzer
*m_pta
;
1081 gimple
*m_last_bb_stmt
;
1084 /* Main entry point for a VRP pass using just ranger. This can be called
1085 from anywhere to perform a VRP pass, including from EVRP. */
// NOTE(review): extract is missing the return type, several declarations
// (e, ei, found), braces, the scev_reset call and the return value; code
// kept byte-identical.
1088 execute_ranger_vrp (struct function
*fun
, bool final_p
)
// Set up loops, loop-closed SSA and dominators before ranging.
1090 loop_optimizer_init (LOOPS_NORMAL
| LOOPS_HAVE_RECORDED_EXITS
);
1091 rewrite_into_loop_closed_ssa (NULL
, TODO_update_ssa
);
1093 calculate_dominance_info (CDI_DOMINATORS
);
1095 set_all_edges_as_executable (fun
);
1096 gimple_ranger
*ranger
= enable_ranger (fun
, false);
1097 rvrp_folder
folder (ranger
, final_p
);
1098 phi_analysis_initialize (ranger
->const_query ());
// The dominator-walk substitute-and-fold pass does the bulk of VRP.
1099 folder
.substitute_and_fold ();
1100 // Ensure the cache in SCEV has been cleared before processing
1101 // globals to be removed.
1103 // Remove tagged builtin-unreachable and maybe update globals.
1104 folder
.m_unreachable
.remove_and_update_globals ();
1105 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1106 ranger
->dump (dump_file
);
// IPA bookkeeping: compute the union of all return-value ranges so
// callers can exploit it (skipped for "noipa" functions).
1108 if (value_range::supports_type_p (TREE_TYPE
1109 (TREE_TYPE (current_function_decl
)))
1111 && !lookup_attribute ("noipa", DECL_ATTRIBUTES (current_function_decl
)))
1116 value_range
return_range (TREE_TYPE (TREE_TYPE (current_function_decl
)));
// Inspect every return statement feeding the exit block.
1117 FOR_EACH_EDGE (e
, ei
, EXIT_BLOCK_PTR_FOR_FN (cfun
)->preds
)
1118 if (greturn
*ret
= dyn_cast
<greturn
*> (*gsi_last_bb (e
->src
)))
1120 tree retval
= gimple_return_retval (ret
);
// A return with no usable value forces VARYING for the whole function.
1123 return_range
.set_varying (TREE_TYPE (TREE_TYPE (current_function_decl
)));
1127 value_range
r (TREE_TYPE (retval
));
1128 if (ranger
->range_of_expr (r
, retval
, ret
)
1129 && !r
.undefined_p ()
1135 return_range
.union_ (r
);
1138 return_range
.set_varying (TREE_TYPE (retval
));
1141 if (found
&& !return_range
.varying_p ())
1143 ipa_record_return_value_range (return_range
);
// A provably nonzero pointer return earns a "returns_nonnull" attribute.
1144 if (POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl
)))
1145 && return_range
.nonzero_p ()
1146 && cgraph_node::get (current_function_decl
)
1147 ->add_detected_attribute ("returns_nonnull"))
1148 warn_function_returns_nonnull (current_function_decl
);
// Tear down in reverse order of setup.
1152 phi_analysis_finalize ();
1153 disable_ranger (fun
);
1155 loop_optimizer_finalize ();
1159 // Implement a Fast VRP folder. Not quite as effective but faster.
1161 class fvrp_folder
: public substitute_and_fold_engine
1164 fvrp_folder (dom_ranger
*dr
, bool final_p
) : substitute_and_fold_engine (),
1169 m_unreachable
= new remove_unreachable (*dr
, final_p
);
1171 m_unreachable
= NULL
;
1176 tree
value_of_expr (tree name
, gimple
*s
= NULL
) override
1178 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
1179 if (TREE_CODE (name
) == SSA_NAME
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name
))
1181 return m_dom_ranger
->value_of_expr (name
, s
);
1184 tree
value_on_edge (edge e
, tree name
) override
1186 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
1187 if (TREE_CODE (name
) == SSA_NAME
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name
))
1189 return m_dom_ranger
->value_on_edge (e
, name
);
1192 tree
value_of_stmt (gimple
*s
, tree name
= NULL
) override
1194 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
1195 if (TREE_CODE (name
) == SSA_NAME
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name
))
1197 return m_dom_ranger
->value_of_stmt (s
, name
);
1200 void pre_fold_bb (basic_block bb
) override
1202 m_dom_ranger
->pre_bb (bb
);
1203 // Now process the PHIs in advance.
1204 gphi_iterator psi
= gsi_start_phis (bb
);
1205 for ( ; !gsi_end_p (psi
); gsi_next (&psi
))
1207 tree name
= gimple_range_ssa_p (PHI_RESULT (psi
.phi ()));
1210 value_range
vr(TREE_TYPE (name
));
1211 m_dom_ranger
->range_of_stmt (vr
, psi
.phi (), name
);
1216 void post_fold_bb (basic_block bb
) override
1218 m_dom_ranger
->post_bb (bb
);
1221 void pre_fold_stmt (gimple
*s
) override
1223 // Ensure range_of_stmt has been called.
1224 tree type
= gimple_range_type (s
);
1227 value_range
vr(type
);
1228 m_dom_ranger
->range_of_stmt (vr
, s
);
1230 if (m_unreachable
&& gimple_code (s
) == GIMPLE_COND
)
1231 m_unreachable
->maybe_register (s
);
1235 bool fold_stmt (gimple_stmt_iterator
*gsi
) override
1237 bool ret
= m_simplifier
.simplify (gsi
);
1239 ret
= ::fold_stmt (gsi
, follow_single_use_edges
);
1243 remove_unreachable
*m_unreachable
;
1245 DISABLE_COPY_AND_ASSIGN (fvrp_folder
);
1246 simplify_using_ranges m_simplifier
;
1247 dom_ranger
*m_dom_ranger
;
1251 // Main entry point for a FAST VRP pass using a dom ranger.
1254 execute_fast_vrp (struct function
*fun
, bool final_p
)
1256 calculate_dominance_info (CDI_DOMINATORS
);
1258 fvrp_folder
folder (&dr
, final_p
);
1260 gcc_checking_assert (!fun
->x_range_query
);
1261 set_all_edges_as_executable (fun
);
1262 fun
->x_range_query
= &dr
;
1263 // Create a relation oracle without transitives.
1264 get_range_query (fun
)->create_relation_oracle (false);
1266 folder
.substitute_and_fold ();
1267 if (folder
.m_unreachable
)
1268 folder
.m_unreachable
->remove ();
1270 get_range_query (fun
)->destroy_relation_oracle ();
1271 fun
->x_range_query
= NULL
;
1277 const pass_data pass_data_vrp
=
1279 GIMPLE_PASS
, /* type */
1281 OPTGROUP_NONE
, /* optinfo_flags */
1282 TV_TREE_VRP
, /* tv_id */
1283 PROP_ssa
, /* properties_required */
1284 0, /* properties_provided */
1285 0, /* properties_destroyed */
1286 0, /* todo_flags_start */
1287 ( TODO_cleanup_cfg
| TODO_update_ssa
), /* todo_flags_finish */
1290 const pass_data pass_data_early_vrp
=
1292 GIMPLE_PASS
, /* type */
1294 OPTGROUP_NONE
, /* optinfo_flags */
1295 TV_TREE_EARLY_VRP
, /* tv_id */
1296 PROP_ssa
, /* properties_required */
1297 0, /* properties_provided */
1298 0, /* properties_destroyed */
1299 0, /* todo_flags_start */
1300 ( TODO_cleanup_cfg
| TODO_update_ssa
| TODO_verify_all
),
1303 const pass_data pass_data_fast_vrp
=
1305 GIMPLE_PASS
, /* type */
1307 OPTGROUP_NONE
, /* optinfo_flags */
1308 TV_TREE_FAST_VRP
, /* tv_id */
1309 PROP_ssa
, /* properties_required */
1310 0, /* properties_provided */
1311 0, /* properties_destroyed */
1312 0, /* todo_flags_start */
1313 ( TODO_cleanup_cfg
| TODO_update_ssa
| TODO_verify_all
),
1317 class pass_vrp
: public gimple_opt_pass
1320 pass_vrp (gcc::context
*ctxt
, const pass_data
&data_
)
1321 : gimple_opt_pass (data_
, ctxt
), data (data_
), final_p (false)
1324 /* opt_pass methods: */
1325 opt_pass
* clone () final override
1326 { return new pass_vrp (m_ctxt
, data
); }
1327 void set_pass_param (unsigned int n
, bool param
) final override
1329 gcc_assert (n
== 0);
1332 bool gate (function
*) final override
{ return flag_tree_vrp
!= 0; }
1333 unsigned int execute (function
*fun
) final override
1335 // Check for fast vrp.
1336 bool use_fvrp
= (&data
== &pass_data_fast_vrp
);
1337 if (!use_fvrp
&& last_basic_block_for_fn (fun
) > param_vrp_block_limit
)
1340 warning (OPT_Wdisabled_optimization
,
1341 "using fast VRP algorithm; %d basic blocks"
1342 " exceeds %<--param=vrp-block-limit=%d%> limit",
1343 n_basic_blocks_for_fn (fun
),
1344 param_vrp_block_limit
);
1347 return execute_fast_vrp (fun
, final_p
);
1348 return execute_ranger_vrp (fun
, final_p
);
1352 const pass_data
&data
;
1354 }; // class pass_vrp
1358 make_pass_vrp (gcc::context
*ctxt
)
1360 return new pass_vrp (ctxt
, pass_data_vrp
);
1364 make_pass_early_vrp (gcc::context
*ctxt
)
1366 return new pass_vrp (ctxt
, pass_data_early_vrp
);
1370 make_pass_fast_vrp (gcc::context
*ctxt
)
1372 return new pass_vrp (ctxt
, pass_data_fast_vrp
);