gcc/tree-ssa-loop-ivopts.cc
1 /* Induction variable optimizations.
2 Copyright (C) 2003-2022 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This pass tries to find the optimal set of induction variables for the loop.
21 It optimizes just the basic linear induction variables (although adding
22 support for other types should not be too hard). It includes the
23 optimizations commonly known as strength reduction, induction variable
24 coalescing and induction variable elimination. It does it in the
25 following steps:
27 1) The interesting uses of induction variables are found. This includes
29 -- uses of induction variables in non-linear expressions
30 -- addresses of arrays
31 -- comparisons of induction variables
33 Note the interesting uses are categorized and handled in groups.
34 Generally, address type uses are grouped together if their iv bases
35 differ only in constant offset.
37 2) Candidates for the induction variables are found. This includes
39 -- old induction variables
40 -- the variables defined by expressions derived from the "interesting
41 groups/uses" above
43 3) The optimal (w.r.t. a cost function) set of variables is chosen. The
44 cost function assigns a cost to sets of induction variables and consists
45 of three parts:
47 -- The group/use costs. Each of the interesting groups/uses chooses
48 the best induction variable in the set and adds its cost to the sum.
49 The cost reflects the time spent on modifying the induction variables
50 value to be usable for the given purpose (adding base and offset for
51 arrays, etc.).
52 -- The variable costs. Each of the variables has a cost assigned that
53 reflects the costs associated with incrementing the value of the
54 variable. The original variables are somewhat preferred.
55 -- The set cost. Depending on the size of the set, extra cost may be
56 added to reflect register pressure.
58 All the costs are defined in a machine-specific way, using the target
59 hooks and machine descriptions to determine them.
61 4) The trees are transformed to use the new variables, the dead code is
62 removed.
64 All of this is done loop by loop. Doing it globally is theoretically
65 possible; it might give better performance and it might enable us
66 to decide costs more precisely, but getting all the interactions right
67 would be complicated.
69 For targets supporting low-overhead loops, IVOPTs has to take care of
70 the loops which will probably be transformed by the RTL doloop optimization,
71 to try to make the selected IV candidate set optimal. The process of doloop
72 support includes:
74 1) Analyze whether the current loop will be transformed to a doloop; if so,
75 find and mark its compare type IV use as a doloop use (iv_group field
76 doloop_p), and set flag doloop_use_p of ivopts_data to notify subsequent
77 processing on doloop. See analyze_and_mark_doloop_use and its callees for
78 the details. The target hook predict_doloop_p can be used for target specific checks.
80 2) Add one doloop dedicated IV cand {(may_be_zero ? 1 : (niter + 1)), +, -1},
81 set flag doloop_p of iv_cand; its step cost is set to zero and, like a biv,
82 it gets no extra cost. For cost determination between doloop IV cand and IV
83 use, the target hooks doloop_cost_for_generic and doloop_cost_for_address
84 are provided to add extra costs for generic type and address type IV uses.
85 Zero cost is assigned to the pair between the doloop IV cand and the doloop
86 IV use, and bound zero is set for IV elimination.
88 3) With the cost setting in step 2), the current cost model based IV
89 selection algorithm will proceed as usual, picking up the doloop dedicated
90 IV if profitable. */
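/* An illustrative sketch, not part of the original sources: for a simple loop

     for (i = 0; i < n; i++)
       a[i] = b[i];

   (assuming a and b are arrays of the same element type), the strength
   reduction and iv elimination described above can, roughly, end up
   iterating on pointers instead of the index:

     pa = &a[0]; pb = &b[0]; end = &b[n];
     while (pb != end)
       *pa++ = *pb++;

   The actual result depends on the target addressing modes and the cost
   model; this only illustrates the kind of transformation the pass aims
   for.  */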
92 #include "config.h"
93 #include "system.h"
94 #include "coretypes.h"
95 #include "backend.h"
96 #include "rtl.h"
97 #include "tree.h"
98 #include "gimple.h"
99 #include "cfghooks.h"
100 #include "tree-pass.h"
101 #include "memmodel.h"
102 #include "tm_p.h"
103 #include "ssa.h"
104 #include "expmed.h"
105 #include "insn-config.h"
106 #include "emit-rtl.h"
107 #include "recog.h"
108 #include "cgraph.h"
109 #include "gimple-pretty-print.h"
110 #include "alias.h"
111 #include "fold-const.h"
112 #include "stor-layout.h"
113 #include "tree-eh.h"
114 #include "gimplify.h"
115 #include "gimple-iterator.h"
116 #include "gimplify-me.h"
117 #include "tree-cfg.h"
118 #include "tree-ssa-loop-ivopts.h"
119 #include "tree-ssa-loop-manip.h"
120 #include "tree-ssa-loop-niter.h"
121 #include "tree-ssa-loop.h"
122 #include "explow.h"
123 #include "expr.h"
124 #include "tree-dfa.h"
125 #include "tree-ssa.h"
126 #include "cfgloop.h"
127 #include "tree-scalar-evolution.h"
128 #include "tree-affine.h"
129 #include "tree-ssa-propagate.h"
130 #include "tree-ssa-address.h"
131 #include "builtins.h"
132 #include "tree-vectorizer.h"
133 #include "dbgcnt.h"
135 /* For lang_hooks.types.type_for_mode. */
136 #include "langhooks.h"
138 /* FIXME: Expressions are expanded to RTL in this pass to determine the
139 cost of different addressing modes. This should be moved to a TBD
140 interface between the GIMPLE and RTL worlds. */
142 /* The infinite cost. */
143 #define INFTY 1000000000
145 /* Returns the expected number of loop iterations for LOOP.
146 The average trip count is computed from profile data if it
147 exists. */
149 static inline HOST_WIDE_INT
150 avg_loop_niter (class loop *loop)
152 HOST_WIDE_INT niter = estimated_stmt_executions_int (loop);
153 if (niter == -1)
155 niter = likely_max_stmt_executions_int (loop);
157 if (niter == -1 || niter > param_avg_loop_niter)
158 return param_avg_loop_niter;
161 return niter;
164 struct iv_use;
166 /* Representation of the induction variable. */
167 struct iv
169 tree base; /* Initial value of the iv. */
170 tree base_object; /* A memory object to which the induction variable points. */
171 tree step; /* Step of the iv (constant only). */
172 tree ssa_name; /* The ssa name with the value. */
173 struct iv_use *nonlin_use; /* The use in a nonlinear expression, if there is one. */
174 bool biv_p; /* Is it a biv? */
175 bool no_overflow; /* True if the iv doesn't overflow. */
176 bool have_address_use;/* For biv, indicate if it's used in any address
177 type use. */
180 /* Per-ssa version information (induction variable descriptions, etc.). */
181 struct version_info
183 tree name; /* The ssa name. */
184 struct iv *iv; /* Induction variable description. */
185 bool has_nonlin_use; /* For a loop-level invariant, whether it is used in
186 an expression that is not an induction variable. */
187 bool preserve_biv; /* For the original biv, whether to preserve it. */
188 unsigned inv_id; /* Id of an invariant. */
191 /* Types of uses. */
192 enum use_type
194 USE_NONLINEAR_EXPR, /* Use in a nonlinear expression. */
195 USE_REF_ADDRESS, /* Use is an address for an explicit memory
196 reference. */
197 USE_PTR_ADDRESS, /* Use is a pointer argument to a function in
198 cases where the expansion of the function
199 will turn the argument into a normal address. */
200 USE_COMPARE /* Use is a compare. */
203 /* Cost of a computation. */
204 class comp_cost
206 public:
207 comp_cost (): cost (0), complexity (0), scratch (0)
210 comp_cost (int64_t cost, unsigned complexity, int64_t scratch = 0)
211 : cost (cost), complexity (complexity), scratch (scratch)
214 /* Returns true if COST is infinite. */
215 bool infinite_cost_p ();
217 /* Adds costs COST1 and COST2. */
218 friend comp_cost operator+ (comp_cost cost1, comp_cost cost2);
220 /* Adds COST to the comp_cost. */
221 comp_cost operator+= (comp_cost cost);
223 /* Adds constant C to this comp_cost. */
224 comp_cost operator+= (HOST_WIDE_INT c);
226 /* Subtracts constant C from this comp_cost. */
227 comp_cost operator-= (HOST_WIDE_INT c);
229 /* Divide the comp_cost by constant C. */
230 comp_cost operator/= (HOST_WIDE_INT c);
232 /* Multiply the comp_cost by constant C. */
233 comp_cost operator*= (HOST_WIDE_INT c);
235 /* Subtracts cost COST2 from COST1. */
236 friend comp_cost operator- (comp_cost cost1, comp_cost cost2);
238 /* Subtracts COST from this comp_cost. */
239 comp_cost operator-= (comp_cost cost);
241 /* Returns true if COST1 is smaller than COST2. */
242 friend bool operator< (comp_cost cost1, comp_cost cost2);
244 /* Returns true if COST1 and COST2 are equal. */
245 friend bool operator== (comp_cost cost1, comp_cost cost2);
247 /* Returns true if COST1 is smaller than or equal to COST2. */
248 friend bool operator<= (comp_cost cost1, comp_cost cost2);
250 int64_t cost; /* The runtime cost. */
251 unsigned complexity; /* The estimate of the complexity of the code for
252 the computation (in no concrete units --
253 complexity field should be larger for more
254 complex expressions and addressing modes). */
255 int64_t scratch; /* Scratch used during cost computation. */
258 static const comp_cost no_cost;
259 static const comp_cost infinite_cost (INFTY, 0, INFTY);
261 bool
262 comp_cost::infinite_cost_p ()
264 return cost == INFTY;
267 comp_cost
268 operator+ (comp_cost cost1, comp_cost cost2)
270 if (cost1.infinite_cost_p () || cost2.infinite_cost_p ())
271 return infinite_cost;
273 gcc_assert (cost1.cost + cost2.cost < infinite_cost.cost);
274 cost1.cost += cost2.cost;
275 cost1.complexity += cost2.complexity;
277 return cost1;
280 comp_cost
281 operator- (comp_cost cost1, comp_cost cost2)
283 if (cost1.infinite_cost_p ())
284 return infinite_cost;
286 gcc_assert (!cost2.infinite_cost_p ());
287 gcc_assert (cost1.cost - cost2.cost < infinite_cost.cost);
289 cost1.cost -= cost2.cost;
290 cost1.complexity -= cost2.complexity;
292 return cost1;
295 comp_cost
296 comp_cost::operator+= (comp_cost cost)
298 *this = *this + cost;
299 return *this;
302 comp_cost
303 comp_cost::operator+= (HOST_WIDE_INT c)
305 if (c >= INFTY)
306 this->cost = INFTY;
308 if (infinite_cost_p ())
309 return *this;
311 gcc_assert (this->cost + c < infinite_cost.cost);
312 this->cost += c;
314 return *this;
317 comp_cost
318 comp_cost::operator-= (HOST_WIDE_INT c)
320 if (infinite_cost_p ())
321 return *this;
323 gcc_assert (this->cost - c < infinite_cost.cost);
324 this->cost -= c;
326 return *this;
329 comp_cost
330 comp_cost::operator/= (HOST_WIDE_INT c)
332 gcc_assert (c != 0);
333 if (infinite_cost_p ())
334 return *this;
336 this->cost /= c;
338 return *this;
341 comp_cost
342 comp_cost::operator*= (HOST_WIDE_INT c)
344 if (infinite_cost_p ())
345 return *this;
347 gcc_assert (this->cost * c < infinite_cost.cost);
348 this->cost *= c;
350 return *this;
353 comp_cost
354 comp_cost::operator-= (comp_cost cost)
356 *this = *this - cost;
357 return *this;
360 bool
361 operator< (comp_cost cost1, comp_cost cost2)
363 if (cost1.cost == cost2.cost)
364 return cost1.complexity < cost2.complexity;
366 return cost1.cost < cost2.cost;
369 bool
370 operator== (comp_cost cost1, comp_cost cost2)
372 return cost1.cost == cost2.cost
373 && cost1.complexity == cost2.complexity;
376 bool
377 operator<= (comp_cost cost1, comp_cost cost2)
379 return cost1 < cost2 || cost1 == cost2;
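/* A small usage sketch, illustrative only and not part of the original
   sources: costs saturate at INFTY and compare first by runtime cost,
   then by complexity, e.g.

     comp_cost a (4, 1), b (4, 2);
     gcc_checking_assert (a < b);   // equal cost, lower complexity wins
     gcc_checking_assert ((a + infinite_cost).infinite_cost_p ());

   Adding anything to infinite_cost therefore stays infinite.  */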
382 struct iv_inv_expr_ent;
384 /* The candidate - cost pair. */
385 class cost_pair
387 public:
388 struct iv_cand *cand; /* The candidate. */
389 comp_cost cost; /* The cost. */
390 enum tree_code comp; /* For iv elimination, the comparison. */
391 bitmap inv_vars; /* The list of invariant ssa_vars that have to be
392 preserved when representing iv_use with iv_cand. */
393 bitmap inv_exprs; /* The list of newly created invariant expressions
394 when representing iv_use with iv_cand. */
395 tree value; /* For final value elimination, the expression for
396 the final value of the iv. For iv elimination,
397 the new bound to compare with. */
400 /* Use. */
401 struct iv_use
403 unsigned id; /* The id of the use. */
404 unsigned group_id; /* The group id the use belongs to. */
405 enum use_type type; /* Type of the use. */
406 tree mem_type; /* The memory type to use when testing whether an
407 address is legitimate, and what the address's
408 cost is. */
409 struct iv *iv; /* The induction variable it is based on. */
410 gimple *stmt; /* Statement in that it occurs. */
411 tree *op_p; /* The place where it occurs. */
413 tree addr_base; /* Base address with const offset stripped. */
414 poly_uint64_pod addr_offset;
415 /* Const offset stripped from base address. */
418 /* Group of uses. */
419 struct iv_group
421 /* The id of the group. */
422 unsigned id;
423 /* Uses of the group are of the same type. */
424 enum use_type type;
425 /* The set of "related" IV candidates, plus the important ones. */
426 bitmap related_cands;
427 /* Number of IV candidates in the cost_map. */
428 unsigned n_map_members;
429 /* The costs with respect to the iv candidates. */
430 class cost_pair *cost_map;
431 /* The selected candidate for the group. */
432 struct iv_cand *selected;
433 /* To indicate this is a doloop use group. */
434 bool doloop_p;
435 /* Uses in the group. */
436 vec<struct iv_use *> vuses;
439 /* The position where the iv is computed. */
440 enum iv_position
442 IP_NORMAL, /* At the end, just before the exit condition. */
443 IP_END, /* At the end of the latch block. */
444 IP_BEFORE_USE, /* Immediately before a specific use. */
445 IP_AFTER_USE, /* Immediately after a specific use. */
446 IP_ORIGINAL /* The original biv. */
449 /* The induction variable candidate. */
450 struct iv_cand
452 unsigned id; /* The number of the candidate. */
453 bool important; /* Whether this is an "important" candidate, i.e. such
454 that it should be considered by all uses. */
455 bool involves_undefs; /* Whether the IV involves undefined values. */
456 ENUM_BITFIELD(iv_position) pos : 8; /* Where it is computed. */
457 gimple *incremented_at;/* For original biv, the statement where it is
458 incremented. */
459 tree var_before; /* The variable used for it before increment. */
460 tree var_after; /* The variable used for it after increment. */
461 struct iv *iv; /* The value of the candidate. NULL for
462 "pseudocandidate" used to indicate the possibility
463 to replace the final value of an iv by direct
464 computation of the value. */
465 unsigned cost; /* Cost of the candidate. */
466 unsigned cost_step; /* Cost of the candidate's increment operation. */
467 struct iv_use *ainc_use; /* For IP_{BEFORE,AFTER}_USE candidates, the place
468 where it is incremented. */
469 bitmap inv_vars; /* The list of invariant ssa_vars used in step of the
470 iv_cand. */
471 bitmap inv_exprs; /* If step is more complicated than a single ssa_var,
472 handle it as a new invariant expression which will
473 be hoisted out of loop. */
474 struct iv *orig_iv; /* The original iv if this cand is added from biv with
475 smaller type. */
476 bool doloop_p; /* Whether this is a doloop candidate. */
479 /* Hashtable entry for common candidate derived from iv uses. */
480 class iv_common_cand
482 public:
483 tree base;
484 tree step;
485 /* IV uses from which this common candidate is derived. */
486 auto_vec<struct iv_use *> uses;
487 hashval_t hash;
490 /* Hashtable helpers. */
492 struct iv_common_cand_hasher : delete_ptr_hash <iv_common_cand>
494 static inline hashval_t hash (const iv_common_cand *);
495 static inline bool equal (const iv_common_cand *, const iv_common_cand *);
498 /* Hash function for possible common candidates. */
500 inline hashval_t
501 iv_common_cand_hasher::hash (const iv_common_cand *ccand)
503 return ccand->hash;
506 /* Hash table equality function for common candidates. */
508 inline bool
509 iv_common_cand_hasher::equal (const iv_common_cand *ccand1,
510 const iv_common_cand *ccand2)
512 return (ccand1->hash == ccand2->hash
513 && operand_equal_p (ccand1->base, ccand2->base, 0)
514 && operand_equal_p (ccand1->step, ccand2->step, 0)
515 && (TYPE_PRECISION (TREE_TYPE (ccand1->base))
516 == TYPE_PRECISION (TREE_TYPE (ccand2->base))));
519 /* Loop invariant expression hashtable entry. */
521 struct iv_inv_expr_ent
523 /* Tree expression of the entry. */
524 tree expr;
525 /* Unique identifier. */
526 int id;
527 /* Hash value. */
528 hashval_t hash;
531 /* Sort iv_inv_expr_ent pair A and B by id field. */
533 static int
534 sort_iv_inv_expr_ent (const void *a, const void *b)
536 const iv_inv_expr_ent * const *e1 = (const iv_inv_expr_ent * const *) (a);
537 const iv_inv_expr_ent * const *e2 = (const iv_inv_expr_ent * const *) (b);
539 unsigned id1 = (*e1)->id;
540 unsigned id2 = (*e2)->id;
542 if (id1 < id2)
543 return -1;
544 else if (id1 > id2)
545 return 1;
546 else
547 return 0;
550 /* Hashtable helpers. */
552 struct iv_inv_expr_hasher : free_ptr_hash <iv_inv_expr_ent>
554 static inline hashval_t hash (const iv_inv_expr_ent *);
555 static inline bool equal (const iv_inv_expr_ent *, const iv_inv_expr_ent *);
558 /* Return true if uses of type TYPE represent some form of address. */
560 inline bool
561 address_p (use_type type)
563 return type == USE_REF_ADDRESS || type == USE_PTR_ADDRESS;
566 /* Hash function for loop invariant expressions. */
568 inline hashval_t
569 iv_inv_expr_hasher::hash (const iv_inv_expr_ent *expr)
571 return expr->hash;
574 /* Hash table equality function for expressions. */
576 inline bool
577 iv_inv_expr_hasher::equal (const iv_inv_expr_ent *expr1,
578 const iv_inv_expr_ent *expr2)
580 return expr1->hash == expr2->hash
581 && operand_equal_p (expr1->expr, expr2->expr, 0);
584 struct ivopts_data
586 /* The currently optimized loop. */
587 class loop *current_loop;
588 location_t loop_loc;
590 /* Numbers of iterations for all exits of the current loop. */
591 hash_map<edge, tree_niter_desc *> *niters;
593 /* Number of registers used in it. */
594 unsigned regs_used;
596 /* The size of version_info array allocated. */
597 unsigned version_info_size;
599 /* The array of information for the ssa names. */
600 struct version_info *version_info;
602 /* The hashtable of loop invariant expressions created
603 by ivopts. */
604 hash_table<iv_inv_expr_hasher> *inv_expr_tab;
606 /* The bitmap of indices in version_info whose value was changed. */
607 bitmap relevant;
609 /* The uses of induction variables. */
610 vec<iv_group *> vgroups;
612 /* The candidates. */
613 vec<iv_cand *> vcands;
615 /* A bitmap of important candidates. */
616 bitmap important_candidates;
618 /* Cache used by tree_to_aff_combination_expand. */
619 hash_map<tree, name_expansion *> *name_expansion_cache;
621 /* The hashtable of common candidates derived from iv uses. */
622 hash_table<iv_common_cand_hasher> *iv_common_cand_tab;
624 /* The common candidates. */
625 vec<iv_common_cand *> iv_common_cands;
627 /* Hash map recording base object information of tree exp. */
628 hash_map<tree, tree> *base_object_map;
630 /* The maximum invariant variable id. */
631 unsigned max_inv_var_id;
633 /* The maximum invariant expression id. */
634 unsigned max_inv_expr_id;
636 /* Number of no_overflow BIVs which are not used in memory address. */
637 unsigned bivs_not_used_in_addr;
639 /* Obstack for iv structure. */
640 struct obstack iv_obstack;
642 /* Whether to consider just related and important candidates when replacing a
643 use. */
644 bool consider_all_candidates;
646 /* Are we optimizing for speed? */
647 bool speed;
649 /* Whether the loop body includes any function calls. */
650 bool body_includes_call;
652 /* Whether the loop body can only be exited via single exit. */
653 bool loop_single_exit_p;
655 /* Whether the loop has doloop comparison use. */
656 bool doloop_use_p;
659 /* An assignment of iv candidates to uses. */
661 class iv_ca
663 public:
664 /* The number of uses covered by the assignment. */
665 unsigned upto;
667 /* Number of uses that cannot be expressed by the candidates in the set. */
668 unsigned bad_groups;
670 /* Candidate assigned to a use, together with the related costs. */
671 class cost_pair **cand_for_group;
673 /* Number of times each candidate is used. */
674 unsigned *n_cand_uses;
676 /* The candidates used. */
677 bitmap cands;
679 /* The number of candidates in the set. */
680 unsigned n_cands;
682 /* The number of invariants needed, including both invariant variables and
683 invariant expressions. */
684 unsigned n_invs;
686 /* Total cost of expressing uses. */
687 comp_cost cand_use_cost;
689 /* Total cost of candidates. */
690 int64_t cand_cost;
692 /* Number of times each invariant variable is used. */
693 unsigned *n_inv_var_uses;
695 /* Number of times each invariant expression is used. */
696 unsigned *n_inv_expr_uses;
698 /* Total cost of the assignment. */
699 comp_cost cost;
702 /* Difference of two iv candidate assignments. */
704 struct iv_ca_delta
706 /* Changed group. */
707 struct iv_group *group;
709 /* An old assignment (for rollback purposes). */
710 class cost_pair *old_cp;
712 /* A new assignment. */
713 class cost_pair *new_cp;
715 /* Next change in the list. */
716 struct iv_ca_delta *next;
719 /* Bound on number of candidates below that all candidates are considered. */
721 #define CONSIDER_ALL_CANDIDATES_BOUND \
722 ((unsigned) param_iv_consider_all_candidates_bound)
724 /* If there are more iv occurrences, we just give up (it is quite unlikely that
725 optimizing such a loop would help, and it would take ages). */
727 #define MAX_CONSIDERED_GROUPS \
728 ((unsigned) param_iv_max_considered_uses)
730 /* If there are at most this number of ivs in the set, try removing unnecessary
731 ivs from the set always. */
733 #define ALWAYS_PRUNE_CAND_SET_BOUND \
734 ((unsigned) param_iv_always_prune_cand_set_bound)
736 /* The list of trees for which the decl_rtl field must be reset is stored
737 here. */
739 static vec<tree> decl_rtl_to_reset;
741 static comp_cost force_expr_to_var_cost (tree, bool);
743 /* The single loop exit if it dominates the latch, NULL otherwise. */
745 edge
746 single_dom_exit (class loop *loop)
748 edge exit = single_exit (loop);
750 if (!exit)
751 return NULL;
753 if (!just_once_each_iteration_p (loop, exit->src))
754 return NULL;
756 return exit;
759 /* Dumps information about the induction variable IV to FILE. Don't dump the
760 variable's name if DUMP_NAME is FALSE. The information is dumped with
761 preceding spaces indicated by INDENT_LEVEL. */
763 void
764 dump_iv (FILE *file, struct iv *iv, bool dump_name, unsigned indent_level)
766 const char *p;
767 const char spaces[9] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '\0'};
769 if (indent_level > 4)
770 indent_level = 4;
771 p = spaces + 8 - (indent_level << 1);
773 fprintf (file, "%sIV struct:\n", p);
774 if (iv->ssa_name && dump_name)
776 fprintf (file, "%s SSA_NAME:\t", p);
777 print_generic_expr (file, iv->ssa_name, TDF_SLIM);
778 fprintf (file, "\n");
781 fprintf (file, "%s Type:\t", p);
782 print_generic_expr (file, TREE_TYPE (iv->base), TDF_SLIM);
783 fprintf (file, "\n");
785 fprintf (file, "%s Base:\t", p);
786 print_generic_expr (file, iv->base, TDF_SLIM);
787 fprintf (file, "\n");
789 fprintf (file, "%s Step:\t", p);
790 print_generic_expr (file, iv->step, TDF_SLIM);
791 fprintf (file, "\n");
793 if (iv->base_object)
795 fprintf (file, "%s Object:\t", p);
796 print_generic_expr (file, iv->base_object, TDF_SLIM);
797 fprintf (file, "\n");
800 fprintf (file, "%s Biv:\t%c\n", p, iv->biv_p ? 'Y' : 'N');
802 fprintf (file, "%s Overflowness wrto loop niter:\t%s\n",
803 p, iv->no_overflow ? "No-overflow" : "Overflow");
806 /* Dumps information about the USE to FILE. */
808 void
809 dump_use (FILE *file, struct iv_use *use)
811 fprintf (file, " Use %d.%d:\n", use->group_id, use->id);
812 fprintf (file, " At stmt:\t");
813 print_gimple_stmt (file, use->stmt, 0);
814 fprintf (file, " At pos:\t");
815 if (use->op_p)
816 print_generic_expr (file, *use->op_p, TDF_SLIM);
817 fprintf (file, "\n");
818 dump_iv (file, use->iv, false, 2);
821 /* Dumps information about the uses to FILE. */
823 void
824 dump_groups (FILE *file, struct ivopts_data *data)
826 unsigned i, j;
827 struct iv_group *group;
829 for (i = 0; i < data->vgroups.length (); i++)
831 group = data->vgroups[i];
832 fprintf (file, "Group %d:\n", group->id);
833 if (group->type == USE_NONLINEAR_EXPR)
834 fprintf (file, " Type:\tGENERIC\n");
835 else if (group->type == USE_REF_ADDRESS)
836 fprintf (file, " Type:\tREFERENCE ADDRESS\n");
837 else if (group->type == USE_PTR_ADDRESS)
838 fprintf (file, " Type:\tPOINTER ARGUMENT ADDRESS\n");
839 else
841 gcc_assert (group->type == USE_COMPARE);
842 fprintf (file, " Type:\tCOMPARE\n");
844 for (j = 0; j < group->vuses.length (); j++)
845 dump_use (file, group->vuses[j]);
849 /* Dumps information about induction variable candidate CAND to FILE. */
851 void
852 dump_cand (FILE *file, struct iv_cand *cand)
854 struct iv *iv = cand->iv;
856 fprintf (file, "Candidate %d:\n", cand->id);
857 if (cand->inv_vars)
859 fprintf (file, " Depend on inv.vars: ");
860 dump_bitmap (file, cand->inv_vars);
862 if (cand->inv_exprs)
864 fprintf (file, " Depend on inv.exprs: ");
865 dump_bitmap (file, cand->inv_exprs);
868 if (cand->var_before)
870 fprintf (file, " Var befor: ");
871 print_generic_expr (file, cand->var_before, TDF_SLIM);
872 fprintf (file, "\n");
874 if (cand->var_after)
876 fprintf (file, " Var after: ");
877 print_generic_expr (file, cand->var_after, TDF_SLIM);
878 fprintf (file, "\n");
881 switch (cand->pos)
883 case IP_NORMAL:
884 fprintf (file, " Incr POS: before exit test\n");
885 break;
887 case IP_BEFORE_USE:
888 fprintf (file, " Incr POS: before use %d\n", cand->ainc_use->id);
889 break;
891 case IP_AFTER_USE:
892 fprintf (file, " Incr POS: after use %d\n", cand->ainc_use->id);
893 break;
895 case IP_END:
896 fprintf (file, " Incr POS: at end\n");
897 break;
899 case IP_ORIGINAL:
900 fprintf (file, " Incr POS: orig biv\n");
901 break;
904 dump_iv (file, iv, false, 1);
907 /* Returns the info for ssa version VER. */
909 static inline struct version_info *
910 ver_info (struct ivopts_data *data, unsigned ver)
912 return data->version_info + ver;
915 /* Returns the info for ssa name NAME. */
917 static inline struct version_info *
918 name_info (struct ivopts_data *data, tree name)
920 return ver_info (data, SSA_NAME_VERSION (name));
923 /* Returns true if STMT is after the place where the IP_NORMAL ivs will be
924 emitted in LOOP. */
926 static bool
927 stmt_after_ip_normal_pos (class loop *loop, gimple *stmt)
929 basic_block bb = ip_normal_pos (loop), sbb = gimple_bb (stmt);
931 gcc_assert (bb);
933 if (sbb == loop->latch)
934 return true;
936 if (sbb != bb)
937 return false;
939 return stmt == last_stmt (bb);
942 /* Returns true if STMT is after the place where the original induction
943 variable CAND is incremented. If TRUE_IF_EQUAL is set, we return true
944 if the positions are identical. */
946 static bool
947 stmt_after_inc_pos (struct iv_cand *cand, gimple *stmt, bool true_if_equal)
949 basic_block cand_bb = gimple_bb (cand->incremented_at);
950 basic_block stmt_bb = gimple_bb (stmt);
952 if (!dominated_by_p (CDI_DOMINATORS, stmt_bb, cand_bb))
953 return false;
955 if (stmt_bb != cand_bb)
956 return true;
958 if (true_if_equal
959 && gimple_uid (stmt) == gimple_uid (cand->incremented_at))
960 return true;
961 return gimple_uid (stmt) > gimple_uid (cand->incremented_at);
964 /* Returns true if STMT is after the place where the induction variable
965 CAND is incremented in LOOP. */
967 static bool
968 stmt_after_increment (class loop *loop, struct iv_cand *cand, gimple *stmt)
970 switch (cand->pos)
972 case IP_END:
973 return false;
975 case IP_NORMAL:
976 return stmt_after_ip_normal_pos (loop, stmt);
978 case IP_ORIGINAL:
979 case IP_AFTER_USE:
980 return stmt_after_inc_pos (cand, stmt, false);
982 case IP_BEFORE_USE:
983 return stmt_after_inc_pos (cand, stmt, true);
985 default:
986 gcc_unreachable ();
990 /* walk_tree callback for contains_abnormal_ssa_name_p. */
992 static tree
993 contains_abnormal_ssa_name_p_1 (tree *tp, int *walk_subtrees, void *)
995 if (TREE_CODE (*tp) == SSA_NAME
996 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (*tp))
997 return *tp;
999 if (!EXPR_P (*tp))
1000 *walk_subtrees = 0;
1002 return NULL_TREE;
1005 /* Returns true if EXPR contains a ssa name that occurs in an
1006 abnormal phi node. */
1008 bool
1009 contains_abnormal_ssa_name_p (tree expr)
1011 return walk_tree_without_duplicates
1012 (&expr, contains_abnormal_ssa_name_p_1, NULL) != NULL_TREE;
1015 /* Returns the structure describing number of iterations determined from
1016 EXIT of DATA->current_loop, or NULL if something goes wrong. */
1018 static class tree_niter_desc *
1019 niter_for_exit (struct ivopts_data *data, edge exit)
1021 class tree_niter_desc *desc;
1022 tree_niter_desc **slot;
1024 if (!data->niters)
1026 data->niters = new hash_map<edge, tree_niter_desc *>;
1027 slot = NULL;
1029 else
1030 slot = data->niters->get (exit);
1032 if (!slot)
1034 /* Try to determine number of iterations. We cannot safely work with ssa
1035 names that appear in phi nodes on abnormal edges, so that we do not
1036 create overlapping life ranges for them (PR 27283). */
1037 desc = XNEW (class tree_niter_desc);
1038 if (!number_of_iterations_exit (data->current_loop,
1039 exit, desc, true)
1040 || contains_abnormal_ssa_name_p (desc->niter))
1042 XDELETE (desc);
1043 desc = NULL;
1045 data->niters->put (exit, desc);
1047 else
1048 desc = *slot;
1050 return desc;
1053 /* Returns the structure describing number of iterations determined from
1054 single dominating exit of DATA->current_loop, or NULL if something
1055 goes wrong. */
1057 static class tree_niter_desc *
1058 niter_for_single_dom_exit (struct ivopts_data *data)
1060 edge exit = single_dom_exit (data->current_loop);
1062 if (!exit)
1063 return NULL;
1065 return niter_for_exit (data, exit);
1068 /* Initializes data structures used by the iv optimization pass, stored
1069 in DATA. */
1071 static void
1072 tree_ssa_iv_optimize_init (struct ivopts_data *data)
1074 data->version_info_size = 2 * num_ssa_names;
1075 data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
1076 data->relevant = BITMAP_ALLOC (NULL);
1077 data->important_candidates = BITMAP_ALLOC (NULL);
1078 data->max_inv_var_id = 0;
1079 data->max_inv_expr_id = 0;
1080 data->niters = NULL;
1081 data->vgroups.create (20);
1082 data->vcands.create (20);
1083 data->inv_expr_tab = new hash_table<iv_inv_expr_hasher> (10);
1084 data->name_expansion_cache = NULL;
1085 data->base_object_map = NULL;
1086 data->iv_common_cand_tab = new hash_table<iv_common_cand_hasher> (10);
1087 data->iv_common_cands.create (20);
1088 decl_rtl_to_reset.create (20);
1089 gcc_obstack_init (&data->iv_obstack);
1092 /* walk_tree callback for determine_base_object. */
1094 static tree
1095 determine_base_object_1 (tree *tp, int *walk_subtrees, void *wdata)
1097 tree_code code = TREE_CODE (*tp);
1098 tree obj = NULL_TREE;
1099 if (code == ADDR_EXPR)
1101 tree base = get_base_address (TREE_OPERAND (*tp, 0));
1102 if (!base)
1103 obj = *tp;
1104 else if (TREE_CODE (base) != MEM_REF)
1105 obj = fold_convert (ptr_type_node, build_fold_addr_expr (base));
1107 else if (code == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (*tp)))
1108 obj = fold_convert (ptr_type_node, *tp);
1110 if (!obj)
1112 if (!EXPR_P (*tp))
1113 *walk_subtrees = 0;
1115 return NULL_TREE;
1117 /* Record special node for multiple base objects and stop. */
1118 if (*static_cast<tree *> (wdata))
1120 *static_cast<tree *> (wdata) = integer_zero_node;
1121 return integer_zero_node;
1123 /* Record the base object and continue looking. */
1124 *static_cast<tree *> (wdata) = obj;
1125 return NULL_TREE;
1128 /* Returns a memory object to which EXPR points, with caching. Return NULL if
1129 we are able to determine that it does not point to any such object; as a
1130 special case, return integer_zero_node if EXPR contains multiple base objects. */
1132 static tree
1133 determine_base_object (struct ivopts_data *data, tree expr)
1135 tree *slot, obj = NULL_TREE;
1136 if (data->base_object_map)
1138 if ((slot = data->base_object_map->get(expr)) != NULL)
1139 return *slot;
1141 else
1142 data->base_object_map = new hash_map<tree, tree>;
1144 (void) walk_tree_without_duplicates (&expr, determine_base_object_1, &obj);
1145 data->base_object_map->put (expr, obj);
1146 return obj;
1149 /* Return true if address expression with non-DECL_P operand appears
1150 in EXPR. */
1152 static bool
1153 contain_complex_addr_expr (tree expr)
1155 bool res = false;
1157 STRIP_NOPS (expr);
1158 switch (TREE_CODE (expr))
1160 case POINTER_PLUS_EXPR:
1161 case PLUS_EXPR:
1162 case MINUS_EXPR:
1163 res |= contain_complex_addr_expr (TREE_OPERAND (expr, 0));
1164 res |= contain_complex_addr_expr (TREE_OPERAND (expr, 1));
1165 break;
1167 case ADDR_EXPR:
1168 return (!DECL_P (TREE_OPERAND (expr, 0)));
1170 default:
1171 return false;
1174 return res;
1177 /* Allocates an induction variable with given initial value BASE and step STEP
1178 for loop LOOP. NO_OVERFLOW implies the iv doesn't overflow. */
1180 static struct iv *
1181 alloc_iv (struct ivopts_data *data, tree base, tree step,
1182 bool no_overflow = false)
1184 tree expr = base;
1185 struct iv *iv = (struct iv*) obstack_alloc (&data->iv_obstack,
1186 sizeof (struct iv));
1187 gcc_assert (step != NULL_TREE);
1189 /* Lower address expression in base except ones with DECL_P as operand.
1190 By doing this:
1191 1) More accurate cost can be computed for address expressions;
1192 2) Duplicate candidates won't be created for bases in different
1193 forms, like &a[0] and &a. */
1194 STRIP_NOPS (expr);
1195 if ((TREE_CODE (expr) == ADDR_EXPR && !DECL_P (TREE_OPERAND (expr, 0)))
1196 || contain_complex_addr_expr (expr))
1198 aff_tree comb;
1199 tree_to_aff_combination (expr, TREE_TYPE (expr), &comb);
1200 base = fold_convert (TREE_TYPE (base), aff_combination_to_tree (&comb));
1203 iv->base = base;
1204 iv->base_object = determine_base_object (data, base);
1205 iv->step = step;
1206 iv->biv_p = false;
1207 iv->nonlin_use = NULL;
1208 iv->ssa_name = NULL_TREE;
1209 if (!no_overflow
1210 && !iv_can_overflow_p (data->current_loop, TREE_TYPE (base),
1211 base, step))
1212 no_overflow = true;
1213 iv->no_overflow = no_overflow;
1214 iv->have_address_use = false;
1216 return iv;
1219 /* Sets STEP and BASE for induction variable IV. NO_OVERFLOW implies the IV
1220 doesn't overflow. */
1222 static void
1223 set_iv (struct ivopts_data *data, tree iv, tree base, tree step,
1224 bool no_overflow)
1226 struct version_info *info = name_info (data, iv);
1228 gcc_assert (!info->iv);
1230 bitmap_set_bit (data->relevant, SSA_NAME_VERSION (iv));
1231 info->iv = alloc_iv (data, base, step, no_overflow);
1232 info->iv->ssa_name = iv;
1235 /* Finds induction variable declaration for VAR. */
1237 static struct iv *
1238 get_iv (struct ivopts_data *data, tree var)
1240 basic_block bb;
1241 tree type = TREE_TYPE (var);
1243 if (!POINTER_TYPE_P (type)
1244 && !INTEGRAL_TYPE_P (type))
1245 return NULL;
1247 if (!name_info (data, var)->iv)
1249 bb = gimple_bb (SSA_NAME_DEF_STMT (var));
1251 if (!bb
1252 || !flow_bb_inside_loop_p (data->current_loop, bb))
1254 if (POINTER_TYPE_P (type))
1255 type = sizetype;
1256 set_iv (data, var, var, build_int_cst (type, 0), true);
1260 return name_info (data, var)->iv;
1263 /* Return the first non-invariant ssa var found in EXPR. */
1265 static tree
1266 extract_single_var_from_expr (tree expr)
1268 int i, n;
1269 tree tmp;
1270 enum tree_code code;
1272 if (!expr || is_gimple_min_invariant (expr))
1273 return NULL;
1275 code = TREE_CODE (expr);
1276 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1278 n = TREE_OPERAND_LENGTH (expr);
1279 for (i = 0; i < n; i++)
1281 tmp = extract_single_var_from_expr (TREE_OPERAND (expr, i));
1283 if (tmp)
1284 return tmp;
1287 return (TREE_CODE (expr) == SSA_NAME) ? expr : NULL;
1290 /* Finds basic ivs. */
1292 static bool
1293 find_bivs (struct ivopts_data *data)
1295 gphi *phi;
1296 affine_iv iv;
1297 tree step, type, base, stop;
1298 bool found = false;
1299 class loop *loop = data->current_loop;
1300 gphi_iterator psi;
1302 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
1304 phi = psi.phi ();
1306 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (phi)))
1307 continue;
1309 if (virtual_operand_p (PHI_RESULT (phi)))
1310 continue;
1312 if (!simple_iv (loop, loop, PHI_RESULT (phi), &iv, true))
1313 continue;
1315 if (integer_zerop (iv.step))
1316 continue;
1318 step = iv.step;
1319 base = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
1320 /* Stop expanding the iv base at the first ssa var referred to by the iv step.
1321 Ideally we should stop at any ssa var, but because that's expensive and
1322 such cases are unusual, we just do it on the first one.
1324 See PR64705 for the rationale. */
1325 stop = extract_single_var_from_expr (step);
1326 base = expand_simple_operations (base, stop);
1327 if (contains_abnormal_ssa_name_p (base)
1328 || contains_abnormal_ssa_name_p (step))
1329 continue;
1331 type = TREE_TYPE (PHI_RESULT (phi));
1332 base = fold_convert (type, base);
1333 if (step)
1335 if (POINTER_TYPE_P (type))
1336 step = convert_to_ptrofftype (step);
1337 else
1338 step = fold_convert (type, step);
1341 set_iv (data, PHI_RESULT (phi), base, step, iv.no_overflow);
1342 found = true;
1345 return found;
1348 /* Marks basic ivs. */
1350 static void
1351 mark_bivs (struct ivopts_data *data)
1353 gphi *phi;
1354 gimple *def;
1355 tree var;
1356 struct iv *iv, *incr_iv;
1357 class loop *loop = data->current_loop;
1358 basic_block incr_bb;
1359 gphi_iterator psi;
1361 data->bivs_not_used_in_addr = 0;
1362 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
1364 phi = psi.phi ();
1366 iv = get_iv (data, PHI_RESULT (phi));
1367 if (!iv)
1368 continue;
1370 var = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
1371 def = SSA_NAME_DEF_STMT (var);
1372 /* Don't mark an iv peeled from another one as a biv. */
1373 if (def
1374 && gimple_code (def) == GIMPLE_PHI
1375 && gimple_bb (def) == loop->header)
1376 continue;
1378 incr_iv = get_iv (data, var);
1379 if (!incr_iv)
1380 continue;
1382 /* If the increment is in the subloop, ignore it. */
1383 incr_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
1384 if (incr_bb->loop_father != data->current_loop
1385 || (incr_bb->flags & BB_IRREDUCIBLE_LOOP))
1386 continue;
1388 iv->biv_p = true;
1389 incr_iv->biv_p = true;
1390 if (iv->no_overflow)
1391 data->bivs_not_used_in_addr++;
1392 if (incr_iv->no_overflow)
1393 data->bivs_not_used_in_addr++;
1397 /* Checks whether STMT defines a linear induction variable and stores its
1398 parameters to IV. */
1400 static bool
1401 find_givs_in_stmt_scev (struct ivopts_data *data, gimple *stmt, affine_iv *iv)
1403 tree lhs, stop;
1404 class loop *loop = data->current_loop;
1406 iv->base = NULL_TREE;
1407 iv->step = NULL_TREE;
1409 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1410 return false;
1412 lhs = gimple_assign_lhs (stmt);
1413 if (TREE_CODE (lhs) != SSA_NAME)
1414 return false;
1416 if (!simple_iv (loop, loop_containing_stmt (stmt), lhs, iv, true))
1417 return false;
1419 /* Stop expanding the iv base at the first ssa var referred to by the iv step.
1420 Ideally we should stop at any ssa var, but because that's expensive and
1421 such cases are unusual, we just do it on the first one.
1423 See PR64705 for the rationale. */
1424 stop = extract_single_var_from_expr (iv->step);
1425 iv->base = expand_simple_operations (iv->base, stop);
1426 if (contains_abnormal_ssa_name_p (iv->base)
1427 || contains_abnormal_ssa_name_p (iv->step))
1428 return false;
1430 /* If STMT could throw, then do not consider STMT as defining a GIV.
1431 While this will suppress optimizations, we cannot safely delete this
1432 GIV and associated statements, even if it appears it is not used. */
1433 if (stmt_could_throw_p (cfun, stmt))
1434 return false;
1436 return true;
1439 /* Finds general ivs in statement STMT. */
1441 static void
1442 find_givs_in_stmt (struct ivopts_data *data, gimple *stmt)
1444 affine_iv iv;
1446 if (!find_givs_in_stmt_scev (data, stmt, &iv))
1447 return;
1449 set_iv (data, gimple_assign_lhs (stmt), iv.base, iv.step, iv.no_overflow);
1452 /* Finds general ivs in basic block BB. */
1454 static void
1455 find_givs_in_bb (struct ivopts_data *data, basic_block bb)
1457 gimple_stmt_iterator bsi;
1459 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1460 find_givs_in_stmt (data, gsi_stmt (bsi));
1463 /* Finds general ivs. */
1465 static void
1466 find_givs (struct ivopts_data *data, basic_block *body)
1468 class loop *loop = data->current_loop;
1469 unsigned i;
1471 for (i = 0; i < loop->num_nodes; i++)
1472 find_givs_in_bb (data, body[i]);
1475 /* For each ssa name defined in LOOP determines whether it is an induction
1476 variable and if so, its initial value and step. */
1478 static bool
1479 find_induction_variables (struct ivopts_data *data, basic_block *body)
1481 unsigned i;
1482 bitmap_iterator bi;
1484 if (!find_bivs (data))
1485 return false;
1487 find_givs (data, body);
1488 mark_bivs (data);
1490 if (dump_file && (dump_flags & TDF_DETAILS))
1492 class tree_niter_desc *niter = niter_for_single_dom_exit (data);
1494 if (niter)
1496 fprintf (dump_file, " number of iterations ");
1497 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1498 if (!integer_zerop (niter->may_be_zero))
1500 fprintf (dump_file, "; zero if ");
1501 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1503 fprintf (dump_file, "\n");
1506 fprintf (dump_file, "\n<Induction Vars>:\n");
1507 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
1509 struct version_info *info = ver_info (data, i);
1510 if (info->iv && info->iv->step && !integer_zerop (info->iv->step))
1511 dump_iv (dump_file, ver_info (data, i)->iv, true, 0);
1515 return true;
1518 /* Records a use of TYPE at *USE_P in STMT whose value is IV in GROUP.
1519 For address type use, ADDR_BASE is the stripped IV base, ADDR_OFFSET
1520 is the const offset stripped from IV base and MEM_TYPE is the type
1521 of the memory being addressed. For uses of other types, ADDR_BASE
1522 and ADDR_OFFSET are zero by default and MEM_TYPE is NULL_TREE. */
1524 static struct iv_use *
1525 record_use (struct iv_group *group, tree *use_p, struct iv *iv,
1526 gimple *stmt, enum use_type type, tree mem_type,
1527 tree addr_base, poly_uint64 addr_offset)
1529 struct iv_use *use = XCNEW (struct iv_use);
1531 use->id = group->vuses.length ();
1532 use->group_id = group->id;
1533 use->type = type;
1534 use->mem_type = mem_type;
1535 use->iv = iv;
1536 use->stmt = stmt;
1537 use->op_p = use_p;
1538 use->addr_base = addr_base;
1539 use->addr_offset = addr_offset;
1541 group->vuses.safe_push (use);
1542 return use;
1545 /* Checks whether OP is a loop-level invariant and if so, records it.
1546 NONLINEAR_USE is true if the invariant is used in a way we do not
1547 handle specially. */
1549 static void
1550 record_invariant (struct ivopts_data *data, tree op, bool nonlinear_use)
1552 basic_block bb;
1553 struct version_info *info;
1555 if (TREE_CODE (op) != SSA_NAME
1556 || virtual_operand_p (op))
1557 return;
1559 bb = gimple_bb (SSA_NAME_DEF_STMT (op));
1560 if (bb
1561 && flow_bb_inside_loop_p (data->current_loop, bb))
1562 return;
1564 info = name_info (data, op);
1565 info->name = op;
1566 info->has_nonlin_use |= nonlinear_use;
1567 if (!info->inv_id)
1568 info->inv_id = ++data->max_inv_var_id;
1569 bitmap_set_bit (data->relevant, SSA_NAME_VERSION (op));
1572 /* Record a group of TYPE. */
1574 static struct iv_group *
1575 record_group (struct ivopts_data *data, enum use_type type)
1577 struct iv_group *group = XCNEW (struct iv_group);
1579 group->id = data->vgroups.length ();
1580 group->type = type;
1581 group->related_cands = BITMAP_ALLOC (NULL);
1582 group->vuses.create (1);
1583 group->doloop_p = false;
1585 data->vgroups.safe_push (group);
1586 return group;
1589 /* Record a use of TYPE at *USE_P in STMT whose value is IV in a group.
1590 New group will be created if there is no existing group for the use.
1591 MEM_TYPE is the type of memory being addressed, or NULL if this
1592 isn't an address reference. */
1594 static struct iv_use *
1595 record_group_use (struct ivopts_data *data, tree *use_p,
1596 struct iv *iv, gimple *stmt, enum use_type type,
1597 tree mem_type)
1599 tree addr_base = NULL;
1600 struct iv_group *group = NULL;
1601 poly_uint64 addr_offset = 0;
1603 /* Record non address type use in a new group. */
1604 if (address_p (type))
1606 unsigned int i;
1608 addr_base = strip_offset (iv->base, &addr_offset);
1609 for (i = 0; i < data->vgroups.length (); i++)
1611 struct iv_use *use;
1613 group = data->vgroups[i];
1614 use = group->vuses[0];
1615 if (!address_p (use->type))
1616 continue;
1618 /* Check if it has the same stripped base and step. */
1619 if (operand_equal_p (iv->base_object, use->iv->base_object, 0)
1620 && operand_equal_p (iv->step, use->iv->step, 0)
1621 && operand_equal_p (addr_base, use->addr_base, 0))
1622 break;
1624 if (i == data->vgroups.length ())
1625 group = NULL;
1628 if (!group)
1629 group = record_group (data, type);
1631 return record_use (group, use_p, iv, stmt, type, mem_type,
1632 addr_base, addr_offset);
1635 /* Checks whether the use OP is interesting and if so, records it. */
1637 static struct iv_use *
1638 find_interesting_uses_op (struct ivopts_data *data, tree op)
1640 struct iv *iv;
1641 gimple *stmt;
1642 struct iv_use *use;
1644 if (TREE_CODE (op) != SSA_NAME)
1645 return NULL;
1647 iv = get_iv (data, op);
1648 if (!iv)
1649 return NULL;
1651 if (iv->nonlin_use)
1653 gcc_assert (iv->nonlin_use->type == USE_NONLINEAR_EXPR);
1654 return iv->nonlin_use;
1657 if (integer_zerop (iv->step))
1659 record_invariant (data, op, true);
1660 return NULL;
1663 stmt = SSA_NAME_DEF_STMT (op);
1664 gcc_assert (gimple_code (stmt) == GIMPLE_PHI || is_gimple_assign (stmt));
1666 use = record_group_use (data, NULL, iv, stmt, USE_NONLINEAR_EXPR, NULL_TREE);
1667 iv->nonlin_use = use;
1668 return use;
1671 /* Indicate how compare type iv_use can be handled. */
1672 enum comp_iv_rewrite
1674 COMP_IV_NA,
1675 /* We may rewrite compare type iv_use by expressing value of the iv_use. */
1676 COMP_IV_EXPR,
1677 /* We may rewrite compare type iv_uses on both sides of comparison by
1678 expressing value of each iv_use. */
1679 COMP_IV_EXPR_2,
1680 /* We may rewrite compare type iv_use by expressing value of the iv_use
1681 or by eliminating it with other iv_cand. */
1682 COMP_IV_ELIM
1685 /* Given a condition in statement STMT, checks whether it is a compare
1686 of an induction variable and an invariant. If this is the case,
1687 CONTROL_VAR is set to the location of the iv, BOUND to the location of
1688 the invariant, IV_VAR and IV_BOUND are set to the corresponding
1689 induction variable descriptions, and a value other than COMP_IV_NA is
1690 returned. If this is not the case, CONTROL_VAR and BOUND are set to the
1691 arguments of the condition and COMP_IV_NA is returned. */
1693 static enum comp_iv_rewrite
1694 extract_cond_operands (struct ivopts_data *data, gimple *stmt,
1695 tree **control_var, tree **bound,
1696 struct iv **iv_var, struct iv **iv_bound)
1698 /* The objects returned when COND has constant operands. */
1699 static struct iv const_iv;
1700 static tree zero;
1701 tree *op0 = &zero, *op1 = &zero;
1702 struct iv *iv0 = &const_iv, *iv1 = &const_iv;
1703 enum comp_iv_rewrite rewrite_type = COMP_IV_NA;
1705 if (gimple_code (stmt) == GIMPLE_COND)
1707 gcond *cond_stmt = as_a <gcond *> (stmt);
1708 op0 = gimple_cond_lhs_ptr (cond_stmt);
1709 op1 = gimple_cond_rhs_ptr (cond_stmt);
1711 else
1713 op0 = gimple_assign_rhs1_ptr (stmt);
1714 op1 = gimple_assign_rhs2_ptr (stmt);
1717 zero = integer_zero_node;
1718 const_iv.step = integer_zero_node;
1720 if (TREE_CODE (*op0) == SSA_NAME)
1721 iv0 = get_iv (data, *op0);
1722 if (TREE_CODE (*op1) == SSA_NAME)
1723 iv1 = get_iv (data, *op1);
1725 /* If both sides of the comparison are IVs, we can express the ivs on both ends. */
1726 if (iv0 && iv1 && !integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1728 rewrite_type = COMP_IV_EXPR_2;
1729 goto end;
1732 /* If neither side of the comparison is an IV. */
1733 if ((!iv0 || integer_zerop (iv0->step))
1734 && (!iv1 || integer_zerop (iv1->step)))
1735 goto end;
1737 /* Control variable may be on the other side. */
1738 if (!iv0 || integer_zerop (iv0->step))
1740 std::swap (op0, op1);
1741 std::swap (iv0, iv1);
1743 /* If one side is an IV and the other side isn't loop invariant. */
1744 if (!iv1)
1745 rewrite_type = COMP_IV_EXPR;
1746 /* If one side is an IV and the other side is loop invariant. */
1747 else if (!integer_zerop (iv0->step) && integer_zerop (iv1->step))
1748 rewrite_type = COMP_IV_ELIM;
1750 end:
1751 if (control_var)
1752 *control_var = op0;
1753 if (iv_var)
1754 *iv_var = iv0;
1755 if (bound)
1756 *bound = op1;
1757 if (iv_bound)
1758 *iv_bound = iv1;
1760 return rewrite_type;
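/* Illustrative examples, not part of the original sources, of how the code
   above classifies a compare: for "i < n" with i an IV of nonzero step and
   n loop invariant (or constant), COMP_IV_ELIM is returned; for "i < j"
   with both i and j IVs of nonzero step, COMP_IV_EXPR_2; for "i < x" where
   x is defined inside the loop but is not an IV, COMP_IV_EXPR; and if
   neither side is an IV with nonzero step, COMP_IV_NA.  */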
1763 /* Checks whether the condition in STMT is interesting and if so,
1764 records it. */
1766 static void
1767 find_interesting_uses_cond (struct ivopts_data *data, gimple *stmt)
1769 tree *var_p, *bound_p;
1770 struct iv *var_iv, *bound_iv;
1771 enum comp_iv_rewrite ret;
1773 ret = extract_cond_operands (data, stmt,
1774 &var_p, &bound_p, &var_iv, &bound_iv);
1775 if (ret == COMP_IV_NA)
1777 find_interesting_uses_op (data, *var_p);
1778 find_interesting_uses_op (data, *bound_p);
1779 return;
1782 record_group_use (data, var_p, var_iv, stmt, USE_COMPARE, NULL_TREE);
1783 /* Record compare type iv_use for iv on the other side of comparison. */
1784 if (ret == COMP_IV_EXPR_2)
1785 record_group_use (data, bound_p, bound_iv, stmt, USE_COMPARE, NULL_TREE);
1788 /* Returns the outermost loop in which EXPR is obviously invariant,
1789 relative to the loop LOOP, i.e. such that all its operands are defined
1790 outside of the returned loop. Returns NULL if EXPR is not
1791 even obviously invariant in LOOP. */
1793 class loop *
1794 outermost_invariant_loop_for_expr (class loop *loop, tree expr)
1796 basic_block def_bb;
1797 unsigned i, len;
1799 if (is_gimple_min_invariant (expr))
1800 return current_loops->tree_root;
1802 if (TREE_CODE (expr) == SSA_NAME)
1804 def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
1805 if (def_bb)
1807 if (flow_bb_inside_loop_p (loop, def_bb))
1808 return NULL;
1809 return superloop_at_depth (loop,
1810 loop_depth (def_bb->loop_father) + 1);
1813 return current_loops->tree_root;
1816 if (!EXPR_P (expr))
1817 return NULL;
1819 unsigned maxdepth = 0;
1820 len = TREE_OPERAND_LENGTH (expr);
1821 for (i = 0; i < len; i++)
1823 class loop *ivloop;
1824 if (!TREE_OPERAND (expr, i))
1825 continue;
1827 ivloop = outermost_invariant_loop_for_expr (loop, TREE_OPERAND (expr, i));
1828 if (!ivloop)
1829 return NULL;
1830 maxdepth = MAX (maxdepth, loop_depth (ivloop));
1833 return superloop_at_depth (loop, maxdepth);
1836 /* Returns true if expression EXPR is obviously invariant in LOOP,
1837 i.e. if all its operands are defined outside of the LOOP. LOOP
1838 should not be the function body. */
1840 bool
1841 expr_invariant_in_loop_p (class loop *loop, tree expr)
1843 basic_block def_bb;
1844 unsigned i, len;
1846 gcc_assert (loop_depth (loop) > 0);
1848 if (is_gimple_min_invariant (expr))
1849 return true;
1851 if (TREE_CODE (expr) == SSA_NAME)
1853 def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
1854 if (def_bb
1855 && flow_bb_inside_loop_p (loop, def_bb))
1856 return false;
1858 return true;
1861 if (!EXPR_P (expr))
1862 return false;
1864 len = TREE_OPERAND_LENGTH (expr);
1865 for (i = 0; i < len; i++)
1866 if (TREE_OPERAND (expr, i)
1867 && !expr_invariant_in_loop_p (loop, TREE_OPERAND (expr, i)))
1868 return false;
1870 return true;
1873 /* Given expression EXPR which computes inductive values with respect
1874 to loop recorded in DATA, this function returns biv from which EXPR
1875 is derived by tracing definition chains of ssa variables in EXPR. */
1877 static struct iv*
1878 find_deriving_biv_for_expr (struct ivopts_data *data, tree expr)
1880 struct iv *iv;
1881 unsigned i, n;
1882 tree e2, e1;
1883 enum tree_code code;
1884 gimple *stmt;
1886 if (expr == NULL_TREE)
1887 return NULL;
1889 if (is_gimple_min_invariant (expr))
1890 return NULL;
1892 code = TREE_CODE (expr);
1893 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1895 n = TREE_OPERAND_LENGTH (expr);
1896 for (i = 0; i < n; i++)
1898 iv = find_deriving_biv_for_expr (data, TREE_OPERAND (expr, i));
1899 if (iv)
1900 return iv;
1904 /* Stop if it's not an ssa name. */
1905 if (code != SSA_NAME)
1906 return NULL;
1908 iv = get_iv (data, expr);
1909 if (!iv || integer_zerop (iv->step))
1910 return NULL;
1911 else if (iv->biv_p)
1912 return iv;
1914 stmt = SSA_NAME_DEF_STMT (expr);
1915 if (gphi *phi = dyn_cast <gphi *> (stmt))
1917 ssa_op_iter iter;
1918 use_operand_p use_p;
1919 basic_block phi_bb = gimple_bb (phi);
1921 /* Skip loop header PHI that doesn't define biv. */
1922 if (phi_bb->loop_father == data->current_loop)
1923 return NULL;
1925 if (virtual_operand_p (gimple_phi_result (phi)))
1926 return NULL;
1928 FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
1930 tree use = USE_FROM_PTR (use_p);
1931 iv = find_deriving_biv_for_expr (data, use);
1932 if (iv)
1933 return iv;
1935 return NULL;
1937 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1938 return NULL;
1940 e1 = gimple_assign_rhs1 (stmt);
1941 code = gimple_assign_rhs_code (stmt);
1942 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1943 return find_deriving_biv_for_expr (data, e1);
1945 switch (code)
1947 case MULT_EXPR:
1948 case PLUS_EXPR:
1949 case MINUS_EXPR:
1950 case POINTER_PLUS_EXPR:
1951 /* Increments, decrements and multiplications by a constant
1952 are simple. */
1953 e2 = gimple_assign_rhs2 (stmt);
1954 iv = find_deriving_biv_for_expr (data, e2);
1955 if (iv)
1956 return iv;
1957 gcc_fallthrough ();
1959 CASE_CONVERT:
1960 /* Casts are simple. */
1961 return find_deriving_biv_for_expr (data, e1);
1963 default:
1964 break;
1967 return NULL;
1970 /* Record that BIV, its predecessor and its successor are used in
1971 address type uses. */
1973 static void
1974 record_biv_for_address_use (struct ivopts_data *data, struct iv *biv)
1976 unsigned i;
1977 tree type, base_1, base_2;
1978 bitmap_iterator bi;
1980 if (!biv || !biv->biv_p || integer_zerop (biv->step)
1981 || biv->have_address_use || !biv->no_overflow)
1982 return;
1984 type = TREE_TYPE (biv->base);
1985 if (!INTEGRAL_TYPE_P (type))
1986 return;
1988 biv->have_address_use = true;
1989 data->bivs_not_used_in_addr--;
1990 base_1 = fold_build2 (PLUS_EXPR, type, biv->base, biv->step);
1991 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
1993 struct iv *iv = ver_info (data, i)->iv;
1995 if (!iv || !iv->biv_p || integer_zerop (iv->step)
1996 || iv->have_address_use || !iv->no_overflow)
1997 continue;
1999 if (type != TREE_TYPE (iv->base)
2000 || !INTEGRAL_TYPE_P (TREE_TYPE (iv->base)))
2001 continue;
2003 if (!operand_equal_p (biv->step, iv->step, 0))
2004 continue;
2006 base_2 = fold_build2 (PLUS_EXPR, type, iv->base, iv->step);
2007 if (operand_equal_p (base_1, iv->base, 0)
2008 || operand_equal_p (base_2, biv->base, 0))
2010 iv->have_address_use = true;
2011 data->bivs_not_used_in_addr--;
2016 /* Accumulates the steps of indices into DATA and replaces their values with the
2017 initial ones. Returns false when the value of the index cannot be determined.
2018 Callback for for_each_index. */
2020 struct ifs_ivopts_data
2022 struct ivopts_data *ivopts_data;
2023 gimple *stmt;
2024 tree step;
2027 static bool
2028 idx_find_step (tree base, tree *idx, void *data)
2030 struct ifs_ivopts_data *dta = (struct ifs_ivopts_data *) data;
2031 struct iv *iv;
2032 bool use_overflow_semantics = false;
2033 tree step, iv_base, iv_step, lbound, off;
2034 class loop *loop = dta->ivopts_data->current_loop;
2036 /* If base is a component ref, require that the offset of the reference
2037 be invariant. */
2038 if (TREE_CODE (base) == COMPONENT_REF)
2040 off = component_ref_field_offset (base);
2041 return expr_invariant_in_loop_p (loop, off);
2044 /* If base is array, first check whether we will be able to move the
2045 reference out of the loop (in order to take its address in strength
2046 reduction). In order for this to work we need both lower bound
2047 and step to be loop invariants. */
2048 if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
2050 /* Moreover, for a range, the size needs to be invariant as well. */
2051 if (TREE_CODE (base) == ARRAY_RANGE_REF
2052 && !expr_invariant_in_loop_p (loop, TYPE_SIZE (TREE_TYPE (base))))
2053 return false;
2055 step = array_ref_element_size (base);
2056 lbound = array_ref_low_bound (base);
2058 if (!expr_invariant_in_loop_p (loop, step)
2059 || !expr_invariant_in_loop_p (loop, lbound))
2060 return false;
2063 if (TREE_CODE (*idx) != SSA_NAME)
2064 return true;
2066 iv = get_iv (dta->ivopts_data, *idx);
2067 if (!iv)
2068 return false;
2070 /* XXX For a base of *D42 with iv->base being &x[0] we produce
2071 *&x[0], which is not folded and does not trigger the
2072 ARRAY_REF path below. */
2073 *idx = iv->base;
2075 if (integer_zerop (iv->step))
2076 return true;
2078 if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
2080 step = array_ref_element_size (base);
2082 /* We only handle addresses whose step is an integer constant. */
2083 if (TREE_CODE (step) != INTEGER_CST)
2084 return false;
2086 else
2087 /* The step for pointer arithmetic is already 1 byte. */
2088 step = size_one_node;
2090 iv_base = iv->base;
2091 iv_step = iv->step;
2092 if (iv->no_overflow && nowrap_type_p (TREE_TYPE (iv_step)))
2093 use_overflow_semantics = true;
2095 if (!convert_affine_scev (dta->ivopts_data->current_loop,
2096 sizetype, &iv_base, &iv_step, dta->stmt,
2097 use_overflow_semantics))
2099 /* The index might wrap. */
2100 return false;
2103 step = fold_build2 (MULT_EXPR, sizetype, step, iv_step);
2104 dta->step = fold_build2 (PLUS_EXPR, sizetype, dta->step, step);
2106 if (dta->ivopts_data->bivs_not_used_in_addr)
2108 if (!iv->biv_p)
2109 iv = find_deriving_biv_for_expr (dta->ivopts_data, iv->ssa_name);
2111 record_biv_for_address_use (dta->ivopts_data, iv);
2113 return true;
2116 /* Records use in index IDX. Callback for for_each_index. Ivopts data
2117 object is passed to it in DATA. */
2119 static bool
2120 idx_record_use (tree base, tree *idx,
2121 void *vdata)
2123 struct ivopts_data *data = (struct ivopts_data *) vdata;
2124 find_interesting_uses_op (data, *idx);
2125 if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
2127 if (TREE_OPERAND (base, 2))
2128 find_interesting_uses_op (data, TREE_OPERAND (base, 2));
2129 if (TREE_OPERAND (base, 3))
2130 find_interesting_uses_op (data, TREE_OPERAND (base, 3));
2132 return true;
2135 /* If we can prove that TOP = cst * BOT for some constant cst,
2136 store cst to MUL and return true. Otherwise return false.
2137 The returned value is always sign-extended, regardless of the
2138 signedness of TOP and BOT. */
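/* For instance, for TOP = n * 8 + n * 4 and BOT = n this stores 12 to *MUL;
   for TOP = 24 and BOT = 8 it stores 3. */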
2140 static bool
2141 constant_multiple_of (tree top, tree bot, widest_int *mul)
2143 tree mby;
2144 enum tree_code code;
2145 unsigned precision = TYPE_PRECISION (TREE_TYPE (top));
2146 widest_int res, p0, p1;
2148 STRIP_NOPS (top);
2149 STRIP_NOPS (bot);
2151 if (operand_equal_p (top, bot, 0))
2153 *mul = 1;
2154 return true;
2157 code = TREE_CODE (top);
2158 switch (code)
2160 case MULT_EXPR:
2161 mby = TREE_OPERAND (top, 1);
2162 if (TREE_CODE (mby) != INTEGER_CST)
2163 return false;
2165 if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res))
2166 return false;
2168 *mul = wi::sext (res * wi::to_widest (mby), precision);
2169 return true;
2171 case PLUS_EXPR:
2172 case MINUS_EXPR:
2173 if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &p0)
2174 || !constant_multiple_of (TREE_OPERAND (top, 1), bot, &p1))
2175 return false;
2177 if (code == MINUS_EXPR)
2178 p1 = -p1;
2179 *mul = wi::sext (p0 + p1, precision);
2180 return true;
2182 case INTEGER_CST:
2183 if (TREE_CODE (bot) != INTEGER_CST)
2184 return false;
2186 p0 = widest_int::from (wi::to_wide (top), SIGNED);
2187 p1 = widest_int::from (wi::to_wide (bot), SIGNED);
2188 if (p1 == 0)
2189 return false;
2190 *mul = wi::sext (wi::divmod_trunc (p0, p1, SIGNED, &res), precision);
2191 return res == 0;
2193 default:
2194 if (POLY_INT_CST_P (top)
2195 && POLY_INT_CST_P (bot)
2196 && constant_multiple_p (wi::to_poly_widest (top),
2197 wi::to_poly_widest (bot), mul))
2198 return true;
2200 return false;
2204 /* Return true if memory reference REF with step STEP may be unaligned. */
2206 static bool
2207 may_be_unaligned_p (tree ref, tree step)
2209 /* TARGET_MEM_REFs are translated directly to valid MEMs on the target,
2210 thus they are not misaligned. */
2211 if (TREE_CODE (ref) == TARGET_MEM_REF)
2212 return false;
2214 unsigned int align = TYPE_ALIGN (TREE_TYPE (ref));
2215 if (GET_MODE_ALIGNMENT (TYPE_MODE (TREE_TYPE (ref))) > align)
2216 align = GET_MODE_ALIGNMENT (TYPE_MODE (TREE_TYPE (ref)));
2218 unsigned HOST_WIDE_INT bitpos;
2219 unsigned int ref_align;
2220 get_object_alignment_1 (ref, &ref_align, &bitpos);
2221 if (ref_align < align
2222 || (bitpos % align) != 0
2223 || (bitpos % BITS_PER_UNIT) != 0)
2224 return true;
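/* Even if the reference itself is sufficiently aligned, stepping by STEP may
   not preserve that alignment: the known alignment of STEP (derived from its
   trailing zero bits) must cover the required alignment as well. */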
2226 unsigned int trailing_zeros = tree_ctz (step);
2227 if (trailing_zeros < HOST_BITS_PER_INT
2228 && (1U << trailing_zeros) * BITS_PER_UNIT < align)
2229 return true;
2231 return false;
2234 /* Return true if EXPR may be non-addressable. */
2236 bool
2237 may_be_nonaddressable_p (tree expr)
2239 switch (TREE_CODE (expr))
2241 case VAR_DECL:
2242 /* Check if it's a register variable. */
2243 return DECL_HARD_REGISTER (expr);
2245 case TARGET_MEM_REF:
2246 /* TARGET_MEM_REFs are translated directly to valid MEMs on the
2247 target, thus they are always addressable. */
2248 return false;
2250 case MEM_REF:
2251 /* Likewise for MEM_REFs, modulo the storage order. */
2252 return REF_REVERSE_STORAGE_ORDER (expr);
2254 case BIT_FIELD_REF:
2255 if (REF_REVERSE_STORAGE_ORDER (expr))
2256 return true;
2257 return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));
2259 case COMPONENT_REF:
2260 if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (expr, 0))))
2261 return true;
2262 return DECL_NONADDRESSABLE_P (TREE_OPERAND (expr, 1))
2263 || may_be_nonaddressable_p (TREE_OPERAND (expr, 0));
2265 case ARRAY_REF:
2266 case ARRAY_RANGE_REF:
2267 if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (expr, 0))))
2268 return true;
2269 return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));
2271 case VIEW_CONVERT_EXPR:
2272 /* This kind of view-conversions may wrap non-addressable objects
2273 and make them look addressable. After some processing the
2274 non-addressability may be uncovered again, causing ADDR_EXPRs
2275 of inappropriate objects to be built. */
2276 if (is_gimple_reg (TREE_OPERAND (expr, 0))
2277 || !is_gimple_addressable (TREE_OPERAND (expr, 0)))
2278 return true;
2279 return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));
2281 CASE_CONVERT:
2282 return true;
2284 default:
2285 break;
2288 return false;
2291 /* Finds addresses in *OP_P inside STMT. */
2293 static void
2294 find_interesting_uses_address (struct ivopts_data *data, gimple *stmt,
2295 tree *op_p)
2297 tree base = *op_p, step = size_zero_node;
2298 struct iv *civ;
2299 struct ifs_ivopts_data ifs_ivopts_data;
2301 /* Do not play with volatile memory references. A bit too conservative,
2302 perhaps, but safe. */
2303 if (gimple_has_volatile_ops (stmt))
2304 goto fail;
2306 /* Ignore bitfields for now. Not really something terribly complicated
2307 to handle. TODO. */
2308 if (TREE_CODE (base) == BIT_FIELD_REF)
2309 goto fail;
2311 base = unshare_expr (base);
2313 if (TREE_CODE (base) == TARGET_MEM_REF)
2315 tree type = build_pointer_type (TREE_TYPE (base));
2316 tree astep;
2318 if (TMR_BASE (base)
2319 && TREE_CODE (TMR_BASE (base)) == SSA_NAME)
2321 civ = get_iv (data, TMR_BASE (base));
2322 if (!civ)
2323 goto fail;
2325 TMR_BASE (base) = civ->base;
2326 step = civ->step;
2328 if (TMR_INDEX2 (base)
2329 && TREE_CODE (TMR_INDEX2 (base)) == SSA_NAME)
2331 civ = get_iv (data, TMR_INDEX2 (base));
2332 if (!civ)
2333 goto fail;
2335 TMR_INDEX2 (base) = civ->base;
2336 step = civ->step;
2338 if (TMR_INDEX (base)
2339 && TREE_CODE (TMR_INDEX (base)) == SSA_NAME)
2341 civ = get_iv (data, TMR_INDEX (base));
2342 if (!civ)
2343 goto fail;
2345 TMR_INDEX (base) = civ->base;
2346 astep = civ->step;
2348 if (astep)
2350 if (TMR_STEP (base))
2351 astep = fold_build2 (MULT_EXPR, type, TMR_STEP (base), astep);
2353 step = fold_build2 (PLUS_EXPR, type, step, astep);
2357 if (integer_zerop (step))
2358 goto fail;
2359 base = tree_mem_ref_addr (type, base);
2361 else
2363 ifs_ivopts_data.ivopts_data = data;
2364 ifs_ivopts_data.stmt = stmt;
2365 ifs_ivopts_data.step = size_zero_node;
2366 if (!for_each_index (&base, idx_find_step, &ifs_ivopts_data)
2367 || integer_zerop (ifs_ivopts_data.step))
2368 goto fail;
2369 step = ifs_ivopts_data.step;
2371 /* Check that the base expression is addressable. This needs
2372 to be done after substituting bases of IVs into it. */
2373 if (may_be_nonaddressable_p (base))
2374 goto fail;
2376 /* Moreover, on strict alignment platforms, check that it is
2377 sufficiently aligned. */
2378 if (STRICT_ALIGNMENT && may_be_unaligned_p (base, step))
2379 goto fail;
2381 base = build_fold_addr_expr (base);
2383 /* Substituting bases of IVs into the base expression might
2384 have caused folding opportunities. */
2385 if (TREE_CODE (base) == ADDR_EXPR)
2387 tree *ref = &TREE_OPERAND (base, 0);
2388 while (handled_component_p (*ref))
2389 ref = &TREE_OPERAND (*ref, 0);
2390 if (TREE_CODE (*ref) == MEM_REF)
2392 tree tem = fold_binary (MEM_REF, TREE_TYPE (*ref),
2393 TREE_OPERAND (*ref, 0),
2394 TREE_OPERAND (*ref, 1));
2395 if (tem)
2396 *ref = tem;
2401 civ = alloc_iv (data, base, step);
2402 /* Fail if base object of this memory reference is unknown. */
2403 if (civ->base_object == NULL_TREE)
2404 goto fail;
2406 record_group_use (data, op_p, civ, stmt, USE_REF_ADDRESS, TREE_TYPE (*op_p));
2407 return;
2409 fail:
2410 for_each_index (op_p, idx_record_use, data);
2413 /* Finds and records invariants used in STMT. */
2415 static void
2416 find_invariants_stmt (struct ivopts_data *data, gimple *stmt)
2418 ssa_op_iter iter;
2419 use_operand_p use_p;
2420 tree op;
2422 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
2424 op = USE_FROM_PTR (use_p);
2425 record_invariant (data, op, false);
2429 /* CALL calls an internal function. If operand *OP_P will become an
2430 address when the call is expanded, return the type of the memory
2431 being addressed, otherwise return null. */
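/* That is, the type of the loaded lhs for the load functions, and the type
   of the stored value (argument 3) for the store functions. */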
2433 static tree
2434 get_mem_type_for_internal_fn (gcall *call, tree *op_p)
2436 switch (gimple_call_internal_fn (call))
2438 case IFN_MASK_LOAD:
2439 case IFN_MASK_LOAD_LANES:
2440 case IFN_LEN_LOAD:
2441 if (op_p == gimple_call_arg_ptr (call, 0))
2442 return TREE_TYPE (gimple_call_lhs (call));
2443 return NULL_TREE;
2445 case IFN_MASK_STORE:
2446 case IFN_MASK_STORE_LANES:
2447 case IFN_LEN_STORE:
2448 if (op_p == gimple_call_arg_ptr (call, 0))
2449 return TREE_TYPE (gimple_call_arg (call, 3));
2450 return NULL_TREE;
2452 default:
2453 return NULL_TREE;
2457 /* IV is a (non-address) iv that describes operand *OP_P of STMT.
2458 Return true if the operand will become an address when STMT
2459 is expanded and record the associated address use if so. */
2461 static bool
2462 find_address_like_use (struct ivopts_data *data, gimple *stmt, tree *op_p,
2463 struct iv *iv)
2465 /* Fail if base object of this memory reference is unknown. */
2466 if (iv->base_object == NULL_TREE)
2467 return false;
2469 tree mem_type = NULL_TREE;
2470 if (gcall *call = dyn_cast <gcall *> (stmt))
2471 if (gimple_call_internal_p (call))
2472 mem_type = get_mem_type_for_internal_fn (call, op_p);
2473 if (mem_type)
2475 iv = alloc_iv (data, iv->base, iv->step);
2476 record_group_use (data, op_p, iv, stmt, USE_PTR_ADDRESS, mem_type);
2477 return true;
2479 return false;
2482 /* Finds interesting uses of induction variables in the statement STMT. */
2484 static void
2485 find_interesting_uses_stmt (struct ivopts_data *data, gimple *stmt)
2487 struct iv *iv;
2488 tree op, *lhs, *rhs;
2489 ssa_op_iter iter;
2490 use_operand_p use_p;
2491 enum tree_code code;
2493 find_invariants_stmt (data, stmt);
2495 if (gimple_code (stmt) == GIMPLE_COND)
2497 find_interesting_uses_cond (data, stmt);
2498 return;
2501 if (is_gimple_assign (stmt))
2503 lhs = gimple_assign_lhs_ptr (stmt);
2504 rhs = gimple_assign_rhs1_ptr (stmt);
2506 if (TREE_CODE (*lhs) == SSA_NAME)
2508 /* If the statement defines an induction variable, the uses are not
2509 interesting by themselves. */
2511 iv = get_iv (data, *lhs);
2513 if (iv && !integer_zerop (iv->step))
2514 return;
2517 code = gimple_assign_rhs_code (stmt);
2518 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
2519 && (REFERENCE_CLASS_P (*rhs)
2520 || is_gimple_val (*rhs)))
2522 if (REFERENCE_CLASS_P (*rhs))
2523 find_interesting_uses_address (data, stmt, rhs);
2524 else
2525 find_interesting_uses_op (data, *rhs);
2527 if (REFERENCE_CLASS_P (*lhs))
2528 find_interesting_uses_address (data, stmt, lhs);
2529 return;
2531 else if (TREE_CODE_CLASS (code) == tcc_comparison)
2533 find_interesting_uses_cond (data, stmt);
2534 return;
2537 /* TODO -- we should also handle address uses of type
2539 memory = call (whatever);
and
2543 call (memory). */
2546 if (gimple_code (stmt) == GIMPLE_PHI
2547 && gimple_bb (stmt) == data->current_loop->header)
2549 iv = get_iv (data, PHI_RESULT (stmt));
2551 if (iv && !integer_zerop (iv->step))
2552 return;
2555 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
2557 op = USE_FROM_PTR (use_p);
2559 if (TREE_CODE (op) != SSA_NAME)
2560 continue;
2562 iv = get_iv (data, op);
2563 if (!iv)
2564 continue;
2566 if (!find_address_like_use (data, stmt, use_p->use, iv))
2567 find_interesting_uses_op (data, op);
2571 /* Finds interesting uses of induction variables outside of loops
2572 on loop exit edge EXIT. */
2574 static void
2575 find_interesting_uses_outside (struct ivopts_data *data, edge exit)
2577 gphi *phi;
2578 gphi_iterator psi;
2579 tree def;
2581 for (psi = gsi_start_phis (exit->dest); !gsi_end_p (psi); gsi_next (&psi))
2583 phi = psi.phi ();
2584 def = PHI_ARG_DEF_FROM_EDGE (phi, exit);
2585 if (!virtual_operand_p (def))
2586 find_interesting_uses_op (data, def);
2590 /* Return TRUE if OFFSET is within the range of the [base + offset] addressing
2591 mode for the memory reference represented by USE. */
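/* Scratch addresses of the form (reg + offset), one per (address space,
   memory mode) pair, cached and reused by addr_offset_valid_p below. */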
2593 static GTY (()) vec<rtx, va_gc> *addr_list;
2595 static bool
2596 addr_offset_valid_p (struct iv_use *use, poly_int64 offset)
2598 rtx reg, addr;
2599 unsigned list_index;
2600 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (use->iv->base));
2601 machine_mode addr_mode, mem_mode = TYPE_MODE (use->mem_type);
2603 list_index = (unsigned) as * MAX_MACHINE_MODE + (unsigned) mem_mode;
2604 if (list_index >= vec_safe_length (addr_list))
2605 vec_safe_grow_cleared (addr_list, list_index + MAX_MACHINE_MODE, true);
2607 addr = (*addr_list)[list_index];
2608 if (!addr)
2610 addr_mode = targetm.addr_space.address_mode (as);
2611 reg = gen_raw_REG (addr_mode, LAST_VIRTUAL_REGISTER + 1);
2612 addr = gen_rtx_fmt_ee (PLUS, addr_mode, reg, NULL_RTX);
2613 (*addr_list)[list_index] = addr;
2615 else
2616 addr_mode = GET_MODE (addr);
2618 XEXP (addr, 1) = gen_int_mode (offset, addr_mode);
2619 return (memory_address_addr_space_p (mem_mode, addr, as));
2622 /* Comparison function to sort group in ascending order of addr_offset. */
2624 static int
2625 group_compare_offset (const void *a, const void *b)
2627 const struct iv_use *const *u1 = (const struct iv_use *const *) a;
2628 const struct iv_use *const *u2 = (const struct iv_use *const *) b;
2630 return compare_sizes_for_sort ((*u1)->addr_offset, (*u2)->addr_offset);
2633 /* Check if small groups should be split. Return true if no group
2634 contains more than two uses with distinct addr_offsets. Return
2635 false otherwise. We want to split such groups because:
2637 1) Small groups don't have much benefit and may interfere with
2638 general candidate selection.
2639 2) The problem size with only small groups is usually small and the
2640 general algorithm can handle it well.
2642 TODO -- The above claim may not hold when we want to merge memory
2643 accesses with consecutive addresses. */
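/* For instance, if every group covers at most two distinct offsets, say
   {0, 4}, this returns true and the groups are split; a single group
   covering offsets {0, 4, 8, 16} is enough to make it return false. */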
2645 static bool
2646 split_small_address_groups_p (struct ivopts_data *data)
2648 unsigned int i, j, distinct = 1;
2649 struct iv_use *pre;
2650 struct iv_group *group;
2652 for (i = 0; i < data->vgroups.length (); i++)
2654 group = data->vgroups[i];
2655 if (group->vuses.length () == 1)
2656 continue;
2658 gcc_assert (address_p (group->type));
2659 if (group->vuses.length () == 2)
2661 if (compare_sizes_for_sort (group->vuses[0]->addr_offset,
2662 group->vuses[1]->addr_offset) > 0)
2663 std::swap (group->vuses[0], group->vuses[1]);
2665 else
2666 group->vuses.qsort (group_compare_offset);
2668 if (distinct > 2)
2669 continue;
2671 distinct = 1;
2672 for (pre = group->vuses[0], j = 1; j < group->vuses.length (); j++)
2674 if (maybe_ne (group->vuses[j]->addr_offset, pre->addr_offset))
2676 pre = group->vuses[j];
2677 distinct++;
2680 if (distinct > 2)
2681 break;
2685 return (distinct <= 2);
2688 /* For each group of address type uses, this function further groups
2689 these uses according to the maximum offset supported by target's
2690 [base + offset] addressing mode. */
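/* For instance, a use at base + 65536 is moved out of the group of a use at
   base whenever the target's [base + offset] addressing mode cannot encode
   the offset 65536, or when the groups are split unconditionally (see
   split_small_address_groups_p above). */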
2692 static void
2693 split_address_groups (struct ivopts_data *data)
2695 unsigned int i, j;
2696 /* Whether every group should be split unconditionally. */
2697 bool split_p = split_small_address_groups_p (data);
2699 for (i = 0; i < data->vgroups.length (); i++)
2701 struct iv_group *new_group = NULL;
2702 struct iv_group *group = data->vgroups[i];
2703 struct iv_use *use = group->vuses[0];
2705 use->id = 0;
2706 use->group_id = group->id;
2707 if (group->vuses.length () == 1)
2708 continue;
2710 gcc_assert (address_p (use->type));
2712 for (j = 1; j < group->vuses.length ();)
2714 struct iv_use *next = group->vuses[j];
2715 poly_int64 offset = next->addr_offset - use->addr_offset;
2717 /* Split the group if asked to, or if the offset against the first
2718 use can't fit in the offset part of the addressing mode. IV uses
2719 having the same offset are still kept in one group. */
2720 if (maybe_ne (offset, 0)
2721 && (split_p || !addr_offset_valid_p (use, offset)))
2723 if (!new_group)
2724 new_group = record_group (data, group->type);
2725 group->vuses.ordered_remove (j);
2726 new_group->vuses.safe_push (next);
2727 continue;
2730 next->id = j;
2731 next->group_id = group->id;
2732 j++;
2737 /* Finds uses of the induction variables that are interesting. */
2739 static void
2740 find_interesting_uses (struct ivopts_data *data, basic_block *body)
2742 basic_block bb;
2743 gimple_stmt_iterator bsi;
2744 unsigned i;
2745 edge e;
2747 for (i = 0; i < data->current_loop->num_nodes; i++)
2749 edge_iterator ei;
2750 bb = body[i];
2752 FOR_EACH_EDGE (e, ei, bb->succs)
2753 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
2754 && !flow_bb_inside_loop_p (data->current_loop, e->dest))
2755 find_interesting_uses_outside (data, e);
2757 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2758 find_interesting_uses_stmt (data, gsi_stmt (bsi));
2759 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2760 if (!is_gimple_debug (gsi_stmt (bsi)))
2761 find_interesting_uses_stmt (data, gsi_stmt (bsi));
2764 split_address_groups (data);
2766 if (dump_file && (dump_flags & TDF_DETAILS))
2768 fprintf (dump_file, "\n<IV Groups>:\n");
2769 dump_groups (dump_file, data);
2770 fprintf (dump_file, "\n");
2774 /* Strips constant offsets from EXPR and stores them to OFFSET. If INSIDE_ADDR
2775 is true, assume we are inside an address. If TOP_COMPREF is true, assume
2776 we are at the top-level of the processed address. */
2778 static tree
2779 strip_offset_1 (tree expr, bool inside_addr, bool top_compref,
2780 poly_int64 *offset)
2782 tree op0 = NULL_TREE, op1 = NULL_TREE, tmp, step;
2783 enum tree_code code;
2784 tree type, orig_type = TREE_TYPE (expr);
2785 poly_int64 off0, off1;
2786 HOST_WIDE_INT st;
2787 tree orig_expr = expr;
2789 STRIP_NOPS (expr);
2791 type = TREE_TYPE (expr);
2792 code = TREE_CODE (expr);
2793 *offset = 0;
2795 switch (code)
2797 case POINTER_PLUS_EXPR:
2798 case PLUS_EXPR:
2799 case MINUS_EXPR:
2800 op0 = TREE_OPERAND (expr, 0);
2801 op1 = TREE_OPERAND (expr, 1);
2803 op0 = strip_offset_1 (op0, false, false, &off0);
2804 op1 = strip_offset_1 (op1, false, false, &off1);
2806 *offset = (code == MINUS_EXPR ? off0 - off1 : off0 + off1);
2807 if (op0 == TREE_OPERAND (expr, 0)
2808 && op1 == TREE_OPERAND (expr, 1))
2809 return orig_expr;
2811 if (integer_zerop (op1))
2812 expr = op0;
2813 else if (integer_zerop (op0))
2815 if (code == MINUS_EXPR)
2816 expr = fold_build1 (NEGATE_EXPR, type, op1);
2817 else
2818 expr = op1;
2820 else
2821 expr = fold_build2 (code, type, op0, op1);
2823 return fold_convert (orig_type, expr);
2825 case MULT_EXPR:
2826 op1 = TREE_OPERAND (expr, 1);
2827 if (!cst_and_fits_in_hwi (op1))
2828 return orig_expr;
2830 op0 = TREE_OPERAND (expr, 0);
2831 op0 = strip_offset_1 (op0, false, false, &off0);
2832 if (op0 == TREE_OPERAND (expr, 0))
2833 return orig_expr;
2835 *offset = off0 * int_cst_value (op1);
2836 if (integer_zerop (op0))
2837 expr = op0;
2838 else
2839 expr = fold_build2 (MULT_EXPR, type, op0, op1);
2841 return fold_convert (orig_type, expr);
2843 case ARRAY_REF:
2844 case ARRAY_RANGE_REF:
2845 if (!inside_addr)
2846 return orig_expr;
2848 step = array_ref_element_size (expr);
2849 if (!cst_and_fits_in_hwi (step))
2850 break;
2852 st = int_cst_value (step);
2853 op1 = TREE_OPERAND (expr, 1);
2854 op1 = strip_offset_1 (op1, false, false, &off1);
2855 *offset = off1 * st;
2857 if (top_compref
2858 && integer_zerop (op1))
2860 /* Strip the component reference completely. */
2861 op0 = TREE_OPERAND (expr, 0);
2862 op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
2863 *offset += off0;
2864 return op0;
2866 break;
2868 case COMPONENT_REF:
2870 tree field;
2872 if (!inside_addr)
2873 return orig_expr;
2875 tmp = component_ref_field_offset (expr);
2876 field = TREE_OPERAND (expr, 1);
2877 if (top_compref
2878 && cst_and_fits_in_hwi (tmp)
2879 && cst_and_fits_in_hwi (DECL_FIELD_BIT_OFFSET (field)))
2881 HOST_WIDE_INT boffset, abs_off;
2883 /* Strip the component reference completely. */
2884 op0 = TREE_OPERAND (expr, 0);
2885 op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
2886 boffset = int_cst_value (DECL_FIELD_BIT_OFFSET (field));
2887 abs_off = abs_hwi (boffset) / BITS_PER_UNIT;
2888 if (boffset < 0)
2889 abs_off = -abs_off;
2891 *offset = off0 + int_cst_value (tmp) + abs_off;
2892 return op0;
2895 break;
2897 case ADDR_EXPR:
2898 op0 = TREE_OPERAND (expr, 0);
2899 op0 = strip_offset_1 (op0, true, true, &off0);
2900 *offset += off0;
2902 if (op0 == TREE_OPERAND (expr, 0))
2903 return orig_expr;
2905 expr = build_fold_addr_expr (op0);
2906 return fold_convert (orig_type, expr);
2908 case MEM_REF:
2909 /* ??? Offset operand? */
2910 inside_addr = false;
2911 break;
2913 default:
2914 if (ptrdiff_tree_p (expr, offset) && maybe_ne (*offset, 0))
2915 return build_int_cst (orig_type, 0);
2916 return orig_expr;
2919 /* Default handling of expressions for which we want to recurse into
2920 the first operand. */
2921 op0 = TREE_OPERAND (expr, 0);
2922 op0 = strip_offset_1 (op0, inside_addr, false, &off0);
2923 *offset += off0;
2925 if (op0 == TREE_OPERAND (expr, 0)
2926 && (!op1 || op1 == TREE_OPERAND (expr, 1)))
2927 return orig_expr;
2929 expr = copy_node (expr);
2930 TREE_OPERAND (expr, 0) = op0;
2931 if (op1)
2932 TREE_OPERAND (expr, 1) = op1;
2934 /* Inside an address, we might strip the top-level component references,
2935 thus changing the type of the expression. The handling of ADDR_EXPR
2936 will fix that. */
2937 expr = fold_convert (orig_type, expr);
2939 return expr;
2942 /* Strips constant offsets from EXPR and stores them to OFFSET. */
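/* For instance, for EXPR = &a[i + 3] with 4-byte array elements this returns
   &a[i] and stores 12 to *OFFSET. */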
2944 tree
2945 strip_offset (tree expr, poly_uint64_pod *offset)
2947 poly_int64 off;
2948 tree core = strip_offset_1 (expr, false, false, &off);
2949 *offset = off;
2950 return core;
2953 /* Returns variant of TYPE that can be used as base for different uses.
2954 We return unsigned type with the same precision, which avoids problems
2955 with overflows. */
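/* For instance, for 'int' this returns 'unsigned int'; pointer types are
   likewise mapped to an unsigned integer type of the same precision. */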
2957 static tree
2958 generic_type_for (tree type)
2960 if (POINTER_TYPE_P (type))
2961 return unsigned_type_for (type);
2963 if (TYPE_UNSIGNED (type))
2964 return type;
2966 return unsigned_type_for (type);
2969 /* Private data for walk_tree. */
2971 struct walk_tree_data
2973 bitmap *inv_vars;
2974 struct ivopts_data *idata;
2977 /* Callback function for walk_tree; it records invariants and symbol
2978 references in *EXPR_P. DATA is the structure storing the result info. */
2980 static tree
2981 find_inv_vars_cb (tree *expr_p, int *ws ATTRIBUTE_UNUSED, void *data)
2983 tree op = *expr_p;
2984 struct version_info *info;
2985 struct walk_tree_data *wdata = (struct walk_tree_data*) data;
2987 if (TREE_CODE (op) != SSA_NAME)
2988 return NULL_TREE;
2990 info = name_info (wdata->idata, op);
2991 /* Because we expand simple operations when finding IVs, a loop invariant
2992 variable that isn't referred to by the original loop could be used now.
2993 Record such invariant variables here. */
2994 if (!info->iv)
2996 struct ivopts_data *idata = wdata->idata;
2997 basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (op));
2999 if (!bb || !flow_bb_inside_loop_p (idata->current_loop, bb))
3001 tree steptype = TREE_TYPE (op);
3002 if (POINTER_TYPE_P (steptype))
3003 steptype = sizetype;
3004 set_iv (idata, op, op, build_int_cst (steptype, 0), true);
3005 record_invariant (idata, op, false);
3008 if (!info->inv_id || info->has_nonlin_use)
3009 return NULL_TREE;
3011 if (!*wdata->inv_vars)
3012 *wdata->inv_vars = BITMAP_ALLOC (NULL);
3013 bitmap_set_bit (*wdata->inv_vars, info->inv_id);
3015 return NULL_TREE;
3018 /* Records invariants in *EXPR_P. INV_VARS is the bitmap in which we should
3019 store them. */
3021 static inline void
3022 find_inv_vars (struct ivopts_data *data, tree *expr_p, bitmap *inv_vars)
3024 struct walk_tree_data wdata;
3026 if (!inv_vars)
3027 return;
3029 wdata.idata = data;
3030 wdata.inv_vars = inv_vars;
3031 walk_tree (expr_p, find_inv_vars_cb, &wdata, NULL);
3034 /* Get entry from invariant expr hash table for INV_EXPR. New entry
3035 will be recorded if it doesn't exist yet. Given the two exprs below:
3036 inv_expr + cst1, inv_expr + cst2
3037 it's hard to decide whether the constant part should be stripped
3038 or not. We choose not to strip it, based on the following facts:
3039 1) We need to count the ADD cost for the constant part if it's stripped,
3040 which isn't always trivial where this function is called.
3041 2) Stripping the constant away may conflict with the subsequent loop
3042 invariant hoisting pass.
3043 3) Not stripping the constant away results in more invariant exprs,
3044 which usually leads to decisions preferring lower register pressure. */
3046 static iv_inv_expr_ent *
3047 get_loop_invariant_expr (struct ivopts_data *data, tree inv_expr)
3049 STRIP_NOPS (inv_expr);
3051 if (poly_int_tree_p (inv_expr)
3052 || TREE_CODE (inv_expr) == SSA_NAME)
3053 return NULL;
3055 /* Don't strip constant part away as we used to. */
3057 /* Stores EXPR in DATA->inv_expr_tab, return pointer to iv_inv_expr_ent. */
3058 struct iv_inv_expr_ent ent;
3059 ent.expr = inv_expr;
3060 ent.hash = iterative_hash_expr (inv_expr, 0);
3061 struct iv_inv_expr_ent **slot = data->inv_expr_tab->find_slot (&ent, INSERT);
3063 if (!*slot)
3065 *slot = XNEW (struct iv_inv_expr_ent);
3066 (*slot)->expr = inv_expr;
3067 (*slot)->hash = ent.hash;
3068 (*slot)->id = ++data->max_inv_expr_id;
3071 return *slot;
3074 /* Find the first undefined SSA name in *TP. */
3076 static tree
3077 find_ssa_undef (tree *tp, int *walk_subtrees, void *)
3079 if (TREE_CODE (*tp) == SSA_NAME
3080 && ssa_undefined_value_p (*tp, false))
3081 return *tp;
3082 if (!EXPR_P (*tp))
3083 *walk_subtrees = 0;
3084 return NULL;
3087 /* Adds a candidate BASE + STEP * i. Important field is set to IMPORTANT and
3088 position to POS. If USE is not NULL, the candidate is set as related to
3089 it. If both BASE and STEP are NULL, we add a pseudocandidate for the
3090 replacement of the final value of the iv by a direct computation. */
3092 static struct iv_cand *
3093 add_candidate_1 (struct ivopts_data *data, tree base, tree step, bool important,
3094 enum iv_position pos, struct iv_use *use,
3095 gimple *incremented_at, struct iv *orig_iv = NULL,
3096 bool doloop = false)
3098 unsigned i;
3099 struct iv_cand *cand = NULL;
3100 tree type, orig_type;
3102 gcc_assert (base && step);
3104 /* -fkeep-gc-roots-live means that we have to keep a real pointer
3105 live, but the ivopts code may replace a real pointer with one
3106 pointing before or after the memory block that is then adjusted
3107 into the memory block during the loop. FIXME: It would likely be
3108 better to actually force the pointer live and still use ivopts;
3109 for example, it would be enough to write the pointer into memory
3110 and keep it there until after the loop. */
3111 if (flag_keep_gc_roots_live && POINTER_TYPE_P (TREE_TYPE (base)))
3112 return NULL;
3114 /* If BASE contains undefined SSA names make sure we only record
3115 the original IV. */
3116 bool involves_undefs = false;
3117 if (walk_tree (&base, find_ssa_undef, NULL, NULL))
3119 if (pos != IP_ORIGINAL)
3120 return NULL;
3121 important = false;
3122 involves_undefs = true;
3125 /* For non-original variables, make sure their values are computed in a type
3126 that does not invoke undefined behavior on overflows (since in general,
3127 we cannot prove that these induction variables are non-wrapping). */
3128 if (pos != IP_ORIGINAL)
3130 orig_type = TREE_TYPE (base);
3131 type = generic_type_for (orig_type);
3132 if (type != orig_type)
3134 base = fold_convert (type, base);
3135 step = fold_convert (type, step);
3139 for (i = 0; i < data->vcands.length (); i++)
3141 cand = data->vcands[i];
3143 if (cand->pos != pos)
3144 continue;
3146 if (cand->incremented_at != incremented_at
3147 || ((pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
3148 && cand->ainc_use != use))
3149 continue;
3151 if (operand_equal_p (base, cand->iv->base, 0)
3152 && operand_equal_p (step, cand->iv->step, 0)
3153 && (TYPE_PRECISION (TREE_TYPE (base))
3154 == TYPE_PRECISION (TREE_TYPE (cand->iv->base))))
3155 break;
3158 if (i == data->vcands.length ())
3160 cand = XCNEW (struct iv_cand);
3161 cand->id = i;
3162 cand->iv = alloc_iv (data, base, step);
3163 cand->pos = pos;
3164 if (pos != IP_ORIGINAL)
3166 if (doloop)
3167 cand->var_before = create_tmp_var_raw (TREE_TYPE (base), "doloop");
3168 else
3169 cand->var_before = create_tmp_var_raw (TREE_TYPE (base), "ivtmp");
3170 cand->var_after = cand->var_before;
3172 cand->important = important;
3173 cand->involves_undefs = involves_undefs;
3174 cand->incremented_at = incremented_at;
3175 cand->doloop_p = doloop;
3176 data->vcands.safe_push (cand);
3178 if (!poly_int_tree_p (step))
3180 find_inv_vars (data, &step, &cand->inv_vars);
3182 iv_inv_expr_ent *inv_expr = get_loop_invariant_expr (data, step);
3183 /* Share bitmap between inv_vars and inv_exprs for cand. */
3184 if (inv_expr != NULL)
3186 cand->inv_exprs = cand->inv_vars;
3187 cand->inv_vars = NULL;
3188 if (cand->inv_exprs)
3189 bitmap_clear (cand->inv_exprs);
3190 else
3191 cand->inv_exprs = BITMAP_ALLOC (NULL);
3193 bitmap_set_bit (cand->inv_exprs, inv_expr->id);
3197 if (pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
3198 cand->ainc_use = use;
3199 else
3200 cand->ainc_use = NULL;
3202 cand->orig_iv = orig_iv;
3203 if (dump_file && (dump_flags & TDF_DETAILS))
3204 dump_cand (dump_file, cand);
3207 cand->important |= important;
3208 cand->doloop_p |= doloop;
3210 /* Relate candidate to the group for which it is added. */
3211 if (use)
3212 bitmap_set_bit (data->vgroups[use->group_id]->related_cands, i);
3214 return cand;
3217 /* Returns true if incrementing the induction variable at the end of the LOOP
3218 is allowed.
3220 The purpose is to avoid splitting latch edge with a biv increment, thus
3221 creating a jump, possibly confusing other optimization passes and leaving
3222 less freedom to the scheduler. So we allow IP_END only if IP_NORMAL is not
3223 available (so we do not have a better alternative), or if the latch edge
3224 is already nonempty. */
3226 static bool
3227 allow_ip_end_pos_p (class loop *loop)
3229 if (!ip_normal_pos (loop))
3230 return true;
3232 if (!empty_block_p (ip_end_pos (loop)))
3233 return true;
3235 return false;
3238 /* If possible, adds autoincrement candidates BASE + STEP * i based on use USE.
3239 Important field is set to IMPORTANT. */
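/* Such candidates are only useful when STEP matches the size of the memory
   access, so that the increment can be expressed by one of the target's
   {pre,post}-{increment,decrement} addressing modes. */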
3241 static void
3242 add_autoinc_candidates (struct ivopts_data *data, tree base, tree step,
3243 bool important, struct iv_use *use)
3245 basic_block use_bb = gimple_bb (use->stmt);
3246 machine_mode mem_mode;
3247 unsigned HOST_WIDE_INT cstepi;
3249 /* If we insert the increment in any position other than the standard
3250 ones, we must ensure that it is incremented once per iteration.
3251 It must not be in an inner nested loop, or one side of an if
3252 statement. */
3253 if (use_bb->loop_father != data->current_loop
3254 || !dominated_by_p (CDI_DOMINATORS, data->current_loop->latch, use_bb)
3255 || stmt_can_throw_internal (cfun, use->stmt)
3256 || !cst_and_fits_in_hwi (step))
3257 return;
3259 cstepi = int_cst_value (step);
3261 mem_mode = TYPE_MODE (use->mem_type);
3262 if (((USE_LOAD_PRE_INCREMENT (mem_mode)
3263 || USE_STORE_PRE_INCREMENT (mem_mode))
3264 && known_eq (GET_MODE_SIZE (mem_mode), cstepi))
3265 || ((USE_LOAD_PRE_DECREMENT (mem_mode)
3266 || USE_STORE_PRE_DECREMENT (mem_mode))
3267 && known_eq (GET_MODE_SIZE (mem_mode), -cstepi)))
3269 enum tree_code code = MINUS_EXPR;
3270 tree new_base;
3271 tree new_step = step;
3273 if (POINTER_TYPE_P (TREE_TYPE (base)))
3275 new_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
3276 code = POINTER_PLUS_EXPR;
3278 else
3279 new_step = fold_convert (TREE_TYPE (base), new_step);
3280 new_base = fold_build2 (code, TREE_TYPE (base), base, new_step);
3281 add_candidate_1 (data, new_base, step, important, IP_BEFORE_USE, use,
3282 use->stmt);
3284 if (((USE_LOAD_POST_INCREMENT (mem_mode)
3285 || USE_STORE_POST_INCREMENT (mem_mode))
3286 && known_eq (GET_MODE_SIZE (mem_mode), cstepi))
3287 || ((USE_LOAD_POST_DECREMENT (mem_mode)
3288 || USE_STORE_POST_DECREMENT (mem_mode))
3289 && known_eq (GET_MODE_SIZE (mem_mode), -cstepi)))
3291 add_candidate_1 (data, base, step, important, IP_AFTER_USE, use,
3292 use->stmt);
3296 /* Adds a candidate BASE + STEP * i. Important field is set to IMPORTANT and
3297 position to POS. If USE is not NULL, the candidate is set as related to
3298 it. The candidate computation is scheduled before exit condition and at
3299 the end of loop. */
3301 static void
3302 add_candidate (struct ivopts_data *data, tree base, tree step, bool important,
3303 struct iv_use *use, struct iv *orig_iv = NULL,
3304 bool doloop = false)
3306 if (ip_normal_pos (data->current_loop))
3307 add_candidate_1 (data, base, step, important, IP_NORMAL, use, NULL, orig_iv,
3308 doloop);
3309 /* Exclude doloop candidate here since it requires decrement then comparison
3310 and jump, the IP_END position doesn't match. */
3311 if (!doloop && ip_end_pos (data->current_loop)
3312 && allow_ip_end_pos_p (data->current_loop))
3313 add_candidate_1 (data, base, step, important, IP_END, use, NULL, orig_iv);
3316 /* Adds standard iv candidates. */
3318 static void
3319 add_standard_iv_candidates (struct ivopts_data *data)
3321 add_candidate (data, integer_zero_node, integer_one_node, true, NULL);
3323 /* The same for a double-integer type if it is still fast enough. */
3324 if (TYPE_PRECISION
3325 (long_integer_type_node) > TYPE_PRECISION (integer_type_node)
3326 && TYPE_PRECISION (long_integer_type_node) <= BITS_PER_WORD)
3327 add_candidate (data, build_int_cst (long_integer_type_node, 0),
3328 build_int_cst (long_integer_type_node, 1), true, NULL);
3330 /* The same for a double-integer type if it is still fast enough. */
3331 if (TYPE_PRECISION
3332 (long_long_integer_type_node) > TYPE_PRECISION (long_integer_type_node)
3333 && TYPE_PRECISION (long_long_integer_type_node) <= BITS_PER_WORD)
3334 add_candidate (data, build_int_cst (long_long_integer_type_node, 0),
3335 build_int_cst (long_long_integer_type_node, 1), true, NULL);
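/* On a typical LP64 target, for instance, the above adds the candidates
   0 + 1 * i in int and in long. */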
3339 /* Adds candidates based on the old induction variable IV. */
3341 static void
3342 add_iv_candidate_for_biv (struct ivopts_data *data, struct iv *iv)
3344 gimple *phi;
3345 tree def;
3346 struct iv_cand *cand;
3348 /* Check if this biv is used in address type use. */
3349 if (iv->no_overflow && iv->have_address_use
3350 && INTEGRAL_TYPE_P (TREE_TYPE (iv->base))
3351 && TYPE_PRECISION (TREE_TYPE (iv->base)) < TYPE_PRECISION (sizetype))
3353 tree base = fold_convert (sizetype, iv->base);
3354 tree step = fold_convert (sizetype, iv->step);
3356 /* Add iv cand of same precision as index part in TARGET_MEM_REF. */
3357 add_candidate (data, base, step, true, NULL, iv);
3358 /* Add iv cand of the original type only if it has nonlinear use. */
3359 if (iv->nonlin_use)
3360 add_candidate (data, iv->base, iv->step, true, NULL);
3362 else
3363 add_candidate (data, iv->base, iv->step, true, NULL);
3365 /* The same, but with initial value zero. */
3366 if (POINTER_TYPE_P (TREE_TYPE (iv->base)))
3367 add_candidate (data, size_int (0), iv->step, true, NULL);
3368 else
3369 add_candidate (data, build_int_cst (TREE_TYPE (iv->base), 0),
3370 iv->step, true, NULL);
3372 phi = SSA_NAME_DEF_STMT (iv->ssa_name);
3373 if (gimple_code (phi) == GIMPLE_PHI)
3375 /* Additionally record the possibility of leaving the original iv
3376 untouched. */
3377 def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (data->current_loop));
3378 /* Don't add candidate if it's from another PHI node because
3379 it's an affine iv appearing in the form of PEELED_CHREC. */
3380 phi = SSA_NAME_DEF_STMT (def);
3381 if (gimple_code (phi) != GIMPLE_PHI)
3383 cand = add_candidate_1 (data,
3384 iv->base, iv->step, true, IP_ORIGINAL, NULL,
3385 SSA_NAME_DEF_STMT (def));
3386 if (cand)
3388 cand->var_before = iv->ssa_name;
3389 cand->var_after = def;
3392 else
3393 gcc_assert (gimple_bb (phi) == data->current_loop->header);
3397 /* Adds candidates based on the old induction variables. */
3399 static void
3400 add_iv_candidate_for_bivs (struct ivopts_data *data)
3402 unsigned i;
3403 struct iv *iv;
3404 bitmap_iterator bi;
3406 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
3408 iv = ver_info (data, i)->iv;
3409 if (iv && iv->biv_p && !integer_zerop (iv->step))
3410 add_iv_candidate_for_biv (data, iv);
3414 /* Record common candidate {BASE, STEP} derived from USE in hashtable. */
3416 static void
3417 record_common_cand (struct ivopts_data *data, tree base,
3418 tree step, struct iv_use *use)
3420 class iv_common_cand ent;
3421 class iv_common_cand **slot;
3423 ent.base = base;
3424 ent.step = step;
3425 ent.hash = iterative_hash_expr (base, 0);
3426 ent.hash = iterative_hash_expr (step, ent.hash);
3428 slot = data->iv_common_cand_tab->find_slot (&ent, INSERT);
3429 if (*slot == NULL)
3431 *slot = new iv_common_cand ();
3432 (*slot)->base = base;
3433 (*slot)->step = step;
3434 (*slot)->uses.create (8);
3435 (*slot)->hash = ent.hash;
3436 data->iv_common_cands.safe_push ((*slot));
3439 gcc_assert (use != NULL);
3440 (*slot)->uses.safe_push (use);
3441 return;
3444 /* Comparison function used to sort common candidates by decreasing number of uses. */
3446 static int
3447 common_cand_cmp (const void *p1, const void *p2)
3449 unsigned n1, n2;
3450 const class iv_common_cand *const *const ccand1
3451 = (const class iv_common_cand *const *)p1;
3452 const class iv_common_cand *const *const ccand2
3453 = (const class iv_common_cand *const *)p2;
3455 n1 = (*ccand1)->uses.length ();
3456 n2 = (*ccand2)->uses.length ();
3457 return n2 - n1;
3460 /* Adds IV candidates based on the common candidates recorded. */
3462 static void
3463 add_iv_candidate_derived_from_uses (struct ivopts_data *data)
3465 unsigned i, j;
3466 struct iv_cand *cand_1, *cand_2;
3468 data->iv_common_cands.qsort (common_cand_cmp);
3469 for (i = 0; i < data->iv_common_cands.length (); i++)
3471 class iv_common_cand *ptr = data->iv_common_cands[i];
3473 /* Only add IV candidate if it's derived from multiple uses. */
3474 if (ptr->uses.length () <= 1)
3475 break;
3477 cand_1 = NULL;
3478 cand_2 = NULL;
3479 if (ip_normal_pos (data->current_loop))
3480 cand_1 = add_candidate_1 (data, ptr->base, ptr->step,
3481 false, IP_NORMAL, NULL, NULL);
3483 if (ip_end_pos (data->current_loop)
3484 && allow_ip_end_pos_p (data->current_loop))
3485 cand_2 = add_candidate_1 (data, ptr->base, ptr->step,
3486 false, IP_END, NULL, NULL);
3488 /* Bind deriving uses and the new candidates. */
3489 for (j = 0; j < ptr->uses.length (); j++)
3491 struct iv_group *group = data->vgroups[ptr->uses[j]->group_id];
3492 if (cand_1)
3493 bitmap_set_bit (group->related_cands, cand_1->id);
3494 if (cand_2)
3495 bitmap_set_bit (group->related_cands, cand_2->id);
3499 /* Release data since it is useless from this point. */
3500 data->iv_common_cand_tab->empty ();
3501 data->iv_common_cands.truncate (0);
3504 /* Adds candidates based on the value of USE's iv. */
3506 static void
3507 add_iv_candidate_for_use (struct ivopts_data *data, struct iv_use *use)
3509 poly_uint64 offset;
3510 tree base;
3511 struct iv *iv = use->iv;
3512 tree basetype = TREE_TYPE (iv->base);
3514 /* Don't add a candidate for an iv_use whose type is neither integer nor
3515 pointer, or lacks mode precision; instead, add a candidate for the
3516 corresponding scev in an unsigned type with the same precision. See PR93674 for more info. */
3517 if ((TREE_CODE (basetype) != INTEGER_TYPE && !POINTER_TYPE_P (basetype))
3518 || !type_has_mode_precision_p (basetype))
3520 basetype = lang_hooks.types.type_for_mode (TYPE_MODE (basetype),
3521 TYPE_UNSIGNED (basetype));
3522 add_candidate (data, fold_convert (basetype, iv->base),
3523 fold_convert (basetype, iv->step), false, NULL);
3524 return;
3527 add_candidate (data, iv->base, iv->step, false, use);
3529 /* Record common candidate for use in case it can be shared by others. */
3530 record_common_cand (data, iv->base, iv->step, use);
3532 /* Record common candidate with initial value zero. */
3533 basetype = TREE_TYPE (iv->base);
3534 if (POINTER_TYPE_P (basetype))
3535 basetype = sizetype;
3536 record_common_cand (data, build_int_cst (basetype, 0), iv->step, use);
3538 /* Compare the cost of an address with an unscaled index with the cost of
3539 an address with a scaled index and add candidate if useful. */
3540 poly_int64 step;
3541 if (use != NULL
3542 && poly_int_tree_p (iv->step, &step)
3543 && address_p (use->type))
3545 poly_int64 new_step;
3546 unsigned int fact = preferred_mem_scale_factor
3547 (use->iv->base,
3548 TYPE_MODE (use->mem_type),
3549 optimize_loop_for_speed_p (data->current_loop));
3551 if (fact != 1
3552 && multiple_p (step, fact, &new_step))
3553 add_candidate (data, size_int (0),
3554 wide_int_to_tree (sizetype, new_step),
3555 true, NULL);
3558 /* Record common candidate with constant offset stripped in base.
3559 Like the use itself, we also add candidate directly for it. */
3560 base = strip_offset (iv->base, &offset);
3561 if (maybe_ne (offset, 0U) || base != iv->base)
3563 record_common_cand (data, base, iv->step, use);
3564 add_candidate (data, base, iv->step, false, use);
3567 /* Record common candidate with base_object removed in base. */
3568 base = iv->base;
3569 STRIP_NOPS (base);
3570 if (iv->base_object != NULL && TREE_CODE (base) == POINTER_PLUS_EXPR)
3572 tree step = iv->step;
3574 STRIP_NOPS (step);
3575 base = TREE_OPERAND (base, 1);
3576 step = fold_convert (sizetype, step);
3577 record_common_cand (data, base, step, use);
3578 /* Also record common candidate with offset stripped. */
3579 base = strip_offset (base, &offset);
3580 if (maybe_ne (offset, 0U))
3581 record_common_cand (data, base, step, use);
3584 /* Finally, add auto-increment candidates. Make such variables
3585 important since other iv uses with the same base object may be based
3586 on them. */
3587 if (use != NULL && address_p (use->type))
3588 add_autoinc_candidates (data, iv->base, iv->step, true, use);
3591 /* Adds candidates based on the uses. */
3593 static void
3594 add_iv_candidate_for_groups (struct ivopts_data *data)
3596 unsigned i;
3598 /* Only add a candidate for the first use in each group. */
3599 for (i = 0; i < data->vgroups.length (); i++)
3601 struct iv_group *group = data->vgroups[i];
3603 gcc_assert (group->vuses[0] != NULL);
3604 add_iv_candidate_for_use (data, group->vuses[0]);
3606 add_iv_candidate_derived_from_uses (data);
3609 /* Record important candidates and add them to related_cands bitmaps. */
3611 static void
3612 record_important_candidates (struct ivopts_data *data)
3614 unsigned i;
3615 struct iv_group *group;
3617 for (i = 0; i < data->vcands.length (); i++)
3619 struct iv_cand *cand = data->vcands[i];
3621 if (cand->important)
3622 bitmap_set_bit (data->important_candidates, i);
3625 data->consider_all_candidates = (data->vcands.length ()
3626 <= CONSIDER_ALL_CANDIDATES_BOUND);
3628 /* Add important candidates to groups' related_cands bitmaps. */
3629 for (i = 0; i < data->vgroups.length (); i++)
3631 group = data->vgroups[i];
3632 bitmap_ior_into (group->related_cands, data->important_candidates);
3636 /* Allocates the data structure mapping the (use, candidate) pairs to costs.
3637 If consider_all_candidates is true, we use a two-dimensional array, otherwise
3638 we allocate a simple list to every use. */
3640 static void
3641 alloc_use_cost_map (struct ivopts_data *data)
3643 unsigned i, size, s;
3645 for (i = 0; i < data->vgroups.length (); i++)
3647 struct iv_group *group = data->vgroups[i];
3649 if (data->consider_all_candidates)
3650 size = data->vcands.length ();
3651 else
3653 s = bitmap_count_bits (group->related_cands);
3655 /* Round up to a power of two, so that computing the modulo by it is fast. */
3656 size = s ? (1 << ceil_log2 (s)) : 1;
3659 group->n_map_members = size;
3660 group->cost_map = XCNEWVEC (class cost_pair, size);
3664 /* Sets cost of (GROUP, CAND) pair to COST and record that it depends
3665 on invariants INV_VARS and that the value used in expressing it is
3666 VALUE, and in case of iv elimination the comparison operator is COMP. */
3668 static void
3669 set_group_iv_cost (struct ivopts_data *data,
3670 struct iv_group *group, struct iv_cand *cand,
3671 comp_cost cost, bitmap inv_vars, tree value,
3672 enum tree_code comp, bitmap inv_exprs)
3674 unsigned i, s;
3676 if (cost.infinite_cost_p ())
3678 BITMAP_FREE (inv_vars);
3679 BITMAP_FREE (inv_exprs);
3680 return;
3683 if (data->consider_all_candidates)
3685 group->cost_map[cand->id].cand = cand;
3686 group->cost_map[cand->id].cost = cost;
3687 group->cost_map[cand->id].inv_vars = inv_vars;
3688 group->cost_map[cand->id].inv_exprs = inv_exprs;
3689 group->cost_map[cand->id].value = value;
3690 group->cost_map[cand->id].comp = comp;
3691 return;
3694 /* n_map_members is a power of two, so this computes modulo. */
3695 s = cand->id & (group->n_map_members - 1);
3696 for (i = s; i < group->n_map_members; i++)
3697 if (!group->cost_map[i].cand)
3698 goto found;
3699 for (i = 0; i < s; i++)
3700 if (!group->cost_map[i].cand)
3701 goto found;
3703 gcc_unreachable ();
3705 found:
3706 group->cost_map[i].cand = cand;
3707 group->cost_map[i].cost = cost;
3708 group->cost_map[i].inv_vars = inv_vars;
3709 group->cost_map[i].inv_exprs = inv_exprs;
3710 group->cost_map[i].value = value;
3711 group->cost_map[i].comp = comp;
3714 /* Gets cost of (GROUP, CAND) pair. */
3716 static class cost_pair *
3717 get_group_iv_cost (struct ivopts_data *data, struct iv_group *group,
3718 struct iv_cand *cand)
3720 unsigned i, s;
3721 class cost_pair *ret;
3723 if (!cand)
3724 return NULL;
3726 if (data->consider_all_candidates)
3728 ret = group->cost_map + cand->id;
3729 if (!ret->cand)
3730 return NULL;
3732 return ret;
3735 /* n_map_members is a power of two, so this computes modulo. */
3736 s = cand->id & (group->n_map_members - 1);
3737 for (i = s; i < group->n_map_members; i++)
3738 if (group->cost_map[i].cand == cand)
3739 return group->cost_map + i;
3740 else if (group->cost_map[i].cand == NULL)
3741 return NULL;
3742 for (i = 0; i < s; i++)
3743 if (group->cost_map[i].cand == cand)
3744 return group->cost_map + i;
3745 else if (group->cost_map[i].cand == NULL)
3746 return NULL;
3748 return NULL;
3751 /* Produce DECL_RTL for object obj so it looks like it is stored in memory. */
3752 static rtx
3753 produce_memory_decl_rtl (tree obj, int *regno)
3755 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (obj));
3756 machine_mode address_mode = targetm.addr_space.address_mode (as);
3757 rtx x;
3759 gcc_assert (obj);
3760 if (TREE_STATIC (obj) || DECL_EXTERNAL (obj))
3762 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (obj));
3763 x = gen_rtx_SYMBOL_REF (address_mode, name);
3764 SET_SYMBOL_REF_DECL (x, obj);
3765 x = gen_rtx_MEM (DECL_MODE (obj), x);
3766 set_mem_addr_space (x, as);
3767 targetm.encode_section_info (obj, x, true);
3769 else
3771 x = gen_raw_REG (address_mode, (*regno)++);
3772 x = gen_rtx_MEM (DECL_MODE (obj), x);
3773 set_mem_addr_space (x, as);
3776 return x;
3779 /* Prepares decl_rtl for variables referred in *EXPR_P. Callback for
3780 walk_tree. DATA contains the actual fake register number. */
3782 static tree
3783 prepare_decl_rtl (tree *expr_p, int *ws, void *data)
3785 tree obj = NULL_TREE;
3786 rtx x = NULL_RTX;
3787 int *regno = (int *) data;
3789 switch (TREE_CODE (*expr_p))
3791 case ADDR_EXPR:
3792 for (expr_p = &TREE_OPERAND (*expr_p, 0);
3793 handled_component_p (*expr_p);
3794 expr_p = &TREE_OPERAND (*expr_p, 0))
3795 continue;
3796 obj = *expr_p;
3797 if (DECL_P (obj) && HAS_RTL_P (obj) && !DECL_RTL_SET_P (obj))
3798 x = produce_memory_decl_rtl (obj, regno);
3799 break;
3801 case SSA_NAME:
3802 *ws = 0;
3803 obj = SSA_NAME_VAR (*expr_p);
3804 /* Defer handling of anonymous SSA_NAMEs to the expander. */
3805 if (!obj)
3806 return NULL_TREE;
3807 if (!DECL_RTL_SET_P (obj))
3808 x = gen_raw_REG (DECL_MODE (obj), (*regno)++);
3809 break;
3811 case VAR_DECL:
3812 case PARM_DECL:
3813 case RESULT_DECL:
3814 *ws = 0;
3815 obj = *expr_p;
3817 if (DECL_RTL_SET_P (obj))
3818 break;
3820 if (DECL_MODE (obj) == BLKmode)
3821 x = produce_memory_decl_rtl (obj, regno);
3822 else
3823 x = gen_raw_REG (DECL_MODE (obj), (*regno)++);
3825 break;
3827 default:
3828 break;
3831 if (x)
3833 decl_rtl_to_reset.safe_push (obj);
3834 SET_DECL_RTL (obj, x);
3837 return NULL_TREE;
3840 /* Predict whether the given loop will be transformed in the RTL
3841 doloop_optimize pass. Attempt to duplicate some doloop_optimize checks.
3842 This is only for target independent checks, see targetm.predict_doloop_p
3843 for the target dependent ones.
3845 Note that according to some initial investigation, some checks, like the
3846 costly niter check and invalid stmt scanning, don't gain much in general
3847 cases, so keep this as simple as possible for now.
3849 Some RTL specific checks seem impossible to do in gimple; if any new
3850 checks or easy checks _are_ missing here, please add them. */
3852 static bool
3853 generic_predict_doloop_p (struct ivopts_data *data)
3855 class loop *loop = data->current_loop;
3857 /* Call target hook for target dependent checks. */
3858 if (!targetm.predict_doloop_p (loop))
3860 if (dump_file && (dump_flags & TDF_DETAILS))
3861 fprintf (dump_file, "Predict doloop failure due to"
3862 " target specific checks.\n");
3863 return false;
3866 /* Similar to doloop_optimize, check the iteration description to know
3867 whether it's suitable or not. Keep it as simple as possible; feel free to
3868 extend it if you find that any multiple-exit cases matter. */
3869 edge exit = single_dom_exit (loop);
3870 class tree_niter_desc *niter_desc;
3871 if (!exit || !(niter_desc = niter_for_exit (data, exit)))
3873 if (dump_file && (dump_flags & TDF_DETAILS))
3874 fprintf (dump_file, "Predict doloop failure due to"
3875 " unexpected niters.\n");
3876 return false;
3879 /* Similar to doloop_optimize, check whether the iteration count is too
3880 small for the transform to be profitable. */
3881 HOST_WIDE_INT est_niter = get_estimated_loop_iterations_int (loop);
3882 if (est_niter == -1)
3883 est_niter = get_likely_max_loop_iterations_int (loop);
3884 if (est_niter >= 0 && est_niter < 3)
3886 if (dump_file && (dump_flags & TDF_DETAILS))
3887 fprintf (dump_file,
3888 "Predict doloop failure due to"
3889 " too few iterations (%u).\n",
3890 (unsigned int) est_niter);
3891 return false;
3894 return true;
3897 /* Determines cost of the computation of EXPR. */
3899 static unsigned
3900 computation_cost (tree expr, bool speed)
3902 rtx_insn *seq;
3903 rtx rslt;
3904 tree type = TREE_TYPE (expr);
3905 unsigned cost;
3906 /* Avoid using hard regs in ways which may be unsupported. */
3907 int regno = LAST_VIRTUAL_REGISTER + 1;
3908 struct cgraph_node *node = cgraph_node::get (current_function_decl);
3909 enum node_frequency real_frequency = node->frequency;
3911 node->frequency = NODE_FREQUENCY_NORMAL;
3912 crtl->maybe_hot_insn_p = speed;
3913 walk_tree (&expr, prepare_decl_rtl, &regno, NULL);
3914 start_sequence ();
3915 rslt = expand_expr (expr, NULL_RTX, TYPE_MODE (type), EXPAND_NORMAL);
3916 seq = get_insns ();
3917 end_sequence ();
3918 default_rtl_profile ();
3919 node->frequency = real_frequency;
3921 cost = seq_cost (seq, speed);
3922 if (MEM_P (rslt))
3923 cost += address_cost (XEXP (rslt, 0), TYPE_MODE (type),
3924 TYPE_ADDR_SPACE (type), speed);
3925 else if (!REG_P (rslt))
3926 cost += set_src_cost (rslt, TYPE_MODE (type), speed);
3928 return cost;
3931 /* Returns variable containing the value of candidate CAND at statement AT. */
3933 static tree
3934 var_at_stmt (class loop *loop, struct iv_cand *cand, gimple *stmt)
3936 if (stmt_after_increment (loop, cand, stmt))
3937 return cand->var_after;
3938 else
3939 return cand->var_before;
3942 /* If A is (TYPE) BA and B is (TYPE) BB, and the types of BA and BB have the
3943 same precision that is at least as wide as the precision of TYPE, stores
3944 BA to A and BB to B, and returns the type of BA. Otherwise, returns the
3945 type of A and B. */
3947 static tree
3948 determine_common_wider_type (tree *a, tree *b)
3950 tree wider_type = NULL;
3951 tree suba, subb;
3952 tree atype = TREE_TYPE (*a);
3954 if (CONVERT_EXPR_P (*a))
3956 suba = TREE_OPERAND (*a, 0);
3957 wider_type = TREE_TYPE (suba);
3958 if (TYPE_PRECISION (wider_type) < TYPE_PRECISION (atype))
3959 return atype;
3961 else
3962 return atype;
3964 if (CONVERT_EXPR_P (*b))
3966 subb = TREE_OPERAND (*b, 0);
3967 if (TYPE_PRECISION (wider_type) != TYPE_PRECISION (TREE_TYPE (subb)))
3968 return atype;
3970 else
3971 return atype;
3973 *a = suba;
3974 *b = subb;
3975 return wider_type;
3978 /* Determines the expression by which USE is expressed from induction variable
3979 CAND at statement AT in LOOP. The expression is stored in two parts in a
3980 decomposed form. The invariant part is stored in AFF_INV; the variant
3981 part in AFF_VAR. Store the ratio of USE.step over CAND.step in PRAT if
3982 it's non-null. Returns false if USE cannot be expressed using CAND. */
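/* For instance, for USE with base 4 and step 8 and CAND with base 0 and step
   2 the ratio is 4, AFF_INV ends up as 4 and AFF_VAR as 4 * cand, where cand
   is CAND's value at AT (assuming AT is before CAND's increment). */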
3984 static bool
3985 get_computation_aff_1 (class loop *loop, gimple *at, struct iv_use *use,
3986 struct iv_cand *cand, class aff_tree *aff_inv,
3987 class aff_tree *aff_var, widest_int *prat = NULL)
3989 tree ubase = use->iv->base, ustep = use->iv->step;
3990 tree cbase = cand->iv->base, cstep = cand->iv->step;
3991 tree common_type, uutype, var, cstep_common;
3992 tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
3993 aff_tree aff_cbase;
3994 widest_int rat;
3996 /* We must have a precision to express the values of use. */
3997 if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
3998 return false;
4000 var = var_at_stmt (loop, cand, at);
4001 uutype = unsigned_type_for (utype);
4003 /* If the conversion is not noop, perform it. */
4004 if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
4006 if (cand->orig_iv != NULL && CONVERT_EXPR_P (cbase)
4007 && (CONVERT_EXPR_P (cstep) || poly_int_tree_p (cstep)))
4009 tree inner_base, inner_step, inner_type;
4010 inner_base = TREE_OPERAND (cbase, 0);
4011 if (CONVERT_EXPR_P (cstep))
4012 inner_step = TREE_OPERAND (cstep, 0);
4013 else
4014 inner_step = cstep;
4016 inner_type = TREE_TYPE (inner_base);
4017 /* If candidate is added from a biv whose type is smaller than
4018 ctype, we know both candidate and the biv won't overflow.
4019 In this case, it's safe to skip the conversion in the candidate.
4020 As an example, (unsigned short)((unsigned long)A) equals
4021 (unsigned short)A, if A has a type no larger than short. */
4022 if (TYPE_PRECISION (inner_type) <= TYPE_PRECISION (uutype))
4024 cbase = inner_base;
4025 cstep = inner_step;
4028 cbase = fold_convert (uutype, cbase);
4029 cstep = fold_convert (uutype, cstep);
4030 var = fold_convert (uutype, var);
4033 /* Ratio is 1 when computing the value of biv cand by itself.
4034 We can't rely on constant_multiple_of in this case because the
4035 use is created after the original biv is selected. The call
4036 could fail because of inconsistent fold behavior. See PR68021
4037 for more information. */
4038 if (cand->pos == IP_ORIGINAL && cand->incremented_at == use->stmt)
4040 gcc_assert (is_gimple_assign (use->stmt));
4041 gcc_assert (use->iv->ssa_name == cand->var_after);
4042 gcc_assert (gimple_assign_lhs (use->stmt) == cand->var_after);
4043 rat = 1;
4045 else if (!constant_multiple_of (ustep, cstep, &rat))
4046 return false;
4048 if (prat)
4049 *prat = rat;
4051 /* In case both UBASE and CBASE are shortened to UUTYPE from some common
4052 type, we achieve better folding by computing their difference in this
4053 wider type, and casting the result to UUTYPE. We do not need to worry about
4054 overflows, as all the arithmetic will in the end be performed in UUTYPE
4055 anyway. */
4056 common_type = determine_common_wider_type (&ubase, &cbase);
4058 /* use = ubase - ratio * cbase + ratio * var. */
4059 tree_to_aff_combination (ubase, common_type, aff_inv);
4060 tree_to_aff_combination (cbase, common_type, &aff_cbase);
4061 tree_to_aff_combination (var, uutype, aff_var);
4063 /* We need to shift the value if we are after the increment. */
4064 if (stmt_after_increment (loop, cand, at))
4066 aff_tree cstep_aff;
4068 if (common_type != uutype)
4069 cstep_common = fold_convert (common_type, cstep);
4070 else
4071 cstep_common = cstep;
4073 tree_to_aff_combination (cstep_common, common_type, &cstep_aff);
4074 aff_combination_add (&aff_cbase, &cstep_aff);
4077 aff_combination_scale (&aff_cbase, -rat);
4078 aff_combination_add (aff_inv, &aff_cbase);
4079 if (common_type != uutype)
4080 aff_combination_convert (aff_inv, uutype);
4082 aff_combination_scale (aff_var, rat);
4083 return true;
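/* A worked instance of the decomposition above, with made-up values: let USE
   have IV {base = 4, step = 8} and CAND have IV {base = 0, step = 4}, with VAR
   the candidate variable (VAR = 4 * i before the increment).  Then
   constant_multiple_of gives ratio = 8 / 4 = 2, AFF_INV = 4 - 2 * 0 = 4 and
   AFF_VAR = 2 * VAR, so the use value 4 + 8 * i is recovered as
   4 + 2 * (4 * i).  */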
4086 /* Determines the expression by which USE is expressed from induction variable
4087 CAND at statement AT in LOOP. The expression is stored in a decomposed
4088 form into AFF. Returns false if USE cannot be expressed using CAND. */
4090 static bool
4091 get_computation_aff (class loop *loop, gimple *at, struct iv_use *use,
4092 struct iv_cand *cand, class aff_tree *aff)
4094 aff_tree aff_var;
4096 if (!get_computation_aff_1 (loop, at, use, cand, aff, &aff_var))
4097 return false;
4099 aff_combination_add (aff, &aff_var);
4100 return true;
4103 /* Return the type of USE. */
4105 static tree
4106 get_use_type (struct iv_use *use)
4108 tree base_type = TREE_TYPE (use->iv->base);
4109 tree type;
4111 if (use->type == USE_REF_ADDRESS)
4113 /* The base_type may be a void pointer. Create a pointer type based on
4114 the mem_ref instead. */
4115 type = build_pointer_type (TREE_TYPE (*use->op_p));
4116 gcc_assert (TYPE_ADDR_SPACE (TREE_TYPE (type))
4117 == TYPE_ADDR_SPACE (TREE_TYPE (base_type)));
4119 else
4120 type = base_type;
4122 return type;
4125 /* Determines the expression by which USE is expressed from induction variable
4126 CAND at statement AT in LOOP. The computation is unshared. */
4128 static tree
4129 get_computation_at (class loop *loop, gimple *at,
4130 struct iv_use *use, struct iv_cand *cand)
4132 aff_tree aff;
4133 tree type = get_use_type (use);
4135 if (!get_computation_aff (loop, at, use, cand, &aff))
4136 return NULL_TREE;
4137 unshare_aff_combination (&aff);
4138 return fold_convert (type, aff_combination_to_tree (&aff));
4141 /* Like get_computation_at, but try harder, even if the computation
4142 is more expensive. Intended for debug stmts. */
4144 static tree
4145 get_debug_computation_at (class loop *loop, gimple *at,
4146 struct iv_use *use, struct iv_cand *cand)
4148 if (tree ret = get_computation_at (loop, at, use, cand))
4149 return ret;
4151 tree ubase = use->iv->base, ustep = use->iv->step;
4152 tree cbase = cand->iv->base, cstep = cand->iv->step;
4153 tree var;
4154 tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
4155 widest_int rat;
4157 /* We must have enough precision to express the values of USE. */
4158 if (TYPE_PRECISION (utype) >= TYPE_PRECISION (ctype))
4159 return NULL_TREE;
4161 /* Try to handle the case that get_computation_at doesn't,
4162 i.e. try to express
4163 use = ubase + (var - cbase) / ratio. */
4164 if (!constant_multiple_of (cstep, fold_convert (TREE_TYPE (cstep), ustep),
4165 &rat))
4166 return NULL_TREE;
4168 bool neg_p = false;
4169 if (wi::neg_p (rat))
4171 if (TYPE_UNSIGNED (ctype))
4172 return NULL_TREE;
4173 neg_p = true;
4174 rat = wi::neg (rat);
4177 /* If both IVs can wrap around and CAND doesn't have a power of two step,
4178 it is unsafe. Consider uint16_t CAND with step 9, when wrapping around,
4179 the values will be ... 0xfff0, 0xfff9, 2, 11 ... and when use is say
4180 uint8_t with step 3, those values divided by 3 cast to uint8_t will be
4181 ... 0x50, 0x53, 0, 3 ... rather than expected 0x50, 0x53, 0x56, 0x59. */
4182 if (!use->iv->no_overflow
4183 && !cand->iv->no_overflow
4184 && !integer_pow2p (cstep))
4185 return NULL_TREE;
4187 int bits = wi::exact_log2 (rat);
4188 if (bits == -1)
4189 bits = wi::floor_log2 (rat) + 1;
4190 if (!cand->iv->no_overflow
4191 && TYPE_PRECISION (utype) + bits > TYPE_PRECISION (ctype))
4192 return NULL_TREE;
4194 var = var_at_stmt (loop, cand, at);
4196 if (POINTER_TYPE_P (ctype))
4198 ctype = unsigned_type_for (ctype);
4199 cbase = fold_convert (ctype, cbase);
4200 cstep = fold_convert (ctype, cstep);
4201 var = fold_convert (ctype, var);
4204 if (stmt_after_increment (loop, cand, at))
4205 var = fold_build2 (MINUS_EXPR, TREE_TYPE (var), var,
4206 unshare_expr (cstep));
4208 var = fold_build2 (MINUS_EXPR, TREE_TYPE (var), var, cbase);
4209 var = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (var), var,
4210 wide_int_to_tree (TREE_TYPE (var), rat));
4211 if (POINTER_TYPE_P (utype))
4213 var = fold_convert (sizetype, var);
4214 if (neg_p)
4215 var = fold_build1 (NEGATE_EXPR, sizetype, var);
4216 var = fold_build2 (POINTER_PLUS_EXPR, utype, ubase, var);
4218 else
4220 var = fold_convert (utype, var);
4221 var = fold_build2 (neg_p ? MINUS_EXPR : PLUS_EXPR, utype,
4222 ubase, var);
4224 return var;
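/* For instance (hypothetical types): with USE {base = 0, step = 1} of type
   uint8_t and CAND {base = 0, step = 4} of type uint32_t, the fallback above
   computes ratio = cstep / ustep = 4 and rewrites the use for debug purposes
   as (uint8_t) ((VAR - 0) / 4); at iteration i, VAR = 4 * i, so this yields i,
   the value of the use.  The overflow checks above reject cases where
   wrap-around of either IV could make this division produce wrong values.  */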
4227 /* Adjust the cost COST for being in loop setup rather than loop body.
4228 If we're optimizing for space, the loop setup overhead is constant;
4229 if we're optimizing for speed, amortize it over the expected number of
4230 loop iterations. If ROUND_UP_P is true, the result is rounded up rather
4231 than truncated toward zero when optimizing for speed. */
4232 static int64_t
4233 adjust_setup_cost (struct ivopts_data *data, int64_t cost,
4234 bool round_up_p = false)
4236 if (cost == INFTY)
4237 return cost;
4238 else if (optimize_loop_for_speed_p (data->current_loop))
4240 int64_t niters = (int64_t) avg_loop_niter (data->current_loop);
4241 return (cost + (round_up_p ? niters - 1 : 0)) / niters;
4243 else
4244 return cost;
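/* For example, a setup cost of 10 with an average of 4 iterations becomes
   10 / 4 = 2 when optimizing for speed, or (10 + 3) / 4 = 3 with ROUND_UP_P;
   when optimizing for size the setup cost of 10 is kept unchanged.  */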
4247 /* Calculate the speed or size cost of the shiftadd EXPR in MODE. MULT is
4248 the operand of EXPR holding the multiplication that is implemented as a
4249 shift. COST0 and COST1 are the costs for calculating the operands of EXPR.
4250 Returns true if successful, and returns the cost in COST. */
4252 static bool
4253 get_shiftadd_cost (tree expr, scalar_int_mode mode, comp_cost cost0,
4254 comp_cost cost1, tree mult, bool speed, comp_cost *cost)
4256 comp_cost res;
4257 tree op1 = TREE_OPERAND (expr, 1);
4258 tree cst = TREE_OPERAND (mult, 1);
4259 tree multop = TREE_OPERAND (mult, 0);
4260 int m = exact_log2 (int_cst_value (cst));
4261 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
4262 int as_cost, sa_cost;
4263 bool mult_in_op1;
4265 if (!(m >= 0 && m < maxm))
4266 return false;
4268 STRIP_NOPS (op1);
4269 mult_in_op1 = operand_equal_p (op1, mult, 0);
4271 as_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
4273 /* If the target has a cheap shift-and-add or shift-and-sub instruction,
4274 use that in preference to a shift insn followed by an add insn. */
4275 sa_cost = (TREE_CODE (expr) != MINUS_EXPR
4276 ? shiftadd_cost (speed, mode, m)
4277 : (mult_in_op1
4278 ? shiftsub1_cost (speed, mode, m)
4279 : shiftsub0_cost (speed, mode, m)));
4281 res = comp_cost (MIN (as_cost, sa_cost), 0);
4282 res += (mult_in_op1 ? cost0 : cost1);
4284 STRIP_NOPS (multop);
4285 if (!is_gimple_val (multop))
4286 res += force_expr_to_var_cost (multop, speed);
4288 *cost = res;
4289 return true;
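/* For example, for EXPR = a + b * 8 with MULT = b * 8, m = log2 (8) = 3; the
   result is MIN (add_cost + shift_cost (3), shiftadd_cost (3)) plus the cost
   of the non-multiplied operand a (COST0 here, since MULT is operand 1), plus
   the cost of forcing b into a variable if it is not already a gimple value.
   (Illustrative values only.)  */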
4292 /* Estimates cost of forcing expression EXPR into a variable. */
4294 static comp_cost
4295 force_expr_to_var_cost (tree expr, bool speed)
4297 static bool costs_initialized = false;
4298 static unsigned integer_cost [2];
4299 static unsigned symbol_cost [2];
4300 static unsigned address_cost [2];
4301 tree op0, op1;
4302 comp_cost cost0, cost1, cost;
4303 machine_mode mode;
4304 scalar_int_mode int_mode;
4306 if (!costs_initialized)
4308 tree type = build_pointer_type (integer_type_node);
4309 tree var, addr;
4310 rtx x;
4311 int i;
4313 var = create_tmp_var_raw (integer_type_node, "test_var");
4314 TREE_STATIC (var) = 1;
4315 x = produce_memory_decl_rtl (var, NULL);
4316 SET_DECL_RTL (var, x);
4318 addr = build1 (ADDR_EXPR, type, var);
4321 for (i = 0; i < 2; i++)
4323 integer_cost[i] = computation_cost (build_int_cst (integer_type_node,
4324 2000), i);
4326 symbol_cost[i] = computation_cost (addr, i) + 1;
4328 address_cost[i]
4329 = computation_cost (fold_build_pointer_plus_hwi (addr, 2000), i) + 1;
4330 if (dump_file && (dump_flags & TDF_DETAILS))
4332 fprintf (dump_file, "force_expr_to_var_cost %s costs:\n", i ? "speed" : "size");
4333 fprintf (dump_file, " integer %d\n", (int) integer_cost[i]);
4334 fprintf (dump_file, " symbol %d\n", (int) symbol_cost[i]);
4335 fprintf (dump_file, " address %d\n", (int) address_cost[i]);
4336 fprintf (dump_file, " other %d\n", (int) target_spill_cost[i]);
4337 fprintf (dump_file, "\n");
4341 costs_initialized = true;
4344 STRIP_NOPS (expr);
4346 if (SSA_VAR_P (expr))
4347 return no_cost;
4349 if (is_gimple_min_invariant (expr))
4351 if (poly_int_tree_p (expr))
4352 return comp_cost (integer_cost [speed], 0);
4354 if (TREE_CODE (expr) == ADDR_EXPR)
4356 tree obj = TREE_OPERAND (expr, 0);
4358 if (VAR_P (obj)
4359 || TREE_CODE (obj) == PARM_DECL
4360 || TREE_CODE (obj) == RESULT_DECL)
4361 return comp_cost (symbol_cost [speed], 0);
4364 return comp_cost (address_cost [speed], 0);
4367 switch (TREE_CODE (expr))
4369 case POINTER_PLUS_EXPR:
4370 case PLUS_EXPR:
4371 case MINUS_EXPR:
4372 case MULT_EXPR:
4373 case TRUNC_DIV_EXPR:
4374 case BIT_AND_EXPR:
4375 case BIT_IOR_EXPR:
4376 case LSHIFT_EXPR:
4377 case RSHIFT_EXPR:
4378 op0 = TREE_OPERAND (expr, 0);
4379 op1 = TREE_OPERAND (expr, 1);
4380 STRIP_NOPS (op0);
4381 STRIP_NOPS (op1);
4382 break;
4384 CASE_CONVERT:
4385 case NEGATE_EXPR:
4386 case BIT_NOT_EXPR:
4387 op0 = TREE_OPERAND (expr, 0);
4388 STRIP_NOPS (op0);
4389 op1 = NULL_TREE;
4390 break;
4391 /* See add_iv_candidate_for_doloop: for the doloop may_be_zero case we
4392 introduce a COND_EXPR for the IV base, so we need to support better cost
4393 estimation for this COND_EXPR and for tcc_comparison codes. */
4394 case COND_EXPR:
4395 op0 = TREE_OPERAND (expr, 1);
4396 STRIP_NOPS (op0);
4397 op1 = TREE_OPERAND (expr, 2);
4398 STRIP_NOPS (op1);
4399 break;
4400 case LT_EXPR:
4401 case LE_EXPR:
4402 case GT_EXPR:
4403 case GE_EXPR:
4404 case EQ_EXPR:
4405 case NE_EXPR:
4406 case UNORDERED_EXPR:
4407 case ORDERED_EXPR:
4408 case UNLT_EXPR:
4409 case UNLE_EXPR:
4410 case UNGT_EXPR:
4411 case UNGE_EXPR:
4412 case UNEQ_EXPR:
4413 case LTGT_EXPR:
4414 case MAX_EXPR:
4415 case MIN_EXPR:
4416 op0 = TREE_OPERAND (expr, 0);
4417 STRIP_NOPS (op0);
4418 op1 = TREE_OPERAND (expr, 1);
4419 STRIP_NOPS (op1);
4420 break;
4422 default:
4423 /* Just an arbitrary value, FIXME. */
4424 return comp_cost (target_spill_cost[speed], 0);
4427 if (op0 == NULL_TREE
4428 || TREE_CODE (op0) == SSA_NAME || CONSTANT_CLASS_P (op0))
4429 cost0 = no_cost;
4430 else
4431 cost0 = force_expr_to_var_cost (op0, speed);
4433 if (op1 == NULL_TREE
4434 || TREE_CODE (op1) == SSA_NAME || CONSTANT_CLASS_P (op1))
4435 cost1 = no_cost;
4436 else
4437 cost1 = force_expr_to_var_cost (op1, speed);
4439 mode = TYPE_MODE (TREE_TYPE (expr));
4440 switch (TREE_CODE (expr))
4442 case POINTER_PLUS_EXPR:
4443 case PLUS_EXPR:
4444 case MINUS_EXPR:
4445 case NEGATE_EXPR:
4446 cost = comp_cost (add_cost (speed, mode), 0);
4447 if (TREE_CODE (expr) != NEGATE_EXPR)
4449 tree mult = NULL_TREE;
4450 comp_cost sa_cost;
4451 if (TREE_CODE (op1) == MULT_EXPR)
4452 mult = op1;
4453 else if (TREE_CODE (op0) == MULT_EXPR)
4454 mult = op0;
4456 if (mult != NULL_TREE
4457 && is_a <scalar_int_mode> (mode, &int_mode)
4458 && cst_and_fits_in_hwi (TREE_OPERAND (mult, 1))
4459 && get_shiftadd_cost (expr, int_mode, cost0, cost1, mult,
4460 speed, &sa_cost))
4461 return sa_cost;
4463 break;
4465 CASE_CONVERT:
4467 tree inner_mode, outer_mode;
4468 outer_mode = TREE_TYPE (expr);
4469 inner_mode = TREE_TYPE (op0);
4470 cost = comp_cost (convert_cost (TYPE_MODE (outer_mode),
4471 TYPE_MODE (inner_mode), speed), 0);
4473 break;
4475 case MULT_EXPR:
4476 if (cst_and_fits_in_hwi (op0))
4477 cost = comp_cost (mult_by_coeff_cost (int_cst_value (op0),
4478 mode, speed), 0);
4479 else if (cst_and_fits_in_hwi (op1))
4480 cost = comp_cost (mult_by_coeff_cost (int_cst_value (op1),
4481 mode, speed), 0);
4482 else
4483 return comp_cost (target_spill_cost [speed], 0);
4484 break;
4486 case TRUNC_DIV_EXPR:
4487 /* Division by a power of two is usually cheap, so we allow it. Forbid
4488 anything else. */
4489 if (integer_pow2p (TREE_OPERAND (expr, 1)))
4490 cost = comp_cost (add_cost (speed, mode), 0);
4491 else
4492 cost = comp_cost (target_spill_cost[speed], 0);
4493 break;
4495 case BIT_AND_EXPR:
4496 case BIT_IOR_EXPR:
4497 case BIT_NOT_EXPR:
4498 case LSHIFT_EXPR:
4499 case RSHIFT_EXPR:
4500 cost = comp_cost (add_cost (speed, mode), 0);
4501 break;
4502 case COND_EXPR:
4503 op0 = TREE_OPERAND (expr, 0);
4504 STRIP_NOPS (op0);
4505 if (op0 == NULL_TREE || TREE_CODE (op0) == SSA_NAME
4506 || CONSTANT_CLASS_P (op0))
4507 cost = no_cost;
4508 else
4509 cost = force_expr_to_var_cost (op0, speed);
4510 break;
4511 case LT_EXPR:
4512 case LE_EXPR:
4513 case GT_EXPR:
4514 case GE_EXPR:
4515 case EQ_EXPR:
4516 case NE_EXPR:
4517 case UNORDERED_EXPR:
4518 case ORDERED_EXPR:
4519 case UNLT_EXPR:
4520 case UNLE_EXPR:
4521 case UNGT_EXPR:
4522 case UNGE_EXPR:
4523 case UNEQ_EXPR:
4524 case LTGT_EXPR:
4525 case MAX_EXPR:
4526 case MIN_EXPR:
4527 /* Simply use the add cost for now; FIXME: use a more accurate cost
4528 evaluation if one becomes available. */
4529 cost = comp_cost (add_cost (speed, mode), 0);
4530 break;
4532 default:
4533 gcc_unreachable ();
4536 cost += cost0;
4537 cost += cost1;
4538 return cost;
4541 /* Estimates cost of forcing EXPR into a variable. INV_VARS is a set of the
4542 invariants the computation depends on. */
4544 static comp_cost
4545 force_var_cost (struct ivopts_data *data, tree expr, bitmap *inv_vars)
4547 if (!expr)
4548 return no_cost;
4550 find_inv_vars (data, &expr, inv_vars);
4551 return force_expr_to_var_cost (expr, data->speed);
4554 /* Returns the cost of an auto-modifying address expression of the form
4555 base + offset. AINC_STEP is the step size of the address IV. AINC_OFFSET
4556 is the offset of the address expression. The address expression has
4557 ADDR_MODE in addr space AS. The memory access has MEM_MODE. SPEED means
4558 we are optimizing for speed or size. */
4560 enum ainc_type
4562 AINC_PRE_INC, /* Pre increment. */
4563 AINC_PRE_DEC, /* Pre decrement. */
4564 AINC_POST_INC, /* Post increment. */
4565 AINC_POST_DEC, /* Post decrement. */
4566 AINC_NONE /* Also the number of auto increment types. */
4569 struct ainc_cost_data
4571 int64_t costs[AINC_NONE];
4574 static comp_cost
4575 get_address_cost_ainc (poly_int64 ainc_step, poly_int64 ainc_offset,
4576 machine_mode addr_mode, machine_mode mem_mode,
4577 addr_space_t as, bool speed)
4579 if (!USE_LOAD_PRE_DECREMENT (mem_mode)
4580 && !USE_STORE_PRE_DECREMENT (mem_mode)
4581 && !USE_LOAD_POST_DECREMENT (mem_mode)
4582 && !USE_STORE_POST_DECREMENT (mem_mode)
4583 && !USE_LOAD_PRE_INCREMENT (mem_mode)
4584 && !USE_STORE_PRE_INCREMENT (mem_mode)
4585 && !USE_LOAD_POST_INCREMENT (mem_mode)
4586 && !USE_STORE_POST_INCREMENT (mem_mode))
4587 return infinite_cost;
4589 static vec<ainc_cost_data *> ainc_cost_data_list;
4590 unsigned idx = (unsigned) as * MAX_MACHINE_MODE + (unsigned) mem_mode;
4591 if (idx >= ainc_cost_data_list.length ())
4593 unsigned nsize = ((unsigned) as + 1) * MAX_MACHINE_MODE;
4595 gcc_assert (nsize > idx);
4596 ainc_cost_data_list.safe_grow_cleared (nsize, true);
4599 ainc_cost_data *data = ainc_cost_data_list[idx];
4600 if (data == NULL)
4602 rtx reg = gen_raw_REG (addr_mode, LAST_VIRTUAL_REGISTER + 1);
4604 data = (ainc_cost_data *) xcalloc (1, sizeof (*data));
4605 data->costs[AINC_PRE_DEC] = INFTY;
4606 data->costs[AINC_POST_DEC] = INFTY;
4607 data->costs[AINC_PRE_INC] = INFTY;
4608 data->costs[AINC_POST_INC] = INFTY;
4609 if (USE_LOAD_PRE_DECREMENT (mem_mode)
4610 || USE_STORE_PRE_DECREMENT (mem_mode))
4612 rtx addr = gen_rtx_PRE_DEC (addr_mode, reg);
4614 if (memory_address_addr_space_p (mem_mode, addr, as))
4615 data->costs[AINC_PRE_DEC]
4616 = address_cost (addr, mem_mode, as, speed);
4618 if (USE_LOAD_POST_DECREMENT (mem_mode)
4619 || USE_STORE_POST_DECREMENT (mem_mode))
4621 rtx addr = gen_rtx_POST_DEC (addr_mode, reg);
4623 if (memory_address_addr_space_p (mem_mode, addr, as))
4624 data->costs[AINC_POST_DEC]
4625 = address_cost (addr, mem_mode, as, speed);
4627 if (USE_LOAD_PRE_INCREMENT (mem_mode)
4628 || USE_STORE_PRE_INCREMENT (mem_mode))
4630 rtx addr = gen_rtx_PRE_INC (addr_mode, reg);
4632 if (memory_address_addr_space_p (mem_mode, addr, as))
4633 data->costs[AINC_PRE_INC]
4634 = address_cost (addr, mem_mode, as, speed);
4636 if (USE_LOAD_POST_INCREMENT (mem_mode)
4637 || USE_STORE_POST_INCREMENT (mem_mode))
4639 rtx addr = gen_rtx_POST_INC (addr_mode, reg);
4641 if (memory_address_addr_space_p (mem_mode, addr, as))
4642 data->costs[AINC_POST_INC]
4643 = address_cost (addr, mem_mode, as, speed);
4645 ainc_cost_data_list[idx] = data;
4648 poly_int64 msize = GET_MODE_SIZE (mem_mode);
4649 if (known_eq (ainc_offset, 0) && known_eq (msize, ainc_step))
4650 return comp_cost (data->costs[AINC_POST_INC], 0);
4651 if (known_eq (ainc_offset, 0) && known_eq (msize, -ainc_step))
4652 return comp_cost (data->costs[AINC_POST_DEC], 0);
4653 if (known_eq (ainc_offset, msize) && known_eq (msize, ainc_step))
4654 return comp_cost (data->costs[AINC_PRE_INC], 0);
4655 if (known_eq (ainc_offset, -msize) && known_eq (msize, -ainc_step))
4656 return comp_cost (data->costs[AINC_PRE_DEC], 0);
4658 return infinite_cost;
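/* For instance, with a 4-byte MEM_MODE: step 4 and offset 0 match a
   post-increment address, step 4 and offset 4 a pre-increment, step -4 and
   offset 0 a post-decrement, and step -4 and offset -4 a pre-decrement;
   any other step/offset combination yields infinite_cost.  */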
4661 /* Return cost of computing USE's address expression by using CAND.
4662 AFF_INV and AFF_VAR represent invariant and variant parts of the
4663 address expression, respectively. If AFF_INV is simple, store
4664 the loop invariant variables on which it depends in INV_VARS;
4665 if AFF_INV is complicated, handle it as a new invariant expression
4666 and record it in INV_EXPR. RATIO is the ratio of USE's step to
4667 CAND's step. If CAN_AUTOINC is non-NULL, store a boolean
4668 value to it indicating whether this is an auto-increment address. */
4670 static comp_cost
4671 get_address_cost (struct ivopts_data *data, struct iv_use *use,
4672 struct iv_cand *cand, aff_tree *aff_inv,
4673 aff_tree *aff_var, HOST_WIDE_INT ratio,
4674 bitmap *inv_vars, iv_inv_expr_ent **inv_expr,
4675 bool *can_autoinc, bool speed)
4677 rtx addr;
4678 bool simple_inv = true;
4679 tree comp_inv = NULL_TREE, type = aff_var->type;
4680 comp_cost var_cost = no_cost, cost = no_cost;
4681 struct mem_address parts = {NULL_TREE, integer_one_node,
4682 NULL_TREE, NULL_TREE, NULL_TREE};
4683 machine_mode addr_mode = TYPE_MODE (type);
4684 machine_mode mem_mode = TYPE_MODE (use->mem_type);
4685 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (use->iv->base));
4686 /* Only true if ratio != 1. */
4687 bool ok_with_ratio_p = false;
4688 bool ok_without_ratio_p = false;
4690 if (!aff_combination_const_p (aff_inv))
4692 parts.index = integer_one_node;
4693 /* Addressing mode "base + index". */
4694 ok_without_ratio_p = valid_mem_ref_p (mem_mode, as, &parts);
4695 if (ratio != 1)
4697 parts.step = wide_int_to_tree (type, ratio);
4698 /* Addressing mode "base + index << scale". */
4699 ok_with_ratio_p = valid_mem_ref_p (mem_mode, as, &parts);
4700 if (!ok_with_ratio_p)
4701 parts.step = NULL_TREE;
4703 if (ok_with_ratio_p || ok_without_ratio_p)
4705 if (maybe_ne (aff_inv->offset, 0))
4707 parts.offset = wide_int_to_tree (sizetype, aff_inv->offset);
4708 /* Addressing mode "base + index [<< scale] + offset". */
4709 if (!valid_mem_ref_p (mem_mode, as, &parts))
4710 parts.offset = NULL_TREE;
4711 else
4712 aff_inv->offset = 0;
4715 move_fixed_address_to_symbol (&parts, aff_inv);
4716 /* Base is fixed address and is moved to symbol part. */
4717 if (parts.symbol != NULL_TREE && aff_combination_zero_p (aff_inv))
4718 parts.base = NULL_TREE;
4720 /* Addressing mode "symbol + base + index [<< scale] [+ offset]". */
4721 if (parts.symbol != NULL_TREE
4722 && !valid_mem_ref_p (mem_mode, as, &parts))
4724 aff_combination_add_elt (aff_inv, parts.symbol, 1);
4725 parts.symbol = NULL_TREE;
4726 /* Reset SIMPLE_INV since symbol address needs to be computed
4727 outside of address expression in this case. */
4728 simple_inv = false;
4729 /* Symbol part is moved back to the base part; it can't be NULL. */
4730 parts.base = integer_one_node;
4733 else
4734 parts.index = NULL_TREE;
4736 else
4738 poly_int64 ainc_step;
4739 if (can_autoinc
4740 && ratio == 1
4741 && ptrdiff_tree_p (cand->iv->step, &ainc_step))
4743 poly_int64 ainc_offset = (aff_inv->offset).force_shwi ();
4745 if (stmt_after_increment (data->current_loop, cand, use->stmt))
4746 ainc_offset += ainc_step;
4747 cost = get_address_cost_ainc (ainc_step, ainc_offset,
4748 addr_mode, mem_mode, as, speed);
4749 if (!cost.infinite_cost_p ())
4751 *can_autoinc = true;
4752 return cost;
4754 cost = no_cost;
4756 if (!aff_combination_zero_p (aff_inv))
4758 parts.offset = wide_int_to_tree (sizetype, aff_inv->offset);
4759 /* Addressing mode "base + offset". */
4760 if (!valid_mem_ref_p (mem_mode, as, &parts))
4761 parts.offset = NULL_TREE;
4762 else
4763 aff_inv->offset = 0;
4767 if (simple_inv)
4768 simple_inv = (aff_inv == NULL
4769 || aff_combination_const_p (aff_inv)
4770 || aff_combination_singleton_var_p (aff_inv));
4771 if (!aff_combination_zero_p (aff_inv))
4772 comp_inv = aff_combination_to_tree (aff_inv);
4773 if (comp_inv != NULL_TREE)
4774 cost = force_var_cost (data, comp_inv, inv_vars);
4775 if (ratio != 1 && parts.step == NULL_TREE)
4776 var_cost += mult_by_coeff_cost (ratio, addr_mode, speed);
4777 if (comp_inv != NULL_TREE && parts.index == NULL_TREE)
4778 var_cost += add_cost (speed, addr_mode);
4780 if (comp_inv && inv_expr && !simple_inv)
4782 *inv_expr = get_loop_invariant_expr (data, comp_inv);
4783 /* Clear depends on. */
4784 if (*inv_expr != NULL && inv_vars && *inv_vars)
4785 bitmap_clear (*inv_vars);
4787 /* The cost of a small invariant expression adjusted against loop niters
4788 is usually zero, which makes it difficult to differentiate from a
4789 candidate based on loop invariant variables. Secondly, the
4790 generated invariant expression may not be hoisted out of the loop by a
4791 following pass. We penalize the cost by rounding up in order to
4792 neutralize such effects. */
4793 cost.cost = adjust_setup_cost (data, cost.cost, true);
4794 cost.scratch = cost.cost;
4797 cost += var_cost;
4798 addr = addr_for_mem_ref (&parts, as, false);
4799 gcc_assert (memory_address_addr_space_p (mem_mode, addr, as));
4800 cost += address_cost (addr, mem_mode, as, speed);
4802 if (parts.symbol != NULL_TREE)
4803 cost.complexity += 1;
4804 /* Don't increase the complexity of adding a scaled index if it's
4805 the only kind of index that the target allows. */
4806 if (parts.step != NULL_TREE && ok_without_ratio_p)
4807 cost.complexity += 1;
4808 if (parts.base != NULL_TREE && parts.index != NULL_TREE)
4809 cost.complexity += 1;
4810 if (parts.offset != NULL_TREE && !integer_zerop (parts.offset))
4811 cost.complexity += 1;
4813 return cost;
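/* As a rough illustration (made-up addressing): for a use MEM[base + 4 * i + 16]
   with CAND being i itself, AFF_INV carries base + 16, AFF_VAR carries i and
   RATIO is 4; the code above then probes the target for "base + index",
   "base + index << scale" and variants with the offset and a symbol folded in,
   forces whatever cannot be encoded in the address into a setup computation,
   and charges one complexity unit for each extra address part used.  */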
4816 /* Scale (multiply) the computed COST (except the scratch part, which should
4817 be hoisted out of the loop) by header->frequency / AT->frequency, which
4818 makes the expected cost more accurate. */
4820 static comp_cost
4821 get_scaled_computation_cost_at (ivopts_data *data, gimple *at, comp_cost cost)
4823 if (data->speed
4824 && data->current_loop->header->count.to_frequency (cfun) > 0)
4826 basic_block bb = gimple_bb (at);
4827 gcc_assert (cost.scratch <= cost.cost);
4828 int scale_factor = (int)(intptr_t) bb->aux;
4829 if (scale_factor == 1)
4830 return cost;
4832 int64_t scaled_cost
4833 = cost.scratch + (cost.cost - cost.scratch) * scale_factor;
4835 if (dump_file && (dump_flags & TDF_DETAILS))
4836 fprintf (dump_file, "Scaling cost based on bb prob by %2.2f: "
4837 "%" PRId64 " (scratch: %" PRId64 ") -> %" PRId64 "\n",
4838 1.0f * scale_factor, cost.cost, cost.scratch, scaled_cost);
4840 cost.cost = scaled_cost;
4843 return cost;
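/* E.g. with cost.cost = 20, cost.scratch = 4 and a block scale factor of 3,
   the scaled cost is 4 + (20 - 4) * 3 = 52: only the part paid on every
   iteration is multiplied, the setup (scratch) part is not.  */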
4846 /* Determines the cost of the computation by which USE is expressed
4847 from induction variable CAND. If ADDRESS_P is true, we just need
4848 to create an address from it, otherwise we want to get it into a
4849 register. A set of invariants we depend on is stored in INV_VARS.
4850 If CAN_AUTOINC is nonnull, use it to record whether autoinc
4851 addressing is likely. If INV_EXPR is nonnull, record invariant
4852 expr entry in it. */
4854 static comp_cost
4855 get_computation_cost (struct ivopts_data *data, struct iv_use *use,
4856 struct iv_cand *cand, bool address_p, bitmap *inv_vars,
4857 bool *can_autoinc, iv_inv_expr_ent **inv_expr)
4859 gimple *at = use->stmt;
4860 tree ubase = use->iv->base, cbase = cand->iv->base;
4861 tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
4862 tree comp_inv = NULL_TREE;
4863 HOST_WIDE_INT ratio, aratio;
4864 comp_cost cost;
4865 widest_int rat;
4866 aff_tree aff_inv, aff_var;
4867 bool speed = optimize_bb_for_speed_p (gimple_bb (at));
4869 if (inv_vars)
4870 *inv_vars = NULL;
4871 if (can_autoinc)
4872 *can_autoinc = false;
4873 if (inv_expr)
4874 *inv_expr = NULL;
4876 /* Check if we have enough precision to express the values of use. */
4877 if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
4878 return infinite_cost;
4880 if (address_p
4881 || (use->iv->base_object
4882 && cand->iv->base_object
4883 && POINTER_TYPE_P (TREE_TYPE (use->iv->base_object))
4884 && POINTER_TYPE_P (TREE_TYPE (cand->iv->base_object))))
4886 /* Do not try to express the address of an object with a computation based
4887 on the address of a different object. This may cause problems in RTL
4888 level alias analysis (which does not expect this to happen,
4889 as it is illegal in C), and would be unlikely to be useful
4890 anyway. */
4891 if (use->iv->base_object
4892 && cand->iv->base_object
4893 && !operand_equal_p (use->iv->base_object, cand->iv->base_object, 0))
4894 return infinite_cost;
4897 if (!get_computation_aff_1 (data->current_loop, at, use,
4898 cand, &aff_inv, &aff_var, &rat)
4899 || !wi::fits_shwi_p (rat))
4900 return infinite_cost;
4902 ratio = rat.to_shwi ();
4903 if (address_p)
4905 cost = get_address_cost (data, use, cand, &aff_inv, &aff_var, ratio,
4906 inv_vars, inv_expr, can_autoinc, speed);
4907 cost = get_scaled_computation_cost_at (data, at, cost);
4908 /* For doloop IV cand, add on the extra cost. */
4909 cost += cand->doloop_p ? targetm.doloop_cost_for_address : 0;
4910 return cost;
4913 bool simple_inv = (aff_combination_const_p (&aff_inv)
4914 || aff_combination_singleton_var_p (&aff_inv));
4915 tree signed_type = signed_type_for (aff_combination_type (&aff_inv));
4916 aff_combination_convert (&aff_inv, signed_type);
4917 if (!aff_combination_zero_p (&aff_inv))
4918 comp_inv = aff_combination_to_tree (&aff_inv);
4920 cost = force_var_cost (data, comp_inv, inv_vars);
4921 if (comp_inv && inv_expr && !simple_inv)
4923 *inv_expr = get_loop_invariant_expr (data, comp_inv);
4924 /* Clear depends on. */
4925 if (*inv_expr != NULL && inv_vars && *inv_vars)
4926 bitmap_clear (*inv_vars);
4928 cost.cost = adjust_setup_cost (data, cost.cost);
4929 /* Record setup cost in scratch field. */
4930 cost.scratch = cost.cost;
4932 /* The cost of a constant integer can be covered when adding the invariant
4933 part to the variant part. */
4934 else if (comp_inv && CONSTANT_CLASS_P (comp_inv))
4935 cost = no_cost;
4937 /* Need type narrowing to represent use with cand. */
4938 if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
4940 machine_mode outer_mode = TYPE_MODE (utype);
4941 machine_mode inner_mode = TYPE_MODE (ctype);
4942 cost += comp_cost (convert_cost (outer_mode, inner_mode, speed), 0);
4945 /* Turn a + i * (-c) into a - i * c. */
4946 if (ratio < 0 && comp_inv && !integer_zerop (comp_inv))
4947 aratio = -ratio;
4948 else
4949 aratio = ratio;
4951 if (ratio != 1)
4952 cost += mult_by_coeff_cost (aratio, TYPE_MODE (utype), speed);
4954 /* TODO: We may also need to check if we can compute a + i * 4 in one
4955 instruction. */
4956 /* Need to add up the invariant and variant parts. */
4957 if (comp_inv && !integer_zerop (comp_inv))
4958 cost += add_cost (speed, TYPE_MODE (utype));
4960 cost = get_scaled_computation_cost_at (data, at, cost);
4962 /* For doloop IV cand, add on the extra cost. */
4963 if (cand->doloop_p && use->type == USE_NONLINEAR_EXPR)
4964 cost += targetm.doloop_cost_for_generic;
4966 return cost;
4969 /* Determines cost of computing the use in GROUP with CAND in a generic
4970 expression. */
4972 static bool
4973 determine_group_iv_cost_generic (struct ivopts_data *data,
4974 struct iv_group *group, struct iv_cand *cand)
4976 comp_cost cost;
4977 iv_inv_expr_ent *inv_expr = NULL;
4978 bitmap inv_vars = NULL, inv_exprs = NULL;
4979 struct iv_use *use = group->vuses[0];
4981 /* The simple case first -- if we need to express value of the preserved
4982 original biv, the cost is 0. This also prevents us from counting the
4983 cost of increment twice -- once at this use and once in the cost of
4984 the candidate. */
4985 if (cand->pos == IP_ORIGINAL && cand->incremented_at == use->stmt)
4986 cost = no_cost;
4987 /* If the IV candidate involves undefined SSA values and is not the
4988 same IV as on the USE avoid using that candidate here. */
4989 else if (cand->involves_undefs
4990 && (!use->iv || !operand_equal_p (cand->iv->base, use->iv->base, 0)))
4991 return false;
4992 else
4993 cost = get_computation_cost (data, use, cand, false,
4994 &inv_vars, NULL, &inv_expr);
4996 if (inv_expr)
4998 inv_exprs = BITMAP_ALLOC (NULL);
4999 bitmap_set_bit (inv_exprs, inv_expr->id);
5001 set_group_iv_cost (data, group, cand, cost, inv_vars,
5002 NULL_TREE, ERROR_MARK, inv_exprs);
5003 return !cost.infinite_cost_p ();
5006 /* Determines cost of computing uses in GROUP with CAND in addresses. */
5008 static bool
5009 determine_group_iv_cost_address (struct ivopts_data *data,
5010 struct iv_group *group, struct iv_cand *cand)
5012 unsigned i;
5013 bitmap inv_vars = NULL, inv_exprs = NULL;
5014 bool can_autoinc;
5015 iv_inv_expr_ent *inv_expr = NULL;
5016 struct iv_use *use = group->vuses[0];
5017 comp_cost sum_cost = no_cost, cost;
5019 cost = get_computation_cost (data, use, cand, true,
5020 &inv_vars, &can_autoinc, &inv_expr);
5022 if (inv_expr)
5024 inv_exprs = BITMAP_ALLOC (NULL);
5025 bitmap_set_bit (inv_exprs, inv_expr->id);
5027 sum_cost = cost;
5028 if (!sum_cost.infinite_cost_p () && cand->ainc_use == use)
5030 if (can_autoinc)
5031 sum_cost -= cand->cost_step;
5032 /* If we generated the candidate solely for exploiting autoincrement
5033 opportunities, and it turns out it can't be used, set the cost to
5034 infinity to make sure we ignore it. */
5035 else if (cand->pos == IP_AFTER_USE || cand->pos == IP_BEFORE_USE)
5036 sum_cost = infinite_cost;
5039 /* Uses in a group can share setup code, so only add setup cost once. */
5040 cost -= cost.scratch;
5041 /* Compute and add costs for the remaining uses of this group. */
5042 for (i = 1; i < group->vuses.length () && !sum_cost.infinite_cost_p (); i++)
5044 struct iv_use *next = group->vuses[i];
5046 /* TODO: We could skip computing cost for sub iv_use when it has the
5047 same cost as the first iv_use, but the cost really depends on the
5048 offset and where the iv_use is. */
5049 cost = get_computation_cost (data, next, cand, true,
5050 NULL, &can_autoinc, &inv_expr);
5051 if (inv_expr)
5053 if (!inv_exprs)
5054 inv_exprs = BITMAP_ALLOC (NULL);
5056 bitmap_set_bit (inv_exprs, inv_expr->id);
5058 sum_cost += cost;
5060 set_group_iv_cost (data, group, cand, sum_cost, inv_vars,
5061 NULL_TREE, ERROR_MARK, inv_exprs);
5063 return !sum_cost.infinite_cost_p ();
5066 /* Computes value of candidate CAND at position AT in iteration DESC->NITER,
5067 and stores it to VAL. */
5069 static void
5070 cand_value_at (class loop *loop, struct iv_cand *cand, gimple *at,
5071 class tree_niter_desc *desc, aff_tree *val)
5073 aff_tree step, delta, nit;
5074 struct iv *iv = cand->iv;
5075 tree type = TREE_TYPE (iv->base);
5076 tree niter = desc->niter;
5077 bool after_adjust = stmt_after_increment (loop, cand, at);
5078 tree steptype;
5080 if (POINTER_TYPE_P (type))
5081 steptype = sizetype;
5082 else
5083 steptype = unsigned_type_for (type);
5085 /* If AFTER_ADJUST is required, the code below generates the equivalent
5086 of BASE + NITER * STEP + STEP, when ideally we'd prefer the expression
5087 BASE + (NITER + 1) * STEP, especially when NITER is often of the form
5088 SSA_NAME - 1. Unfortunately, guaranteeing that adding 1 to NITER
5089 doesn't overflow is tricky, so we peek inside the TREE_NITER_DESC
5090 class for common idioms that we know are safe. */
5091 if (after_adjust
5092 && desc->control.no_overflow
5093 && integer_onep (desc->control.step)
5094 && (desc->cmp == LT_EXPR
5095 || desc->cmp == NE_EXPR)
5096 && TREE_CODE (desc->bound) == SSA_NAME)
5098 if (integer_onep (desc->control.base))
5100 niter = desc->bound;
5101 after_adjust = false;
5103 else if (TREE_CODE (niter) == MINUS_EXPR
5104 && integer_onep (TREE_OPERAND (niter, 1)))
5106 niter = TREE_OPERAND (niter, 0);
5107 after_adjust = false;
5111 tree_to_aff_combination (iv->step, TREE_TYPE (iv->step), &step);
5112 aff_combination_convert (&step, steptype);
5113 tree_to_aff_combination (niter, TREE_TYPE (niter), &nit);
5114 aff_combination_convert (&nit, steptype);
5115 aff_combination_mult (&nit, &step, &delta);
5116 if (after_adjust)
5117 aff_combination_add (&delta, &step);
5119 tree_to_aff_combination (iv->base, type, val);
5120 if (!POINTER_TYPE_P (type))
5121 aff_combination_convert (val, steptype);
5122 aff_combination_add (val, &delta);
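/* In other words, for CAND with base B and step S the value stored in VAL is
   B + NITER * S before the increment, and B + (NITER + 1) * S after it
   (computed as B + NITER * S + S unless the bound idiom above lets us use
   NITER + 1 directly).  */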
5125 /* Returns period of induction variable iv. */
5127 static tree
5128 iv_period (struct iv *iv)
5130 tree step = iv->step, period, type;
5131 tree pow2div;
5133 gcc_assert (step && TREE_CODE (step) == INTEGER_CST);
5135 type = unsigned_type_for (TREE_TYPE (step));
5136 /* Period of the iv is lcm (step, type_range)/step - 1,
5137 i.e., N*type_range/step - 1. Since type range is a power
5138 of two, N == step >> num_of_ending_zeros_binary (step),
5139 so the final result is
5141 (type_range >> num_of_ending_zeros_binary (step)) - 1
5144 pow2div = num_ending_zeros (step);
5146 period = build_low_bits_mask (type,
5147 (TYPE_PRECISION (type)
5148 - tree_to_uhwi (pow2div)));
5150 return period;
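/* For example, for a 32-bit unsigned IV with step 4 there are two trailing
   zero bits in the step, so the period is the low 30-bit mask 2^30 - 1:
   after that many further increments the IV value repeats.  */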
5153 /* Returns the comparison operator used when eliminating the iv USE. */
5155 static enum tree_code
5156 iv_elimination_compare (struct ivopts_data *data, struct iv_use *use)
5158 class loop *loop = data->current_loop;
5159 basic_block ex_bb;
5160 edge exit;
5162 ex_bb = gimple_bb (use->stmt);
5163 exit = EDGE_SUCC (ex_bb, 0);
5164 if (flow_bb_inside_loop_p (loop, exit->dest))
5165 exit = EDGE_SUCC (ex_bb, 1);
5167 return (exit->flags & EDGE_TRUE_VALUE ? EQ_EXPR : NE_EXPR);
5170 /* Returns true if we can prove that BASE - OFFSET does not overflow. For now,
5171 we only detect the situation that BASE = SOMETHING + OFFSET, where the
5172 calculation is performed in non-wrapping type.
5174 TODO: More generally, we could test for the situation that
5175 BASE = SOMETHING + OFFSET' and OFFSET is between OFFSET' and zero.
5176 This would require knowing the sign of OFFSET. */
5178 static bool
5179 difference_cannot_overflow_p (struct ivopts_data *data, tree base, tree offset)
5181 enum tree_code code;
5182 tree e1, e2;
5183 aff_tree aff_e1, aff_e2, aff_offset;
5185 if (!nowrap_type_p (TREE_TYPE (base)))
5186 return false;
5188 base = expand_simple_operations (base);
5190 if (TREE_CODE (base) == SSA_NAME)
5192 gimple *stmt = SSA_NAME_DEF_STMT (base);
5194 if (gimple_code (stmt) != GIMPLE_ASSIGN)
5195 return false;
5197 code = gimple_assign_rhs_code (stmt);
5198 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
5199 return false;
5201 e1 = gimple_assign_rhs1 (stmt);
5202 e2 = gimple_assign_rhs2 (stmt);
5204 else
5206 code = TREE_CODE (base);
5207 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
5208 return false;
5209 e1 = TREE_OPERAND (base, 0);
5210 e2 = TREE_OPERAND (base, 1);
5213 /* Use affine expansion as deeper inspection to prove the equality. */
5214 tree_to_aff_combination_expand (e2, TREE_TYPE (e2),
5215 &aff_e2, &data->name_expansion_cache);
5216 tree_to_aff_combination_expand (offset, TREE_TYPE (offset),
5217 &aff_offset, &data->name_expansion_cache);
5218 aff_combination_scale (&aff_offset, -1);
5219 switch (code)
5221 case PLUS_EXPR:
5222 aff_combination_add (&aff_e2, &aff_offset);
5223 if (aff_combination_zero_p (&aff_e2))
5224 return true;
5226 tree_to_aff_combination_expand (e1, TREE_TYPE (e1),
5227 &aff_e1, &data->name_expansion_cache);
5228 aff_combination_add (&aff_e1, &aff_offset);
5229 return aff_combination_zero_p (&aff_e1);
5231 case POINTER_PLUS_EXPR:
5232 aff_combination_add (&aff_e2, &aff_offset);
5233 return aff_combination_zero_p (&aff_e2);
5235 default:
5236 return false;
5240 /* Tries to replace loop exit by one formulated in terms of a LT_EXPR
5241 comparison with CAND. NITER describes the number of iterations of
5242 the loops. If successful, the comparison in COMP_P is altered accordingly.
5244 We aim to handle the following situation:
5246 sometype *base, *p;
5247 int a, b, i;
5249 i = a;
5250 p = p_0 = base + a;
5254 bla (*p);
5255 p++;
5256 i++;
5258 while (i < b);
5260 Here, the number of iterations of the loop is (a + 1 > b) ? 0 : b - a - 1.
5261 We aim to optimize this to
5263 p = p_0 = base + a;
5266 bla (*p);
5267 p++;
5269 while (p < p_0 - a + b);
5271 This preserves correctness, since the pointer arithmetic does not
5272 overflow. More precisely:
5274 1) if a + 1 <= b, then p_0 - a + b is the final value of p, hence there is no
5275 overflow in computing it or the values of p.
5276 2) if a + 1 > b, then we need to verify that the expression p_0 - a does not
5277 overflow. To prove this, we use the fact that p_0 = base + a. */
5279 static bool
5280 iv_elimination_compare_lt (struct ivopts_data *data,
5281 struct iv_cand *cand, enum tree_code *comp_p,
5282 class tree_niter_desc *niter)
5284 tree cand_type, a, b, mbz, nit_type = TREE_TYPE (niter->niter), offset;
5285 class aff_tree nit, tmpa, tmpb;
5286 enum tree_code comp;
5287 HOST_WIDE_INT step;
5289 /* We need to know that the candidate induction variable does not overflow.
5290 While more complex analysis may be used to prove this, for now just
5291 check that the variable appears in the original program and that it
5292 is computed in a type that guarantees no overflows. */
5293 cand_type = TREE_TYPE (cand->iv->base);
5294 if (cand->pos != IP_ORIGINAL || !nowrap_type_p (cand_type))
5295 return false;
5297 /* Make sure that the loop iterates till the loop bound is hit, as otherwise
5298 the calculation of the BOUND could overflow, making the comparison
5299 invalid. */
5300 if (!data->loop_single_exit_p)
5301 return false;
5303 /* We need to be able to decide whether candidate is increasing or decreasing
5304 in order to choose the right comparison operator. */
5305 if (!cst_and_fits_in_hwi (cand->iv->step))
5306 return false;
5307 step = int_cst_value (cand->iv->step);
5309 /* Check that the number of iterations matches the expected pattern:
5310 a + 1 > b ? 0 : b - a - 1. */
5311 mbz = niter->may_be_zero;
5312 if (TREE_CODE (mbz) == GT_EXPR)
5314 /* Handle a + 1 > b. */
5315 tree op0 = TREE_OPERAND (mbz, 0);
5316 if (TREE_CODE (op0) == PLUS_EXPR && integer_onep (TREE_OPERAND (op0, 1)))
5318 a = TREE_OPERAND (op0, 0);
5319 b = TREE_OPERAND (mbz, 1);
5321 else
5322 return false;
5324 else if (TREE_CODE (mbz) == LT_EXPR)
5326 tree op1 = TREE_OPERAND (mbz, 1);
5328 /* Handle b < a + 1. */
5329 if (TREE_CODE (op1) == PLUS_EXPR && integer_onep (TREE_OPERAND (op1, 1)))
5331 a = TREE_OPERAND (op1, 0);
5332 b = TREE_OPERAND (mbz, 0);
5334 else
5335 return false;
5337 else
5338 return false;
5340 /* Expected number of iterations is B - A - 1. Check that it matches
5341 the actual number, i.e., that B - A - NITER = 1. */
5342 tree_to_aff_combination (niter->niter, nit_type, &nit);
5343 tree_to_aff_combination (fold_convert (nit_type, a), nit_type, &tmpa);
5344 tree_to_aff_combination (fold_convert (nit_type, b), nit_type, &tmpb);
5345 aff_combination_scale (&nit, -1);
5346 aff_combination_scale (&tmpa, -1);
5347 aff_combination_add (&tmpb, &tmpa);
5348 aff_combination_add (&tmpb, &nit);
5349 if (tmpb.n != 0 || maybe_ne (tmpb.offset, 1))
5350 return false;
5352 /* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not
5353 overflow. */
5354 offset = fold_build2 (MULT_EXPR, TREE_TYPE (cand->iv->step),
5355 cand->iv->step,
5356 fold_convert (TREE_TYPE (cand->iv->step), a));
5357 if (!difference_cannot_overflow_p (data, cand->iv->base, offset))
5358 return false;
5360 /* Determine the new comparison operator. */
5361 comp = step < 0 ? GT_EXPR : LT_EXPR;
5362 if (*comp_p == NE_EXPR)
5363 *comp_p = comp;
5364 else if (*comp_p == EQ_EXPR)
5365 *comp_p = invert_tree_comparison (comp, false);
5366 else
5367 gcc_unreachable ();
5369 return true;
5372 /* Check whether it is possible to express the condition in USE by a comparison
5373 of candidate CAND. If so, store the value compared against in BOUND, and the
5374 comparison operator in COMP. */
5376 static bool
5377 may_eliminate_iv (struct ivopts_data *data,
5378 struct iv_use *use, struct iv_cand *cand, tree *bound,
5379 enum tree_code *comp)
5381 basic_block ex_bb;
5382 edge exit;
5383 tree period;
5384 class loop *loop = data->current_loop;
5385 aff_tree bnd;
5386 class tree_niter_desc *desc = NULL;
5388 if (TREE_CODE (cand->iv->step) != INTEGER_CST)
5389 return false;
5391 /* For now this works only for exits that dominate the loop latch.
5392 TODO: extend to other conditions inside the loop body. */
5393 ex_bb = gimple_bb (use->stmt);
5394 if (use->stmt != last_stmt (ex_bb)
5395 || gimple_code (use->stmt) != GIMPLE_COND
5396 || !dominated_by_p (CDI_DOMINATORS, loop->latch, ex_bb))
5397 return false;
5399 exit = EDGE_SUCC (ex_bb, 0);
5400 if (flow_bb_inside_loop_p (loop, exit->dest))
5401 exit = EDGE_SUCC (ex_bb, 1);
5402 if (flow_bb_inside_loop_p (loop, exit->dest))
5403 return false;
5405 desc = niter_for_exit (data, exit);
5406 if (!desc)
5407 return false;
5409 /* Determine whether we can use the variable to test the exit condition.
5410 This is the case iff the period of the induction variable is greater
5411 than the number of iterations for which the exit condition is true. */
5412 period = iv_period (cand->iv);
5414 /* If the number of iterations is constant, compare against it directly. */
5415 if (TREE_CODE (desc->niter) == INTEGER_CST)
5417 /* See cand_value_at. */
5418 if (stmt_after_increment (loop, cand, use->stmt))
5420 if (!tree_int_cst_lt (desc->niter, period))
5421 return false;
5423 else
5425 if (tree_int_cst_lt (period, desc->niter))
5426 return false;
5430 /* If not, and if this is the only possible exit of the loop, see whether
5431 we can get a conservative estimate on the number of iterations of the
5432 entire loop and compare against that instead. */
5433 else
5435 widest_int period_value, max_niter;
5437 max_niter = desc->max;
5438 if (stmt_after_increment (loop, cand, use->stmt))
5439 max_niter += 1;
5440 period_value = wi::to_widest (period);
5441 if (wi::gtu_p (max_niter, period_value))
5443 /* See if we can take advantage of inferred loop bound
5444 information. */
5445 if (data->loop_single_exit_p)
5447 if (!max_loop_iterations (loop, &max_niter))
5448 return false;
5449 /* The loop bound is already adjusted by adding 1. */
5450 if (wi::gtu_p (max_niter, period_value))
5451 return false;
5453 else
5454 return false;
5458 /* For a doloop IV cand, the bound would be zero. It's safe whether or not
5459 may_be_zero is set. */
5460 if (cand->doloop_p)
5462 *bound = build_int_cst (TREE_TYPE (cand->iv->base), 0);
5463 *comp = iv_elimination_compare (data, use);
5464 return true;
5467 cand_value_at (loop, cand, use->stmt, desc, &bnd);
5469 *bound = fold_convert (TREE_TYPE (cand->iv->base),
5470 aff_combination_to_tree (&bnd));
5471 *comp = iv_elimination_compare (data, use);
5473 /* It is unlikely that computing the number of iterations using division
5474 would be more profitable than keeping the original induction variable. */
5475 if (expression_expensive_p (*bound))
5476 return false;
5478 /* Sometimes it is possible to handle the situation that the number of
5479 iterations may be zero (unless additional assumptions hold) by using <
5480 instead of != in the exit condition.
5482 TODO: we could also calculate the value MAY_BE_ZERO ? 0 : NITER and
5483 base the exit condition on it. However, that is often too
5484 expensive. */
5485 if (!integer_zerop (desc->may_be_zero))
5486 return iv_elimination_compare_lt (data, cand, comp, desc);
5488 return true;
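/* As a sketch with made-up numbers: for a 32-bit unsigned CAND with step 1
   the period is 0xffffffff; if the exit has a constant NITER of 100, then
   100 < period, so the exit test can be replaced by comparing CAND against
   its value in iteration 100 (or 101 after the increment), as computed by
   cand_value_at, provided that bound is not too expensive to compute.  */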
5491 /* Calculates the cost of BOUND, if it is a PARM_DECL. A PARM_DECL must
5492 be copied if it is used in the loop body and DATA->body_includes_call. */
5494 static int
5495 parm_decl_cost (struct ivopts_data *data, tree bound)
5497 tree sbound = bound;
5498 STRIP_NOPS (sbound);
5500 if (TREE_CODE (sbound) == SSA_NAME
5501 && SSA_NAME_IS_DEFAULT_DEF (sbound)
5502 && TREE_CODE (SSA_NAME_VAR (sbound)) == PARM_DECL
5503 && data->body_includes_call)
5504 return COSTS_N_INSNS (1);
5506 return 0;
5509 /* Determines cost of computing the use in GROUP with CAND in a condition. */
5511 static bool
5512 determine_group_iv_cost_cond (struct ivopts_data *data,
5513 struct iv_group *group, struct iv_cand *cand)
5515 tree bound = NULL_TREE;
5516 struct iv *cmp_iv;
5517 bitmap inv_exprs = NULL;
5518 bitmap inv_vars_elim = NULL, inv_vars_express = NULL, inv_vars;
5519 comp_cost elim_cost = infinite_cost, express_cost, cost, bound_cost;
5520 enum comp_iv_rewrite rewrite_type;
5521 iv_inv_expr_ent *inv_expr_elim = NULL, *inv_expr_express = NULL, *inv_expr;
5522 tree *control_var, *bound_cst;
5523 enum tree_code comp = ERROR_MARK;
5524 struct iv_use *use = group->vuses[0];
5526 /* Extract condition operands. */
5527 rewrite_type = extract_cond_operands (data, use->stmt, &control_var,
5528 &bound_cst, NULL, &cmp_iv);
5529 gcc_assert (rewrite_type != COMP_IV_NA);
5531 /* Try iv elimination. */
5532 if (rewrite_type == COMP_IV_ELIM
5533 && may_eliminate_iv (data, use, cand, &bound, &comp))
5535 elim_cost = force_var_cost (data, bound, &inv_vars_elim);
5536 if (elim_cost.cost == 0)
5537 elim_cost.cost = parm_decl_cost (data, bound);
5538 else if (TREE_CODE (bound) == INTEGER_CST)
5539 elim_cost.cost = 0;
5540 /* If we replace a loop condition 'i < n' with 'p < base + n',
5541 inv_vars_elim will have 'base' and 'n' set, which implies that both
5542 'base' and 'n' will be live during the loop. More likely,
5543 'base + n' will be loop invariant, resulting in only one live value
5544 during the loop. So in that case we clear inv_vars_elim and set
5545 inv_expr_elim instead. */
5546 if (inv_vars_elim && bitmap_count_bits (inv_vars_elim) > 1)
5548 inv_expr_elim = get_loop_invariant_expr (data, bound);
5549 bitmap_clear (inv_vars_elim);
5551 /* The bound is a loop invariant, so it will be only computed
5552 once. */
5553 elim_cost.cost = adjust_setup_cost (data, elim_cost.cost);
5556 /* When the condition is a comparison of the candidate IV against
5557 zero, prefer this IV.
5559 TODO: The constant that we're subtracting from the cost should
5560 be target-dependent. This information should be added to the
5561 target costs for each backend. */
5562 if (!elim_cost.infinite_cost_p () /* Do not try to decrease infinite! */
5563 && integer_zerop (*bound_cst)
5564 && (operand_equal_p (*control_var, cand->var_after, 0)
5565 || operand_equal_p (*control_var, cand->var_before, 0)))
5566 elim_cost -= 1;
5568 express_cost = get_computation_cost (data, use, cand, false,
5569 &inv_vars_express, NULL,
5570 &inv_expr_express);
5571 if (cmp_iv != NULL)
5572 find_inv_vars (data, &cmp_iv->base, &inv_vars_express);
5574 /* Count the cost of the original bound as well. */
5575 bound_cost = force_var_cost (data, *bound_cst, NULL);
5576 if (bound_cost.cost == 0)
5577 bound_cost.cost = parm_decl_cost (data, *bound_cst);
5578 else if (TREE_CODE (*bound_cst) == INTEGER_CST)
5579 bound_cost.cost = 0;
5580 express_cost += bound_cost;
5582 /* Choose the better approach, preferring the eliminated IV. */
5583 if (elim_cost <= express_cost)
5585 cost = elim_cost;
5586 inv_vars = inv_vars_elim;
5587 inv_vars_elim = NULL;
5588 inv_expr = inv_expr_elim;
5589 /* For doloop candidate/use pair, adjust to zero cost. */
5590 if (group->doloop_p && cand->doloop_p && elim_cost.cost > no_cost.cost)
5591 cost = no_cost;
5593 else
5595 cost = express_cost;
5596 inv_vars = inv_vars_express;
5597 inv_vars_express = NULL;
5598 bound = NULL_TREE;
5599 comp = ERROR_MARK;
5600 inv_expr = inv_expr_express;
5603 if (inv_expr)
5605 inv_exprs = BITMAP_ALLOC (NULL);
5606 bitmap_set_bit (inv_exprs, inv_expr->id);
5608 set_group_iv_cost (data, group, cand, cost,
5609 inv_vars, bound, comp, inv_exprs);
5611 if (inv_vars_elim)
5612 BITMAP_FREE (inv_vars_elim);
5613 if (inv_vars_express)
5614 BITMAP_FREE (inv_vars_express);
5616 return !cost.infinite_cost_p ();
5619 /* Determines cost of computing uses in GROUP with CAND. Returns false
5620 if USE cannot be represented with CAND. */
5622 static bool
5623 determine_group_iv_cost (struct ivopts_data *data,
5624 struct iv_group *group, struct iv_cand *cand)
5626 switch (group->type)
5628 case USE_NONLINEAR_EXPR:
5629 return determine_group_iv_cost_generic (data, group, cand);
5631 case USE_REF_ADDRESS:
5632 case USE_PTR_ADDRESS:
5633 return determine_group_iv_cost_address (data, group, cand);
5635 case USE_COMPARE:
5636 return determine_group_iv_cost_cond (data, group, cand);
5638 default:
5639 gcc_unreachable ();
5643 /* Return true if get_computation_cost indicates that autoincrement is
5644 a possibility for the pair of USE and CAND, false otherwise. */
5646 static bool
5647 autoinc_possible_for_pair (struct ivopts_data *data, struct iv_use *use,
5648 struct iv_cand *cand)
5650 if (!address_p (use->type))
5651 return false;
5653 bool can_autoinc = false;
5654 get_computation_cost (data, use, cand, true, NULL, &can_autoinc, NULL);
5655 return can_autoinc;
5658 /* Examine IP_ORIGINAL candidates to see if they are incremented next to a
5659 use that allows autoincrement, and set their AINC_USE if possible. */
5661 static void
5662 set_autoinc_for_original_candidates (struct ivopts_data *data)
5664 unsigned i, j;
5666 for (i = 0; i < data->vcands.length (); i++)
5668 struct iv_cand *cand = data->vcands[i];
5669 struct iv_use *closest_before = NULL;
5670 struct iv_use *closest_after = NULL;
5671 if (cand->pos != IP_ORIGINAL)
5672 continue;
5674 for (j = 0; j < data->vgroups.length (); j++)
5676 struct iv_group *group = data->vgroups[j];
5677 struct iv_use *use = group->vuses[0];
5678 unsigned uid = gimple_uid (use->stmt);
5680 if (gimple_bb (use->stmt) != gimple_bb (cand->incremented_at))
5681 continue;
5683 if (uid < gimple_uid (cand->incremented_at)
5684 && (closest_before == NULL
5685 || uid > gimple_uid (closest_before->stmt)))
5686 closest_before = use;
5688 if (uid > gimple_uid (cand->incremented_at)
5689 && (closest_after == NULL
5690 || uid < gimple_uid (closest_after->stmt)))
5691 closest_after = use;
5694 if (closest_before != NULL
5695 && autoinc_possible_for_pair (data, closest_before, cand))
5696 cand->ainc_use = closest_before;
5697 else if (closest_after != NULL
5698 && autoinc_possible_for_pair (data, closest_after, cand))
5699 cand->ainc_use = closest_after;
5703 /* Relate compare use with all candidates. */
5705 static void
5706 relate_compare_use_with_all_cands (struct ivopts_data *data)
5708 unsigned i, count = data->vcands.length ();
5709 for (i = 0; i < data->vgroups.length (); i++)
5711 struct iv_group *group = data->vgroups[i];
5713 if (group->type == USE_COMPARE)
5714 bitmap_set_range (group->related_cands, 0, count);
5718 /* If PREFERRED_MODE is suitable and profitable, use it to compute the
5719 doloop iv base from niter: base = niter + 1. */
5721 static tree
5722 compute_doloop_base_on_mode (machine_mode preferred_mode, tree niter,
5723 const widest_int &iterations_max)
5725 tree ntype = TREE_TYPE (niter);
5726 tree pref_type = lang_hooks.types.type_for_mode (preferred_mode, 1);
5727 if (!pref_type)
5728 return fold_build2 (PLUS_EXPR, ntype, unshare_expr (niter),
5729 build_int_cst (ntype, 1));
5731 gcc_assert (TREE_CODE (pref_type) == INTEGER_TYPE);
5733 int prec = TYPE_PRECISION (ntype);
5734 int pref_prec = TYPE_PRECISION (pref_type);
5736 tree base;
5738 /* Check whether PREFERRED_MODE is able to represent niter. */
5739 if (pref_prec > prec
5740 || wi::ltu_p (iterations_max,
5741 widest_int::from (wi::max_value (pref_prec, UNSIGNED),
5742 UNSIGNED)))
5744 /* No wrap: it is safe to use the preferred type after computing niter + 1. */
5745 if (wi::ltu_p (iterations_max,
5746 widest_int::from (wi::max_value (prec, UNSIGNED),
5747 UNSIGNED)))
5749 /* This could help to optimize a "-1 +1" pair when niter looks
5750 like "n-1": n is in the original mode. "base = (n - 1) + 1"
5751 in PREFERRED_MODE: it could be base = (PREFERRED_TYPE)n. */
5752 base = fold_build2 (PLUS_EXPR, ntype, unshare_expr (niter),
5753 build_int_cst (ntype, 1));
5754 base = fold_convert (pref_type, base);
5757 /* To avoid wrap, convert niter to the preferred type before adding 1. */
5758 else
5760 niter = fold_convert (pref_type, niter);
5761 base = fold_build2 (PLUS_EXPR, pref_type, unshare_expr (niter),
5762 build_int_cst (pref_type, 1));
5765 else
5766 base = fold_build2 (PLUS_EXPR, ntype, unshare_expr (niter),
5767 build_int_cst (ntype, 1));
5768 return base;
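/* For instance (hypothetical modes): if NITER is "n - 1" in a 32-bit type
   and the target prefers a 64-bit doloop counter, and ITERATIONS_MAX shows
   that adding 1 cannot wrap in 32 bits, the base is computed as (n - 1) + 1
   in the 32-bit type and then widened, which folds to just the widened n;
   otherwise NITER is widened to 64 bits first and 1 is added there.  */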
5771 /* Add one doloop dedicated IV candidate:
5772 - Base is (may_be_zero ? 1 : (niter + 1)).
5773 - Step is -1. */
5775 static void
5776 add_iv_candidate_for_doloop (struct ivopts_data *data)
5778 tree_niter_desc *niter_desc = niter_for_single_dom_exit (data);
5779 gcc_assert (niter_desc && niter_desc->assumptions);
5781 tree niter = niter_desc->niter;
5782 tree ntype = TREE_TYPE (niter);
5783 gcc_assert (TREE_CODE (ntype) == INTEGER_TYPE);
5785 tree may_be_zero = niter_desc->may_be_zero;
5786 if (may_be_zero && integer_zerop (may_be_zero))
5787 may_be_zero = NULL_TREE;
5788 if (may_be_zero)
5790 if (COMPARISON_CLASS_P (may_be_zero))
5792 niter = fold_build3 (COND_EXPR, ntype, may_be_zero,
5793 build_int_cst (ntype, 0),
5794 rewrite_to_non_trapping_overflow (niter));
5796 /* Don't try to obtain the iteration count expression when may_be_zero is
5797 integer_nonzerop (the iteration count is then one) or any other form. */
5798 else
5799 return;
5802 machine_mode mode = TYPE_MODE (ntype);
5803 machine_mode pref_mode = targetm.preferred_doloop_mode (mode);
5805 tree base;
5806 if (mode != pref_mode)
5808 base = compute_doloop_base_on_mode (pref_mode, niter, niter_desc->max);
5809 ntype = TREE_TYPE (base);
5811 else
5812 base = fold_build2 (PLUS_EXPR, ntype, unshare_expr (niter),
5813 build_int_cst (ntype, 1));
5816 add_candidate (data, base, build_int_cst (ntype, -1), true, NULL, NULL, true);
5819 /* Finds the candidates for the induction variables. */
5821 static void
5822 find_iv_candidates (struct ivopts_data *data)
5824 /* Add commonly used ivs. */
5825 add_standard_iv_candidates (data);
5827 /* Add doloop dedicated ivs. */
5828 if (data->doloop_use_p)
5829 add_iv_candidate_for_doloop (data);
5831 /* Add old induction variables. */
5832 add_iv_candidate_for_bivs (data);
5834 /* Add induction variables derived from uses. */
5835 add_iv_candidate_for_groups (data);
5837 set_autoinc_for_original_candidates (data);
5839 /* Record the important candidates. */
5840 record_important_candidates (data);
5842 /* Relate compare iv_use with all candidates. */
5843 if (!data->consider_all_candidates)
5844 relate_compare_use_with_all_cands (data);
5846 if (dump_file && (dump_flags & TDF_DETAILS))
5848 unsigned i;
5850 fprintf (dump_file, "\n<Important Candidates>:\t");
5851 for (i = 0; i < data->vcands.length (); i++)
5852 if (data->vcands[i]->important)
5853 fprintf (dump_file, " %d,", data->vcands[i]->id);
5854 fprintf (dump_file, "\n");
5856 fprintf (dump_file, "\n<Group, Cand> Related:\n");
5857 for (i = 0; i < data->vgroups.length (); i++)
5859 struct iv_group *group = data->vgroups[i];
5861 if (group->related_cands)
5863 fprintf (dump_file, " Group %d:\t", group->id);
5864 dump_bitmap (dump_file, group->related_cands);
5867 fprintf (dump_file, "\n");
5871 /* Determines the cost of computing each group of uses with each iv candidate. */
5873 static void
5874 determine_group_iv_costs (struct ivopts_data *data)
5876 unsigned i, j;
5877 struct iv_cand *cand;
5878 struct iv_group *group;
5879 bitmap to_clear = BITMAP_ALLOC (NULL);
5881 alloc_use_cost_map (data);
5883 for (i = 0; i < data->vgroups.length (); i++)
5885 group = data->vgroups[i];
5887 if (data->consider_all_candidates)
5889 for (j = 0; j < data->vcands.length (); j++)
5891 cand = data->vcands[j];
5892 determine_group_iv_cost (data, group, cand);
5895 else
5897 bitmap_iterator bi;
5899 EXECUTE_IF_SET_IN_BITMAP (group->related_cands, 0, j, bi)
5901 cand = data->vcands[j];
5902 if (!determine_group_iv_cost (data, group, cand))
5903 bitmap_set_bit (to_clear, j);
5906 /* Remove the candidates for which the cost is infinite from
5907 the list of related candidates. */
5908 bitmap_and_compl_into (group->related_cands, to_clear);
5909 bitmap_clear (to_clear);
5913 BITMAP_FREE (to_clear);
5915 if (dump_file && (dump_flags & TDF_DETAILS))
5917 bitmap_iterator bi;
5919 /* Dump invariant variables. */
5920 fprintf (dump_file, "\n<Invariant Vars>:\n");
5921 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
5923 struct version_info *info = ver_info (data, i);
5924 if (info->inv_id)
5926 fprintf (dump_file, "Inv %d:\t", info->inv_id);
5927 print_generic_expr (dump_file, info->name, TDF_SLIM);
5928 fprintf (dump_file, "%s\n",
5929 info->has_nonlin_use ? "" : "\t(eliminable)");
5933 /* Dump invariant expressions. */
5934 fprintf (dump_file, "\n<Invariant Expressions>:\n");
5935 auto_vec <iv_inv_expr_ent *> list (data->inv_expr_tab->elements ());
5937 for (hash_table<iv_inv_expr_hasher>::iterator it
5938 = data->inv_expr_tab->begin (); it != data->inv_expr_tab->end ();
5939 ++it)
5940 list.safe_push (*it);
5942 list.qsort (sort_iv_inv_expr_ent);
5944 for (i = 0; i < list.length (); ++i)
5946 fprintf (dump_file, "inv_expr %d: \t", list[i]->id);
5947 print_generic_expr (dump_file, list[i]->expr, TDF_SLIM);
5948 fprintf (dump_file, "\n");
5951 fprintf (dump_file, "\n<Group-candidate Costs>:\n");
5953 for (i = 0; i < data->vgroups.length (); i++)
5955 group = data->vgroups[i];
5957 fprintf (dump_file, "Group %d:\n", i);
5958 fprintf (dump_file, " cand\tcost\tcompl.\tinv.expr.\tinv.vars\n");
5959 for (j = 0; j < group->n_map_members; j++)
5961 if (!group->cost_map[j].cand
5962 || group->cost_map[j].cost.infinite_cost_p ())
5963 continue;
5965 fprintf (dump_file, " %d\t%" PRId64 "\t%d\t",
5966 group->cost_map[j].cand->id,
5967 group->cost_map[j].cost.cost,
5968 group->cost_map[j].cost.complexity);
5969 if (!group->cost_map[j].inv_exprs
5970 || bitmap_empty_p (group->cost_map[j].inv_exprs))
5971 fprintf (dump_file, "NIL;\t");
5972 else
5973 bitmap_print (dump_file,
5974 group->cost_map[j].inv_exprs, "", ";\t");
5975 if (!group->cost_map[j].inv_vars
5976 || bitmap_empty_p (group->cost_map[j].inv_vars))
5977 fprintf (dump_file, "NIL;\n");
5978 else
5979 bitmap_print (dump_file,
5980 group->cost_map[j].inv_vars, "", "\n");
5983 fprintf (dump_file, "\n");
5985 fprintf (dump_file, "\n");
5989 /* Determines cost of the candidate CAND. */
5991 static void
5992 determine_iv_cost (struct ivopts_data *data, struct iv_cand *cand)
5994 comp_cost cost_base;
5995 int64_t cost, cost_step;
5996 tree base;
5998 gcc_assert (cand->iv != NULL);
6000 /* There are two costs associated with the candidate -- its increment
6001 and its initialization. The second is almost negligible for any loop
6002 that rolls enough, so we take it into account only slightly. */
6004 base = cand->iv->base;
6005 cost_base = force_var_cost (data, base, NULL);
6006 /* It will be exceptional that the iv register happens to be initialized with
6007 the proper value at no cost. In general, there will at least be a regcopy
6008 or a const set. */
6009 if (cost_base.cost == 0)
6010 cost_base.cost = COSTS_N_INSNS (1);
6011 /* Doloop decrement should be considered as zero cost. */
6012 if (cand->doloop_p)
6013 cost_step = 0;
6014 else
6015 cost_step = add_cost (data->speed, TYPE_MODE (TREE_TYPE (base)));
6016 cost = cost_step + adjust_setup_cost (data, cost_base.cost);
6018 /* Prefer the original ivs unless we may gain something by replacing them.
6019 The reason is to make debugging simpler, so this is not relevant for
6020 artificial ivs created by other optimization passes. */
6021 if ((cand->pos != IP_ORIGINAL
6022 || !SSA_NAME_VAR (cand->var_before)
6023 || DECL_ARTIFICIAL (SSA_NAME_VAR (cand->var_before)))
6024 /* Prefer doloop as well. */
6025 && !cand->doloop_p)
6026 cost++;
6028 /* Prefer not to insert statements into latch unless there are some
6029 already (so that we do not create unnecessary jumps). */
6030 if (cand->pos == IP_END
6031 && empty_block_p (ip_end_pos (data->current_loop)))
6032 cost++;
6034 cand->cost = cost;
6035 cand->cost_step = cost_step;
6038 /* Determines costs of computation of the candidates. */
6040 static void
6041 determine_iv_costs (struct ivopts_data *data)
6043 unsigned i;
6045 if (dump_file && (dump_flags & TDF_DETAILS))
6047 fprintf (dump_file, "<Candidate Costs>:\n");
6048 fprintf (dump_file, " cand\tcost\n");
6051 for (i = 0; i < data->vcands.length (); i++)
6053 struct iv_cand *cand = data->vcands[i];
6055 determine_iv_cost (data, cand);
6057 if (dump_file && (dump_flags & TDF_DETAILS))
6058 fprintf (dump_file, " %d\t%d\n", i, cand->cost);
6061 if (dump_file && (dump_flags & TDF_DETAILS))
6062 fprintf (dump_file, "\n");
6065 /* Estimate register pressure for loop having N_INVS invariants and N_CANDS
6066 induction variables. Note N_INVS includes both invariant variables and
6067 invariant expressions. */
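/* Purely illustrative example (hypothetical numbers, no particular target):
   with available_regs == 10, regs_used == 2, n_invs == 4 and n_cands == 12,
   regs_needed is 18 and even the candidates alone exceed the register file,
   so the last formula below applies: 10 registers are charged at
   target_reg_cost, the 2 candidate registers beyond that at twice
   target_spill_cost, and the remaining 6 registers at target_spill_cost.  */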
6069 static unsigned
6070 ivopts_estimate_reg_pressure (struct ivopts_data *data, unsigned n_invs,
6071 unsigned n_cands)
6073 unsigned cost;
6074 unsigned n_old = data->regs_used, n_new = n_invs + n_cands;
6075 unsigned regs_needed = n_new + n_old, available_regs = target_avail_regs;
6076 bool speed = data->speed;
6078 /* If there is a call in the loop body, the call-clobbered registers
6079 are not available for loop invariants. */
6080 if (data->body_includes_call)
6081 available_regs = available_regs - target_clobbered_regs;
6083 /* If we have enough registers. */
6084 if (regs_needed + target_res_regs < available_regs)
6085 cost = n_new;
6086 /* If close to running out of registers, try to preserve them. */
6087 else if (regs_needed <= available_regs)
6088 cost = target_reg_cost [speed] * regs_needed;
6089 /* If regs_needed exceeds the available registers but the candidates alone
6090 still fit, penalize the extra registers using target_spill_cost. */
6091 else if (n_cands <= available_regs)
6092 cost = target_reg_cost [speed] * available_regs
6093 + target_spill_cost [speed] * (regs_needed - available_regs);
6094 /* If the number of candidates itself runs out of available registers, we
6095 penalize extra candidate registers using target_spill_cost * 2, because
6096 it is more expensive to spill an induction variable than an invariant. */
6097 else
6098 cost = target_reg_cost [speed] * available_regs
6099 + target_spill_cost [speed] * (n_cands - available_regs) * 2
6100 + target_spill_cost [speed] * (regs_needed - n_cands);
6102 /* Finally, add the number of candidates, so that we prefer eliminating
6103 induction variables if possible. */
6104 return cost + n_cands;
6107 /* For each size of the induction variable set determine the penalty. */
6109 static void
6110 determine_set_costs (struct ivopts_data *data)
6112 unsigned j, n;
6113 gphi *phi;
6114 gphi_iterator psi;
6115 tree op;
6116 class loop *loop = data->current_loop;
6117 bitmap_iterator bi;
6119 if (dump_file && (dump_flags & TDF_DETAILS))
6121 fprintf (dump_file, "<Global Costs>:\n");
6122 fprintf (dump_file, " target_avail_regs %d\n", target_avail_regs);
6123 fprintf (dump_file, " target_clobbered_regs %d\n", target_clobbered_regs);
6124 fprintf (dump_file, " target_reg_cost %d\n", target_reg_cost[data->speed]);
6125 fprintf (dump_file, " target_spill_cost %d\n", target_spill_cost[data->speed]);
6128 n = 0;
6129 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
6131 phi = psi.phi ();
6132 op = PHI_RESULT (phi);
6134 if (virtual_operand_p (op))
6135 continue;
6137 if (get_iv (data, op))
6138 continue;
6140 if (!POINTER_TYPE_P (TREE_TYPE (op))
6141 && !INTEGRAL_TYPE_P (TREE_TYPE (op)))
6142 continue;
6144 n++;
6147 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
6149 struct version_info *info = ver_info (data, j);
6151 if (info->inv_id && info->has_nonlin_use)
6152 n++;
6155 data->regs_used = n;
6156 if (dump_file && (dump_flags & TDF_DETAILS))
6157 fprintf (dump_file, " regs_used %d\n", n);
6159 if (dump_file && (dump_flags & TDF_DETAILS))
6161 fprintf (dump_file, " cost for size:\n");
6162 fprintf (dump_file, " ivs\tcost\n");
6163 for (j = 0; j <= 2 * target_avail_regs; j++)
6164 fprintf (dump_file, " %d\t%d\n", j,
6165 ivopts_estimate_reg_pressure (data, 0, j));
6166 fprintf (dump_file, "\n");
6170 /* Returns true if A is a cheaper cost pair than B. */
6172 static bool
6173 cheaper_cost_pair (class cost_pair *a, class cost_pair *b)
6175 if (!a)
6176 return false;
6178 if (!b)
6179 return true;
6181 if (a->cost < b->cost)
6182 return true;
6184 if (b->cost < a->cost)
6185 return false;
6187 /* In case the costs are the same, prefer the cheaper candidate. */
6188 if (a->cand->cost < b->cand->cost)
6189 return true;
6191 return false;
6194 /* Compare if A is a more expensive cost pair than B. Return 1, 0 and -1
6195 for more expensive, equal and cheaper respectively. */
6197 static int
6198 compare_cost_pair (class cost_pair *a, class cost_pair *b)
6200 if (cheaper_cost_pair (a, b))
6201 return -1;
6202 if (cheaper_cost_pair (b, a))
6203 return 1;
6205 return 0;
6208 /* Returns the cost pair for the candidate by which GROUP is expressed in IVS. */
6210 static class cost_pair *
6211 iv_ca_cand_for_group (class iv_ca *ivs, struct iv_group *group)
6213 return ivs->cand_for_group[group->id];
6216 /* Computes the cost field of IVS structure. */
6218 static void
6219 iv_ca_recount_cost (struct ivopts_data *data, class iv_ca *ivs)
6221 comp_cost cost = ivs->cand_use_cost;
6223 cost += ivs->cand_cost;
6224 cost += ivopts_estimate_reg_pressure (data, ivs->n_invs, ivs->n_cands);
6225 ivs->cost = cost;
6228 /* Remove a use of the invariants in set INVS by decreasing their counters in
6229 N_INV_USES and updating IVS. */
6231 static void
6232 iv_ca_set_remove_invs (class iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
6234 bitmap_iterator bi;
6235 unsigned iid;
6237 if (!invs)
6238 return;
6240 gcc_assert (n_inv_uses != NULL);
6241 EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
6243 n_inv_uses[iid]--;
6244 if (n_inv_uses[iid] == 0)
6245 ivs->n_invs--;
6249 /* Set GROUP not to be expressed by any candidate in IVS. */
6251 static void
6252 iv_ca_set_no_cp (struct ivopts_data *data, class iv_ca *ivs,
6253 struct iv_group *group)
6255 unsigned gid = group->id, cid;
6256 class cost_pair *cp;
6258 cp = ivs->cand_for_group[gid];
6259 if (!cp)
6260 return;
6261 cid = cp->cand->id;
6263 ivs->bad_groups++;
6264 ivs->cand_for_group[gid] = NULL;
6265 ivs->n_cand_uses[cid]--;
6267 if (ivs->n_cand_uses[cid] == 0)
6269 bitmap_clear_bit (ivs->cands, cid);
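/* A doloop candidate is expected to live in the target's dedicated count
   register when have_count_reg_decr_p, so it is not counted against
   general register pressure in n_cands.  */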
6270 if (!cp->cand->doloop_p || !targetm.have_count_reg_decr_p)
6271 ivs->n_cands--;
6272 ivs->cand_cost -= cp->cand->cost;
6273 iv_ca_set_remove_invs (ivs, cp->cand->inv_vars, ivs->n_inv_var_uses);
6274 iv_ca_set_remove_invs (ivs, cp->cand->inv_exprs, ivs->n_inv_expr_uses);
6277 ivs->cand_use_cost -= cp->cost;
6278 iv_ca_set_remove_invs (ivs, cp->inv_vars, ivs->n_inv_var_uses);
6279 iv_ca_set_remove_invs (ivs, cp->inv_exprs, ivs->n_inv_expr_uses);
6280 iv_ca_recount_cost (data, ivs);
6283 /* Add a use of the invariants in set INVS by increasing their counters in
6284 N_INV_USES and updating IVS. */
6286 static void
6287 iv_ca_set_add_invs (class iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
6289 bitmap_iterator bi;
6290 unsigned iid;
6292 if (!invs)
6293 return;
6295 gcc_assert (n_inv_uses != NULL);
6296 EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
6298 n_inv_uses[iid]++;
6299 if (n_inv_uses[iid] == 1)
6300 ivs->n_invs++;
6304 /* Set cost pair for GROUP in set IVS to CP. */
6306 static void
6307 iv_ca_set_cp (struct ivopts_data *data, class iv_ca *ivs,
6308 struct iv_group *group, class cost_pair *cp)
6310 unsigned gid = group->id, cid;
6312 if (ivs->cand_for_group[gid] == cp)
6313 return;
6315 if (ivs->cand_for_group[gid])
6316 iv_ca_set_no_cp (data, ivs, group);
6318 if (cp)
6320 cid = cp->cand->id;
6322 ivs->bad_groups--;
6323 ivs->cand_for_group[gid] = cp;
6324 ivs->n_cand_uses[cid]++;
6325 if (ivs->n_cand_uses[cid] == 1)
6327 bitmap_set_bit (ivs->cands, cid);
6328 if (!cp->cand->doloop_p || !targetm.have_count_reg_decr_p)
6329 ivs->n_cands++;
6330 ivs->cand_cost += cp->cand->cost;
6331 iv_ca_set_add_invs (ivs, cp->cand->inv_vars, ivs->n_inv_var_uses);
6332 iv_ca_set_add_invs (ivs, cp->cand->inv_exprs, ivs->n_inv_expr_uses);
6335 ivs->cand_use_cost += cp->cost;
6336 iv_ca_set_add_invs (ivs, cp->inv_vars, ivs->n_inv_var_uses);
6337 iv_ca_set_add_invs (ivs, cp->inv_exprs, ivs->n_inv_expr_uses);
6338 iv_ca_recount_cost (data, ivs);
6342 /* Extend set IVS by expressing GROUP by some of the candidates in it
6343 if possible. Consider all important candidates if the candidates in
6344 set IVS don't give any result. */
6346 static void
6347 iv_ca_add_group (struct ivopts_data *data, class iv_ca *ivs,
6348 struct iv_group *group)
6350 class cost_pair *best_cp = NULL, *cp;
6351 bitmap_iterator bi;
6352 unsigned i;
6353 struct iv_cand *cand;
6355 gcc_assert (ivs->upto >= group->id);
6356 ivs->upto++;
6357 ivs->bad_groups++;
6359 EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
6361 cand = data->vcands[i];
6362 cp = get_group_iv_cost (data, group, cand);
6363 if (cheaper_cost_pair (cp, best_cp))
6364 best_cp = cp;
6367 if (best_cp == NULL)
6369 EXECUTE_IF_SET_IN_BITMAP (data->important_candidates, 0, i, bi)
6371 cand = data->vcands[i];
6372 cp = get_group_iv_cost (data, group, cand);
6373 if (cheaper_cost_pair (cp, best_cp))
6374 best_cp = cp;
6378 iv_ca_set_cp (data, ivs, group, best_cp);
6381 /* Get cost for assignment IVS. */
6383 static comp_cost
6384 iv_ca_cost (class iv_ca *ivs)
6386 /* This was a conditional expression but it triggered a bug in
6387 Sun C 5.5. */
6388 if (ivs->bad_groups)
6389 return infinite_cost;
6390 else
6391 return ivs->cost;
6394 /* Compare if applying NEW_CP to GROUP for IVS introduces more invariants
6395 than OLD_CP. Return 1, 0 and -1 for more, equal and fewer invariants
6396 respectively. */
6398 static int
6399 iv_ca_compare_deps (struct ivopts_data *data, class iv_ca *ivs,
6400 struct iv_group *group, class cost_pair *old_cp,
6401 class cost_pair *new_cp)
6403 gcc_assert (old_cp && new_cp && old_cp != new_cp);
6404 unsigned old_n_invs = ivs->n_invs;
6405 iv_ca_set_cp (data, ivs, group, new_cp);
6406 unsigned new_n_invs = ivs->n_invs;
6407 iv_ca_set_cp (data, ivs, group, old_cp);
6409 return new_n_invs > old_n_invs ? 1 : (new_n_invs < old_n_invs ? -1 : 0);
6412 /* Creates change of expressing GROUP by NEW_CP instead of OLD_CP and chains
6413 it before NEXT. */
6415 static struct iv_ca_delta *
6416 iv_ca_delta_add (struct iv_group *group, class cost_pair *old_cp,
6417 class cost_pair *new_cp, struct iv_ca_delta *next)
6419 struct iv_ca_delta *change = XNEW (struct iv_ca_delta);
6421 change->group = group;
6422 change->old_cp = old_cp;
6423 change->new_cp = new_cp;
6424 change->next = next;
6426 return change;
6429 /* Joins two lists of changes L1 and L2. Destructive -- old lists
6430 are rewritten. */
6432 static struct iv_ca_delta *
6433 iv_ca_delta_join (struct iv_ca_delta *l1, struct iv_ca_delta *l2)
6435 struct iv_ca_delta *last;
6437 if (!l2)
6438 return l1;
6440 if (!l1)
6441 return l2;
6443 for (last = l1; last->next; last = last->next)
6444 continue;
6445 last->next = l2;
6447 return l1;
6450 /* Reverse the list of changes DELTA, forming the inverse to it. */
6452 static struct iv_ca_delta *
6453 iv_ca_delta_reverse (struct iv_ca_delta *delta)
6455 struct iv_ca_delta *act, *next, *prev = NULL;
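/* In-place list reversal; additionally swap old_cp and new_cp in every
   node so that committing the reversed list undoes the original changes.  */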
6457 for (act = delta; act; act = next)
6459 next = act->next;
6460 act->next = prev;
6461 prev = act;
6463 std::swap (act->old_cp, act->new_cp);
6466 return prev;
6469 /* Commit changes in DELTA to IVS. If FORWARD is false, the changes are
6470 reverted instead. */
6472 static void
6473 iv_ca_delta_commit (struct ivopts_data *data, class iv_ca *ivs,
6474 struct iv_ca_delta *delta, bool forward)
6476 class cost_pair *from, *to;
6477 struct iv_ca_delta *act;
6479 if (!forward)
6480 delta = iv_ca_delta_reverse (delta);
6482 for (act = delta; act; act = act->next)
6484 from = act->old_cp;
6485 to = act->new_cp;
6486 gcc_assert (iv_ca_cand_for_group (ivs, act->group) == from);
6487 iv_ca_set_cp (data, ivs, act->group, to);
6490 if (!forward)
6491 iv_ca_delta_reverse (delta);
6494 /* Returns true if CAND is used in IVS. */
6496 static bool
6497 iv_ca_cand_used_p (class iv_ca *ivs, struct iv_cand *cand)
6499 return ivs->n_cand_uses[cand->id] > 0;
6502 /* Returns number of induction variable candidates in the set IVS. */
6504 static unsigned
6505 iv_ca_n_cands (class iv_ca *ivs)
6507 return ivs->n_cands;
6510 /* Free the list of changes DELTA. */
6512 static void
6513 iv_ca_delta_free (struct iv_ca_delta **delta)
6515 struct iv_ca_delta *act, *next;
6517 for (act = *delta; act; act = next)
6519 next = act->next;
6520 free (act);
6523 *delta = NULL;
6526 /* Allocates a new iv candidate assignment. */
6528 static class iv_ca *
6529 iv_ca_new (struct ivopts_data *data)
6531 class iv_ca *nw = XNEW (class iv_ca);
6533 nw->upto = 0;
6534 nw->bad_groups = 0;
6535 nw->cand_for_group = XCNEWVEC (class cost_pair *,
6536 data->vgroups.length ());
6537 nw->n_cand_uses = XCNEWVEC (unsigned, data->vcands.length ());
6538 nw->cands = BITMAP_ALLOC (NULL);
6539 nw->n_cands = 0;
6540 nw->n_invs = 0;
6541 nw->cand_use_cost = no_cost;
6542 nw->cand_cost = 0;
6543 nw->n_inv_var_uses = XCNEWVEC (unsigned, data->max_inv_var_id + 1);
6544 nw->n_inv_expr_uses = XCNEWVEC (unsigned, data->max_inv_expr_id + 1);
6545 nw->cost = no_cost;
6547 return nw;
6550 /* Free memory occupied by the set IVS. */
6552 static void
6553 iv_ca_free (class iv_ca **ivs)
6555 free ((*ivs)->cand_for_group);
6556 free ((*ivs)->n_cand_uses);
6557 BITMAP_FREE ((*ivs)->cands);
6558 free ((*ivs)->n_inv_var_uses);
6559 free ((*ivs)->n_inv_expr_uses);
6560 free (*ivs);
6561 *ivs = NULL;
6564 /* Dumps IVS to FILE. */
6566 static void
6567 iv_ca_dump (struct ivopts_data *data, FILE *file, class iv_ca *ivs)
6569 unsigned i;
6570 comp_cost cost = iv_ca_cost (ivs);
6572 fprintf (file, " cost: %" PRId64 " (complexity %d)\n", cost.cost,
6573 cost.complexity);
6574 fprintf (file, " reg_cost: %d\n",
6575 ivopts_estimate_reg_pressure (data, ivs->n_invs, ivs->n_cands));
6576 fprintf (file, " cand_cost: %" PRId64 "\n cand_group_cost: "
6577 "%" PRId64 " (complexity %d)\n", ivs->cand_cost,
6578 ivs->cand_use_cost.cost, ivs->cand_use_cost.complexity);
6579 bitmap_print (file, ivs->cands, " candidates: ","\n");
6581 for (i = 0; i < ivs->upto; i++)
6583 struct iv_group *group = data->vgroups[i];
6584 class cost_pair *cp = iv_ca_cand_for_group (ivs, group);
6585 if (cp)
6586 fprintf (file, " group:%d --> iv_cand:%d, cost=("
6587 "%" PRId64 ",%d)\n", group->id, cp->cand->id,
6588 cp->cost.cost, cp->cost.complexity);
6589 else
6590 fprintf (file, " group:%d --> ??\n", group->id);
6593 const char *pref = "";
6594 fprintf (file, " invariant variables: ");
6595 for (i = 1; i <= data->max_inv_var_id; i++)
6596 if (ivs->n_inv_var_uses[i])
6598 fprintf (file, "%s%d", pref, i);
6599 pref = ", ";
6602 pref = "";
6603 fprintf (file, "\n invariant expressions: ");
6604 for (i = 1; i <= data->max_inv_expr_id; i++)
6605 if (ivs->n_inv_expr_uses[i])
6607 fprintf (file, "%s%d", pref, i);
6608 pref = ", ";
6611 fprintf (file, "\n\n");
6614 /* Try changing the candidate in IVS to CAND for each use. Return the cost of
6615 the new set, and store the differences in DELTA. The number of induction
6616 variables in the new set is stored in N_IVS. MIN_NCAND is a flag; when it
6617 is true the function tries to find a solution with minimal iv candidates. */
6619 static comp_cost
6620 iv_ca_extend (struct ivopts_data *data, class iv_ca *ivs,
6621 struct iv_cand *cand, struct iv_ca_delta **delta,
6622 unsigned *n_ivs, bool min_ncand)
6624 unsigned i;
6625 comp_cost cost;
6626 struct iv_group *group;
6627 class cost_pair *old_cp, *new_cp;
6629 *delta = NULL;
6630 for (i = 0; i < ivs->upto; i++)
6632 group = data->vgroups[i];
6633 old_cp = iv_ca_cand_for_group (ivs, group);
6635 if (old_cp
6636 && old_cp->cand == cand)
6637 continue;
6639 new_cp = get_group_iv_cost (data, group, cand);
6640 if (!new_cp)
6641 continue;
6643 if (!min_ncand)
6645 int cmp_invs = iv_ca_compare_deps (data, ivs, group, old_cp, new_cp);
6646 /* Skip if new_cp depends on more invariants. */
6647 if (cmp_invs > 0)
6648 continue;
6650 int cmp_cost = compare_cost_pair (new_cp, old_cp);
6651 /* Skip if new_cp is not cheaper. */
6652 if (cmp_cost > 0 || (cmp_cost == 0 && cmp_invs == 0))
6653 continue;
6656 *delta = iv_ca_delta_add (group, old_cp, new_cp, *delta);
6659 iv_ca_delta_commit (data, ivs, *delta, true);
6660 cost = iv_ca_cost (ivs);
6661 if (n_ivs)
6662 *n_ivs = iv_ca_n_cands (ivs);
6663 iv_ca_delta_commit (data, ivs, *delta, false);
6665 return cost;
6668 /* Try narrowing set IVS by removing CAND. Return the cost of
6669 the new set and store the differences in DELTA. START is
6670 the candidate with which we start narrowing. */
6672 static comp_cost
6673 iv_ca_narrow (struct ivopts_data *data, class iv_ca *ivs,
6674 struct iv_cand *cand, struct iv_cand *start,
6675 struct iv_ca_delta **delta)
6677 unsigned i, ci;
6678 struct iv_group *group;
6679 class cost_pair *old_cp, *new_cp, *cp;
6680 bitmap_iterator bi;
6681 struct iv_cand *cnd;
6682 comp_cost cost, best_cost, acost;
6684 *delta = NULL;
6685 for (i = 0; i < data->vgroups.length (); i++)
6687 group = data->vgroups[i];
6689 old_cp = iv_ca_cand_for_group (ivs, group);
6690 if (old_cp->cand != cand)
6691 continue;
6693 best_cost = iv_ca_cost (ivs);
6694 /* Start narrowing with START. */
6695 new_cp = get_group_iv_cost (data, group, start);
6697 if (data->consider_all_candidates)
6699 EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, ci, bi)
6701 if (ci == cand->id || (start && ci == start->id))
6702 continue;
6704 cnd = data->vcands[ci];
6706 cp = get_group_iv_cost (data, group, cnd);
6707 if (!cp)
6708 continue;
6710 iv_ca_set_cp (data, ivs, group, cp);
6711 acost = iv_ca_cost (ivs);
6713 if (acost < best_cost)
6715 best_cost = acost;
6716 new_cp = cp;
6720 else
6722 EXECUTE_IF_AND_IN_BITMAP (group->related_cands, ivs->cands, 0, ci, bi)
6724 if (ci == cand->id || (start && ci == start->id))
6725 continue;
6727 cnd = data->vcands[ci];
6729 cp = get_group_iv_cost (data, group, cnd);
6730 if (!cp)
6731 continue;
6733 iv_ca_set_cp (data, ivs, group, cp);
6734 acost = iv_ca_cost (ivs);
6736 if (acost < best_cost)
6738 best_cost = acost;
6739 new_cp = cp;
6743 /* Restore the old cp for this group. */
6744 iv_ca_set_cp (data, ivs, group, old_cp);
6746 if (!new_cp)
6748 iv_ca_delta_free (delta);
6749 return infinite_cost;
6752 *delta = iv_ca_delta_add (group, old_cp, new_cp, *delta);
6755 iv_ca_delta_commit (data, ivs, *delta, true);
6756 cost = iv_ca_cost (ivs);
6757 iv_ca_delta_commit (data, ivs, *delta, false);
6759 return cost;
6762 /* Try optimizing the set of candidates IVS by removing candidates other
6763 than EXCEPT_CAND from it. Return the cost of the new set, and store the
6764 differences in DELTA. */
6766 static comp_cost
6767 iv_ca_prune (struct ivopts_data *data, class iv_ca *ivs,
6768 struct iv_cand *except_cand, struct iv_ca_delta **delta)
6770 bitmap_iterator bi;
6771 struct iv_ca_delta *act_delta, *best_delta;
6772 unsigned i;
6773 comp_cost best_cost, acost;
6774 struct iv_cand *cand;
6776 best_delta = NULL;
6777 best_cost = iv_ca_cost (ivs);
6779 EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
6781 cand = data->vcands[i];
6783 if (cand == except_cand)
6784 continue;
6786 acost = iv_ca_narrow (data, ivs, cand, except_cand, &act_delta);
6788 if (acost < best_cost)
6790 best_cost = acost;
6791 iv_ca_delta_free (&best_delta);
6792 best_delta = act_delta;
6794 else
6795 iv_ca_delta_free (&act_delta);
6798 if (!best_delta)
6800 *delta = NULL;
6801 return best_cost;
6804 /* Recurse to possibly remove other unnecessary ivs. */
6805 iv_ca_delta_commit (data, ivs, best_delta, true);
6806 best_cost = iv_ca_prune (data, ivs, except_cand, delta);
6807 iv_ca_delta_commit (data, ivs, best_delta, false);
6808 *delta = iv_ca_delta_join (best_delta, *delta);
6809 return best_cost;
6812 /* Check if CAND_IDX is a candidate other than OLD_CAND and has
6813 cheaper local cost for GROUP than BEST_CP. Return pointer to
6814 the corresponding cost_pair, otherwise just return BEST_CP. */
6816 static class cost_pair*
6817 cheaper_cost_with_cand (struct ivopts_data *data, struct iv_group *group,
6818 unsigned int cand_idx, struct iv_cand *old_cand,
6819 class cost_pair *best_cp)
6821 struct iv_cand *cand;
6822 class cost_pair *cp;
6824 gcc_assert (old_cand != NULL && best_cp != NULL);
6825 if (cand_idx == old_cand->id)
6826 return best_cp;
6828 cand = data->vcands[cand_idx];
6829 cp = get_group_iv_cost (data, group, cand);
6830 if (cp != NULL && cheaper_cost_pair (cp, best_cp))
6831 return cp;
6833 return best_cp;
6836 /* Try breaking the local optimal fixed point for IVS by replacing candidates
6837 which are used by more than one iv use. For each of those candidates,
6838 this function tries to represent the iv uses under that candidate using
6839 other candidates with lower local cost, then tries to prune the new set.
6840 If the new set has a lower cost, it returns the new cost after recording
6841 the candidate replacement in list DELTA. */
6843 static comp_cost
6844 iv_ca_replace (struct ivopts_data *data, class iv_ca *ivs,
6845 struct iv_ca_delta **delta)
6847 bitmap_iterator bi, bj;
6848 unsigned int i, j, k;
6849 struct iv_cand *cand;
6850 comp_cost orig_cost, acost;
6851 struct iv_ca_delta *act_delta, *tmp_delta;
6852 class cost_pair *old_cp, *best_cp = NULL;
6854 *delta = NULL;
6855 orig_cost = iv_ca_cost (ivs);
6857 EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
6859 if (ivs->n_cand_uses[i] == 1
6860 || ivs->n_cand_uses[i] > ALWAYS_PRUNE_CAND_SET_BOUND)
6861 continue;
6863 cand = data->vcands[i];
6865 act_delta = NULL;
6866 /* Represent uses under current candidate using other ones with
6867 lower local cost. */
6868 for (j = 0; j < ivs->upto; j++)
6870 struct iv_group *group = data->vgroups[j];
6871 old_cp = iv_ca_cand_for_group (ivs, group);
6873 if (old_cp->cand != cand)
6874 continue;
6876 best_cp = old_cp;
6877 if (data->consider_all_candidates)
6878 for (k = 0; k < data->vcands.length (); k++)
6879 best_cp = cheaper_cost_with_cand (data, group, k,
6880 old_cp->cand, best_cp);
6881 else
6882 EXECUTE_IF_SET_IN_BITMAP (group->related_cands, 0, k, bj)
6883 best_cp = cheaper_cost_with_cand (data, group, k,
6884 old_cp->cand, best_cp);
6886 if (best_cp == old_cp)
6887 continue;
6889 act_delta = iv_ca_delta_add (group, old_cp, best_cp, act_delta);
6891 /* No need for further pruning. */
6892 if (!act_delta)
6893 continue;
6895 /* Prune the new candidate set. */
6896 iv_ca_delta_commit (data, ivs, act_delta, true);
6897 acost = iv_ca_prune (data, ivs, NULL, &tmp_delta);
6898 iv_ca_delta_commit (data, ivs, act_delta, false);
6899 act_delta = iv_ca_delta_join (act_delta, tmp_delta);
6901 if (acost < orig_cost)
6903 *delta = act_delta;
6904 return acost;
6906 else
6907 iv_ca_delta_free (&act_delta);
6910 return orig_cost;
6913 /* Tries to extend the set IVS in the best possible way in order to
6914 express the GROUP. If ORIGINALP is true, prefer candidates from
6915 the original set of IVs, otherwise favor important candidates not
6916 based on any memory object. */
6918 static bool
6919 try_add_cand_for (struct ivopts_data *data, class iv_ca *ivs,
6920 struct iv_group *group, bool originalp)
6922 comp_cost best_cost, act_cost;
6923 unsigned i;
6924 bitmap_iterator bi;
6925 struct iv_cand *cand;
6926 struct iv_ca_delta *best_delta = NULL, *act_delta;
6927 class cost_pair *cp;
6929 iv_ca_add_group (data, ivs, group);
6930 best_cost = iv_ca_cost (ivs);
6931 cp = iv_ca_cand_for_group (ivs, group);
6932 if (cp)
6934 best_delta = iv_ca_delta_add (group, NULL, cp, NULL);
6935 iv_ca_set_no_cp (data, ivs, group);
6938 /* If ORIGINALP is true, try to find the original IV for the use. Otherwise
6939 first try important candidates not based on any memory object. Only if
6940 this fails, try the specific ones. Rationale -- in loops with many
6941 variables the best choice often is to use just one generic biv. If we
6942 added here many ivs specific to the uses, the optimization algorithm later
6943 would be likely to get stuck in a local minimum, thus causing us to create
6944 too many ivs. The approach from few ivs to more seems more likely to be
6945 successful -- starting from few ivs, replacing an expensive use by a
6946 specific iv should always be a win. */
6947 EXECUTE_IF_SET_IN_BITMAP (group->related_cands, 0, i, bi)
6949 cand = data->vcands[i];
6951 if (originalp && cand->pos != IP_ORIGINAL)
6952 continue;
6954 if (!originalp && cand->iv->base_object != NULL_TREE)
6955 continue;
6957 if (iv_ca_cand_used_p (ivs, cand))
6958 continue;
6960 cp = get_group_iv_cost (data, group, cand);
6961 if (!cp)
6962 continue;
6964 iv_ca_set_cp (data, ivs, group, cp);
6965 act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL,
6966 true);
6967 iv_ca_set_no_cp (data, ivs, group);
6968 act_delta = iv_ca_delta_add (group, NULL, cp, act_delta);
6970 if (act_cost < best_cost)
6972 best_cost = act_cost;
6974 iv_ca_delta_free (&best_delta);
6975 best_delta = act_delta;
6977 else
6978 iv_ca_delta_free (&act_delta);
6981 if (best_cost.infinite_cost_p ())
6983 for (i = 0; i < group->n_map_members; i++)
6985 cp = group->cost_map + i;
6986 cand = cp->cand;
6987 if (!cand)
6988 continue;
6990 /* Already tried this. */
6991 if (cand->important)
6993 if (originalp && cand->pos == IP_ORIGINAL)
6994 continue;
6995 if (!originalp && cand->iv->base_object == NULL_TREE)
6996 continue;
6999 if (iv_ca_cand_used_p (ivs, cand))
7000 continue;
7002 act_delta = NULL;
7003 iv_ca_set_cp (data, ivs, group, cp);
7004 act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL, true);
7005 iv_ca_set_no_cp (data, ivs, group);
7006 act_delta = iv_ca_delta_add (group,
7007 iv_ca_cand_for_group (ivs, group),
7008 cp, act_delta);
7010 if (act_cost < best_cost)
7012 best_cost = act_cost;
7014 if (best_delta)
7015 iv_ca_delta_free (&best_delta);
7016 best_delta = act_delta;
7018 else
7019 iv_ca_delta_free (&act_delta);
7023 iv_ca_delta_commit (data, ivs, best_delta, true);
7024 iv_ca_delta_free (&best_delta);
7026 return !best_cost.infinite_cost_p ();
7029 /* Finds an initial assignment of candidates to uses. */
7031 static class iv_ca *
7032 get_initial_solution (struct ivopts_data *data, bool originalp)
7034 unsigned i;
7035 class iv_ca *ivs = iv_ca_new (data);
7037 for (i = 0; i < data->vgroups.length (); i++)
7038 if (!try_add_cand_for (data, ivs, data->vgroups[i], originalp))
7040 iv_ca_free (&ivs);
7041 return NULL;
7044 return ivs;
7047 /* Tries to improve the set of induction variables IVS. TRY_REPLACE_P
7048 points to a bool variable; if it is true, this function tries to break
7049 the local optimal fixed point by replacing candidates in IVS. */
7051 static bool
7052 try_improve_iv_set (struct ivopts_data *data,
7053 class iv_ca *ivs, bool *try_replace_p)
7055 unsigned i, n_ivs;
7056 comp_cost acost, best_cost = iv_ca_cost (ivs);
7057 struct iv_ca_delta *best_delta = NULL, *act_delta, *tmp_delta;
7058 struct iv_cand *cand;
7060 /* Try extending the set of induction variables by one. */
7061 for (i = 0; i < data->vcands.length (); i++)
7063 cand = data->vcands[i];
7065 if (iv_ca_cand_used_p (ivs, cand))
7066 continue;
7068 acost = iv_ca_extend (data, ivs, cand, &act_delta, &n_ivs, false);
7069 if (!act_delta)
7070 continue;
7072 /* If we successfully added the candidate and the set is small enough,
7073 try optimizing it by removing other candidates. */
7074 if (n_ivs <= ALWAYS_PRUNE_CAND_SET_BOUND)
7076 iv_ca_delta_commit (data, ivs, act_delta, true);
7077 acost = iv_ca_prune (data, ivs, cand, &tmp_delta);
7078 iv_ca_delta_commit (data, ivs, act_delta, false);
7079 act_delta = iv_ca_delta_join (act_delta, tmp_delta);
7082 if (acost < best_cost)
7084 best_cost = acost;
7085 iv_ca_delta_free (&best_delta);
7086 best_delta = act_delta;
7088 else
7089 iv_ca_delta_free (&act_delta);
7092 if (!best_delta)
7094 /* Try removing the candidates from the set instead. */
7095 best_cost = iv_ca_prune (data, ivs, NULL, &best_delta);
7097 if (!best_delta && *try_replace_p)
7099 *try_replace_p = false;
7100 /* So far the candidate selecting algorithm tends to choose fewer IVs
7101 so that it can handle cases in which loops have many variables
7102 but the best choice is often to use only one general biv. One
7103 weakness is that it can't handle the opposite case, in which different
7104 candidates should be chosen with respect to each use. To solve
7105 the problem, we replace candidates in the manner described in the
7106 comments of iv_ca_replace, thus giving the general algorithm a chance
7107 to break the local optimal fixed point in these cases. */
7108 best_cost = iv_ca_replace (data, ivs, &best_delta);
7111 if (!best_delta)
7112 return false;
7115 iv_ca_delta_commit (data, ivs, best_delta, true);
7116 iv_ca_delta_free (&best_delta);
7117 return best_cost == iv_ca_cost (ivs);
7120 /* Attempts to find the optimal set of induction variables. We use a simple
7121 greedy heuristic -- we try to replace at most one candidate in the selected
7122 solution and remove the unused ivs while this improves the cost. */
7124 static class iv_ca *
7125 find_optimal_iv_set_1 (struct ivopts_data *data, bool originalp)
7127 class iv_ca *set;
7128 bool try_replace_p = true;
7130 /* Get the initial solution. */
7131 set = get_initial_solution (data, originalp);
7132 if (!set)
7134 if (dump_file && (dump_flags & TDF_DETAILS))
7135 fprintf (dump_file, "Unable to substitute for ivs, failed.\n");
7136 return NULL;
7139 if (dump_file && (dump_flags & TDF_DETAILS))
7141 fprintf (dump_file, "Initial set of candidates:\n");
7142 iv_ca_dump (data, dump_file, set);
7145 while (try_improve_iv_set (data, set, &try_replace_p))
7147 if (dump_file && (dump_flags & TDF_DETAILS))
7149 fprintf (dump_file, "Improved to:\n");
7150 iv_ca_dump (data, dump_file, set);
7154 /* If the set has infinite_cost, it can't be optimal. */
7155 if (iv_ca_cost (set).infinite_cost_p ())
7157 if (dump_file && (dump_flags & TDF_DETAILS))
7158 fprintf (dump_file,
7159 "Overflow to infinite cost in try_improve_iv_set.\n");
7160 iv_ca_free (&set);
7162 return set;
7165 static class iv_ca *
7166 find_optimal_iv_set (struct ivopts_data *data)
7168 unsigned i;
7169 comp_cost cost, origcost;
7170 class iv_ca *set, *origset;
7172 /* Determine the cost based on a strategy that starts with the original IVs,
7173 and try again using a strategy that prefers candidates not based
7174 on the original IVs. */
7175 origset = find_optimal_iv_set_1 (data, true);
7176 set = find_optimal_iv_set_1 (data, false);
7178 if (!origset && !set)
7179 return NULL;
7181 origcost = origset ? iv_ca_cost (origset) : infinite_cost;
7182 cost = set ? iv_ca_cost (set) : infinite_cost;
7184 if (dump_file && (dump_flags & TDF_DETAILS))
7186 fprintf (dump_file, "Original cost %" PRId64 " (complexity %d)\n\n",
7187 origcost.cost, origcost.complexity);
7188 fprintf (dump_file, "Final cost %" PRId64 " (complexity %d)\n\n",
7189 cost.cost, cost.complexity);
7192 /* Choose the one with the best cost. */
7193 if (origcost <= cost)
7195 if (set)
7196 iv_ca_free (&set);
7197 set = origset;
7199 else if (origset)
7200 iv_ca_free (&origset);
7202 for (i = 0; i < data->vgroups.length (); i++)
7204 struct iv_group *group = data->vgroups[i];
7205 group->selected = iv_ca_cand_for_group (set, group)->cand;
7208 return set;
7211 /* Creates a new induction variable corresponding to CAND. */
7213 static void
7214 create_new_iv (struct ivopts_data *data, struct iv_cand *cand)
7216 gimple_stmt_iterator incr_pos;
7217 tree base;
7218 struct iv_use *use;
7219 struct iv_group *group;
7220 bool after = false;
7222 gcc_assert (cand->iv != NULL);
7224 switch (cand->pos)
7226 case IP_NORMAL:
7227 incr_pos = gsi_last_bb (ip_normal_pos (data->current_loop));
7228 break;
7230 case IP_END:
7231 incr_pos = gsi_last_bb (ip_end_pos (data->current_loop));
7232 after = true;
7233 break;
7235 case IP_AFTER_USE:
7236 after = true;
7237 /* fall through */
7238 case IP_BEFORE_USE:
7239 incr_pos = gsi_for_stmt (cand->incremented_at);
7240 break;
7242 case IP_ORIGINAL:
7243 /* Mark that the iv is preserved. */
7244 name_info (data, cand->var_before)->preserve_biv = true;
7245 name_info (data, cand->var_after)->preserve_biv = true;
7247 /* Rewrite the increment so that it uses var_before directly. */
7248 use = find_interesting_uses_op (data, cand->var_after);
7249 group = data->vgroups[use->group_id];
7250 group->selected = cand;
7251 return;
7254 gimple_add_tmp_var (cand->var_before);
7256 base = unshare_expr (cand->iv->base);
7258 create_iv (base, unshare_expr (cand->iv->step),
7259 cand->var_before, data->current_loop,
7260 &incr_pos, after, &cand->var_before, &cand->var_after);
7263 /* Creates new induction variables described in SET. */
7265 static void
7266 create_new_ivs (struct ivopts_data *data, class iv_ca *set)
7268 unsigned i;
7269 struct iv_cand *cand;
7270 bitmap_iterator bi;
7272 EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
7274 cand = data->vcands[i];
7275 create_new_iv (data, cand);
7278 if (dump_file && (dump_flags & TDF_DETAILS))
7280 fprintf (dump_file, "Selected IV set for loop %d",
7281 data->current_loop->num);
7282 if (data->loop_loc != UNKNOWN_LOCATION)
7283 fprintf (dump_file, " at %s:%d", LOCATION_FILE (data->loop_loc),
7284 LOCATION_LINE (data->loop_loc));
7285 fprintf (dump_file, ", " HOST_WIDE_INT_PRINT_DEC " avg niters",
7286 avg_loop_niter (data->current_loop));
7287 fprintf (dump_file, ", %lu IVs:\n", bitmap_count_bits (set->cands));
7288 EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
7290 cand = data->vcands[i];
7291 dump_cand (dump_file, cand);
7293 fprintf (dump_file, "\n");
7297 /* Rewrites USE (definition of iv used in a nonlinear expression)
7298 using candidate CAND. */
7300 static void
7301 rewrite_use_nonlinear_expr (struct ivopts_data *data,
7302 struct iv_use *use, struct iv_cand *cand)
7304 gassign *ass;
7305 gimple_stmt_iterator bsi;
7306 tree comp, type = get_use_type (use), tgt;
7308 /* An important special case -- if we are asked to express value of
7309 the original iv by itself, just exit; there is no need to
7310 introduce a new computation (that might also need casting the
7311 variable to unsigned and back). */
7312 if (cand->pos == IP_ORIGINAL
7313 && cand->incremented_at == use->stmt)
7315 tree op = NULL_TREE;
7316 enum tree_code stmt_code;
7318 gcc_assert (is_gimple_assign (use->stmt));
7319 gcc_assert (gimple_assign_lhs (use->stmt) == cand->var_after);
7321 /* Check whether we may leave the computation unchanged.
7322 This is the case only if it does not rely on other
7323 computations in the loop -- otherwise, the computation
7324 we rely upon may be removed in remove_unused_ivs,
7325 thus leading to ICE. */
7326 stmt_code = gimple_assign_rhs_code (use->stmt);
7327 if (stmt_code == PLUS_EXPR
7328 || stmt_code == MINUS_EXPR
7329 || stmt_code == POINTER_PLUS_EXPR)
7331 if (gimple_assign_rhs1 (use->stmt) == cand->var_before)
7332 op = gimple_assign_rhs2 (use->stmt);
7333 else if (gimple_assign_rhs2 (use->stmt) == cand->var_before)
7334 op = gimple_assign_rhs1 (use->stmt);
7337 if (op != NULL_TREE)
7339 if (expr_invariant_in_loop_p (data->current_loop, op))
7340 return;
7341 if (TREE_CODE (op) == SSA_NAME)
7343 struct iv *iv = get_iv (data, op);
7344 if (iv != NULL && integer_zerop (iv->step))
7345 return;
7350 switch (gimple_code (use->stmt))
7352 case GIMPLE_PHI:
7353 tgt = PHI_RESULT (use->stmt);
7355 /* If we should keep the biv, do not replace it. */
7356 if (name_info (data, tgt)->preserve_biv)
7357 return;
7359 bsi = gsi_after_labels (gimple_bb (use->stmt));
7360 break;
7362 case GIMPLE_ASSIGN:
7363 tgt = gimple_assign_lhs (use->stmt);
7364 bsi = gsi_for_stmt (use->stmt);
7365 break;
7367 default:
7368 gcc_unreachable ();
7371 aff_tree aff_inv, aff_var;
7372 if (!get_computation_aff_1 (data->current_loop, use->stmt,
7373 use, cand, &aff_inv, &aff_var))
7374 gcc_unreachable ();
7376 unshare_aff_combination (&aff_inv);
7377 unshare_aff_combination (&aff_var);
7378 /* Prefer the CSE opportunity over the loop invariant by adding the offset
7379 last, so that iv_uses with different offsets can be CSEed. */
7380 poly_widest_int offset = aff_inv.offset;
7381 aff_inv.offset = 0;
7383 gimple_seq stmt_list = NULL, seq = NULL;
7384 tree comp_op1 = aff_combination_to_tree (&aff_inv);
7385 tree comp_op2 = aff_combination_to_tree (&aff_var);
7386 gcc_assert (comp_op1 && comp_op2);
7388 comp_op1 = force_gimple_operand (comp_op1, &seq, true, NULL);
7389 gimple_seq_add_seq (&stmt_list, seq);
7390 comp_op2 = force_gimple_operand (comp_op2, &seq, true, NULL);
7391 gimple_seq_add_seq (&stmt_list, seq);
7393 if (POINTER_TYPE_P (TREE_TYPE (comp_op2)))
7394 std::swap (comp_op1, comp_op2);
7396 if (POINTER_TYPE_P (TREE_TYPE (comp_op1)))
7398 comp = fold_build_pointer_plus (comp_op1,
7399 fold_convert (sizetype, comp_op2));
7400 comp = fold_build_pointer_plus (comp,
7401 wide_int_to_tree (sizetype, offset));
7403 else
7405 comp = fold_build2 (PLUS_EXPR, TREE_TYPE (comp_op1), comp_op1,
7406 fold_convert (TREE_TYPE (comp_op1), comp_op2));
7407 comp = fold_build2 (PLUS_EXPR, TREE_TYPE (comp_op1), comp,
7408 wide_int_to_tree (TREE_TYPE (comp_op1), offset));
7411 comp = fold_convert (type, comp);
7412 comp = force_gimple_operand (comp, &seq, false, NULL);
7413 gimple_seq_add_seq (&stmt_list, seq);
7414 if (gimple_code (use->stmt) != GIMPLE_PHI
7415 /* We can't allow re-allocating the stmt as it might be pointed
7416 to still. */
7417 && (get_gimple_rhs_num_ops (TREE_CODE (comp))
7418 >= gimple_num_ops (gsi_stmt (bsi))))
7420 comp = force_gimple_operand (comp, &seq, true, NULL);
7421 gimple_seq_add_seq (&stmt_list, seq);
7422 if (POINTER_TYPE_P (TREE_TYPE (tgt)))
7424 duplicate_ssa_name_ptr_info (comp, SSA_NAME_PTR_INFO (tgt));
7425 /* As this isn't a plain copy we have to reset alignment
7426 information. */
7427 if (SSA_NAME_PTR_INFO (comp))
7428 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (comp));
7432 gsi_insert_seq_before (&bsi, stmt_list, GSI_SAME_STMT);
7433 if (gimple_code (use->stmt) == GIMPLE_PHI)
7435 ass = gimple_build_assign (tgt, comp);
7436 gsi_insert_before (&bsi, ass, GSI_SAME_STMT);
7438 bsi = gsi_for_stmt (use->stmt);
7439 remove_phi_node (&bsi, false);
7441 else
7443 gimple_assign_set_rhs_from_tree (&bsi, comp);
7444 use->stmt = gsi_stmt (bsi);
7448 /* Performs a peephole optimization to reorder the iv update statement with
7449 a mem ref to enable instruction combining in later phases. The mem ref uses
7450 the iv value before the update, so the reordering transformation requires
7451 adjustment of the offset. CAND is the selected IV_CAND.
7453 Example:
7455 t = MEM_REF (base, iv1, 8, 16); // base, index, stride, offset
7456 iv2 = iv1 + 1;
7458 if (t < val) (1)
7459 goto L;
7460 goto Head;
7463 directly propagating t over to (1) would introduce an overlapping live range
7464 and thus increase register pressure. This peephole transforms it into:
7467 iv2 = iv1 + 1;
7468 t = MEM_REF (base, iv2, 8, 8);
7469 if (t < val)
7470 goto L;
7471 goto Head;
7474 static void
7475 adjust_iv_update_pos (struct iv_cand *cand, struct iv_use *use)
7477 tree var_after;
7478 gimple *iv_update, *stmt;
7479 basic_block bb;
7480 gimple_stmt_iterator gsi, gsi_iv;
7482 if (cand->pos != IP_NORMAL)
7483 return;
7485 var_after = cand->var_after;
7486 iv_update = SSA_NAME_DEF_STMT (var_after);
7488 bb = gimple_bb (iv_update);
7489 gsi = gsi_last_nondebug_bb (bb);
7490 stmt = gsi_stmt (gsi);
7492 /* Only handle conditional statement for now. */
7493 if (gimple_code (stmt) != GIMPLE_COND)
7494 return;
7496 gsi_prev_nondebug (&gsi);
7497 stmt = gsi_stmt (gsi);
7498 if (stmt != iv_update)
7499 return;
7501 gsi_prev_nondebug (&gsi);
7502 if (gsi_end_p (gsi))
7503 return;
7505 stmt = gsi_stmt (gsi);
7506 if (gimple_code (stmt) != GIMPLE_ASSIGN)
7507 return;
7509 if (stmt != use->stmt)
7510 return;
7512 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
7513 return;
7515 if (dump_file && (dump_flags & TDF_DETAILS))
7517 fprintf (dump_file, "Reordering \n");
7518 print_gimple_stmt (dump_file, iv_update, 0);
7519 print_gimple_stmt (dump_file, use->stmt, 0);
7520 fprintf (dump_file, "\n");
7523 gsi = gsi_for_stmt (use->stmt);
7524 gsi_iv = gsi_for_stmt (iv_update);
7525 gsi_move_before (&gsi_iv, &gsi);
7527 cand->pos = IP_BEFORE_USE;
7528 cand->incremented_at = use->stmt;
7531 /* Return the alias pointer type that should be used for a MEM_REF
7532 associated with USE, which has type USE_PTR_ADDRESS. */
7534 static tree
7535 get_alias_ptr_type_for_ptr_address (iv_use *use)
7537 gcall *call = as_a <gcall *> (use->stmt);
7538 switch (gimple_call_internal_fn (call))
7540 case IFN_MASK_LOAD:
7541 case IFN_MASK_STORE:
7542 case IFN_MASK_LOAD_LANES:
7543 case IFN_MASK_STORE_LANES:
7544 case IFN_LEN_LOAD:
7545 case IFN_LEN_STORE:
7546 /* The second argument contains the correct alias type. */
7547 gcc_assert (use->op_p == gimple_call_arg_ptr (call, 0));
7548 return TREE_TYPE (gimple_call_arg (call, 1));
7550 default:
7551 gcc_unreachable ();
7556 /* Rewrites USE (address that is an iv) using candidate CAND. */
7558 static void
7559 rewrite_use_address (struct ivopts_data *data,
7560 struct iv_use *use, struct iv_cand *cand)
7562 aff_tree aff;
7563 bool ok;
7565 adjust_iv_update_pos (cand, use);
7566 ok = get_computation_aff (data->current_loop, use->stmt, use, cand, &aff);
7567 gcc_assert (ok);
7568 unshare_aff_combination (&aff);
7570 /* To avoid undefined overflow problems, all IV candidates use unsigned
7571 integer types. The drawback is that this makes it impossible for
7572 create_mem_ref to distinguish an IV that is based on a memory object
7573 from one that represents simply an offset.
7575 To work around this problem, we pass a hint to create_mem_ref that
7576 indicates which variable (if any) in aff is an IV based on a memory
7577 object. Note that we only consider the candidate. If this is not
7578 based on an object, the base of the reference is in some subexpression
7579 of the use -- but these will use pointer types, so they are recognized
7580 by the create_mem_ref heuristics anyway. */
7581 tree iv = var_at_stmt (data->current_loop, cand, use->stmt);
7582 tree base_hint = (cand->iv->base_object) ? iv : NULL_TREE;
7583 gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
7584 tree type = use->mem_type;
7585 tree alias_ptr_type;
7586 if (use->type == USE_PTR_ADDRESS)
7587 alias_ptr_type = get_alias_ptr_type_for_ptr_address (use);
7588 else
7590 gcc_assert (type == TREE_TYPE (*use->op_p));
7591 unsigned int align = get_object_alignment (*use->op_p);
7592 if (align != TYPE_ALIGN (type))
7593 type = build_aligned_type (type, align);
7594 alias_ptr_type = reference_alias_ptr_type (*use->op_p);
7596 tree ref = create_mem_ref (&bsi, type, &aff, alias_ptr_type,
7597 iv, base_hint, data->speed);
7599 if (use->type == USE_PTR_ADDRESS)
7601 ref = fold_build1 (ADDR_EXPR, build_pointer_type (use->mem_type), ref);
7602 ref = fold_convert (get_use_type (use), ref);
7603 ref = force_gimple_operand_gsi (&bsi, ref, true, NULL_TREE,
7604 true, GSI_SAME_STMT);
7606 else
7607 copy_ref_info (ref, *use->op_p);
7609 *use->op_p = ref;
7612 /* Rewrites USE (the condition such that one of the arguments is an iv) using
7613 candidate CAND. */
7615 static void
7616 rewrite_use_compare (struct ivopts_data *data,
7617 struct iv_use *use, struct iv_cand *cand)
7619 tree comp, op, bound;
7620 gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
7621 enum tree_code compare;
7622 struct iv_group *group = data->vgroups[use->group_id];
7623 class cost_pair *cp = get_group_iv_cost (data, group, cand);
7625 bound = cp->value;
7626 if (bound)
7628 tree var = var_at_stmt (data->current_loop, cand, use->stmt);
7629 tree var_type = TREE_TYPE (var);
7630 gimple_seq stmts;
7632 if (dump_file && (dump_flags & TDF_DETAILS))
7634 fprintf (dump_file, "Replacing exit test: ");
7635 print_gimple_stmt (dump_file, use->stmt, 0, TDF_SLIM);
7637 compare = cp->comp;
7638 bound = unshare_expr (fold_convert (var_type, bound));
7639 op = force_gimple_operand (bound, &stmts, true, NULL_TREE);
7640 if (stmts)
7641 gsi_insert_seq_on_edge_immediate (
7642 loop_preheader_edge (data->current_loop),
7643 stmts);
7645 gcond *cond_stmt = as_a <gcond *> (use->stmt);
7646 gimple_cond_set_lhs (cond_stmt, var);
7647 gimple_cond_set_code (cond_stmt, compare);
7648 gimple_cond_set_rhs (cond_stmt, op);
7649 return;
7652 /* The induction variable elimination failed; just express the original
7653 giv. */
7654 comp = get_computation_at (data->current_loop, use->stmt, use, cand);
7655 gcc_assert (comp != NULL_TREE);
7656 gcc_assert (use->op_p != NULL);
7657 *use->op_p = force_gimple_operand_gsi (&bsi, comp, true,
7658 SSA_NAME_VAR (*use->op_p),
7659 true, GSI_SAME_STMT);
7662 /* Rewrite the groups using the selected induction variables. */
7664 static void
7665 rewrite_groups (struct ivopts_data *data)
7667 unsigned i, j;
7669 for (i = 0; i < data->vgroups.length (); i++)
7671 struct iv_group *group = data->vgroups[i];
7672 struct iv_cand *cand = group->selected;
7674 gcc_assert (cand);
7676 if (group->type == USE_NONLINEAR_EXPR)
7678 for (j = 0; j < group->vuses.length (); j++)
7680 rewrite_use_nonlinear_expr (data, group->vuses[j], cand);
7681 update_stmt (group->vuses[j]->stmt);
7684 else if (address_p (group->type))
7686 for (j = 0; j < group->vuses.length (); j++)
7688 rewrite_use_address (data, group->vuses[j], cand);
7689 update_stmt (group->vuses[j]->stmt);
7692 else
7694 gcc_assert (group->type == USE_COMPARE);
7696 for (j = 0; j < group->vuses.length (); j++)
7698 rewrite_use_compare (data, group->vuses[j], cand);
7699 update_stmt (group->vuses[j]->stmt);
7705 /* Removes the ivs that are not used after rewriting. */
7707 static void
7708 remove_unused_ivs (struct ivopts_data *data, bitmap toremove)
7710 unsigned j;
7711 bitmap_iterator bi;
7713 /* Figure out an order in which to release SSA DEFs so that we don't
7714 release something that we'd have to propagate into a debug stmt
7715 afterwards. */
7716 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
7718 struct version_info *info;
7720 info = ver_info (data, j);
7721 if (info->iv
7722 && !integer_zerop (info->iv->step)
7723 && !info->inv_id
7724 && !info->iv->nonlin_use
7725 && !info->preserve_biv)
7727 bitmap_set_bit (toremove, SSA_NAME_VERSION (info->iv->ssa_name));
7729 tree def = info->iv->ssa_name;
7731 if (MAY_HAVE_DEBUG_BIND_STMTS && SSA_NAME_DEF_STMT (def))
7733 imm_use_iterator imm_iter;
7734 use_operand_p use_p;
7735 gimple *stmt;
7736 int count = 0;
7738 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, def)
7740 if (!gimple_debug_bind_p (stmt))
7741 continue;
7743 /* We just want to determine whether to do nothing
7744 (count == 0), to substitute the computed
7745 expression into a single use of the SSA DEF by
7746 itself (count == 1), or to use a debug temp
7747 because the SSA DEF is used multiple times or as
7748 part of a larger expression (count > 1). */
7749 count++;
7750 if (gimple_debug_bind_get_value (stmt) != def)
7751 count++;
7753 if (count > 1)
7754 break;
7757 if (!count)
7758 continue;
7760 struct iv_use dummy_use;
7761 struct iv_cand *best_cand = NULL, *cand;
7762 unsigned i, best_pref = 0, cand_pref;
7763 tree comp = NULL_TREE;
7765 memset (&dummy_use, 0, sizeof (dummy_use));
7766 dummy_use.iv = info->iv;
7767 for (i = 0; i < data->vgroups.length () && i < 64; i++)
7769 cand = data->vgroups[i]->selected;
7770 if (cand == best_cand)
7771 continue;
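/* Rank this candidate for reconstructing the removed IV in debug binds:
   a matching step weighs 4, a matching mode 2 and a constant base 1, so a
   step match always dominates the other two criteria.  */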
7772 cand_pref = operand_equal_p (cand->iv->step,
7773 info->iv->step, 0)
7774 ? 4 : 0;
7775 cand_pref
7776 += TYPE_MODE (TREE_TYPE (cand->iv->base))
7777 == TYPE_MODE (TREE_TYPE (info->iv->base))
7778 ? 2 : 0;
7779 cand_pref
7780 += TREE_CODE (cand->iv->base) == INTEGER_CST
7781 ? 1 : 0;
7782 if (best_cand == NULL || best_pref < cand_pref)
7784 tree this_comp
7785 = get_debug_computation_at (data->current_loop,
7786 SSA_NAME_DEF_STMT (def),
7787 &dummy_use, cand);
7788 if (this_comp)
7790 best_cand = cand;
7791 best_pref = cand_pref;
7792 comp = this_comp;
7797 if (!best_cand)
7798 continue;
7800 comp = unshare_expr (comp);
7801 if (count > 1)
7803 tree vexpr = build_debug_expr_decl (TREE_TYPE (comp));
7804 /* FIXME: Is setting the mode really necessary? */
7805 if (SSA_NAME_VAR (def))
7806 SET_DECL_MODE (vexpr, DECL_MODE (SSA_NAME_VAR (def)));
7807 else
7808 SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (vexpr)));
7809 gdebug *def_temp
7810 = gimple_build_debug_bind (vexpr, comp, NULL);
7811 gimple_stmt_iterator gsi;
7813 if (gimple_code (SSA_NAME_DEF_STMT (def)) == GIMPLE_PHI)
7814 gsi = gsi_after_labels (gimple_bb
7815 (SSA_NAME_DEF_STMT (def)));
7816 else
7817 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (def));
7819 gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
7820 comp = vexpr;
7823 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, def)
7825 if (!gimple_debug_bind_p (stmt))
7826 continue;
7828 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
7829 SET_USE (use_p, comp);
7831 update_stmt (stmt);
7838 /* Frees memory occupied by class tree_niter_desc in *VALUE. Callback
7839 for hash_map::traverse. */
7841 bool
7842 free_tree_niter_desc (edge const &, tree_niter_desc *const &value, void *)
7844 free (value);
7845 return true;
7848 /* Frees data allocated by the optimization of a single loop. */
7850 static void
7851 free_loop_data (struct ivopts_data *data)
7853 unsigned i, j;
7854 bitmap_iterator bi;
7855 tree obj;
7857 if (data->niters)
7859 data->niters->traverse<void *, free_tree_niter_desc> (NULL);
7860 delete data->niters;
7861 data->niters = NULL;
7864 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
7866 struct version_info *info;
7868 info = ver_info (data, i);
7869 info->iv = NULL;
7870 info->has_nonlin_use = false;
7871 info->preserve_biv = false;
7872 info->inv_id = 0;
7874 bitmap_clear (data->relevant);
7875 bitmap_clear (data->important_candidates);
7877 for (i = 0; i < data->vgroups.length (); i++)
7879 struct iv_group *group = data->vgroups[i];
7881 for (j = 0; j < group->vuses.length (); j++)
7882 free (group->vuses[j]);
7883 group->vuses.release ();
7885 BITMAP_FREE (group->related_cands);
7886 for (j = 0; j < group->n_map_members; j++)
7888 if (group->cost_map[j].inv_vars)
7889 BITMAP_FREE (group->cost_map[j].inv_vars);
7890 if (group->cost_map[j].inv_exprs)
7891 BITMAP_FREE (group->cost_map[j].inv_exprs);
7894 free (group->cost_map);
7895 free (group);
7897 data->vgroups.truncate (0);
7899 for (i = 0; i < data->vcands.length (); i++)
7901 struct iv_cand *cand = data->vcands[i];
7903 if (cand->inv_vars)
7904 BITMAP_FREE (cand->inv_vars);
7905 if (cand->inv_exprs)
7906 BITMAP_FREE (cand->inv_exprs);
7907 free (cand);
7909 data->vcands.truncate (0);
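/* If new SSA names were created while optimizing this loop, grow the
   version_info array; doubling keeps reallocations infrequent.  */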
7911 if (data->version_info_size < num_ssa_names)
7913 data->version_info_size = 2 * num_ssa_names;
7914 free (data->version_info);
7915 data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
7918 data->max_inv_var_id = 0;
7919 data->max_inv_expr_id = 0;
7921 FOR_EACH_VEC_ELT (decl_rtl_to_reset, i, obj)
7922 SET_DECL_RTL (obj, NULL_RTX);
7924 decl_rtl_to_reset.truncate (0);
7926 data->inv_expr_tab->empty ();
7928 data->iv_common_cand_tab->empty ();
7929 data->iv_common_cands.truncate (0);
/* Finalizes data structures used by the iv optimization pass.  */

static void
tree_ssa_iv_optimize_finalize (struct ivopts_data *data)
{
  free_loop_data (data);
  free (data->version_info);
  BITMAP_FREE (data->relevant);
  BITMAP_FREE (data->important_candidates);

  decl_rtl_to_reset.release ();
  data->vgroups.release ();
  data->vcands.release ();
  delete data->inv_expr_tab;
  data->inv_expr_tab = NULL;
  free_affine_expand_cache (&data->name_expansion_cache);
  if (data->base_object_map)
    delete data->base_object_map;
  delete data->iv_common_cand_tab;
  data->iv_common_cand_tab = NULL;
  data->iv_common_cands.release ();
  obstack_free (&data->iv_obstack, NULL);
}

/* Returns true if the loop body BODY includes any function calls.  */

static bool
loop_body_includes_call (basic_block *body, unsigned num_nodes)
{
  gimple_stmt_iterator gsi;
  unsigned i;

  for (i = 0; i < num_nodes; i++)
    for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple *stmt = gsi_stmt (gsi);
        if (is_gimple_call (stmt)
            && !gimple_call_internal_p (stmt)
            && !is_inexpensive_builtin (gimple_call_fndecl (stmt)))
          return true;
      }
  return false;
}

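/* The result of loop_body_includes_call is cached in
   data->body_includes_call by tree_ssa_iv_optimize_loop below, right after
   the loop body is collected with get_loop_body.  */
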
/* Determine cost scaling factor for basic blocks in loop.  */
#define COST_SCALING_FACTOR_BOUND (20)

static void
determine_scaling_factor (struct ivopts_data *data, basic_block *body)
{
  int lfreq = data->current_loop->header->count.to_frequency (cfun);
  if (!data->speed || lfreq <= 0)
    return;

  int max_freq = lfreq;
  for (unsigned i = 0; i < data->current_loop->num_nodes; i++)
    {
      body[i]->aux = (void *) (intptr_t) 1;
      if (max_freq < body[i]->count.to_frequency (cfun))
        max_freq = body[i]->count.to_frequency (cfun);
    }
  if (max_freq > lfreq)
    {
      int divisor, factor;
      /* Check if the scaling factor itself needs to be scaled by the bound.
         This is to avoid overflow when scaling costs according to profile
         info.  */
      if (max_freq / lfreq > COST_SCALING_FACTOR_BOUND)
        {
          divisor = max_freq;
          factor = COST_SCALING_FACTOR_BOUND;
        }
      else
        {
          divisor = lfreq;
          factor = 1;
        }
      for (unsigned i = 0; i < data->current_loop->num_nodes; i++)
        {
          int bfreq = body[i]->count.to_frequency (cfun);
          if (bfreq <= lfreq)
            continue;

          body[i]->aux = (void *) (intptr_t) (factor * bfreq / divisor);
        }
    }
}

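/* Worked example (illustrative numbers, not part of the original sources):
   suppose the loop header has frequency lfreq = 100 and the hottest block in
   the body has max_freq = 5000.  Then max_freq / lfreq = 50 exceeds
   COST_SCALING_FACTOR_BOUND (20), so divisor = 5000 and factor = 20; a block
   with bfreq = 5000 gets aux = 20 * 5000 / 5000 = 20, and one with
   bfreq = 1000 gets aux = 20 * 1000 / 5000 = 4.  If instead max_freq were
   400, the ratio 4 stays within the bound, so divisor = lfreq = 100 and
   factor = 1, and the hottest block gets aux = 1 * 400 / 100 = 4.  Blocks no
   hotter than the header keep the aux value of 1 set in the first loop.  */
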
/* Find the doloop comparison use and set its doloop_p flag if found.  */

static bool
find_doloop_use (struct ivopts_data *data)
{
  struct loop *loop = data->current_loop;

  for (unsigned i = 0; i < data->vgroups.length (); i++)
    {
      struct iv_group *group = data->vgroups[i];
      if (group->type == USE_COMPARE)
        {
          gcc_assert (group->vuses.length () == 1);
          struct iv_use *use = group->vuses[0];
          gimple *stmt = use->stmt;
          if (gimple_code (stmt) == GIMPLE_COND)
            {
              basic_block bb = gimple_bb (stmt);
              edge true_edge, false_edge;
              extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
              /* This comparison is used for the loop latch.  Require that the
                 latch is empty for now.  */
              if ((loop->latch == true_edge->dest
                   || loop->latch == false_edge->dest)
                  && empty_block_p (loop->latch))
                {
                  group->doloop_p = true;
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    {
                      fprintf (dump_file, "Doloop cmp iv use: ");
                      print_gimple_stmt (dump_file, stmt, TDF_DETAILS);
                    }
                  return true;
                }
            }
        }
    }

  return false;
}

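/* A sketch of the shape this function looks for (an assumed example, not
   taken from the sources): in a counted loop such as

     for (i = 0; i < n; i++)
       a[i] = b[i];

   the exit test is a GIMPLE_COND whose true or false edge leads directly to
   an empty latch block while the other edge leaves the loop.  That compare
   is the single use in its USE_COMPARE group, and it is the one marked with
   doloop_p above.  */
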
/* For targets that support doloop, predict whether the later RTL doloop
   transformation will be applied to this loop; if so, detect the doloop
   use and mark the flag doloop_use_p.  */

void
analyze_and_mark_doloop_use (struct ivopts_data *data)
{
  data->doloop_use_p = false;

  if (!flag_branch_on_count_reg)
    return;

  if (data->current_loop->unroll == USHRT_MAX)
    return;

  if (!generic_predict_doloop_p (data))
    return;

  if (find_doloop_use (data))
    {
      data->doloop_use_p = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          struct loop *loop = data->current_loop;
          fprintf (dump_file,
                   "Predict loop %d can perform"
                   " doloop optimization later.\n",
                   loop->num);
          flow_loop_dump (loop, dump_file, NULL, 1);
        }
    }
}

/* Optimizes the LOOP.  Returns true if anything changed.  */

static bool
tree_ssa_iv_optimize_loop (struct ivopts_data *data, class loop *loop,
                           bitmap toremove)
{
  bool changed = false;
  class iv_ca *iv_ca;
  edge exit = single_dom_exit (loop);
  basic_block *body;

  gcc_assert (!data->niters);
  data->current_loop = loop;
  data->loop_loc = find_loop_location (loop).get_location_t ();
  data->speed = optimize_loop_for_speed_p (loop);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Processing loop %d", loop->num);
      if (data->loop_loc != UNKNOWN_LOCATION)
        fprintf (dump_file, " at %s:%d", LOCATION_FILE (data->loop_loc),
                 LOCATION_LINE (data->loop_loc));
      fprintf (dump_file, "\n");

      if (exit)
        {
          fprintf (dump_file, "  single exit %d -> %d, exit condition ",
                   exit->src->index, exit->dest->index);
          print_gimple_stmt (dump_file, last_stmt (exit->src), 0, TDF_SLIM);
          fprintf (dump_file, "\n");
        }

      fprintf (dump_file, "\n");
    }

  body = get_loop_body (loop);
  data->body_includes_call = loop_body_includes_call (body, loop->num_nodes);
  renumber_gimple_stmt_uids_in_blocks (body, loop->num_nodes);

  data->loop_single_exit_p
    = exit != NULL && loop_only_exit_p (loop, body, exit);

  /* For each ssa name determines whether it behaves as an induction variable
     in some loop.  */
  if (!find_induction_variables (data, body))
    goto finish;

  /* Finds interesting uses (item 1).  */
  find_interesting_uses (data, body);
  if (data->vgroups.length () > MAX_CONSIDERED_GROUPS)
    goto finish;

  /* Determine cost scaling factor for basic blocks in loop.  */
  determine_scaling_factor (data, body);

  /* Analyze doloop possibility and mark the doloop use if predicted.  */
  analyze_and_mark_doloop_use (data);

  /* Finds candidates for the induction variables (item 2).  */
  find_iv_candidates (data);

  /* Calculates the costs (item 3, part 1).  */
  determine_iv_costs (data);
  determine_group_iv_costs (data);
  determine_set_costs (data);

  /* Find the optimal set of induction variables (item 3, part 2).  */
  iv_ca = find_optimal_iv_set (data);
  /* Cleanup basic block aux field.  */
  for (unsigned i = 0; i < data->current_loop->num_nodes; i++)
    body[i]->aux = NULL;
  if (!iv_ca)
    goto finish;
  changed = true;

  /* Create the new induction variables (item 4, part 1).  */
  create_new_ivs (data, iv_ca);
  iv_ca_free (&iv_ca);

  /* Rewrite the uses (item 4, part 2).  */
  rewrite_groups (data);

  /* Remove the ivs that are unused after rewriting.  */
  remove_unused_ivs (data, toremove);

finish:
  free (body);
  free_loop_data (data);

  return changed;
}

/* Main entry point.  Optimizes induction variables in loops.  */

void
tree_ssa_iv_optimize (void)
{
  struct ivopts_data data;
  auto_bitmap toremove;

  tree_ssa_iv_optimize_init (&data);

  /* Optimize the loops starting with the innermost ones.  */
  for (auto loop : loops_list (cfun, LI_FROM_INNERMOST))
    {
      if (!dbg_cnt (ivopts_loop))
        continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
        flow_loop_dump (loop, dump_file, NULL, 1);

      tree_ssa_iv_optimize_loop (&data, loop, toremove);
    }

  /* Remove eliminated IV defs.  */
  release_defs_bitset (toremove);

  /* We have changed the structure of induction variables; it might happen
     that definitions in the scev database refer to some of them that were
     eliminated.  */
  scev_reset_htab ();
  /* Likewise niter and control-IV information.  */
  free_numbers_of_iterations_estimates (cfun);

  tree_ssa_iv_optimize_finalize (&data);
}

#include "gt-tree-ssa-loop-ivopts.h"