/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#define INCLUDE_MEMORY
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "case-cfn-macros.h"
#include "tree-eh.h"
#include "gimple-fold.h"
#include "internal-fn.h"
#include "gimple-range.h"
#include "gimple-match.h"
#include "dbgcnt.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-dce.h"
#include "calls.h"
/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
				       gimple_phi_arg_def (p, e1->dest_idx)))
	continue;

      /* Punt on virtual phis with different arguments from the edges.  */
      if (virtual_operand_p (gimple_phi_result (p)))
	return NULL;

      /* If we already have a PHI whose two edge arguments differ, then
	 there is no singleton for these PHIs; return NULL.  */
      if (phi)
	return NULL;

      phi = p;
    }
  return phi;
}
/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
				edge e, gphi *phi, tree new_tree,
				bitmap dce_ssa_names = nullptr)
{
  basic_block bb = gimple_bb (phi);
  gimple_stmt_iterator gsi;
  tree phi_result = PHI_RESULT (phi);
  bool deleteboth = false;

  /* Duplicate range info if it is the only thing setting the target PHI.
     This is needed as later on, the new_tree will be replacing
     the assignment of the PHI.
     For an example:
     bb1:
     _4 = min<a_1, 255>
     goto bb2
     bb2:
     # RANGE [-INF, 255]
     a_3 = PHI<_4(1)>
     bb3:
     use(a_3)
     And _4 gets propagated into the use of a_3 and the range info is lost.
     This can't be done for more than 2 incoming edges as the propagation
     won't happen.
     The new_tree needs to be defined in the same basic block as the conditional.  */
  if (TREE_CODE (new_tree) == SSA_NAME
      && EDGE_COUNT (gimple_bb (phi)->preds) == 2
      && INTEGRAL_TYPE_P (TREE_TYPE (phi_result))
      && !SSA_NAME_RANGE_INFO (new_tree)
      && SSA_NAME_RANGE_INFO (phi_result)
      && gimple_bb (SSA_NAME_DEF_STMT (new_tree)) == cond_block
      && dbg_cnt (phiopt_edge_range))
    duplicate_ssa_name_range_info (new_tree, phi_result);

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  edge edge_to_remove = NULL, keep_edge = NULL;
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      edge_to_remove = EDGE_SUCC (cond_block, 1);
      keep_edge = EDGE_SUCC (cond_block, 0);
    }
  else if (EDGE_SUCC (cond_block, 1)->dest == bb)
    {
      edge_to_remove = EDGE_SUCC (cond_block, 0);
      keep_edge = EDGE_SUCC (cond_block, 1);
    }
  else if ((keep_edge = find_edge (cond_block, e->src)))
    {
      basic_block bb1 = EDGE_SUCC (cond_block, 0)->dest;
      basic_block bb2 = EDGE_SUCC (cond_block, 1)->dest;
      if (single_pred_p (bb1) && single_pred_p (bb2)
	  && single_succ_p (bb1) && single_succ_p (bb2)
	  && empty_block_p (bb1) && empty_block_p (bb2))
	deleteboth = true;
    }
  else
    gcc_unreachable ();

  if (edge_to_remove && EDGE_COUNT (edge_to_remove->dest->preds) == 1)
    {
      e->flags |= EDGE_FALLTHRU;
      e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      e->probability = profile_probability::always ();
      delete_basic_block (edge_to_remove->dest);

      /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
      gsi = gsi_last_bb (cond_block);
      gsi_remove (&gsi, true);
    }
  else if (deleteboth)
    {
      basic_block bb1 = EDGE_SUCC (cond_block, 0)->dest;
      basic_block bb2 = EDGE_SUCC (cond_block, 1)->dest;

      edge newedge = redirect_edge_and_branch (keep_edge, bb);

      /* The new edge should be the same.  */
      gcc_assert (newedge == keep_edge);

      keep_edge->flags |= EDGE_FALLTHRU;
      keep_edge->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      keep_edge->probability = profile_probability::always ();

      /* Copy the edge's phi entry from the old one.  */
      copy_phi_arg_into_existing_phi (e, keep_edge);

      /* Delete the old 2 empty basic blocks.  */
      delete_basic_block (bb1);
      delete_basic_block (bb2);

      /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
      gsi = gsi_last_bb (cond_block);
      gsi_remove (&gsi, true);
    }
  else
    {
      /* If there are other edges into the middle block make
	 CFG cleanup deal with the edge removal to avoid
	 updating dominators here in a non-trivial way.  */
      gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_block));
      if (keep_edge->flags & EDGE_FALSE_VALUE)
	gimple_cond_make_false (cond);
      else if (keep_edge->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_true (cond);
    }

  if (dce_ssa_names)
    simple_dce_from_worklist (dce_ssa_names);

  statistics_counter_event (cfun, "Replace PHI with variable", 1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
	     cond_block->index,
	     bb->index);
}
/* PR66726: Factor operations out of COND_EXPR.  If the arguments of the PHI
   stmt are the same unary operation, factor the operation out and perform
   it on the result of the PHI stmt.  COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */
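/* For illustration (hand-written GIMPLE, not from a real dump): given
       # x_3 = PHI <t1_4(bb1), t2_5(bb2)>
   where t1_4 = (int) a_6 and t2_5 = (int) b_7, the cast is factored out as
       # newtmp_8 = PHI <a_6(bb1), b_7(bb2)>
       x_3 = (int) newtmp_8;
   so the operation is applied once to the merged value rather than once
   per arm.  */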
static gphi *
factor_out_conditional_operation (edge e0, edge e1, gphi *phi,
				  tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  location_t locus = gimple_location (phi);
  gimple_match_op arg0_op, arg1_op;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statement have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
	  && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     an unary operation.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!gimple_extract_op (arg0_def_stmt, &arg0_op))
    return NULL;

  /* Check to make sure none of the operands are in abnormal phis.  */
  if (arg0_op.operands_occurs_in_abnormal_phi ())
    return NULL;

  /* Currently just support one operand expressions.  */
  if (arg0_op.num_ops != 1)
    return NULL;

  tree new_arg0 = arg0_op.ops[0];
  tree new_arg1;

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!gimple_extract_op (arg1_def_stmt, &arg1_op))
	return NULL;
      if (arg1_op.code != arg0_op.code)
	return NULL;
      if (arg1_op.num_ops != arg0_op.num_ops)
	return NULL;
      if (arg1_op.operands_occurs_in_abnormal_phi ())
	return NULL;

      /* Either arg1_def_stmt or arg0_def_stmt should be conditional.  */
      if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
			  gimple_bb (arg0_def_stmt))
	  && dominated_by_p (CDI_DOMINATORS,
			     gimple_bb (phi), gimple_bb (arg1_def_stmt)))
	return NULL;
      new_arg1 = arg1_op.ops[0];
    }
  else
    {
      /* TODO: handle more than just casts here.  */
      if (!gimple_assign_cast_p (arg0_def_stmt))
	return NULL;

      /* arg0_def_stmt should be conditional.  */
      if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
			  gimple_bb (arg0_def_stmt)))
	return NULL;

      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
	  && (int_fits_type_p (arg1, TREE_TYPE (new_arg0))
	      || (TYPE_PRECISION (TREE_TYPE (new_arg0))
		  == TYPE_PRECISION (TREE_TYPE (arg1)))))
	{
	  if (gimple_assign_cast_p (arg0_def_stmt))
	    {
	      /* For the INTEGER_CST case, we are just moving the
		 conversion from one place to another, which can often
		 hurt as the conversion moves further away from the
		 statement that computes the value.  So, perform this
		 only if new_arg0 is an operand of COND_STMT, or
		 if arg0_def_stmt is the only non-debug stmt in
		 its basic block, because then it is possible this
		 could enable further optimizations (minmax replacement
		 etc.).  See PR71016.
		 Note no-op conversions don't have this issue as
		 it will not generate any zero/sign extend in that case.  */
	      if ((TYPE_PRECISION (TREE_TYPE (new_arg0))
		   != TYPE_PRECISION (TREE_TYPE (arg1)))
		  && new_arg0 != gimple_cond_lhs (cond_stmt)
		  && new_arg0 != gimple_cond_rhs (cond_stmt)
		  && gimple_bb (arg0_def_stmt) == e0->src)
		{
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_prev_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    {
		      gimple *stmt = gsi_stmt (gsi);
		      /* Ignore nops, predicates and labels.  */
		      if (gimple_code (stmt) == GIMPLE_NOP
			  || gimple_code (stmt) == GIMPLE_PREDICT
			  || gimple_code (stmt) == GIMPLE_LABEL)
			;
		      else if (gassign *assign = dyn_cast <gassign *> (stmt))
			{
			  tree lhs = gimple_assign_lhs (assign);
			  enum tree_code ass_code
			    = gimple_assign_rhs_code (assign);
			  if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
			    return NULL;
			  if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
			    return NULL;
			  gsi_prev_nondebug (&gsi);
			  if (!gsi_end_p (gsi))
			    return NULL;
			}
		      else
			return NULL;
		    }
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_next_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    return NULL;
		}
	      new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);

	      /* Drop the overflow that fold_convert might add.  */
	      if (TREE_OVERFLOW (new_arg1))
		new_arg1 = drop_tree_overflow (new_arg1);
	    }
	  else
	    return NULL;
	}
      else
	return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If types of new_arg0 and new_arg1 are different bailout.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);

  gimple_match_op new_op = arg0_op;

  /* Create the operation stmt if possible and insert it.  */
  new_op.ops[0] = temp;
  gimple_seq seq = NULL;
  result = maybe_push_res_to_seq (&new_op, &seq, result);

  /* If we can't create the new statement, release the temp name
     and return back.  */
  if (!result)
    {
      release_ssa_name (temp);
      return NULL;
    }

  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);

  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
	       " changed to factor operation out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with OPERATION that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old operation(s) that has single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);

  statistics_counter_event (cfun, "factored out operation", 1);

  return newphi;
}
/* Return TRUE if SEQ/OP pair should be allowed during early phiopt.
   Currently this is to allow MIN/MAX and ABS/NEGATE and constants.  */
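/* As a sketch of what this accepts (illustrative, not exhaustive): an empty
   SEQ with OP being MIN_EXPR <a_1, b_2>, NEGATE_EXPR <a_1> or a constant is
   allowed; a singleton SEQ such as
       _3 = MIN_EXPR <a_1, b_2>;
   combined with OP being MIN_EXPR <_3, c_4> is also allowed; anything
   longer or using other codes is rejected in the early pass.  */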
static bool
phiopt_early_allow (gimple_seq &seq, gimple_match_op &op)
{
  /* Don't allow functions.  */
  if (!op.code.is_tree_code ())
    return false;
  tree_code code = (tree_code) op.code;

  /* For a non-empty sequence, only allow one statement, except for
     MIN/MAX where at most 2 statements are allowed, each being
     MIN/MAX.  */
  if (!gimple_seq_empty_p (seq))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  if (!gimple_seq_singleton_p (seq))
	    return false;

	  gimple *stmt = gimple_seq_first_stmt (seq);
	  /* Only allow assignments.  */
	  if (!is_gimple_assign (stmt))
	    return false;
	  code = gimple_assign_rhs_code (stmt);
	  return code == MIN_EXPR || code == MAX_EXPR;
	}
      /* Check to make sure op was already a SSA_NAME.  */
      if (code != SSA_NAME)
	return false;
      if (!gimple_seq_singleton_p (seq))
	return false;
      gimple *stmt = gimple_seq_first_stmt (seq);
      /* Only allow assignments.  */
      if (!is_gimple_assign (stmt))
	return false;
      if (gimple_assign_lhs (stmt) != op.ops[0])
	return false;
      code = gimple_assign_rhs_code (stmt);
    }

  switch (code)
    {
    case MIN_EXPR:
    case MAX_EXPR:
    case ABS_EXPR:
    case ABSU_EXPR:
    case NEGATE_EXPR:
    case SSA_NAME:
      return true;
    case INTEGER_CST:
    case REAL_CST:
    case VECTOR_CST:
    case FIXED_CST:
      return true;
    default:
      return false;
    }
}
/* gimple_simplify_phiopt is like gimple_simplify but designed for PHIOPT.
   Return NULL if nothing can be simplified, otherwise return the simplified
   value, with its supporting statements pushed onto SEQ.  Also reject tree
   codes that are not allowed if EARLY_P is set.
   Takes the comparison from COMP_STMT and two args, ARG0 and ARG1 and tries
   to simplify CMP ? ARG0 : ARG1.
   Also try to simplify (!CMP) ? ARG1 : ARG0 if the non-inverse failed.  */
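/* For example (illustrative): given the condition a_1 < b_2 and the
   arguments a_1 and b_2, the built COND_EXPR
       a_1 < b_2 ? a_1 : b_2
   is expected to simplify to MIN_EXPR <a_1, b_2>, which is returned with
   any supporting statements pushed onto SEQ.  */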
static tree
gimple_simplify_phiopt (bool early_p, tree type, gimple *comp_stmt,
			tree arg0, tree arg1,
			gimple_seq *seq)
{
  gimple_seq seq1 = NULL;
  enum tree_code comp_code = gimple_cond_code (comp_stmt);
  location_t loc = gimple_location (comp_stmt);
  tree cmp0 = gimple_cond_lhs (comp_stmt);
  tree cmp1 = gimple_cond_rhs (comp_stmt);
  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.
     Don't use fold_build2 here as that might create (bool)a instead of just
     "a != 0".  */
  tree cond = build2_loc (loc, comp_code, boolean_type_node,
			  cmp0, cmp1);

  if (dump_file && (dump_flags & TDF_FOLDING))
    {
      fprintf (dump_file, "\nphiopt match-simplify trying:\n\t");
      print_generic_expr (dump_file, cond);
      fprintf (dump_file, " ? ");
      print_generic_expr (dump_file, arg0);
      fprintf (dump_file, " : ");
      print_generic_expr (dump_file, arg1);
      fprintf (dump_file, "\n");
    }

  gimple_match_op op (gimple_match_cond::UNCOND,
		      COND_EXPR, type, cond, arg0, arg1);

  if (op.resimplify (&seq1, follow_all_ssa_edges))
    {
      bool allowed = !early_p || phiopt_early_allow (seq1, op);
      tree result = maybe_push_res_to_seq (&op, &seq1);
      if (dump_file && (dump_flags & TDF_FOLDING))
	{
	  fprintf (dump_file, "\nphiopt match-simplify back:\n");
	  if (seq1)
	    print_gimple_seq (dump_file, seq1, 0, TDF_VOPS|TDF_MEMSYMS);
	  fprintf (dump_file, "result: ");
	  if (result)
	    print_generic_expr (dump_file, result);
	  else
	    fprintf (dump_file, " (none)");
	  fprintf (dump_file, "\n");
	  if (!allowed)
	    fprintf (dump_file, "rejected because early\n");
	}
      /* Early we want only to allow some generated tree codes.  */
      if (allowed && result)
	{
	  if (loc != UNKNOWN_LOCATION)
	    annotate_all_with_location (seq1, loc);
	  gimple_seq_add_seq_without_update (seq, seq1);
	  return result;
	}
    }
  gimple_seq_discard (seq1);
  seq1 = NULL;

  /* Try the inverted comparison, that is !COMP ? ARG1 : ARG0.  */
  comp_code = invert_tree_comparison (comp_code, HONOR_NANS (cmp0));

  if (comp_code == ERROR_MARK)
    return NULL;

  cond = build2_loc (loc,
		     comp_code, boolean_type_node,
		     cmp0, cmp1);

  if (dump_file && (dump_flags & TDF_FOLDING))
    {
      fprintf (dump_file, "\nphiopt match-simplify trying:\n\t");
      print_generic_expr (dump_file, cond);
      fprintf (dump_file, " ? ");
      print_generic_expr (dump_file, arg1);
      fprintf (dump_file, " : ");
      print_generic_expr (dump_file, arg0);
      fprintf (dump_file, "\n");
    }

  gimple_match_op op1 (gimple_match_cond::UNCOND,
		       COND_EXPR, type, cond, arg1, arg0);

  if (op1.resimplify (&seq1, follow_all_ssa_edges))
    {
      bool allowed = !early_p || phiopt_early_allow (seq1, op1);
      tree result = maybe_push_res_to_seq (&op1, &seq1);
      if (dump_file && (dump_flags & TDF_FOLDING))
	{
	  fprintf (dump_file, "\nphiopt match-simplify back:\n");
	  if (seq1)
	    print_gimple_seq (dump_file, seq1, 0, TDF_VOPS|TDF_MEMSYMS);
	  fprintf (dump_file, "result: ");
	  if (result)
	    print_generic_expr (dump_file, result);
	  else
	    fprintf (dump_file, " (none)");
	  fprintf (dump_file, "\n");
	  if (!allowed)
	    fprintf (dump_file, "rejected because early\n");
	}
      /* Early we want only to allow some generated tree codes.  */
      if (allowed && result)
	{
	  if (loc != UNKNOWN_LOCATION)
	    annotate_all_with_location (seq1, loc);
	  gimple_seq_add_seq_without_update (seq, seq1);
	  return result;
	}
    }
  gimple_seq_discard (seq1);

  return NULL;
}
/* empty_bb_or_one_feeding_into_p returns true if BB was an empty basic
   block or if it has one cheap preparation statement that feeds into the
   PHI statement, in which case STMT is set to that statement.  */
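/* E.g. (illustrative) a middle bb containing only
       _5 = -a_2;
   where _5 is used solely by the PHI qualifies, and STMT is set to that
   negation so the caller can move it out of the conditional.  */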
static bool
empty_bb_or_one_feeding_into_p (basic_block bb,
				gimple *phi,
				gimple *&stmt)
{
  stmt = nullptr;
  gimple *stmt_to_move = nullptr;
  tree lhs;

  if (empty_block_p (bb))
    return true;

  if (!single_pred_p (bb))
    return false;

  /* The middle bb cannot have phi nodes as we don't
     move those assignments yet.  */
  if (!gimple_seq_empty_p (phi_nodes (bb)))
    return false;

  gimple_stmt_iterator gsi;

  gsi = gsi_start_nondebug_after_labels_bb (bb);
  while (!gsi_end_p (gsi))
    {
      gimple *s = gsi_stmt (gsi);
      gsi_next_nondebug (&gsi);
      /* Skip over Predict and nop statements.  */
      if (gimple_code (s) == GIMPLE_PREDICT
	  || gimple_code (s) == GIMPLE_NOP)
	continue;
      /* If there is more than one statement return false.  */
      if (stmt_to_move)
	return false;
      stmt_to_move = s;
    }

  /* The only statement here was a Predict or a nop statement
     so return true.  */
  if (!stmt_to_move)
    return true;

  if (gimple_vuse (stmt_to_move))
    return false;

  if (gimple_could_trap_p (stmt_to_move)
      || gimple_has_side_effects (stmt_to_move))
    return false;

  ssa_op_iter it;
  tree use;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt_to_move, it, SSA_OP_USE)
    if (ssa_name_maybe_undef_p (use))
      return false;

  /* Allow assignments and some builtin/internal calls.
     As const calls don't match any of the above, yet they could
     still have some side-effects - they could contain
     gimple_could_trap_p statements, like floating point
     exceptions or integer division by zero.  See PR70586.
     FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
     should handle this.
     Allow some known builtin/internal calls that are known not to
     trap: logical functions (e.g. bswap and bit counting).  */
  if (!is_gimple_assign (stmt_to_move))
    {
      if (!is_gimple_call (stmt_to_move))
	return false;
      combined_fn cfn = gimple_call_combined_fn (stmt_to_move);
      switch (cfn)
	{
	default:
	  return false;
	case CFN_BUILT_IN_BSWAP16:
	case CFN_BUILT_IN_BSWAP32:
	case CFN_BUILT_IN_BSWAP64:
	case CFN_BUILT_IN_BSWAP128:
	CASE_CFN_FFS:
	CASE_CFN_PARITY:
	CASE_CFN_POPCOUNT:
	CASE_CFN_CLZ:
	CASE_CFN_CTZ:
	case CFN_BUILT_IN_CLRSB:
	case CFN_BUILT_IN_CLRSBL:
	case CFN_BUILT_IN_CLRSBLL:
	  lhs = gimple_call_lhs (stmt_to_move);
	  break;
	}
    }
  else
    lhs = gimple_assign_lhs (stmt_to_move);

  gimple *use_stmt;
  use_operand_p use_p;

  /* Allow only a statement which feeds into the other stmt.  */
  if (!lhs || TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use_p, &use_stmt)
      || use_stmt != phi)
    return false;

  stmt = stmt_to_move;
  return true;
}
/* Move STMT to before GSI and insert its defining
   name into INSERTED_EXPRS bitmap.  */
static void
move_stmt (gimple *stmt, gimple_stmt_iterator *gsi, auto_bitmap &inserted_exprs)
{
  if (!stmt)
    return;
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "statement un-sinked:\n");
      print_gimple_stmt (dump_file, stmt, 0,
			 TDF_VOPS|TDF_MEMSYMS);
    }

  tree name = gimple_get_lhs (stmt);
  // Mark the name to be renamed if there is one.
  bitmap_set_bit (inserted_exprs, SSA_NAME_VERSION (name));
  gimple_stmt_iterator gsi1 = gsi_for_stmt (stmt);
  gsi_move_before (&gsi1, gsi);
  reset_flow_sensitive_info (name);
}
/* RAII style class to temporarily remove flow sensitive info
   from ssa names defined by a gimple statement.  */
class auto_flow_sensitive
{
public:
  auto_flow_sensitive (gimple *s);
  ~auto_flow_sensitive ();
private:
  auto_vec<std::pair<tree, flow_sensitive_info_storage>, 2> stack;
};

/* Constructor for auto_flow_sensitive.  Saves
   off the ssa names' flow sensitive information
   that was defined by gimple statement S and
   resets it to be non-flow based ones.  */

auto_flow_sensitive::auto_flow_sensitive (gimple *s)
{
  if (!s)
    return;
  ssa_op_iter it;
  tree def;
  FOR_EACH_SSA_TREE_OPERAND (def, s, it, SSA_OP_DEF)
    {
      flow_sensitive_info_storage storage;
      storage.save_and_clear (def);
      stack.safe_push (std::make_pair (def, storage));
    }
}

/* Destructor, restores the flow sensitive information
   for the SSA names that had been saved off.  */

auto_flow_sensitive::~auto_flow_sensitive ()
{
  for (auto p : stack)
    p.second.restore (p.first);
}
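/* Typical use (a sketch): instantiate the class over a statement that is
   about to be moved, so that match-and-simplify does not rely on range
   info which only held at the statement's old location, e.g.

       auto_flow_sensitive s1 (stmt_to_move);
       // ... simplify and/or move the statement here ...
       // the destructor restores the saved info afterwards

   as done in match_simplify_replacement below.  */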
/* The function match_simplify_replacement does the main work of doing the
   replacement using match and simplify.  Return true if the replacement is
   done.  Otherwise return false.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */
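/* For instance (illustrative), with
       if (a_1 < b_2) goto bb1; else goto bb2;
       bb2:  # x_3 = PHI <a_1(bb1), b_2(bb2)>
   the call into gimple_simplify_phiopt can produce x_4 = MIN_EXPR <a_1, b_2>
   placed in the conditional block, and the PHI is then replaced by x_4.  */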
static bool
match_simplify_replacement (basic_block cond_bb, basic_block middle_bb,
			    basic_block middle_bb_alt,
			    edge e0, edge e1, gphi *phi,
			    tree arg0, tree arg1, bool early_p,
			    bool threeway_p)
{
  gimple *stmt;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple_seq seq = NULL;
  tree result;
  gimple *stmt_to_move = NULL;
  gimple *stmt_to_move_alt = NULL;
  tree arg_true, arg_false;

  /* Special case A ? B : B as this will always simplify to B.  */
  if (operand_equal_for_phi_arg_p (arg0, arg1))
    return false;

  /* If the basic block only has a cheap preparation statement,
     allow it and move it once the transformation is done.  */
  if (!empty_bb_or_one_feeding_into_p (middle_bb, phi, stmt_to_move))
    return false;

  if (threeway_p
      && middle_bb != middle_bb_alt
      && !empty_bb_or_one_feeding_into_p (middle_bb_alt, phi,
					  stmt_to_move_alt))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB).

     So, given the condition COND, and the two PHI arguments, match and
     simplify can happen on (COND) ? arg0 : arg1.  */

  stmt = last_nondebug_stmt (cond_bb);

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  /* When THREEWAY_P then e1 will point to the edge of the final transition
     from middle-bb to end.  */
  if (true_edge == e0)
    {
      if (!threeway_p)
	gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      if (!threeway_p)
	gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  /* Do not make conditional undefs unconditional.  */
  if ((TREE_CODE (arg_true) == SSA_NAME
       && ssa_name_maybe_undef_p (arg_true))
      || (TREE_CODE (arg_false) == SSA_NAME
	  && ssa_name_maybe_undef_p (arg_false)))
    return false;

  tree type = TREE_TYPE (gimple_phi_result (phi));

  auto_flow_sensitive s1 (stmt_to_move);
  auto_flow_sensitive s_alt (stmt_to_move_alt);

  result = gimple_simplify_phiopt (early_p, type, stmt,
				   arg_true, arg_false,
				   &seq);

  if (!result)
    return false;
  if (dump_file && (dump_flags & TDF_FOLDING))
    fprintf (dump_file, "accepted the phiopt match-simplify.\n");

  auto_bitmap exprs_maybe_dce;

  /* Mark the cond statements' lhs/rhs as maybe dce.  */
  if (TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (gimple_cond_lhs (stmt)))
    bitmap_set_bit (exprs_maybe_dce,
		    SSA_NAME_VERSION (gimple_cond_lhs (stmt)));
  if (TREE_CODE (gimple_cond_rhs (stmt)) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (gimple_cond_rhs (stmt)))
    bitmap_set_bit (exprs_maybe_dce,
		    SSA_NAME_VERSION (gimple_cond_rhs (stmt)));

  gsi = gsi_last_bb (cond_bb);
  /* Insert the sequence generated from gimple_simplify_phiopt.  */
  if (seq)
    {
      // Mark the lhs of the new statements maybe for dce.
      gimple_stmt_iterator gsi1 = gsi_start (seq);
      for (; !gsi_end_p (gsi1); gsi_next (&gsi1))
	{
	  gimple *stmt = gsi_stmt (gsi1);
	  tree name = gimple_get_lhs (stmt);
	  if (name && TREE_CODE (name) == SSA_NAME)
	    bitmap_set_bit (exprs_maybe_dce, SSA_NAME_VERSION (name));
	}
      gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);
    }

  /* If there was a statement to move, move it to right before
     the original conditional.  */
  move_stmt (stmt_to_move, &gsi, exprs_maybe_dce);
  move_stmt (stmt_to_move_alt, &gsi, exprs_maybe_dce);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result, exprs_maybe_dce);

  /* Add Statistic here even though replace_phi_edge_with_variable already
     does it as we want to be able to count when match-simplify happens vs
     the others.  */
  statistics_counter_event (cfun, "match-simplify PHI replacement", 1);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      poly_int64 offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
						&offset);
      if (tem
	  && TREE_CODE (tem) == MEM_REF
	  && known_eq (mem_ref_offset (tem) + offset, 0))
	{
	  *arg = TREE_OPERAND (tem, 0);
	  return true;
	}
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */
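/* E.g. (illustrative): for
       _1 = a_2 == b_3;
       _4 = _1 & c_5;
       if (_4 != 0) ...
   called with RHS being _1 and arg0/arg1 being a_2/b_3, *CODE becomes
   EQ_EXPR and true is returned.  */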
static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
				  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
	{
	  /* Finally verify the source operands of the EQ_EXPR are equal
	     to arg0 and arg1.  */
	  tree op0 = gimple_assign_rhs1 (def1);
	  tree op1 = gimple_assign_rhs2 (def1);
	  if ((operand_equal_for_phi_arg_p (arg0, op0)
	       && operand_equal_for_phi_arg_p (arg1, op1))
	      || (operand_equal_for_phi_arg_p (arg0, op1)
		  && operand_equal_for_phi_arg_p (arg1, op0)))
	    {
	      /* We will perform the optimization.  */
	      *code = gimple_assign_rhs_code (def1);
	      return true;
	    }
	}
    }
  return false;
}
/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */

static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
				     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
	  && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */
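/* E.g. x + 0, x | 0, x ^ 0, x * 1 and x & -1 all yield x regardless of
   side, while x - 0, x << 0 and x / 1 are only neutral with the element
   on the RIGHT (0 - x is not x).  */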
static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}
/* Returns true if ARG is an absorbing element for operation CODE.  */
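/* E.g. x * 0 and x & 0 are 0 for either side; 0 << x is only absorbing
   with 0 on the left; 0 % x and 0 / x additionally require RVAL to be
   known non-zero so the operation cannot trap.  */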
static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
	      && integer_zerop (arg)
	      && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
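/* As a small illustrative source-level example,
       if (x != 4) t = x; else t = 4;
   becomes simply t = x, because on the else path x is known to equal 4
   anyway.  */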
static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
		   edge e0, edge e1, gphi *phi, tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* Virtual operands don't need to be handled.  */
  if (virtual_operand_p (arg1))
    return 0;

  /* Special case A ? B : B as this will always simplify to B.  */
  if (operand_equal_for_phi_arg_p (arg0, arg1))
    return 0;

  gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_bb));
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* Do not make conditional undefs unconditional.  */
  if ((TREE_CODE (arg0) == SSA_NAME
       && ssa_name_maybe_undef_p (arg0))
      || (TREE_CODE (arg1) == SSA_NAME
	  && ssa_name_maybe_undef_p (arg1)))
    return 0;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
	{
	  if (gimple_code (stmt) != GIMPLE_PREDICT
	      && gimple_code (stmt) != GIMPLE_NOP)
	    empty_or_with_defined_p = false;
	  continue;
	}
      /* Now try to adjust arg0 or arg1 according to the computation
	 in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
	    && jump_function_from_stmt (&arg0, stmt))
	  || (lhs == arg1
	      && jump_function_from_stmt (&arg1, stmt)))
	empty_or_with_defined_p = false;
    }

  /* We need to know which is the true edge and which is the false
     edge so that we know whether we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  bool equal_p = operand_equal_for_value_replacement (arg0, arg1, &code, cond);
  bool maybe_equal_p = false;
  if (!equal_p
      && empty_or_with_defined_p
      && TREE_CODE (gimple_cond_rhs (cond)) == INTEGER_CST
      && (operand_equal_for_phi_arg_p (gimple_cond_lhs (cond), arg0)
	  ? TREE_CODE (arg1) == INTEGER_CST
	  : (operand_equal_for_phi_arg_p (gimple_cond_lhs (cond), arg1)
	     && TREE_CODE (arg0) == INTEGER_CST)))
    maybe_equal_p = true;
  if (equal_p || maybe_equal_p)
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
	 arg is the PHI argument associated with the true edge.  For
	 EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
	 OTHER_BLOCK).  If that is the case, then we want the single outgoing
	 edge from OTHER_BLOCK which reaches BB and represents the desired
	 path from COND_BLOCK.  */
      if (e->dest == middle_bb)
	e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
	 RHS of our new assignment statement.  */
      if (e0 == e)
	arg = arg0;
      else
	arg = arg1;

      /* If the middle basic block was empty or is defining the
	 PHI arguments and this is a single phi where the args are different
	 for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
	  && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
						 e0, e1) == phi)
	{
	  use_operand_p use_p;
	  gimple *use_stmt;

	  /* Even if arg0/arg1 isn't equal to second operand of cond, we
	     can optimize away the bb if we can prove it doesn't care whether
	     phi result is arg0/arg1 or second operand of cond.  Consider:
	     <bb 2> [local count: 118111600]:
	     if (i_2(D) == 4)
	       goto <bb 4>; [97.00%]
	     else
	       goto <bb 3>; [3.00%]

	     <bb 3> [local count: 3540129]:

	     <bb 4> [local count: 118111600]:
	     # i_6 = PHI <i_2(D)(3), 6(2)>
	     _3 = i_6 != 0;
	     Here, carg is 4, oarg is 6, crhs is 0, and because
	     (4 != 0) == (6 != 0), we don't care if i_6 is 4 or 6, both
	     have the same outcome.  So, we can optimize this to:
	     _3 = i_2(D) != 0;
	     If the single imm use of the phi result is >, >=, < or <=,
	     similarly we can check if both carg and oarg compare the same
	     against crhs using ccode.  */
	  if (maybe_equal_p
	      && TREE_CODE (arg) != INTEGER_CST
	      && single_imm_use (gimple_phi_result (phi), &use_p, &use_stmt))
	    {
	      enum tree_code ccode = ERROR_MARK;
	      tree clhs = NULL_TREE, crhs = NULL_TREE;
	      tree carg = gimple_cond_rhs (cond);
	      tree oarg = e0 == e ? arg1 : arg0;
	      if (is_gimple_assign (use_stmt)
		  && (TREE_CODE_CLASS (gimple_assign_rhs_code (use_stmt))
		      == tcc_comparison))
		{
		  ccode = gimple_assign_rhs_code (use_stmt);
		  clhs = gimple_assign_rhs1 (use_stmt);
		  crhs = gimple_assign_rhs2 (use_stmt);
		}
	      else if (gimple_code (use_stmt) == GIMPLE_COND)
		{
		  ccode = gimple_cond_code (use_stmt);
		  clhs = gimple_cond_lhs (use_stmt);
		  crhs = gimple_cond_rhs (use_stmt);
		}
	      if (ccode != ERROR_MARK
		  && clhs == gimple_phi_result (phi)
		  && TREE_CODE (crhs) == INTEGER_CST)
		switch (ccode)
		  {
		  case EQ_EXPR:
		  case NE_EXPR:
		    if (!tree_int_cst_equal (crhs, carg)
			&& !tree_int_cst_equal (crhs, oarg))
		      equal_p = true;
		    break;
		  case GT_EXPR:
		    if (tree_int_cst_lt (crhs, carg)
			== tree_int_cst_lt (crhs, oarg))
		      equal_p = true;
		    break;
		  case GE_EXPR:
		    if (tree_int_cst_le (crhs, carg)
			== tree_int_cst_le (crhs, oarg))
		      equal_p = true;
		    break;
		  case LT_EXPR:
		    if (tree_int_cst_lt (carg, crhs)
			== tree_int_cst_lt (oarg, crhs))
		      equal_p = true;
		    break;
		  case LE_EXPR:
		    if (tree_int_cst_le (carg, crhs)
			== tree_int_cst_le (oarg, crhs))
		      equal_p = true;
		    break;
		  default:
		    break;
		  }
	      if (equal_p)
		{
		  tree phires = gimple_phi_result (phi);
		  if (SSA_NAME_RANGE_INFO (phires))
		    {
		      /* After the optimization PHI result can have value
			 which it couldn't have previously.  */
		      value_range r (TREE_TYPE (phires));
		      if (get_global_range_query ()->range_of_expr (r, phires,
								    phi))
			{
			  value_range tmp (carg, carg);
			  r.union_ (tmp);
			  reset_flow_sensitive_info (phires);
			  set_range_info (phires, r);
			}
		      else
			reset_flow_sensitive_info (phires);
		    }
		}
	      if (equal_p && MAY_HAVE_DEBUG_BIND_STMTS)
		{
		  imm_use_iterator imm_iter;
		  tree phires = gimple_phi_result (phi);
		  tree temp = NULL_TREE;
		  bool reset_p = false;

		  /* Add # DEBUG D#1 => arg != carg ? arg : oarg.  */
		  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, phires)
		    {
		      if (!is_gimple_debug (use_stmt))
			continue;
		      if (temp == NULL_TREE)
			{
			  if (!single_pred_p (middle_bb)
			      || EDGE_COUNT (gimple_bb (phi)->preds) != 2)
			    {
			      /* But only if middle_bb has a single
				 predecessor and phi bb has two, otherwise
				 we could use a SSA_NAME not usable in that
				 place or wrong-debug.  */
			      reset_p = true;
			      break;
			    }
			  gimple_stmt_iterator gsi
			    = gsi_after_labels (gimple_bb (phi));
			  tree type = TREE_TYPE (phires);
			  temp = build_debug_expr_decl (type);
			  tree t = build2 (NE_EXPR, boolean_type_node,
					   arg, carg);
			  t = build3 (COND_EXPR, type, t, arg, oarg);
			  gimple *g = gimple_build_debug_bind (temp, t, phi);
			  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
			}
		      FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
			replace_exp (use_p, temp);
		      update_stmt (use_stmt);
		    }
		  if (reset_p)
		    reset_debug_uses (phi);
		}
	    }
	  if (equal_p)
	    {
	      replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
	      /* Note that we optimized this PHI.  */
	      return 2;
	    }
	}
      else if (equal_p)
	{
	  if (!single_pred_p (middle_bb))
	    return 0;
	  statistics_counter_event (cfun, "Replace PHI with "
				    "variable/value_replacement", 1);

	  /* Replace the PHI arguments with arg.  */
	  SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
	  SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "PHI ");
	      print_generic_expr (dump_file, gimple_phi_result (phi));
	      fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
		       cond_bb->index);
	      print_generic_expr (dump_file, arg);
	      fprintf (dump_file, ".\n");
	    }
	  return 1;
	}
    }

  if (!single_pred_p (middle_bb))
    return 0;

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	  && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  if (gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS)
    {
      /* If last stmt of the middle_bb is a conversion, handle it like
	 a preparation statement through constant evaluation with
	 checking for UB.  */
      enum tree_code sc = gimple_assign_rhs_code (assign);
      if (CONVERT_EXPR_CODE_P (sc))
	assign = NULL;
      else
	return 0;
    }

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
      if (y_4 != 0)
	goto <bb 3>;
      else
	goto <bb 4>;
     <bb 3>:
      _1 = (int) y_4;
      iftmp.0_6 = x_5(D) r<< _1;
     <bb 4>:
      # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
     or:
      if (y_3(D) == 0)
	goto <bb 4>;
      else
	goto <bb 3>;
     <bb 3>:
      y_4 = y_3(D) & 31;
      _1 = (int) y_4;
      _6 = x_5(D) r<< _1;
     <bb 4>:
      # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      if (prep_cnt || assign)
	gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
	break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
	break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
	return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
	  || TREE_CODE (rhs1) != SSA_NAME
	  || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	  || !single_imm_use (lhs, &use_p, &use_stmt)
	  || ((prep_cnt || assign)
	      && use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign)))
	return 0;
      switch (gimple_assign_rhs_code (g))
	{
	CASE_CONVERT:
	  break;
	case PLUS_EXPR:
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
	    return 0;
	  break;
	default:
	  return 0;
	}
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
	 >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
	return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
	{
	  cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
				      gimple_assign_rhs2 (g));
	  if (TREE_OVERFLOW (cond_rhs))
	    return 0;
	}
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
    }

  tree lhs, rhs1, rhs2;
  enum tree_code code_def;
  if (assign)
    {
      lhs = gimple_assign_lhs (assign);
      rhs1 = gimple_assign_rhs1 (assign);
      rhs2 = gimple_assign_rhs2 (assign);
      code_def = gimple_assign_rhs_code (assign);
    }
  else
    {
      gcc_assert (prep_cnt > 0);
      lhs = cond_lhs;
      rhs1 = NULL_TREE;
      rhs2 = NULL_TREE;
      code_def = ERROR_MARK;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((assign == NULL
	   && operand_equal_for_phi_arg_p (arg1, cond_rhs))
	  || (assign
	      && arg1 == rhs1
	      && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, true))
	  || (assign
	      && arg1 == rhs2
	      && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, false))
	  || (assign
	      && operand_equal_for_phi_arg_p (arg1, cond_rhs)
	      && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
		   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
		  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
		      && absorbing_element_p (code_def,
					      cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
	 def-stmt in:
	 if (n_5 != 0)
	   goto <bb 3>;
	 else
	   goto <bb 4>;

	 <bb 3>:
	 # RANGE [0, 4294967294]
	 u_6 = n_5 + 4294967295;

	 <bb 4>:
	 # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
	{
	  tree plhs = gimple_assign_lhs (prep_stmt[i]);
	  reset_flow_sensitive_info (plhs);
	  gsi_from = gsi_for_stmt (prep_stmt[i]);
	  gsi_move_before (&gsi_from, &gsi);
	}
      if (assign)
	{
	  gsi_from = gsi_for_stmt (assign);
	  gsi_move_before (&gsi_from, &gsi);
	}
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
/* If VAR is an SSA_NAME that points to a BIT_NOT_EXPR then return the TREE for
   the value being inverted.  */

static tree
strip_bit_not (tree var)
{
  if (TREE_CODE (var) != SSA_NAME)
    return NULL_TREE;

  gimple *assign = SSA_NAME_DEF_STMT (var);
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return NULL_TREE;

  if (gimple_assign_rhs_code (assign) != BIT_NOT_EXPR)
    return NULL_TREE;

  return gimple_assign_rhs1 (assign);
}

/* Invert a MIN to a MAX or a MAX to a MIN expression CODE.  */

enum tree_code
invert_minmax_code (enum tree_code code)
{
  switch (code)
    {
    case MIN_EXPR:
      return MAX_EXPR;
    case MAX_EXPR:
      return MIN_EXPR;
    default:
      gcc_unreachable ();
    }
}
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.

   If THREEWAY_P then expect the BB to be laid out in diamond shape with each
   BB containing only a MIN or MAX expression.  */
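/* E.g. (illustrative) the simple shape
       if (a_1 < b_2) goto bb1; else goto bb2;
       # x_3 = PHI <a_1(bb1), b_2(bb2)>
   is rewritten to x_4 = MIN_EXPR <a_1, b_2>, and the diamond variant with
   one MIN/MAX in each arm is combined into two chained MIN/MAX stmts.  */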
1679 static bool
1680 minmax_replacement (basic_block cond_bb, basic_block middle_bb, basic_block alt_middle_bb,
1681 edge e0, edge e1, gphi *phi, tree arg0, tree arg1, bool threeway_p)
1683 tree result;
1684 edge true_edge, false_edge;
1685 enum tree_code minmax, ass_code;
1686 tree smaller, larger, arg_true, arg_false;
1687 gimple_stmt_iterator gsi, gsi_from;
1689 tree type = TREE_TYPE (PHI_RESULT (phi));
1691 gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_bb));
1692 enum tree_code cmp = gimple_cond_code (cond);
1693 tree rhs = gimple_cond_rhs (cond);
1695 /* Turn EQ/NE of extreme values to order comparisons. */
1696 if ((cmp == NE_EXPR || cmp == EQ_EXPR)
1697 && TREE_CODE (rhs) == INTEGER_CST
1698 && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
1700 if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
1702 cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
1703 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1704 wi::min_value (TREE_TYPE (rhs)) + 1);
1706 else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
1708 cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
1709 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1710 wi::max_value (TREE_TYPE (rhs)) - 1);
1714 /* This transformation is only valid for order comparisons. Record which
1715 operand is smaller/larger if the result of the comparison is true. */
1716 tree alt_smaller = NULL_TREE;
1717 tree alt_larger = NULL_TREE;
1718 if (cmp == LT_EXPR || cmp == LE_EXPR)
1720 smaller = gimple_cond_lhs (cond);
1721 larger = rhs;
1722 /* If we have smaller < CST it is equivalent to smaller <= CST-1.
1723 Likewise smaller <= CST is equivalent to smaller < CST+1. */
1724 if (TREE_CODE (larger) == INTEGER_CST
1725 && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
1727 if (cmp == LT_EXPR)
1729 wi::overflow_type overflow;
1730 wide_int alt = wi::sub (wi::to_wide (larger), 1,
1731 TYPE_SIGN (TREE_TYPE (larger)),
1732 &overflow);
1733 if (! overflow)
1734 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1736 else
1738 wi::overflow_type overflow;
1739 wide_int alt = wi::add (wi::to_wide (larger), 1,
1740 TYPE_SIGN (TREE_TYPE (larger)),
1741 &overflow);
1742 if (! overflow)
1743 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1747 else if (cmp == GT_EXPR || cmp == GE_EXPR)
1749 smaller = rhs;
1750 larger = gimple_cond_lhs (cond);
1751 /* If we have larger > CST it is equivalent to larger >= CST+1.
1752 Likewise larger >= CST is equivalent to larger > CST-1. */
1753 if (TREE_CODE (smaller) == INTEGER_CST
1754 && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
1756 wi::overflow_type overflow;
1757 if (cmp == GT_EXPR)
1759 wide_int alt = wi::add (wi::to_wide (smaller), 1,
1760 TYPE_SIGN (TREE_TYPE (smaller)),
1761 &overflow);
1762 if (! overflow)
1763 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1765 else
1767 wide_int alt = wi::sub (wi::to_wide (smaller), 1,
1768 TYPE_SIGN (TREE_TYPE (smaller)),
1769 &overflow);
1770 if (! overflow)
1771 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1775 else
1776 return false;
1778 /* Handle the special case of (signed_type)x < 0 being equivalent
1779 to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
1780 to x <= MAX_VAL(signed_type). */
1781 if ((cmp == GE_EXPR || cmp == LT_EXPR)
1782 && INTEGRAL_TYPE_P (type)
1783 && TYPE_UNSIGNED (type)
1784 && integer_zerop (rhs))
1786 tree op = gimple_cond_lhs (cond);
1787 if (TREE_CODE (op) == SSA_NAME
1788 && INTEGRAL_TYPE_P (TREE_TYPE (op))
1789 && !TYPE_UNSIGNED (TREE_TYPE (op)))
1791 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1792 if (gimple_assign_cast_p (def_stmt))
1794 tree op1 = gimple_assign_rhs1 (def_stmt);
1795 if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
1796 && TYPE_UNSIGNED (TREE_TYPE (op1))
1797 && (TYPE_PRECISION (TREE_TYPE (op))
1798 == TYPE_PRECISION (TREE_TYPE (op1)))
1799 && useless_type_conversion_p (type, TREE_TYPE (op1)))
1801 wide_int w1 = wi::max_value (TREE_TYPE (op));
1802 wide_int w2 = wi::add (w1, 1);
1803 if (cmp == LT_EXPR)
1805 larger = op1;
1806 smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
1807 alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
1808 alt_larger = NULL_TREE;
1810 else
1812 smaller = op1;
1813 larger = wide_int_to_tree (TREE_TYPE (op1), w1);
1814 alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
1815 alt_smaller = NULL_TREE;
1822 /* We need to know which is the true edge and which is the false
1823 edge so that we know if have abs or negative abs. */
1824 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1826 /* Forward the edges over the middle basic block. */
1827 if (true_edge->dest == middle_bb)
1828 true_edge = EDGE_SUCC (true_edge->dest, 0);
1829 if (false_edge->dest == middle_bb)
1830 false_edge = EDGE_SUCC (false_edge->dest, 0);
1832 /* When THREEWAY_P then e1 will point to the edge of the final transition
1833 from middle-bb to end. */
1834 if (true_edge == e0)
1836 if (!threeway_p)
1837 gcc_assert (false_edge == e1);
1838 arg_true = arg0;
1839 arg_false = arg1;
1841 else
1843 gcc_assert (false_edge == e0);
1844 if (!threeway_p)
1845 gcc_assert (true_edge == e1);
1846 arg_true = arg1;
1847 arg_false = arg0;
1850 if (empty_block_p (middle_bb)
1851 && (!threeway_p
1852 || empty_block_p (alt_middle_bb)))
1854 if ((operand_equal_for_phi_arg_p (arg_true, smaller)
1855 || (alt_smaller
1856 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1857 && (operand_equal_for_phi_arg_p (arg_false, larger)
1858 || (alt_larger
1859 && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1861 /* Case
1863 if (smaller < larger)
1864 rslt = smaller;
1865 else
1866 rslt = larger; */
1867 minmax = MIN_EXPR;
1869 else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
1870 || (alt_smaller
1871 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1872 && (operand_equal_for_phi_arg_p (arg_true, larger)
1873 || (alt_larger
1874 && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1875 minmax = MAX_EXPR;
1876 else
1877 return false;
1879 else if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
1880 /* The optimization may be unsafe due to NaNs. */
1881 return false;
1882 else if (middle_bb != alt_middle_bb && threeway_p)
1884 /* Recognize the following case:
1886 if (smaller < larger)
1887 a = MIN (smaller, c);
1888 else
1889 b = MIN (larger, c);
1890 x = PHI <a, b>
1892 This is equivalent to
1894 a = MIN (smaller, c);
1895 x = MIN (larger, a); */
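/* To see the equivalence: when smaller < larger we have
   MIN (smaller, c) <= smaller < larger, so the outer MIN just
   returns a; otherwise larger <= smaller and
   MIN (larger, MIN (smaller, c)) collapses to MIN (larger, c),
   matching the else arm.  */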
1897 gimple *assign = last_and_only_stmt (middle_bb);
1898 tree lhs, op0, op1, bound;
1899 tree alt_lhs, alt_op0, alt_op1;
1900 bool invert = false;
1902 /* When THREEWAY_P, e1 points to the edge of the final transition
1903 from middle-bb to end. */
1904 if (true_edge == e0)
1905 gcc_assert (false_edge == EDGE_PRED (e1->src, 0));
1906 else
1907 gcc_assert (true_edge == EDGE_PRED (e1->src, 0));
1909 bool valid_minmax_p = false;
1910 gimple_stmt_iterator it1
1911 = gsi_start_nondebug_after_labels_bb (middle_bb);
1912 gimple_stmt_iterator it2
1913 = gsi_start_nondebug_after_labels_bb (alt_middle_bb);
1914 if (gsi_one_nondebug_before_end_p (it1)
1915 && gsi_one_nondebug_before_end_p (it2))
1917 gimple *stmt1 = gsi_stmt (it1);
1918 gimple *stmt2 = gsi_stmt (it2);
1919 if (is_gimple_assign (stmt1) && is_gimple_assign (stmt2))
1921 enum tree_code code1 = gimple_assign_rhs_code (stmt1);
1922 enum tree_code code2 = gimple_assign_rhs_code (stmt2);
1923 valid_minmax_p = (code1 == MIN_EXPR || code1 == MAX_EXPR)
1924 && (code2 == MIN_EXPR || code2 == MAX_EXPR);
1928 if (!valid_minmax_p)
1929 return false;
1931 if (!assign
1932 || gimple_code (assign) != GIMPLE_ASSIGN)
1933 return false;
1935 /* There cannot be any phi nodes in the middle bb. */
1936 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
1937 return false;
1939 lhs = gimple_assign_lhs (assign);
1940 ass_code = gimple_assign_rhs_code (assign);
1941 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1942 return false;
1944 op0 = gimple_assign_rhs1 (assign);
1945 op1 = gimple_assign_rhs2 (assign);
1947 assign = last_and_only_stmt (alt_middle_bb);
1948 if (!assign
1949 || gimple_code (assign) != GIMPLE_ASSIGN)
1950 return false;
1952 /* There cannot be any phi nodes in the alt middle bb. */
1953 if (!gimple_seq_empty_p (phi_nodes (alt_middle_bb)))
1954 return false;
1956 alt_lhs = gimple_assign_lhs (assign);
1957 if (ass_code != gimple_assign_rhs_code (assign))
1958 return false;
1960 if (!operand_equal_for_phi_arg_p (lhs, arg_true)
1961 || !operand_equal_for_phi_arg_p (alt_lhs, arg_false))
1962 return false;
1964 alt_op0 = gimple_assign_rhs1 (assign);
1965 alt_op1 = gimple_assign_rhs2 (assign);
1967 if ((operand_equal_for_phi_arg_p (op0, smaller)
1968 || (alt_smaller
1969 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1970 && (operand_equal_for_phi_arg_p (alt_op0, larger)
1971 || (alt_larger
1972 && operand_equal_for_phi_arg_p (alt_op0, alt_larger))))
1974 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1975 if (!operand_equal_for_phi_arg_p (op1, alt_op1))
1976 return false;
1978 if ((arg0 = strip_bit_not (op0)) != NULL
1979 && (arg1 = strip_bit_not (alt_op0)) != NULL
1980 && (bound = strip_bit_not (op1)) != NULL)
1982 minmax = MAX_EXPR;
1983 ass_code = invert_minmax_code (ass_code);
1984 invert = true;
1986 else
1988 bound = op1;
1989 minmax = MIN_EXPR;
1990 arg0 = op0;
1991 arg1 = alt_op0;
1994 else if ((operand_equal_for_phi_arg_p (op0, larger)
1995 || (alt_larger
1996 && operand_equal_for_phi_arg_p (op0, alt_larger)))
1997 && (operand_equal_for_phi_arg_p (alt_op0, smaller)
1998 || (alt_smaller
1999 && operand_equal_for_phi_arg_p (alt_op0, alt_smaller))))
2001 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
2002 if (!operand_equal_for_phi_arg_p (op1, alt_op1))
2003 return false;
2005 if ((arg0 = strip_bit_not (op0)) != NULL
2006 && (arg1 = strip_bit_not (alt_op0)) != NULL
2007 && (bound = strip_bit_not (op1)) != NULL)
2009 minmax = MIN_EXPR;
2010 ass_code = invert_minmax_code (ass_code);
2011 invert = true;
2013 else
2015 bound = op1;
2016 minmax = MAX_EXPR;
2017 arg0 = op0;
2018 arg1 = alt_op0;
2021 else
2022 return false;
2024 /* Emit the statement to compute min/max. */
2025 location_t locus = gimple_location (last_nondebug_stmt (cond_bb));
2026 gimple_seq stmts = NULL;
2027 tree phi_result = PHI_RESULT (phi);
2028 result = gimple_build (&stmts, locus, minmax, TREE_TYPE (phi_result),
2029 arg0, arg1);
2030 result = gimple_build (&stmts, locus, ass_code, TREE_TYPE (phi_result),
2031 result, bound);
2032 if (invert)
2033 result = gimple_build (&stmts, locus, BIT_NOT_EXPR, TREE_TYPE (phi_result),
2034 result);
2036 gsi = gsi_last_bb (cond_bb);
2037 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
2039 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2041 return true;
2043 else if (!threeway_p
2044 || empty_block_p (alt_middle_bb))
2046 /* Recognize the following case, assuming d <= u:
2048 if (a <= u)
2049 b = MAX (a, d);
2050 x = PHI <b, u>
2052 This is equivalent to
2054 b = MAX (a, d);
2055 x = MIN (b, u); */
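/* To see why (given d <= u): if a <= u then both a and d are <= u,
   so MIN (MAX (a, d), u) is just MAX (a, d); if a > u then
   MAX (a, d) >= a > u and the MIN yields u, matching the PHI.  */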
2057 gimple *assign = last_and_only_stmt (middle_bb);
2058 tree lhs, op0, op1, bound;
2060 if (!single_pred_p (middle_bb))
2061 return false;
2063 if (!assign
2064 || gimple_code (assign) != GIMPLE_ASSIGN)
2065 return false;
2067 /* There cannot be any phi nodes in the middle bb. */
2068 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
2069 return false;
2071 lhs = gimple_assign_lhs (assign);
2072 ass_code = gimple_assign_rhs_code (assign);
2073 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
2074 return false;
2075 op0 = gimple_assign_rhs1 (assign);
2076 op1 = gimple_assign_rhs2 (assign);
2078 if (true_edge->src == middle_bb)
2080 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
2081 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
2082 return false;
2084 if (operand_equal_for_phi_arg_p (arg_false, larger)
2085 || (alt_larger
2086 && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
2088 /* Case
2090 if (smaller < larger)
2092 r' = MAX_EXPR (smaller, bound)
2094 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
2095 if (ass_code != MAX_EXPR)
2096 return false;
2098 minmax = MIN_EXPR;
2099 if (operand_equal_for_phi_arg_p (op0, smaller)
2100 || (alt_smaller
2101 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
2102 bound = op1;
2103 else if (operand_equal_for_phi_arg_p (op1, smaller)
2104 || (alt_smaller
2105 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
2106 bound = op0;
2107 else
2108 return false;
2110 /* We need BOUND <= LARGER. */
2111 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
2112 bound, arg_false)))
2113 return false;
2115 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
2116 || (alt_smaller
2117 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
2119 /* Case
2121 if (smaller < larger)
2123 r' = MIN_EXPR (larger, bound)
2125 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
2126 if (ass_code != MIN_EXPR)
2127 return false;
2129 minmax = MAX_EXPR;
2130 if (operand_equal_for_phi_arg_p (op0, larger)
2131 || (alt_larger
2132 && operand_equal_for_phi_arg_p (op0, alt_larger)))
2133 bound = op1;
2134 else if (operand_equal_for_phi_arg_p (op1, larger)
2135 || (alt_larger
2136 && operand_equal_for_phi_arg_p (op1, alt_larger)))
2137 bound = op0;
2138 else
2139 return false;
2141 /* We need BOUND >= SMALLER. */
2142 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
2143 bound, arg_false)))
2144 return false;
2146 else
2147 return false;
2149 else
2151 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
2152 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
2153 return false;
2155 if (operand_equal_for_phi_arg_p (arg_true, larger)
2156 || (alt_larger
2157 && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
2159 /* Case
2161 if (smaller > larger)
2163 r' = MIN_EXPR (smaller, bound)
2165 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
2166 if (ass_code != MIN_EXPR)
2167 return false;
2169 minmax = MAX_EXPR;
2170 if (operand_equal_for_phi_arg_p (op0, smaller)
2171 || (alt_smaller
2172 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
2173 bound = op1;
2174 else if (operand_equal_for_phi_arg_p (op1, smaller)
2175 || (alt_smaller
2176 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
2177 bound = op0;
2178 else
2179 return false;
2181 /* We need BOUND >= LARGER. */
2182 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
2183 bound, arg_true)))
2184 return false;
2186 else if (operand_equal_for_phi_arg_p (arg_true, smaller)
2187 || (alt_smaller
2188 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
2190 /* Case
2192 if (smaller > larger)
2194 r' = MAX_EXPR (larger, bound)
2196 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
2197 if (ass_code != MAX_EXPR)
2198 return false;
2200 minmax = MIN_EXPR;
2201 if (operand_equal_for_phi_arg_p (op0, larger))
2202 bound = op1;
2203 else if (operand_equal_for_phi_arg_p (op1, larger))
2204 bound = op0;
2205 else
2206 return false;
2208 /* We need BOUND <= SMALLER. */
2209 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
2210 bound, arg_true)))
2211 return false;
2213 else
2214 return false;
2217 /* Move the statement from the middle block. */
2218 gsi = gsi_last_bb (cond_bb);
2219 gsi_from = gsi_last_nondebug_bb (middle_bb);
2220 reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
2221 SSA_OP_DEF));
2222 gsi_move_before (&gsi_from, &gsi);
2224 else
2225 return false;
2227 /* Emit the statement to compute min/max. */
2228 gimple_seq stmts = NULL;
2229 tree phi_result = PHI_RESULT (phi);
2231 /* When we can't use a MIN/MAX_EXPR, still make sure the expression
2232 stays in a form recognized by ISAs that map it to IEEE x > y ? x : y
2233 semantics (which is not IEEE max semantics). */
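/* For example (a sketch): with y = NaN, x > y ? x : y yields NaN
   while IEEE maxNum (x, NaN) yields x, and with x = -0.0, y = +0.0
   the COND_EXPR form pins down which zero is returned.  Hence the
   expression is kept as a COND_EXPR rather than a MAX_EXPR here.  */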
2234 if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
2236 result = gimple_build (&stmts, cmp, boolean_type_node,
2237 gimple_cond_lhs (cond), rhs);
2238 result = gimple_build (&stmts, COND_EXPR, TREE_TYPE (phi_result),
2239 result, arg_true, arg_false);
2241 else
2242 result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
2244 gsi = gsi_last_bb (cond_bb);
2245 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
2247 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2249 return true;
2252 /* Attempt to optimize (x <=> y) cmp 0 and similar comparisons.
2253 For strong ordering <=> try to match something like:
2254 <bb 2> : // cond3_bb (== cond2_bb)
2255 if (x_4(D) != y_5(D))
2256 goto <bb 3>; [INV]
2257 else
2258 goto <bb 6>; [INV]
2260 <bb 3> : // cond_bb
2261 if (x_4(D) < y_5(D))
2262 goto <bb 6>; [INV]
2263 else
2264 goto <bb 4>; [INV]
2266 <bb 4> : // middle_bb
2268 <bb 6> : // phi_bb
2269 # iftmp.0_2 = PHI <1(4), 0(2), -1(3)>
2270 _1 = iftmp.0_2 == 0;
2272 and for partial ordering <=> something like:
2274 <bb 2> : // cond3_bb
2275 if (a_3(D) == b_5(D))
2276 goto <bb 6>; [50.00%]
2277 else
2278 goto <bb 3>; [50.00%]
2280 <bb 3> [local count: 536870913]: // cond2_bb
2281 if (a_3(D) < b_5(D))
2282 goto <bb 6>; [50.00%]
2283 else
2284 goto <bb 4>; [50.00%]
2286 <bb 4> [local count: 268435456]: // cond_bb
2287 if (a_3(D) > b_5(D))
2288 goto <bb 6>; [50.00%]
2289 else
2290 goto <bb 5>; [50.00%]
2292 <bb 5> [local count: 134217728]: // middle_bb
2294 <bb 6> [local count: 1073741824]: // phi_bb
2295 # SR.27_4 = PHI <0(2), -1(3), 1(4), 2(5)>
2296 _2 = SR.27_4 > 0; */
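/* At the source level the strong-ordering shape above typically comes
   from something like (illustrative only):

     #include <compare>
     bool eq (int x, int y) { return (x <=> y) == 0; }

   where the library expansion of <=> yields the -1/0/1 PHI and a
   comparison of that PHI result against a small constant.  */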
2298 static bool
2299 spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
2300 edge e0, edge e1, gphi *phi,
2301 tree arg0, tree arg1)
2303 tree phires = PHI_RESULT (phi);
2304 if (!INTEGRAL_TYPE_P (TREE_TYPE (phires))
2305 || TYPE_UNSIGNED (TREE_TYPE (phires))
2306 || !tree_fits_shwi_p (arg0)
2307 || !tree_fits_shwi_p (arg1)
2308 || !IN_RANGE (tree_to_shwi (arg0), -1, 2)
2309 || !IN_RANGE (tree_to_shwi (arg1), -1, 2))
2310 return false;
2312 basic_block phi_bb = gimple_bb (phi);
2313 gcc_assert (phi_bb == e0->dest && phi_bb == e1->dest);
2314 if (!IN_RANGE (EDGE_COUNT (phi_bb->preds), 3, 4))
2315 return false;
2317 use_operand_p use_p;
2318 gimple *use_stmt;
2319 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phires))
2320 return false;
2321 if (!single_imm_use (phires, &use_p, &use_stmt))
2322 return false;
2323 enum tree_code cmp;
2324 tree lhs, rhs;
2325 gimple *orig_use_stmt = use_stmt;
2326 tree orig_use_lhs = NULL_TREE;
2327 int prec = TYPE_PRECISION (TREE_TYPE (phires));
2328 bool is_cast = false;
2330 /* Deal with the case when match.pd has rewritten the (res & ~1) == 0
2331 into res <= 1 and has left a type-cast for signed types. */
2332 if (gimple_assign_cast_p (use_stmt))
2334 orig_use_lhs = gimple_assign_lhs (use_stmt);
2335 /* match.pd would have only done this for a signed type,
2336 so the conversion must be to an unsigned one. */
2337 tree ty1 = TREE_TYPE (gimple_assign_rhs1 (use_stmt));
2338 tree ty2 = TREE_TYPE (orig_use_lhs);
2340 if (!TYPE_UNSIGNED (ty2) || !INTEGRAL_TYPE_P (ty2))
2341 return false;
2342 if (TYPE_PRECISION (ty1) > TYPE_PRECISION (ty2))
2343 return false;
2344 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2345 return false;
2346 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2347 return false;
2349 is_cast = true;
2351 else if (is_gimple_assign (use_stmt)
2352 && gimple_assign_rhs_code (use_stmt) == BIT_AND_EXPR
2353 && TREE_CODE (gimple_assign_rhs2 (use_stmt)) == INTEGER_CST
2354 && (wi::to_wide (gimple_assign_rhs2 (use_stmt))
2355 == wi::shifted_mask (1, prec - 1, false, prec)))
2357 /* For partial_ordering result operator>= with unspec as second
2358 argument is (res & 1) == res, folded by match.pd into
2359 (res & ~1) == 0. */
2360 orig_use_lhs = gimple_assign_lhs (use_stmt);
2361 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2362 return false;
2363 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2364 return false;
2366 if (gimple_code (use_stmt) == GIMPLE_COND)
2368 cmp = gimple_cond_code (use_stmt);
2369 lhs = gimple_cond_lhs (use_stmt);
2370 rhs = gimple_cond_rhs (use_stmt);
2372 else if (is_gimple_assign (use_stmt))
2374 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2376 cmp = gimple_assign_rhs_code (use_stmt);
2377 lhs = gimple_assign_rhs1 (use_stmt);
2378 rhs = gimple_assign_rhs2 (use_stmt);
2380 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
2382 tree cond = gimple_assign_rhs1 (use_stmt);
2383 if (!COMPARISON_CLASS_P (cond))
2384 return false;
2385 cmp = TREE_CODE (cond);
2386 lhs = TREE_OPERAND (cond, 0);
2387 rhs = TREE_OPERAND (cond, 1);
2389 else
2390 return false;
2392 else
2393 return false;
2394 switch (cmp)
2396 case EQ_EXPR:
2397 case NE_EXPR:
2398 case LT_EXPR:
2399 case GT_EXPR:
2400 case LE_EXPR:
2401 case GE_EXPR:
2402 break;
2403 default:
2404 return false;
2406 if (lhs != (orig_use_lhs ? orig_use_lhs : phires)
2407 || !tree_fits_shwi_p (rhs)
2408 || !IN_RANGE (tree_to_shwi (rhs), -1, 1))
2409 return false;
2411 if (is_cast)
2413 if (TREE_CODE (rhs) != INTEGER_CST)
2414 return false;
2415 /* As with -ffast-math we assume a return value of 2 to be
2416 impossible, canonicalize (unsigned) res <= 1U or
2417 (unsigned) res < 2U into res >= 0, and (unsigned) res > 1U
2418 or (unsigned) res >= 2U into res < 0. */
2419 switch (cmp)
2421 case LE_EXPR:
2422 if (!integer_onep (rhs))
2423 return false;
2424 cmp = GE_EXPR;
2425 break;
2426 case LT_EXPR:
2427 if (wi::ne_p (wi::to_widest (rhs), 2))
2428 return false;
2429 cmp = GE_EXPR;
2430 break;
2431 case GT_EXPR:
2432 if (!integer_onep (rhs))
2433 return false;
2434 cmp = LT_EXPR;
2435 break;
2436 case GE_EXPR:
2437 if (wi::ne_p (wi::to_widest (rhs), 2))
2438 return false;
2439 cmp = LT_EXPR;
2440 break;
2441 default:
2442 return false;
2444 rhs = build_zero_cst (TREE_TYPE (phires));
2446 else if (orig_use_lhs)
2448 if ((cmp != EQ_EXPR && cmp != NE_EXPR) || !integer_zerop (rhs))
2449 return false;
2450 /* As with -ffast-math we assume a return value of 2 to be
2451 impossible, canonicalize (res & ~1) == 0 into
2452 res >= 0 and (res & ~1) != 0 into res < 0. */
2453 cmp = cmp == EQ_EXPR ? GE_EXPR : LT_EXPR;
2456 if (!empty_block_p (middle_bb))
2457 return false;
2459 gcond *cond1 = as_a <gcond *> (*gsi_last_bb (cond_bb));
2460 enum tree_code cmp1 = gimple_cond_code (cond1);
2461 switch (cmp1)
2463 case LT_EXPR:
2464 case LE_EXPR:
2465 case GT_EXPR:
2466 case GE_EXPR:
2467 break;
2468 default:
2469 return false;
2471 tree lhs1 = gimple_cond_lhs (cond1);
2472 tree rhs1 = gimple_cond_rhs (cond1);
2473 /* The optimization may be unsafe due to NaNs. */
2474 if (HONOR_NANS (TREE_TYPE (lhs1)))
2475 return false;
2476 if (TREE_CODE (lhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs1))
2477 return false;
2478 if (TREE_CODE (rhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
2479 return false;
2481 if (!single_pred_p (cond_bb) || !cond_only_block_p (cond_bb))
2482 return false;
2484 basic_block cond2_bb = single_pred (cond_bb);
2485 if (EDGE_COUNT (cond2_bb->succs) != 2)
2486 return false;
2487 edge cond2_phi_edge;
2488 if (EDGE_SUCC (cond2_bb, 0)->dest == cond_bb)
2490 if (EDGE_SUCC (cond2_bb, 1)->dest != phi_bb)
2491 return false;
2492 cond2_phi_edge = EDGE_SUCC (cond2_bb, 1);
2494 else if (EDGE_SUCC (cond2_bb, 0)->dest != phi_bb)
2495 return false;
2496 else
2497 cond2_phi_edge = EDGE_SUCC (cond2_bb, 0);
2498 tree arg2 = gimple_phi_arg_def (phi, cond2_phi_edge->dest_idx);
2499 if (!tree_fits_shwi_p (arg2))
2500 return false;
2501 gcond *cond2 = safe_dyn_cast <gcond *> (*gsi_last_bb (cond2_bb));
2502 if (!cond2)
2503 return false;
2504 enum tree_code cmp2 = gimple_cond_code (cond2);
2505 tree lhs2 = gimple_cond_lhs (cond2);
2506 tree rhs2 = gimple_cond_rhs (cond2);
2507 if (lhs2 == lhs1)
2509 if (!operand_equal_p (rhs2, rhs1, 0))
2511 if ((cmp2 == EQ_EXPR || cmp2 == NE_EXPR)
2512 && TREE_CODE (rhs1) == INTEGER_CST
2513 && TREE_CODE (rhs2) == INTEGER_CST)
2515 /* For integers, we can have cond2 x == 5
2516 and cond1 x < 5, x <= 4, x <= 5, x < 6,
2517 x > 5, x >= 6, x >= 5 or x > 4. */
2518 if (tree_int_cst_lt (rhs1, rhs2))
2520 if (wi::ne_p (wi::to_wide (rhs1) + 1, wi::to_wide (rhs2)))
2521 return false;
2522 if (cmp1 == LE_EXPR)
2523 cmp1 = LT_EXPR;
2524 else if (cmp1 == GT_EXPR)
2525 cmp1 = GE_EXPR;
2526 else
2527 return false;
2529 else
2531 gcc_checking_assert (tree_int_cst_lt (rhs2, rhs1));
2532 if (wi::ne_p (wi::to_wide (rhs2) + 1, wi::to_wide (rhs1)))
2533 return false;
2534 if (cmp1 == LT_EXPR)
2535 cmp1 = LE_EXPR;
2536 else if (cmp1 == GE_EXPR)
2537 cmp1 = GT_EXPR;
2538 else
2539 return false;
2541 rhs1 = rhs2;
2543 else
2544 return false;
2547 else if (lhs2 == rhs1)
2549 if (rhs2 != lhs1)
2550 return false;
2552 else
2553 return false;
2555 tree arg3 = arg2;
2556 basic_block cond3_bb = cond2_bb;
2557 edge cond3_phi_edge = cond2_phi_edge;
2558 gcond *cond3 = cond2;
2559 enum tree_code cmp3 = cmp2;
2560 tree lhs3 = lhs2;
2561 tree rhs3 = rhs2;
2562 if (EDGE_COUNT (phi_bb->preds) == 4)
2564 if (absu_hwi (tree_to_shwi (arg2)) != 1)
2565 return false;
2566 if (e1->flags & EDGE_TRUE_VALUE)
2568 if (tree_to_shwi (arg0) != 2
2569 || absu_hwi (tree_to_shwi (arg1)) != 1
2570 || wi::to_widest (arg1) == wi::to_widest (arg2))
2571 return false;
2573 else if (tree_to_shwi (arg1) != 2
2574 || absu_hwi (tree_to_shwi (arg0)) != 1
2575 || wi::to_widest (arg0) == wi::to_widest (arg2))
2576 return false;
2577 switch (cmp2)
2579 case LT_EXPR:
2580 case LE_EXPR:
2581 case GT_EXPR:
2582 case GE_EXPR:
2583 break;
2584 default:
2585 return false;
2587 /* if (x < y) goto phi_bb; else fallthru;
2588 if (x > y) goto phi_bb; else fallthru;
2589 bbx:;
2590 phi_bb:;
2591 is ok, but if x and y are swapped in one of the comparisons,
2592 or the comparisons are the same and operands not swapped,
2593 or the true and false edges are swapped, it is not. */
2594 if ((lhs2 == lhs1)
2595 ^ (((cond2_phi_edge->flags
2596 & ((cmp2 == LT_EXPR || cmp2 == LE_EXPR)
2597 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)
2598 != ((e1->flags
2599 & ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2600 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)))
2601 return false;
2602 if (!single_pred_p (cond2_bb) || !cond_only_block_p (cond2_bb))
2603 return false;
2604 cond3_bb = single_pred (cond2_bb);
2605 if (EDGE_COUNT (cond3_bb->succs) != 2)
2606 return false;
2607 if (EDGE_SUCC (cond3_bb, 0)->dest == cond2_bb)
2609 if (EDGE_SUCC (cond3_bb, 1)->dest != phi_bb)
2610 return false;
2611 cond3_phi_edge = EDGE_SUCC (cond3_bb, 1);
2613 else if (EDGE_SUCC (cond3_bb, 0)->dest != phi_bb)
2614 return false;
2615 else
2616 cond3_phi_edge = EDGE_SUCC (cond3_bb, 0);
2617 arg3 = gimple_phi_arg_def (phi, cond3_phi_edge->dest_idx);
2618 cond3 = safe_dyn_cast <gcond *> (*gsi_last_bb (cond3_bb));
2619 if (!cond3)
2620 return false;
2621 cmp3 = gimple_cond_code (cond3);
2622 lhs3 = gimple_cond_lhs (cond3);
2623 rhs3 = gimple_cond_rhs (cond3);
2624 if (lhs3 == lhs1)
2626 if (!operand_equal_p (rhs3, rhs1, 0))
2627 return false;
2629 else if (lhs3 == rhs1)
2631 if (rhs3 != lhs1)
2632 return false;
2634 else
2635 return false;
2637 else if (absu_hwi (tree_to_shwi (arg0)) != 1
2638 || absu_hwi (tree_to_shwi (arg1)) != 1
2639 || wi::to_widest (arg0) == wi::to_widest (arg1))
2640 return false;
2642 if (!integer_zerop (arg3) || (cmp3 != EQ_EXPR && cmp3 != NE_EXPR))
2643 return false;
2644 if ((cond3_phi_edge->flags & (cmp3 == EQ_EXPR
2645 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) == 0)
2646 return false;
2648 /* lhs1 one_cmp rhs1 results in phires of 1. */
2649 enum tree_code one_cmp;
2650 if ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2651 ^ (!integer_onep ((e1->flags & EDGE_TRUE_VALUE) ? arg1 : arg0)))
2652 one_cmp = LT_EXPR;
2653 else
2654 one_cmp = GT_EXPR;
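/* For instance, if phires is 1 exactly when lhs1 > rhs1 (one_cmp is
   GT_EXPR), then a use phires < 0 must become lhs1 < rhs1 and
   phires >= 0 must become lhs1 >= rhs1; the switch below derives
   res_cmp case by case along those lines.  */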
2656 enum tree_code res_cmp;
2657 switch (cmp)
2659 case EQ_EXPR:
2660 if (integer_zerop (rhs))
2661 res_cmp = EQ_EXPR;
2662 else if (integer_minus_onep (rhs))
2663 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2664 else if (integer_onep (rhs))
2665 res_cmp = one_cmp;
2666 else
2667 return false;
2668 break;
2669 case NE_EXPR:
2670 if (integer_zerop (rhs))
2671 res_cmp = NE_EXPR;
2672 else if (integer_minus_onep (rhs))
2673 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2674 else if (integer_onep (rhs))
2675 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2676 else
2677 return false;
2678 break;
2679 case LT_EXPR:
2680 if (integer_onep (rhs))
2681 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2682 else if (integer_zerop (rhs))
2683 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2684 else
2685 return false;
2686 break;
2687 case LE_EXPR:
2688 if (integer_zerop (rhs))
2689 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2690 else if (integer_minus_onep (rhs))
2691 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2692 else
2693 return false;
2694 break;
2695 case GT_EXPR:
2696 if (integer_minus_onep (rhs))
2697 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2698 else if (integer_zerop (rhs))
2699 res_cmp = one_cmp;
2700 else
2701 return false;
2702 break;
2703 case GE_EXPR:
2704 if (integer_zerop (rhs))
2705 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2706 else if (integer_onep (rhs))
2707 res_cmp = one_cmp;
2708 else
2709 return false;
2710 break;
2711 default:
2712 gcc_unreachable ();
2715 if (gimple_code (use_stmt) == GIMPLE_COND)
2717 gcond *use_cond = as_a <gcond *> (use_stmt);
2718 gimple_cond_set_code (use_cond, res_cmp);
2719 gimple_cond_set_lhs (use_cond, lhs1);
2720 gimple_cond_set_rhs (use_cond, rhs1);
2722 else if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2724 gimple_assign_set_rhs_code (use_stmt, res_cmp);
2725 gimple_assign_set_rhs1 (use_stmt, lhs1);
2726 gimple_assign_set_rhs2 (use_stmt, rhs1);
2728 else
2730 tree cond = build2 (res_cmp, TREE_TYPE (gimple_assign_rhs1 (use_stmt)),
2731 lhs1, rhs1);
2732 gimple_assign_set_rhs1 (use_stmt, cond);
2734 update_stmt (use_stmt);
2736 if (MAY_HAVE_DEBUG_BIND_STMTS)
2738 use_operand_p use_p;
2739 imm_use_iterator iter;
2740 bool has_debug_uses = false;
2741 bool has_cast_debug_uses = false;
2742 FOR_EACH_IMM_USE_FAST (use_p, iter, phires)
2744 gimple *use_stmt = USE_STMT (use_p);
2745 if (orig_use_lhs && use_stmt == orig_use_stmt)
2746 continue;
2747 gcc_assert (is_gimple_debug (use_stmt));
2748 has_debug_uses = true;
2749 break;
2751 if (orig_use_lhs)
2753 if (!has_debug_uses || is_cast)
2754 FOR_EACH_IMM_USE_FAST (use_p, iter, orig_use_lhs)
2756 gimple *use_stmt = USE_STMT (use_p);
2757 gcc_assert (is_gimple_debug (use_stmt));
2758 has_debug_uses = true;
2759 if (is_cast)
2760 has_cast_debug_uses = true;
2762 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2763 tree zero = build_zero_cst (TREE_TYPE (orig_use_lhs));
2764 gimple_assign_set_rhs_with_ops (&gsi, INTEGER_CST, zero);
2765 update_stmt (orig_use_stmt);
2768 if (has_debug_uses)
2770 /* If there are debug uses, emit something like:
2771 # DEBUG D#1 => i_2(D) > j_3(D) ? 1 : -1
2772 # DEBUG D#2 => i_2(D) == j_3(D) ? 0 : D#1
2773 where > stands for the comparison that yielded 1
2774 and replace debug uses of phi result with that D#2.
2775 Ignore the value of 2, because if NaNs aren't expected,
2776 all floating point numbers should be comparable. */
2777 gimple_stmt_iterator gsi = gsi_after_labels (gimple_bb (phi));
2778 tree type = TREE_TYPE (phires);
2779 tree temp1 = build_debug_expr_decl (type);
2780 tree t = build2 (one_cmp, boolean_type_node, lhs1, rhs1);
2781 t = build3 (COND_EXPR, type, t, build_one_cst (type),
2782 build_int_cst (type, -1));
2783 gimple *g = gimple_build_debug_bind (temp1, t, phi);
2784 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2785 tree temp2 = build_debug_expr_decl (type);
2786 t = build2 (EQ_EXPR, boolean_type_node, lhs1, rhs1);
2787 t = build3 (COND_EXPR, type, t, build_zero_cst (type), temp1);
2788 g = gimple_build_debug_bind (temp2, t, phi);
2789 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2790 replace_uses_by (phires, temp2);
2791 if (orig_use_lhs)
2793 if (has_cast_debug_uses)
2795 tree temp3 = make_node (DEBUG_EXPR_DECL);
2796 DECL_ARTIFICIAL (temp3) = 1;
2797 TREE_TYPE (temp3) = TREE_TYPE (orig_use_lhs);
2798 SET_DECL_MODE (temp3, TYPE_MODE (type));
2799 t = fold_convert (TREE_TYPE (temp3), temp2);
2800 g = gimple_build_debug_bind (temp3, t, phi);
2801 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2802 replace_uses_by (orig_use_lhs, temp3);
2804 else
2805 replace_uses_by (orig_use_lhs, temp2);
2810 if (orig_use_lhs)
2812 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2813 gsi_remove (&gsi, true);
2816 gimple_stmt_iterator psi = gsi_for_stmt (phi);
2817 remove_phi_node (&psi, true);
2818 statistics_counter_event (cfun, "spaceship replacement", 1);
2820 return true;
2823 /* Optimize x ? __builtin_fun (x) : C, where C is __builtin_fun (0).
2824 Convert
2826 <bb 2>
2827 if (b_4(D) != 0)
2828 goto <bb 3>
2829 else
2830 goto <bb 4>
2832 <bb 3>
2833 _2 = (unsigned long) b_4(D);
2834 _9 = __builtin_popcountl (_2);
2836 _9 = __builtin_popcountl (b_4(D));
2838 <bb 4>
2839 c_12 = PHI <0(2), _9(3)>
2841 Into
2842 <bb 2>
2843 _2 = (unsigned long) b_4(D);
2844 _9 = __builtin_popcountl (_2);
2846 _9 = __builtin_popcountl (b_4(D));
2848 <bb 4>
2849 c_12 = PHI <_9(2)>
2851 Similarly for __builtin_clz or __builtin_ctz if
2852 C?Z_DEFINED_VALUE_AT_ZERO is 2 and the optab is present;
2853 instead of 0 above it uses the value from that macro. */
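/* A source-level sketch of what this matches (hypothetical code):

     int f (unsigned long b) { return b ? __builtin_popcountl (b) : 0; }

   Since __builtin_popcountl (0) == 0, the conditional around the call
   is redundant and the call can be made unconditional.  */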
2855 static bool
2856 cond_removal_in_builtin_zero_pattern (basic_block cond_bb,
2857 basic_block middle_bb,
2858 edge e1, edge e2, gphi *phi,
2859 tree arg0, tree arg1)
2861 gimple_stmt_iterator gsi, gsi_from;
2862 gimple *call;
2863 gimple *cast = NULL;
2864 tree lhs, arg;
2866 /* Check that
2867 _2 = (unsigned long) b_4(D);
2868 _9 = __builtin_popcountl (_2);
2870 _9 = __builtin_popcountl (b_4(D));
2871 are the only stmts in the middle_bb. */
2873 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
2874 if (gsi_end_p (gsi))
2875 return false;
2876 cast = gsi_stmt (gsi);
2877 gsi_next_nondebug (&gsi);
2878 if (!gsi_end_p (gsi))
2880 call = gsi_stmt (gsi);
2881 gsi_next_nondebug (&gsi);
2882 if (!gsi_end_p (gsi))
2883 return false;
2885 else
2887 call = cast;
2888 cast = NULL;
2891 /* Check that we have a popcount/clz/ctz builtin. */
2892 if (!is_gimple_call (call))
2893 return false;
2895 lhs = gimple_get_lhs (call);
2897 if (lhs == NULL_TREE)
2898 return false;
2900 combined_fn cfn = gimple_call_combined_fn (call);
2901 if (gimple_call_num_args (call) != 1
2902 && (gimple_call_num_args (call) != 2
2903 || (cfn != CFN_CLZ
2904 && cfn != CFN_CTZ)))
2905 return false;
2907 arg = gimple_call_arg (call, 0);
2909 internal_fn ifn = IFN_LAST;
2910 int val = 0;
2911 bool any_val = false;
2912 switch (cfn)
2914 case CFN_BUILT_IN_BSWAP16:
2915 case CFN_BUILT_IN_BSWAP32:
2916 case CFN_BUILT_IN_BSWAP64:
2917 case CFN_BUILT_IN_BSWAP128:
2918 CASE_CFN_FFS:
2919 CASE_CFN_PARITY:
2920 CASE_CFN_POPCOUNT:
2921 break;
2922 CASE_CFN_CLZ:
2923 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2925 tree type = TREE_TYPE (arg);
2926 if (TREE_CODE (type) == BITINT_TYPE)
2928 if (gimple_call_num_args (call) == 1)
2930 any_val = true;
2931 ifn = IFN_CLZ;
2932 break;
2934 if (!tree_fits_shwi_p (gimple_call_arg (call, 1)))
2935 return false;
2936 HOST_WIDE_INT at_zero = tree_to_shwi (gimple_call_arg (call, 1));
2937 if ((int) at_zero != at_zero)
2938 return false;
2939 ifn = IFN_CLZ;
2940 val = at_zero;
2941 break;
2943 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
2944 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2945 val) == 2)
2947 ifn = IFN_CLZ;
2948 break;
2951 return false;
2952 CASE_CFN_CTZ:
2953 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2955 tree type = TREE_TYPE (arg);
2956 if (TREE_CODE (type) == BITINT_TYPE)
2958 if (gimple_call_num_args (call) == 1)
2960 any_val = true;
2961 ifn = IFN_CTZ;
2962 break;
2964 if (!tree_fits_shwi_p (gimple_call_arg (call, 1)))
2965 return false;
2966 HOST_WIDE_INT at_zero = tree_to_shwi (gimple_call_arg (call, 1));
2967 if ((int) at_zero != at_zero)
2968 return false;
2969 ifn = IFN_CTZ;
2970 val = at_zero;
2971 break;
2973 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
2974 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2975 val) == 2)
2977 ifn = IFN_CTZ;
2978 break;
2981 return false;
2982 case CFN_BUILT_IN_CLRSB:
2983 val = TYPE_PRECISION (integer_type_node) - 1;
2984 break;
2985 case CFN_BUILT_IN_CLRSBL:
2986 val = TYPE_PRECISION (long_integer_type_node) - 1;
2987 break;
2988 case CFN_BUILT_IN_CLRSBLL:
2989 val = TYPE_PRECISION (long_long_integer_type_node) - 1;
2990 break;
2991 default:
2992 return false;
2995 if (cast)
2997 /* We have a cast stmt feeding popcount/clz/ctz builtin. */
2998 /* Check that we have a cast prior to that. */
2999 if (gimple_code (cast) != GIMPLE_ASSIGN
3000 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
3001 return false;
3002 /* Result of the cast stmt is the argument to the builtin. */
3003 if (arg != gimple_assign_lhs (cast))
3004 return false;
3005 arg = gimple_assign_rhs1 (cast);
3008 gcond *cond = dyn_cast <gcond *> (*gsi_last_bb (cond_bb));
3010 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
3011 builtin. */
3012 if (!cond
3013 || (gimple_cond_code (cond) != NE_EXPR
3014 && gimple_cond_code (cond) != EQ_EXPR)
3015 || !integer_zerop (gimple_cond_rhs (cond))
3016 || arg != gimple_cond_lhs (cond))
3017 return false;
3019 /* Canonicalize. */
3020 if ((e2->flags & EDGE_TRUE_VALUE
3021 && gimple_cond_code (cond) == NE_EXPR)
3022 || (e1->flags & EDGE_TRUE_VALUE
3023 && gimple_cond_code (cond) == EQ_EXPR))
3025 std::swap (arg0, arg1);
3026 std::swap (e1, e2);
3029 /* Check PHI arguments. */
3030 if (lhs != arg0
3031 || TREE_CODE (arg1) != INTEGER_CST)
3032 return false;
3033 if (any_val)
3035 if (!tree_fits_shwi_p (arg1))
3036 return false;
3037 HOST_WIDE_INT at_zero = tree_to_shwi (arg1);
3038 if ((int) at_zero != at_zero)
3039 return false;
3040 val = at_zero;
3042 else if (wi::to_wide (arg1) != val)
3043 return false;
3045 /* And insert the popcount/clz/ctz builtin and cast stmt before the
3046 cond_bb. */
3047 gsi = gsi_last_bb (cond_bb);
3048 if (cast)
3050 gsi_from = gsi_for_stmt (cast);
3051 gsi_move_before (&gsi_from, &gsi);
3052 reset_flow_sensitive_info (gimple_get_lhs (cast));
3054 gsi_from = gsi_for_stmt (call);
3055 if (ifn == IFN_LAST
3056 || (gimple_call_internal_p (call) && gimple_call_num_args (call) == 2))
3057 gsi_move_before (&gsi_from, &gsi);
3058 else
3060 /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
3061 the latter is well defined at zero. */
3062 call = gimple_build_call_internal (ifn, 2, gimple_call_arg (call, 0),
3063 build_int_cst (integer_type_node, val));
3064 gimple_call_set_lhs (call, lhs);
3065 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
3066 gsi_remove (&gsi_from, true);
3068 reset_flow_sensitive_info (lhs);
3070 /* Now update the PHI and remove unneeded bbs. */
3071 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
3072 return true;
3075 /* Auxiliary functions to determine the set of memory accesses which
3076 can't trap because they are preceded by accesses to the same memory
3077 portion. We do that for MEM_REFs, so we only need to track
3078 the SSA_NAME of the pointer indirectly referenced. The algorithm
3079 simply is a walk over all instructions in dominator order. When
3080 we see a MEM_REF we determine whether we've already seen the same
3081 ref anywhere up to the root of the dominator tree. If we have, the
3082 current access can't trap. If we don't see any dominating access,
3083 the current access might trap, but it might also make later accesses
3084 non-trapping, so we remember it. We need to be careful with loads
3085 and stores: for instance, a load might not trap while a store would,
3086 so if we see a dominating read access this doesn't mean that a later
3087 write access would not trap. Hence we also need to differentiate the
3088 type of access(es) seen.
3090 ??? We currently are very conservative and assume that a load might
3091 trap even if a store doesn't (write-only memory). This probably is
3092 overly conservative.
3094 As a special case, for !TREE_ADDRESSABLE automatic
3095 variables we ignore whether an access is a load or a store, because
3096 the local stack is always writable. */
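/* For instance (a sketch), in

     *p = 1;        // might trap
     if (cond)
       tmp = *p;    // can't trap: a store to *p dominates it

   every path to the load performs an access to the same location
   first, so the load itself can no longer trap.  */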
3098 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
3099 basic block an *_REF through it was seen, which would constitute a
3100 no-trap region for the same accesses.
3102 Size is needed to support 2 MEM_REFs of different types, like
3103 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
3104 OEP_ADDRESS_OF. */
3105 struct ref_to_bb
3107 tree exp;
3108 HOST_WIDE_INT size;
3109 unsigned int phase;
3110 basic_block bb;
3113 /* Hashtable helpers. */
3115 struct refs_hasher : free_ptr_hash<ref_to_bb>
3117 static inline hashval_t hash (const ref_to_bb *);
3118 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
3121 /* Used for quick clearing of the hash-table when we see calls.
3122 Hash entries with phase < nt_call_phase are invalid. */
3123 static unsigned int nt_call_phase;
3125 /* The hash function. */
3127 inline hashval_t
3128 refs_hasher::hash (const ref_to_bb *n)
3130 inchash::hash hstate;
3131 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
3132 hstate.add_hwi (n->size);
3133 return hstate.end ();
3136 /* The equality function of *P1 and *P2. */
3138 inline bool
3139 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
3141 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
3142 && n1->size == n2->size;
3145 class nontrapping_dom_walker : public dom_walker
3147 public:
3148 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
3149 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
3152 edge before_dom_children (basic_block) final override;
3153 void after_dom_children (basic_block) final override;
3155 private:
3157 /* We see the expression EXP in basic block BB. If it's an interesting
3158 expression (a MEM_REF through an SSA_NAME) possibly insert the
3159 expression into the set NONTRAP or the hash table of seen expressions.
3160 STORE is true if this expression is on the LHS, otherwise it's on
3161 the RHS. */
3162 void add_or_mark_expr (basic_block, tree, bool);
3164 hash_set<tree> *m_nontrapping;
3166 /* The hash table for remembering what we've seen. */
3167 hash_table<refs_hasher> m_seen_refs;
3170 /* Called by walk_dominator_tree, when entering the block BB. */
3171 edge
3172 nontrapping_dom_walker::before_dom_children (basic_block bb)
3174 edge e;
3175 edge_iterator ei;
3176 gimple_stmt_iterator gsi;
3178 /* If we haven't seen all our predecessors, clear the hash-table. */
3179 FOR_EACH_EDGE (e, ei, bb->preds)
3180 if ((((size_t)e->src->aux) & 2) == 0)
3182 nt_call_phase++;
3183 break;
3186 /* Mark this BB as being on the path to dominator root and as visited. */
3187 bb->aux = (void*)(1 | 2);
3189 /* And walk the statements in order. */
3190 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3192 gimple *stmt = gsi_stmt (gsi);
3194 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
3195 || (is_gimple_call (stmt)
3196 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
3197 nt_call_phase++;
3198 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
3200 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
3201 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
3204 return NULL;
3207 /* Called by walk_dominator_tree, when basic block BB is exited. */
3208 void
3209 nontrapping_dom_walker::after_dom_children (basic_block bb)
3211 /* This BB isn't on the path to dominator root anymore. */
3212 bb->aux = (void*)2;
3215 /* We see the expression EXP in basic block BB. If it's an interesting
3216 expression of:
3217 1) MEM_REF
3218 2) ARRAY_REF
3219 3) COMPONENT_REF
3220 possibly insert the expression into the set NONTRAP or the hash table
3221 of seen expressions. STORE is true if this expression is on the LHS,
3222 otherwise it's on the RHS. */
3223 void
3224 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
3226 HOST_WIDE_INT size;
3228 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
3229 || TREE_CODE (exp) == COMPONENT_REF)
3230 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
3232 struct ref_to_bb map;
3233 ref_to_bb **slot;
3234 struct ref_to_bb *r2bb;
3235 basic_block found_bb = 0;
3237 if (!store)
3239 tree base = get_base_address (exp);
3240 /* Only record a LOAD of a local variable whose address is not taken,
3241 as the local stack is always writable. This allows cselim on a STORE
3242 with a dominating LOAD. */
3243 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
3244 return;
3247 /* Try to find the last seen *_REF, which can trap. */
3248 map.exp = exp;
3249 map.size = size;
3250 slot = m_seen_refs.find_slot (&map, INSERT);
3251 r2bb = *slot;
3252 if (r2bb && r2bb->phase >= nt_call_phase)
3253 found_bb = r2bb->bb;
3255 /* If we've found a trapping *_REF, _and_ it dominates EXP
3256 (it's in a basic block on the path from us to the dominator root)
3257 then we can't trap. */
3258 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
3260 m_nontrapping->add (exp);
3262 else
3264 /* EXP might trap, so insert it into the hash table. */
3265 if (r2bb)
3267 r2bb->phase = nt_call_phase;
3268 r2bb->bb = bb;
3270 else
3272 r2bb = XNEW (struct ref_to_bb);
3273 r2bb->phase = nt_call_phase;
3274 r2bb->bb = bb;
3275 r2bb->exp = exp;
3276 r2bb->size = size;
3277 *slot = r2bb;
3283 /* This is the entry point for gathering non-trapping memory accesses.
3284 It does a dominator walk over the whole function, making use of
3285 the bb->aux pointers. It returns a set of trees
3286 (the MEM_REFs themselves) which can't trap. */
3287 static hash_set<tree> *
3288 get_non_trapping (void)
3290 nt_call_phase = 0;
3291 hash_set<tree> *nontrap = new hash_set<tree>;
3293 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
3294 .walk (cfun->cfg->x_entry_block_ptr);
3296 clear_aux_for_blocks ();
3297 return nontrap;
3300 /* Do the main work of conditional store replacement. We already know
3301 that the recognized pattern looks like so:
3303 split:
3304 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
3305 MIDDLE_BB:
3306 something
3307 fallthrough (edge E0)
3308 JOIN_BB:
3309 some more
3311 We check that MIDDLE_BB contains only one store, that that store
3312 doesn't trap (not via NOTRAP, but via checking if an access to the same
3313 memory location dominates us, or the store is to a local non-addressable
3314 object) and that the store has a "simple" RHS. */
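/* A sketch of the transformation at the source level (illustrative
   only):

     if (cond)
       *p = x;

   becomes

     tmp = cond ? x : *p;
     *p = tmp;

   i.e. the conditional store is replaced by an unconditional one fed
   by a PHI, which is only safe under the conditions spelled out
   above.  */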
3316 static bool
3317 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
3318 edge e0, edge e1, hash_set<tree> *nontrap)
3320 gimple *assign = last_and_only_stmt (middle_bb);
3321 tree lhs, rhs, name, name2;
3322 gphi *newphi;
3323 gassign *new_stmt;
3324 gimple_stmt_iterator gsi;
3325 location_t locus;
3327 /* Check if middle_bb contains only one store. */
3328 if (!assign
3329 || !gimple_assign_single_p (assign)
3330 || gimple_has_volatile_ops (assign))
3331 return false;
3333 /* And no PHI nodes so all uses in the single stmt are also
3334 available where we insert to. */
3335 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
3336 return false;
3338 locus = gimple_location (assign);
3339 lhs = gimple_assign_lhs (assign);
3340 rhs = gimple_assign_rhs1 (assign);
3341 if ((!REFERENCE_CLASS_P (lhs)
3342 && !DECL_P (lhs))
3343 || !is_gimple_reg_type (TREE_TYPE (lhs)))
3344 return false;
3346 /* Prove that we can move the store down. We could also check
3347 TREE_THIS_NOTRAP here, but in that case we also could move stores,
3348 whose value is not available readily, which we want to avoid. */
3349 if (!nontrap->contains (lhs))
3351 /* If LHS is an access to a local variable whose address is not
3352 taken (or when we allow data races) and is known not to trap, we
3353 can always safely move the store down. */
3354 if (ref_can_have_store_data_races (lhs)
3355 || tree_could_trap_p (lhs))
3356 return false;
3359 /* Now we've checked the constraints, so do the transformation:
3360 1) Remove the single store. */
3361 gsi = gsi_for_stmt (assign);
3362 unlink_stmt_vdef (assign);
3363 gsi_remove (&gsi, true);
3364 release_defs (assign);
3366 /* Make both store and load use alias-set zero as we have to
3367 deal with the case of the store being a conditional change
3368 of the dynamic type. */
3369 lhs = unshare_expr (lhs);
3370 tree *basep = &lhs;
3371 while (handled_component_p (*basep))
3372 basep = &TREE_OPERAND (*basep, 0);
3373 if (TREE_CODE (*basep) == MEM_REF
3374 || TREE_CODE (*basep) == TARGET_MEM_REF)
3375 TREE_OPERAND (*basep, 1)
3376 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
3377 else
3378 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
3379 build_fold_addr_expr (*basep),
3380 build_zero_cst (ptr_type_node));
3382 /* 2) Insert a load from the memory of the store to the temporary
3383 on the edge which did not contain the store. */
3384 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3385 new_stmt = gimple_build_assign (name, lhs);
3386 gimple_set_location (new_stmt, locus);
3387 lhs = unshare_expr (lhs);
3389 /* Set the no-warning bit on the rhs of the load to avoid uninit
3390 warnings. */
3391 tree rhs1 = gimple_assign_rhs1 (new_stmt);
3392 suppress_warning (rhs1, OPT_Wuninitialized);
3394 gsi_insert_on_edge (e1, new_stmt);
3396 /* 3) Create a PHI node at the join block, with one argument
3397 holding the old RHS, and the other holding the temporary
3398 where we stored the old memory contents. */
3399 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3400 newphi = create_phi_node (name2, join_bb);
3401 add_phi_arg (newphi, rhs, e0, locus);
3402 add_phi_arg (newphi, name, e1, locus);
3404 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3406 /* 4) Insert that PHI node. */
3407 gsi = gsi_after_labels (join_bb);
3408 if (gsi_end_p (gsi))
3410 gsi = gsi_last_bb (join_bb);
3411 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3413 else
3414 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3416 if (dump_file && (dump_flags & TDF_DETAILS))
3418 fprintf (dump_file, "\nConditional store replacement happened!");
3419 fprintf (dump_file, "\nReplaced the store with a load.");
3420 fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
3421 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
3423 statistics_counter_event (cfun, "conditional store replacement", 1);
3425 return true;
3428 /* Do the main work of conditional store replacement. */
3430 static bool
3431 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
3432 basic_block join_bb, gimple *then_assign,
3433 gimple *else_assign)
3435 tree lhs_base, lhs, then_rhs, else_rhs, name;
3436 location_t then_locus, else_locus;
3437 gimple_stmt_iterator gsi;
3438 gphi *newphi;
3439 gassign *new_stmt;
3441 if (then_assign == NULL
3442 || !gimple_assign_single_p (then_assign)
3443 || gimple_clobber_p (then_assign)
3444 || gimple_has_volatile_ops (then_assign)
3445 || else_assign == NULL
3446 || !gimple_assign_single_p (else_assign)
3447 || gimple_clobber_p (else_assign)
3448 || gimple_has_volatile_ops (else_assign))
3449 return false;
3451 lhs = gimple_assign_lhs (then_assign);
3452 if (!is_gimple_reg_type (TREE_TYPE (lhs))
3453 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
3454 return false;
3456 lhs_base = get_base_address (lhs);
3457 if (lhs_base == NULL_TREE
3458 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
3459 return false;
3461 then_rhs = gimple_assign_rhs1 (then_assign);
3462 else_rhs = gimple_assign_rhs1 (else_assign);
3463 then_locus = gimple_location (then_assign);
3464 else_locus = gimple_location (else_assign);
3466 /* Now we've checked the constraints, so do the transformation:
3467 1) Remove the stores. */
3468 gsi = gsi_for_stmt (then_assign);
3469 unlink_stmt_vdef (then_assign);
3470 gsi_remove (&gsi, true);
3471 release_defs (then_assign);
3473 gsi = gsi_for_stmt (else_assign);
3474 unlink_stmt_vdef (else_assign);
3475 gsi_remove (&gsi, true);
3476 release_defs (else_assign);
3478 /* 2) Create a PHI node at the join block, with one argument
3479 holding the RHS of the then-store and the other holding
3480 the RHS of the else-store. */
3481 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3482 newphi = create_phi_node (name, join_bb);
3483 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
3484 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
3486 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3488 /* 3) Insert that PHI node. */
3489 gsi = gsi_after_labels (join_bb);
3490 if (gsi_end_p (gsi))
3492 gsi = gsi_last_bb (join_bb);
3493 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3495 else
3496 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3498 statistics_counter_event (cfun, "if-then-else store replacement", 1);
3500 return true;
3503 /* Return the single store in BB with VDEF or NULL if there are
3504 other stores in the BB or loads following the store. */
3506 static gimple *
3507 single_trailing_store_in_bb (basic_block bb, tree vdef)
3509 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
3510 return NULL;
3511 gimple *store = SSA_NAME_DEF_STMT (vdef);
3512 if (gimple_bb (store) != bb
3513 || gimple_code (store) == GIMPLE_PHI)
3514 return NULL;
3516 /* Verify there is no other store in this BB. */
3517 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
3518 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
3519 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
3520 return NULL;
3522 /* Verify there is no load or store after the store. */
3523 use_operand_p use_p;
3524 imm_use_iterator imm_iter;
3525 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
3526 if (USE_STMT (use_p) != store
3527 && gimple_bb (USE_STMT (use_p)) == bb)
3528 return NULL;
3530 return store;
3533 /* Conditional store replacement. We already know
3534 that the recognized pattern looks like so:
3536 split:
3537 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
3538 THEN_BB:
3540 X = Y;
3542 goto JOIN_BB;
3543 ELSE_BB:
3545 X = Z;
3547 fallthrough (edge E0)
3548 JOIN_BB:
3549 some more
3551 We check that it is safe to sink the store to JOIN_BB by verifying that
3552 there are no read-after-write or write-after-write dependencies in
3553 THEN_BB and ELSE_BB. */
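/* A source-level sketch (illustrative only):

     if (cond)
       x.f = y;    // THEN_BB
     else
       x.f = z;    // ELSE_BB

   becomes a single store in JOIN_BB fed by a PHI:

     tmp = cond ? y : z;
     x.f = tmp;  */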
3555 static bool
3556 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
3557 basic_block join_bb)
3559 vec<data_reference_p> then_datarefs, else_datarefs;
3560 vec<ddr_p> then_ddrs, else_ddrs;
3561 gimple *then_store, *else_store;
3562 bool found, ok = false, res;
3563 struct data_dependence_relation *ddr;
3564 data_reference_p then_dr, else_dr;
3565 int i, j;
3566 tree then_lhs, else_lhs;
3567 basic_block blocks[3];
3569 /* Handle the case with single store in THEN_BB and ELSE_BB. That is
3570 cheap enough to always handle as it allows us to elide dependence
3571 checking. */
3572 gphi *vphi = NULL;
3573 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
3574 gsi_next (&si))
3575 if (virtual_operand_p (gimple_phi_result (si.phi ())))
3577 vphi = si.phi ();
3578 break;
3580 if (!vphi)
3581 return false;
3582 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
3583 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
3584 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
3585 if (then_assign)
3587 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
3588 if (else_assign)
3589 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3590 then_assign, else_assign);
3593 /* If either vectorization or if-conversion is disabled then do
3594 not sink any stores. */
3595 if (param_max_stores_to_sink == 0
3596 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
3597 || !flag_tree_loop_if_convert)
3598 return false;
3600 /* Find data references. */
3601 then_datarefs.create (1);
3602 else_datarefs.create (1);
3603 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
3604 == chrec_dont_know)
3605 || !then_datarefs.length ()
3606 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
3607 == chrec_dont_know)
3608 || !else_datarefs.length ())
3610 free_data_refs (then_datarefs);
3611 free_data_refs (else_datarefs);
3612 return false;
3615 /* Find pairs of stores with equal LHS. */
3616 auto_vec<gimple *, 1> then_stores, else_stores;
3617 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
3619 if (DR_IS_READ (then_dr))
3620 continue;
3622 then_store = DR_STMT (then_dr);
3623 then_lhs = gimple_get_lhs (then_store);
3624 if (then_lhs == NULL_TREE)
3625 continue;
3626 found = false;
3628 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
3630 if (DR_IS_READ (else_dr))
3631 continue;
3633 else_store = DR_STMT (else_dr);
3634 else_lhs = gimple_get_lhs (else_store);
3635 if (else_lhs == NULL_TREE)
3636 continue;
3638 if (operand_equal_p (then_lhs, else_lhs, 0))
3640 found = true;
3641 break;
3645 if (!found)
3646 continue;
3648 then_stores.safe_push (then_store);
3649 else_stores.safe_push (else_store);
3652 /* No pairs of stores found. */
3653 if (!then_stores.length ()
3654 || then_stores.length () > (unsigned) param_max_stores_to_sink)
3656 free_data_refs (then_datarefs);
3657 free_data_refs (else_datarefs);
3658 return false;
3661 /* Compute and check data dependencies in both basic blocks. */
3662 then_ddrs.create (1);
3663 else_ddrs.create (1);
3664 if (!compute_all_dependences (then_datarefs, &then_ddrs,
3665 vNULL, false)
3666 || !compute_all_dependences (else_datarefs, &else_ddrs,
3667 vNULL, false))
3669 free_dependence_relations (then_ddrs);
3670 free_dependence_relations (else_ddrs);
3671 free_data_refs (then_datarefs);
3672 free_data_refs (else_datarefs);
3673 return false;
3675 blocks[0] = then_bb;
3676 blocks[1] = else_bb;
3677 blocks[2] = join_bb;
3678 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
3680 /* Check that there are no read-after-write or write-after-write dependencies
3681 in THEN_BB. */
3682 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
3684 struct data_reference *dra = DDR_A (ddr);
3685 struct data_reference *drb = DDR_B (ddr);
3687 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3688 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3689 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3690 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3691 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3692 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3694 free_dependence_relations (then_ddrs);
3695 free_dependence_relations (else_ddrs);
3696 free_data_refs (then_datarefs);
3697 free_data_refs (else_datarefs);
3698 return false;
3702 /* Check that there are no read-after-write or write-after-write dependencies
3703 in ELSE_BB. */
3704 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
3706 struct data_reference *dra = DDR_A (ddr);
3707 struct data_reference *drb = DDR_B (ddr);
3709 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3710 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3711 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3712 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3713 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3714 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3716 free_dependence_relations (then_ddrs);
3717 free_dependence_relations (else_ddrs);
3718 free_data_refs (then_datarefs);
3719 free_data_refs (else_datarefs);
3720 return false;
3724 /* Sink stores with same LHS. */
3725 FOR_EACH_VEC_ELT (then_stores, i, then_store)
3727 else_store = else_stores[i];
3728 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3729 then_store, else_store);
3730 ok = ok || res;
3733 free_dependence_relations (then_ddrs);
3734 free_dependence_relations (else_ddrs);
3735 free_data_refs (then_datarefs);
3736 free_data_refs (else_datarefs);
3738 return ok;
3741 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
3743 static bool
3744 local_mem_dependence (gimple *stmt, basic_block bb)
3746 tree vuse = gimple_vuse (stmt);
3747 gimple *def;
3749 if (!vuse)
3750 return false;
3752 def = SSA_NAME_DEF_STMT (vuse);
3753 return (def && gimple_bb (def) == bb);
3756 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
3757 BB1 and BB2 are "then" and "else" blocks dependent on this test,
3758 and BB3 rejoins control flow following BB1 and BB2, look for
3759 opportunities to hoist loads as follows. If BB3 contains a PHI of
3760 two loads, one each occurring in BB1 and BB2, and the loads are
3761 provably of adjacent fields in the same structure, then move both
3762 loads into BB0. Of course this can only be done if there are no
3763 dependencies preventing such motion.
3765 One of the hoisted loads will always be speculative, so the
3766 transformation is currently conservative:
3768 - The fields must be strictly adjacent.
3769 - The two fields must occupy a single memory block that is
3770 guaranteed to not cross a page boundary.
3772 The last is difficult to prove, as such memory blocks should be
3773 aligned on the minimum of the stack alignment boundary and the
3774 alignment guaranteed by heap allocation interfaces. Thus we rely
3775 on a parameter for the alignment value.
3777 Provided a good value is used for the last case, the first
3778 restriction could possibly be relaxed. */
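/* A sketch of the shape this looks for (illustrative only):

     struct S { int a; int b; };
     int f (struct S *s, int cond)
     {
       return cond ? s->a : s->b;   // loads in BB1/BB2, PHI in BB3
     }

   Hoisting both loads into BB0 lets the diamond become a conditional
   move; because the two fields are adjacent and fit within one
   aligned block (per the alignment parameter above), speculatively
   touching one field cannot fault if touching the other does not.  */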
3780 static void
3781 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
3782 basic_block bb2, basic_block bb3)
3784 unsigned HOST_WIDE_INT param_align = param_l1_cache_line_size;
3785 unsigned HOST_WIDE_INT param_align_bits = param_align * BITS_PER_UNIT;
3786 gphi_iterator gsi;
3788 /* Walk the phis in bb3 looking for an opportunity. We are looking
3789 for phis of two SSA names, one each of which is defined in bb1 and
3790 bb2. */
3791 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
3793 gphi *phi_stmt = gsi.phi ();
3794 gimple *def1, *def2;
3795 tree arg1, arg2, ref1, ref2, field1, field2;
3796 tree tree_offset1, tree_offset2, tree_size2, next;
3797 unsigned HOST_WIDE_INT offset1, offset2, size2, align1;
3798 gimple_stmt_iterator gsi2;
3799 basic_block bb_for_def1, bb_for_def2;
3801 if (gimple_phi_num_args (phi_stmt) != 2
3802 || virtual_operand_p (gimple_phi_result (phi_stmt)))
3803 continue;
3805 arg1 = gimple_phi_arg_def (phi_stmt, 0);
3806 arg2 = gimple_phi_arg_def (phi_stmt, 1);
3808 if (TREE_CODE (arg1) != SSA_NAME
3809 || TREE_CODE (arg2) != SSA_NAME
3810 || SSA_NAME_IS_DEFAULT_DEF (arg1)
3811 || SSA_NAME_IS_DEFAULT_DEF (arg2))
3812 continue;
3814 def1 = SSA_NAME_DEF_STMT (arg1);
3815 def2 = SSA_NAME_DEF_STMT (arg2);
3817 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
3818 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
3819 continue;
3821 /* Check the mode of the arguments to be sure a conditional move
3822 can be generated for it. */
3823 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
3824 == CODE_FOR_nothing)
3825 continue;
3827 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
3828 if (!gimple_assign_single_p (def1)
3829 || !gimple_assign_single_p (def2)
3830 || gimple_has_volatile_ops (def1)
3831 || gimple_has_volatile_ops (def2))
3832 continue;
3834 ref1 = gimple_assign_rhs1 (def1);
3835 ref2 = gimple_assign_rhs1 (def2);
3837 if (TREE_CODE (ref1) != COMPONENT_REF
3838 || TREE_CODE (ref2) != COMPONENT_REF)
3839 continue;
3841 /* The zeroth operand of the two component references must be
3842 identical. It is not sufficient to compare get_base_address of
3843 the two references, because this could allow for different
3844 elements of the same array in the two trees. It is not safe to
3845 assume that the existence of one array element implies the
3846 existence of a different one. */
3847 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
3848 continue;
3850 field1 = TREE_OPERAND (ref1, 1);
3851 field2 = TREE_OPERAND (ref2, 1);
3853 /* Check for field adjacency, and ensure field1 comes first. */
3854 for (next = DECL_CHAIN (field1);
3855 next && TREE_CODE (next) != FIELD_DECL;
3856 next = DECL_CHAIN (next))
3859 if (next != field2)
3861 for (next = DECL_CHAIN (field2);
3862 next && TREE_CODE (next) != FIELD_DECL;
3863 next = DECL_CHAIN (next))
3866 if (next != field1)
3867 continue;
3869 std::swap (field1, field2);
3870 std::swap (def1, def2);
3873 bb_for_def1 = gimple_bb (def1);
3874 bb_for_def2 = gimple_bb (def2);
3876 /* Check for proper alignment of the first field. */
3877 tree_offset1 = bit_position (field1);
3878 tree_offset2 = bit_position (field2);
3879 tree_size2 = DECL_SIZE (field2);
3881 if (!tree_fits_uhwi_p (tree_offset1)
3882 || !tree_fits_uhwi_p (tree_offset2)
3883 || !tree_fits_uhwi_p (tree_size2))
3884 continue;
3886 offset1 = tree_to_uhwi (tree_offset1);
3887 offset2 = tree_to_uhwi (tree_offset2);
3888 size2 = tree_to_uhwi (tree_size2);
3889 align1 = DECL_ALIGN (field1) % param_align_bits;
3891 if (offset1 % BITS_PER_UNIT != 0)
3892 continue;
3894 /* For profitability, the two field references should fit within
3895 a single cache line. */
3896 if (align1 + offset2 - offset1 + size2 > param_align_bits)
3897 continue;
3899 /* The two expressions cannot be dependent upon vdefs defined
3900 in bb1/bb2. */
3901 if (local_mem_dependence (def1, bb_for_def1)
3902 || local_mem_dependence (def2, bb_for_def2))
3903 continue;
3905 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
3906 bb0. We hoist the first one first so that a cache miss is handled
3907 efficiently regardless of hardware cache-fill policy. */
3908 gsi2 = gsi_for_stmt (def1);
3909 gsi_move_to_bb_end (&gsi2, bb0);
3910 gsi2 = gsi_for_stmt (def2);
3911 gsi_move_to_bb_end (&gsi2, bb0);
3912 statistics_counter_event (cfun, "hoisted loads", 1);
3914 if (dump_file && (dump_flags & TDF_DETAILS))
3916 fprintf (dump_file,
3917 "\nHoisting adjacent loads from %d and %d into %d: \n",
3918 bb_for_def1->index, bb_for_def2->index, bb0->index);
3919 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
3920 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
3925 /* Determine whether we should attempt to hoist adjacent loads out of
3926 diamond patterns in pass_phiopt. Always hoist loads if
3927 -fhoist-adjacent-loads is specified and the target machine has
3928 both a conditional move instruction and a defined cache line size. */
3930 static bool
3931 gate_hoist_loads (void)
3933 return (flag_hoist_adjacent_loads == 1
3934 && param_l1_cache_line_size
3935 && HAVE_conditional_move);
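/* Illustrative usage, not part of this file: the gate above
   corresponds to compiling with

     gcc -O2 -fhoist-adjacent-loads --param l1-cache-line-size=64

   on a target that provides a conditional-move pattern; the hoisting
   performed by this pass can be inspected with
   -fdump-tree-phiopt-details.  */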
3938 /* This pass tries to replace an if-then-else block with an
3939 assignment. We have different kinds of transformations.
3940 Some of these transformations are also performed by the ifcvt
3941 RTL optimizer.
3943 PHI-OPT using Match-and-simplify infrastructure
3944 -----------------------------------------------
3946 The PHI-OPT pass will try to use match-and-simplify infrastructure
3947 (gimple_simplify) to do transformations. This is implemented in
3948 match_simplify_replacement.
3950 It works by replacing:
3951 bb0:
3952 if (cond) goto bb2; else goto bb1;
3953 bb1:
3954 bb2:
3955 x = PHI <a (bb1), b (bb0), ...>;
3957 with a statement when `cond ? b : a` simplifies to one:
3959 bb0:
3960 x1 = cond ? b : a;
3961 bb2:
3962 x = PHI <a (bb1), x1 (bb0), ...>;
3963 Bb1 might be removed as it becomes unreachable after the replacement,
3964 though bb1 does not have to be a forwarding basic block from bb0.
3966 The pass will also try `(!cond) ? a : b` (when !cond itself simplifies);
3967 this avoids an explosion of patterns in match.pd.
3968 Note bb1 does not need to be completely empty; it can contain
3969 one statement which is known not to trap.
3971 It also can handle the case where we have two forwarding bbs (diamond):
3972 bb0:
3973 if (cond) goto bb2; else goto bb1;
3974 bb1: goto bb3;
3975 bb2: goto bb3;
3976 bb3:
3977 x = PHI <a (bb1), b (bb2), ...>;
3978 And that is replaced with a statement if `cond ? b : a`
3979 simplifies.
3980 Again bb1 and bb2 do not have to be completely empty, but
3981 each can contain one statement which is known not to trap.
3982 In this case, however, bb1/bb2 can only be forwarding basic blocks.
3984 This fully replaces the old "Conditional Replacement" and
3985 "ABS Replacement" transformations, as they are now
3986 implemented in match.pd.
3987 Some parts of the "MIN/MAX Replacement" are re-implemented in match.pd.
3989 Value Replacement
3990 -----------------
3992 This transformation, implemented in value_replacement, replaces
3994 bb0:
3995 if (a != b) goto bb2; else goto bb1;
3996 bb1:
3997 bb2:
3998 x = PHI <a (bb1), b (bb0), ...>;
4000 with
4002 bb0:
4003 bb2:
4004 x = PHI <b (bb0), ...>;
4006 This opportunity can sometimes occur as a result of other
4007 optimizations.
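At the source level this is (illustratively) the same as noticing that

x = (a != b) ? b : a;

is always just b: on the path where the PHI would select a, the
condition tells us a == b.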
4010 Another case caught by value replacement looks like this:
4012 bb0:
4013 t1 = a == CONST;
4014 t2 = b > c;
4015 t3 = t1 & t2;
4016 if (t3 != 0) goto bb1; else goto bb2;
4017 bb1:
4018 bb2:
4019 x = PHI <CONST, a>
4021 Gets replaced with:
4022 bb0:
4023 bb2:
4024 t1 = a == CONST;
4025 t2 = b > c;
4026 t3 = t1 & t2;
4027 x = a;
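Again as an illustrative source-level view,

x = (a == CONST && b > c) ? CONST : a;

collapses to x = a, because on the path that selects CONST the guard
guarantees a == CONST.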
4029 MIN/MAX Replacement
4030 -------------------
4032 This transformation, implemented in minmax_replacement, replaces
4034 bb0:
4035 if (a <= b) goto bb2; else goto bb1;
4036 bb1:
4037 bb2:
4038 x = PHI <b (bb1), a (bb0), ...>;
4040 with
4042 bb0:
4043 x' = MIN_EXPR (a, b)
4044 bb2:
4045 x = PHI <x' (bb0), ...>;
4047 A similar transformation is done for MAX_EXPR.
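Illustratively, at the source level

x = (a <= b) ? a : b;

is recognized here as MIN_EXPR <a, b>, and the mirrored comparison as
MAX_EXPR.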
4050 This pass also performs a fourth transformation of a slightly different
4051 flavor.
4053 Factor operations in COND_EXPR
4054 ------------------------------
4056 This transformation factors unary operations out of a COND_EXPR, using
4057 factor_out_conditional_operation.
4059 For example:
4060 if (a <= CST) goto <bb 3>; else goto <bb 4>;
4061 <bb 3>:
4062 tmp = (int) a;
4063 <bb 4>:
4064 tmp = PHI <tmp, CST>
4066 Into:
4067 if (a <= CST) goto <bb 3>; else goto <bb 4>;
4068 <bb 3>:
4069 <bb 4>:
4070 a' = PHI <a, CST>
4071 tmp = (int) a';
The factored PHI of a and the constant (now converted to a's type) can
then be matched directly, e.g. as a MIN_EXPR, which the cast previously
hid.
4073 Adjacent Load Hoisting
4074 ----------------------
4076 This transformation replaces
4078 bb0:
4079 if (...) goto bb2; else goto bb1;
4080 bb1:
4081 x1 = (<expr>).field1;
4082 goto bb3;
4083 bb2:
4084 x2 = (<expr>).field2;
4085 bb3:
4086 # x = PHI <x1, x2>;
4088 with
4090 bb0:
4091 x1 = (<expr>).field1;
4092 x2 = (<expr>).field2;
4093 if (...) goto bb2; else goto bb1;
4094 bb1:
4095 goto bb3;
4096 bb2:
4097 bb3:
4098 # x = PHI <x1, x2>;
4100 The purpose of this transformation is to enable generation of conditional
4101 move instructions such as Intel CMOV or PowerPC ISEL. Because one of
4102 the loads is speculative, the transformation is restricted to very
4103 specific cases to avoid introducing a page fault. We are looking for
4104 the common idiom:
4106 if (...)
4107 x = y->left;
4108 else
4109 x = y->right;
4111 where left and right are typically adjacent pointers in a tree structure. */
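/* A quick, illustrative way to observe these transformations (nothing
   below is part of the pass itself): compile

     int f (int a, int b) { if (a > b) a = b; return a; }

   with -O2 -fdump-tree-phiopt-details and inspect the resulting
   .phiopt dump; assuming no earlier pass has already folded it, the
   if-then-PHI should have been turned into a single MIN_EXPR.  */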
4113 namespace {
4115 const pass_data pass_data_phiopt =
4117 GIMPLE_PASS, /* type */
4118 "phiopt", /* name */
4119 OPTGROUP_NONE, /* optinfo_flags */
4120 TV_TREE_PHIOPT, /* tv_id */
4121 ( PROP_cfg | PROP_ssa ), /* properties_required */
4122 0, /* properties_provided */
4123 0, /* properties_destroyed */
4124 0, /* todo_flags_start */
4125 0, /* todo_flags_finish */
4128 class pass_phiopt : public gimple_opt_pass
4130 public:
4131 pass_phiopt (gcc::context *ctxt)
4132 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
4135 /* opt_pass methods: */
4136 opt_pass * clone () final override { return new pass_phiopt (m_ctxt); }
4137 void set_pass_param (unsigned n, bool param) final override
4139 gcc_assert (n == 0);
4140 early_p = param;
4142 bool gate (function *) final override { return flag_ssa_phiopt; }
4143 unsigned int execute (function *) final override;
4145 private:
4146 bool early_p;
4147 }; // class pass_phiopt
4149 } // anon namespace
4151 gimple_opt_pass *
4152 make_pass_phiopt (gcc::context *ctxt)
4154 return new pass_phiopt (ctxt);
4157 unsigned int
4158 pass_phiopt::execute (function *)
4160 bool do_hoist_loads = !early_p ? gate_hoist_loads () : false;
4161 basic_block bb;
4162 basic_block *bb_order;
4163 unsigned n, i;
4164 bool cfgchanged = false;
4166 calculate_dominance_info (CDI_DOMINATORS);
4167 mark_ssa_maybe_undefs ();
4169 /* Search every basic block for COND_EXPRs we may be able to optimize.
4171 We walk the blocks in an order that guarantees that a block with
4172 a single predecessor is processed before the predecessor.
4173 This ensures that we collapse inner ifs before visiting the
4174 outer ones, and also that we do not try to visit a removed
4175 block. */
4176 bb_order = single_pred_before_succ_order ();
4177 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
4179 for (i = 0; i < n; i++)
4181 gphi *phi;
4182 basic_block bb1, bb2;
4183 edge e1, e2;
4184 tree arg0, arg1;
4185 bool diamond_p = false;
4187 bb = bb_order[i];
4189 /* Check to see if the last statement is a GIMPLE_COND. */
4190 gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
4191 if (!cond_stmt)
4192 continue;
4194 e1 = EDGE_SUCC (bb, 0);
4195 bb1 = e1->dest;
4196 e2 = EDGE_SUCC (bb, 1);
4197 bb2 = e2->dest;
4199 /* We cannot do the optimization on abnormal edges. */
4200 if ((e1->flags & EDGE_ABNORMAL) != 0
4201 || (e2->flags & EDGE_ABNORMAL) != 0)
4202 continue;
4204 /* Punt if either bb1 or bb2 has no successors. */
4205 if (EDGE_COUNT (bb1->succs) == 0
4206 || EDGE_COUNT (bb2->succs) == 0)
4207 continue;
4209 /* Find the bb which falls through to the other. */
4210 if (EDGE_SUCC (bb1, 0)->dest == bb2)
4212 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
4214 std::swap (bb1, bb2);
4215 std::swap (e1, e2);
4217 else if (EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest
4218 && single_succ_p (bb2))
4220 diamond_p = true;
4221 e2 = EDGE_SUCC (bb2, 0);
4222 /* Make sure bb2 is just a fall through. */
4223 if ((e2->flags & EDGE_FALLTHRU) == 0)
4224 continue;
4226 else
4227 continue;
4229 e1 = EDGE_SUCC (bb1, 0);
4231 /* Make sure that bb1 is just a fall through. */
4232 if (!single_succ_p (bb1)
4233 || (e1->flags & EDGE_FALLTHRU) == 0)
4234 continue;
4236 if (diamond_p)
4238 basic_block bb3 = e1->dest;
4240 if (!single_pred_p (bb1)
4241 || !single_pred_p (bb2))
4242 continue;
4244 if (do_hoist_loads
4245 && !FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
4246 && EDGE_COUNT (bb->succs) == 2
4247 && EDGE_COUNT (bb3->preds) == 2
4248 /* If one edge or the other is dominant, a conditional move
4249 is likely to perform worse than the well-predicted branch. */
4250 && !predictable_edge_p (EDGE_SUCC (bb, 0))
4251 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
4252 hoist_adjacent_loads (bb, bb1, bb2, bb3);
4255 gimple_stmt_iterator gsi;
4256 bool candorest = true;
4258 /* Find the merge block whose PHIs we will inspect: the diamond's
join block, or bb2 itself for the triangle case. */
4259 basic_block merge = diamond_p ? EDGE_SUCC (bb2, 0)->dest : bb2;
4260 gimple_seq phis = phi_nodes (merge);
4262 /* Value replacement can work with more than one PHI
4263 so try that first. */
4264 if (!early_p && !diamond_p)
4265 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4267 phi = as_a <gphi *> (gsi_stmt (gsi));
4268 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
4269 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
4270 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
4272 candorest = false;
4273 cfgchanged = true;
4274 break;
4278 if (!candorest)
4279 continue;
4281 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
4282 if (!phi)
4283 continue;
4285 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
4286 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
4288 /* Something is wrong if we cannot find the arguments in the PHI
4289 node. */
4290 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
4292 if (single_pred_p (bb1)
4293 && EDGE_COUNT (merge->preds) == 2)
4295 gphi *newphi = phi;
4296 while (newphi)
4298 phi = newphi;
4299 /* factor_out_conditional_operation may create a new PHI in
4300 BB2 and eliminate an existing PHI in BB2. Recompute values
4301 that may be affected by that change. */
4302 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
4303 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
4304 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
4305 newphi = factor_out_conditional_operation (e1, e2, phi,
4306 arg0, arg1,
4307 cond_stmt);
4311 /* Do the replacement of the conditional if it can be done. */
4312 if (match_simplify_replacement (bb, bb1, bb2, e1, e2, phi,
4313 arg0, arg1, early_p, diamond_p))
4314 cfgchanged = true;
4315 else if (!early_p
4316 && !diamond_p
4317 && single_pred_p (bb1)
4318 && cond_removal_in_builtin_zero_pattern (bb, bb1, e1, e2,
4319 phi, arg0, arg1))
4320 cfgchanged = true;
4321 else if (minmax_replacement (bb, bb1, bb2, e1, e2, phi, arg0, arg1,
4322 diamond_p))
4323 cfgchanged = true;
4324 else if (single_pred_p (bb1)
4325 && !diamond_p
4326 && spaceship_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
4327 cfgchanged = true;
4330 free (bb_order);
4332 if (cfgchanged)
4333 return TODO_cleanup_cfg;
4334 return 0;
4337 /* This pass tries to transform conditional stores into unconditional
4338 ones, enabling further simplifications with the simpler then and else
4339 blocks. In particular it replaces this:
4341 bb0:
4342 if (cond) goto bb2; else goto bb1;
4343 bb1:
4344 *p = RHS;
4345 bb2:
4347 with
4349 bb0:
4350 if (cond) goto bb1; else goto bb2;
4351 bb1:
4352 condtmp' = *p;
4353 bb2:
4354 condtmp = PHI <RHS, condtmp'>
4355 *p = condtmp;
4357 This transformation can only be done under several constraints,
4358 documented below. It also replaces:
4360 bb0:
4361 if (cond) goto bb2; else goto bb1;
4362 bb1:
4363 *p = RHS1;
4364 goto bb3;
4365 bb2:
4366 *p = RHS2;
4367 bb3:
4369 with
4371 bb0:
4372 if (cond) goto bb3; else goto bb1;
4373 bb1:
4374 bb3:
4375 condtmp = PHI <RHS1, RHS2>
4376 *p = condtmp; */
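/* At the source level, the first form corresponds to (illustrative):

     if (cond)
       *p = v;

   which becomes a load of *p, a PHI selecting between v and the loaded
   value, and an unconditional store.  This is only valid when *p is
   known not to trap on the untaken path, which is what the
   get_non_trapping () computation in pass_cselim::execute below
   establishes.  */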
4378 namespace {
4380 const pass_data pass_data_cselim =
4382 GIMPLE_PASS, /* type */
4383 "cselim", /* name */
4384 OPTGROUP_NONE, /* optinfo_flags */
4385 TV_TREE_PHIOPT, /* tv_id */
4386 ( PROP_cfg | PROP_ssa ), /* properties_required */
4387 0, /* properties_provided */
4388 0, /* properties_destroyed */
4389 0, /* todo_flags_start */
4390 0, /* todo_flags_finish */
4393 class pass_cselim : public gimple_opt_pass
4395 public:
4396 pass_cselim (gcc::context *ctxt)
4397 : gimple_opt_pass (pass_data_cselim, ctxt)
4400 /* opt_pass methods: */
4401 bool gate (function *) final override { return flag_tree_cselim; }
4402 unsigned int execute (function *) final override;
4404 }; // class pass_cselim
4406 } // anon namespace
4408 gimple_opt_pass *
4409 make_pass_cselim (gcc::context *ctxt)
4411 return new pass_cselim (ctxt);
4414 unsigned int
4415 pass_cselim::execute (function *)
4417 basic_block bb;
4418 basic_block *bb_order;
4419 unsigned n, i;
4420 bool cfgchanged = false;
4421 hash_set<tree> *nontrap = 0;
4422 unsigned todo = 0;
4424 /* ??? We are not interested in loop related info, but the following
4425 will create it, ICEing as we didn't init loops with pre-headers.
4426 An interfacing issue of find_data_references_in_bb. */
4427 loop_optimizer_init (LOOPS_NORMAL);
4428 scev_initialize ();
4430 calculate_dominance_info (CDI_DOMINATORS);
4432 /* Calculate the set of non-trapping memory accesses. */
4433 nontrap = get_non_trapping ();
4435 /* Search every basic block for COND_EXPRs we may be able to optimize.
4437 We walk the blocks in an order that guarantees that a block with
4438 a single predecessor is processed before the predecessor.
4439 This ensures that we collapse inner ifs before visiting the
4440 outer ones, and also that we do not try to visit a removed
4441 block. */
4442 bb_order = single_pred_before_succ_order ();
4443 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
4445 for (i = 0; i < n; i++)
4447 basic_block bb1, bb2;
4448 edge e1, e2;
4449 bool diamond_p = false;
4451 bb = bb_order[i];
4453 /* Check to see if the last statement is a GIMPLE_COND. */
4454 gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
4455 if (!cond_stmt)
4456 continue;
4458 e1 = EDGE_SUCC (bb, 0);
4459 bb1 = e1->dest;
4460 e2 = EDGE_SUCC (bb, 1);
4461 bb2 = e2->dest;
4463 /* We cannot do the optimization on abnormal edges. */
4464 if ((e1->flags & EDGE_ABNORMAL) != 0
4465 || (e2->flags & EDGE_ABNORMAL) != 0)
4466 continue;
4468 /* Punt if either bb1 or bb2 has no successors. */
4469 if (EDGE_COUNT (bb1->succs) == 0
4470 || EDGE_COUNT (bb2->succs) == 0)
4471 continue;
4473 /* Find the bb which falls through to the other. */
4474 if (EDGE_SUCC (bb1, 0)->dest == bb2)
4476 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
4478 std::swap (bb1, bb2);
4479 std::swap (e1, e2);
4481 else if (EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest
4482 && single_succ_p (bb2))
4484 diamond_p = true;
4485 e2 = EDGE_SUCC (bb2, 0);
4486 /* Make sure bb2 is just a fall through. */
4487 if ((e2->flags & EDGE_FALLTHRU) == 0)
4488 continue;
4490 else
4491 continue;
4493 e1 = EDGE_SUCC (bb1, 0);
4495 /* Make sure that bb1 is just a fall through. */
4496 if (!single_succ_p (bb1)
4497 || (e1->flags & EDGE_FALLTHRU) == 0)
4498 continue;
4500 if (diamond_p)
4502 basic_block bb3 = e1->dest;
4504 /* Only handle sinking of stores from exactly two bbs.
4505 The middle bbs do not both have to come straight
4506 from the if, since we are sinking rather than
4507 hoisting. */
4508 if (EDGE_COUNT (bb3->preds) != 2)
4509 continue;
4510 if (cond_if_else_store_replacement (bb1, bb2, bb3))
4511 cfgchanged = true;
4512 continue;
4515 /* Also make sure that bb1 has only one predecessor and that it
4516 is bb. */
4517 if (!single_pred_p (bb1)
4518 || single_pred (bb1) != bb)
4519 continue;
4521 /* bb1 is the middle block, bb2 the join block, bb the split block,
4522 e1 the fallthrough edge from bb1 to bb2. We can't do the
4523 optimization if the join block has more than two predecessors. */
4524 if (EDGE_COUNT (bb2->preds) > 2)
4525 continue;
4526 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
4527 cfgchanged = true;
4530 free (bb_order);
4532 delete nontrap;
4533 /* If the CFG has changed, we should clean up the CFG. */
4534 if (cfgchanged)
4536 /* In cond-store replacement we have added some loads on edges
4537 and new VOPS (as we moved the store, and created a load). */
4538 gsi_commit_edge_inserts ();
4539 todo = TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4541 scev_finalize ();
4542 loop_optimizer_finalize ();
4543 return todo;