x86: Add a test for PR rtl-optimization/111673
[official-gcc.git] / gcc / tree-switch-conversion.cc
blob39a8a893eddeeb70a4f5a8ed7869874c46f77360
1 /* Lower GIMPLE_SWITCH expressions to something more efficient than
2 a jump table.
3 Copyright (C) 2006-2025 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
22 /* This file handles the lowering of GIMPLE_SWITCH to an indexed
23 load, or a series of bit-test-and-branch expressions. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "insn-codes.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "cfghooks.h"
34 #include "tree-pass.h"
35 #include "ssa.h"
36 #include "optabs-tree.h"
37 #include "cgraph.h"
38 #include "gimple-pretty-print.h"
39 #include "fold-const.h"
40 #include "varasm.h"
41 #include "stor-layout.h"
42 #include "cfganal.h"
43 #include "gimplify.h"
44 #include "gimple-iterator.h"
45 #include "gimplify-me.h"
46 #include "gimple-fold.h"
47 #include "tree-cfg.h"
48 #include "cfgloop.h"
49 #include "alloc-pool.h"
50 #include "target.h"
51 #include "tree-into-ssa.h"
52 #include "omp-general.h"
53 #include "gimple-range.h"
54 #include "tree-cfgcleanup.h"
55 #include "hwint.h"
56 #include "internal-fn.h"
57 #include "diagnostic-core.h"
59 /* ??? For lang_hooks.types.type_for_mode, but is there a word_mode
60 type in the GIMPLE type system that is language-independent? */
61 #include "langhooks.h"
63 #include "tree-switch-conversion.h"
65 using namespace tree_switch_conversion;
67 /* Does the target have optabs needed to efficiently compute exact base two
68 logarithm of a variable with type TYPE?
70 If yes, returns TYPE. If no, returns NULL_TREE. May also return another
71 type. This indicates that logarithm of the variable can be computed but
72 only after it is converted to this type.
74 Also see gen_log2. */
76 static tree
77 can_log2 (tree type, optimization_type opt_type)
79 /* Check if target supports FFS for given type. */
80 if (direct_internal_fn_supported_p (IFN_FFS, type, opt_type))
81 return type;
83 /* Check if target supports FFS for some type we could convert to. */
84 int prec = TYPE_PRECISION (type);
85 int i_prec = TYPE_PRECISION (integer_type_node);
86 int li_prec = TYPE_PRECISION (long_integer_type_node);
87 int lli_prec = TYPE_PRECISION (long_long_integer_type_node);
88 tree new_type;
89 if (prec <= i_prec
90 && direct_internal_fn_supported_p (IFN_FFS, integer_type_node, opt_type))
91 new_type = integer_type_node;
92 else if (prec <= li_prec
93 && direct_internal_fn_supported_p (IFN_FFS, long_integer_type_node,
94 opt_type))
95 new_type = long_integer_type_node;
96 else if (prec <= lli_prec
97 && direct_internal_fn_supported_p (IFN_FFS,
98 long_long_integer_type_node,
99 opt_type))
100 new_type = long_long_integer_type_node;
101 else
102 return NULL_TREE;
103 return new_type;
106 /* Assume that OP is a power of two. Build a sequence of gimple statements
107 efficiently computing the base two logarithm of OP using special optabs.
108 Return the ssa name represeting the result of the logarithm through RESULT.
110 Before computing the logarithm, OP may have to be converted to another type.
111 This should be specified in TYPE. Use can_log2 to decide what this type
112 should be.
114 Should only be used if can_log2 doesn't reject the type of OP. */
116 static gimple_seq
117 gen_log2 (tree op, location_t loc, tree *result, tree type)
119 gimple_seq stmts = NULL;
120 gimple_stmt_iterator gsi = gsi_last (stmts);
122 tree orig_type = TREE_TYPE (op);
123 tree tmp1;
124 if (type != orig_type)
125 tmp1 = gimple_convert (&gsi, false, GSI_NEW_STMT, loc, type, op);
126 else
127 tmp1 = op;
128 /* Build FFS (op) - 1. */
129 tree tmp2 = gimple_build (&gsi, false, GSI_NEW_STMT, loc, IFN_FFS, orig_type,
130 tmp1);
131 tree tmp3 = gimple_build (&gsi, false, GSI_NEW_STMT, loc, MINUS_EXPR,
132 orig_type, tmp2, build_one_cst (orig_type));
133 *result = tmp3;
134 return stmts;
137 /* Build a sequence of gimple statements checking that OP is a power of 2.
138 Return the result as a boolean_type_node ssa name through RESULT. Assumes
139 that OP's value will be non-negative. The generated check may give
140 arbitrary answer for negative values. */
142 static gimple_seq
143 gen_pow2p (tree op, location_t loc, tree *result)
145 gimple_seq stmts = NULL;
146 gimple_stmt_iterator gsi = gsi_last (stmts);
148 tree type = TREE_TYPE (op);
149 tree utype = unsigned_type_for (type);
151 /* Build (op ^ (op - 1)) > (op - 1). */
152 tree tmp1;
153 if (types_compatible_p (type, utype))
154 tmp1 = op;
155 else
156 tmp1 = gimple_convert (&gsi, false, GSI_NEW_STMT, loc, utype, op);
157 tree tmp2 = gimple_build (&gsi, false, GSI_NEW_STMT, loc, MINUS_EXPR, utype,
158 tmp1, build_one_cst (utype));
159 tree tmp3 = gimple_build (&gsi, false, GSI_NEW_STMT, loc, BIT_XOR_EXPR,
160 utype, tmp1, tmp2);
161 *result = gimple_build (&gsi, false, GSI_NEW_STMT, loc, GT_EXPR,
162 boolean_type_node, tmp3, tmp2);
164 return stmts;
168 /* Constructor. */
/* Default-construct the analysis object with all state cleared; collect ()
   must be called on a switch before any of the queries are meaningful.  */
170 switch_conversion::switch_conversion (): m_final_bb (NULL),
171 m_constructors (NULL), m_default_values (NULL),
172 m_arr_ref_first (NULL), m_arr_ref_last (NULL),
173 m_reason (NULL), m_default_case_nonstandard (false), m_cfg_altered (false),
174 m_exp_index_transform_applied (false)
178 /* Collection information about SWTCH statement. */
180 void
181 switch_conversion::collect (gswitch *swtch)
183 unsigned int branch_num = gimple_switch_num_labels (swtch);
184 tree min_case, max_case;
185 unsigned int i;
186 edge e, e_default, e_first;
187 edge_iterator ei;
189 m_switch = swtch;
191 /* The gimplifier has already sorted the cases by CASE_LOW and ensured there
192 is a default label which is the first in the vector.
193 Collect the bits we can deduce from the CFG. */
194 m_index_expr = gimple_switch_index (swtch);
195 m_switch_bb = gimple_bb (swtch);
196 e_default = gimple_switch_default_edge (cfun, swtch);
197 m_default_bb = e_default->dest;
198 m_default_prob = e_default->probability;
200 /* Get upper and lower bounds of case values, and the covered range. */
201 min_case = gimple_switch_label (swtch, 1);
202 max_case = gimple_switch_label (swtch, branch_num - 1);
204 m_range_min = CASE_LOW (min_case);
205 if (CASE_HIGH (max_case) != NULL_TREE)
206 m_range_max = CASE_HIGH (max_case);
207 else
208 m_range_max = CASE_LOW (max_case);
/* The range is contiguous when every case value follows immediately after
   the previous label's last covered value (CASE_HIGH for ranges).  */
210 m_contiguous_range = true;
211 tree last = CASE_HIGH (min_case) ? CASE_HIGH (min_case) : m_range_min;
212 for (i = 2; i < branch_num; i++)
214 tree elt = gimple_switch_label (swtch, i);
215 if (wi::to_wide (last) + 1 != wi::to_wide (CASE_LOW (elt)))
217 m_contiguous_range = false;
218 break;
220 last = CASE_HIGH (elt) ? CASE_HIGH (elt) : CASE_LOW (elt);
223 if (m_contiguous_range)
224 e_first = gimple_switch_edge (cfun, swtch, 1);
225 else
226 e_first = e_default;
228 /* See if there is one common successor block for all branch
229 targets. If it exists, record it in FINAL_BB.
230 Start with the destination of the first non-default case
231 if the range is contiguous and default case otherwise as
232 guess or its destination in case it is a forwarder block. */
233 if (! single_pred_p (e_first->dest))
234 m_final_bb = e_first->dest;
235 else if (single_succ_p (e_first->dest)
236 && ! single_pred_p (single_succ (e_first->dest)))
237 m_final_bb = single_succ (e_first->dest);
238 /* Require that all switch destinations are either that common
239 FINAL_BB or a forwarder to it, except for the default
240 case if contiguous range. */
241 auto_vec<edge, 10> fw_edges;
/* m_uniq counts distinct non-default outcomes; forwarders whose PHI
   arguments in m_final_bb are all equal count only once.  */
242 m_uniq = 0;
243 if (m_final_bb)
244 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
246 edge phi_e = nullptr;
247 if (e->dest == m_final_bb)
248 phi_e = e;
249 else if (single_pred_p (e->dest)
250 && single_succ_p (e->dest)
251 && single_succ (e->dest) == m_final_bb)
252 phi_e = single_succ_edge (e->dest);
253 if (phi_e)
255 if (e == e_default)
257 else if (phi_e == e || empty_block_p (e->dest))
259 /* For empty blocks consider forwarders with equal
260 PHI arguments in m_final_bb as unique. */
261 unsigned i;
262 for (i = 0; i < fw_edges.length (); ++i)
263 if (phi_alternatives_equal (m_final_bb, fw_edges[i], phi_e))
264 break;
265 if (i == fw_edges.length ())
267 /* But limit the above possibly quadratic search. */
268 if (fw_edges.length () < 10)
269 fw_edges.quick_push (phi_e)\u003b
270 m_uniq++;
273 else
274 m_uniq++;
275 continue;
278 if (e == e_default && m_contiguous_range)
280 m_default_case_nonstandard = true;
281 continue;
/* Some destination is neither the final block nor a forwarder to it;
   give up on having a common successor.  */
284 m_final_bb = NULL;
285 break;
288 /* When there's not a single common successor block conservatively
289 approximate the number of unique non-default targets. */
290 if (!m_final_bb)
291 m_uniq = EDGE_COUNT (gimple_bb (swtch)->succs) - 1;
293 m_range_size
294 = int_const_binop (MINUS_EXPR, m_range_max, m_range_min);
296 /* Get a count of the number of case labels. Single-valued case labels
297 simply count as one, but a case range counts double, since it may
298 require two compares if it gets lowered as a branching tree. */
299 m_count = 0;
300 for (i = 1; i < branch_num; i++)
302 tree elt = gimple_switch_label (swtch, i);
303 m_count++;
304 if (CASE_HIGH (elt)
305 && ! tree_int_cst_equal (CASE_LOW (elt), CASE_HIGH (elt)))
306 m_count++;
310 /* Check that the "exponential index transform" can be applied to this switch.
312 See comment of the exp_index_transform function for details about this
313 transformation.
315 We want:
316 - This form of the switch is more efficient
317 - Cases are powers of 2
319 Expects that SWTCH has at least one case. */
321 bool
322 switch_conversion::is_exp_index_transform_viable (gswitch *swtch)
324 tree index = gimple_switch_index (swtch);
325 tree index_type = TREE_TYPE (index);
326 basic_block swtch_bb = gimple_bb (swtch);
327 unsigned num_labels = gimple_switch_num_labels (swtch);
329 optimization_type opt_type = bb_optimization_type (swtch_bb);
330 m_exp_index_transform_log2_type = can_log2 (index_type, opt_type);
331 if (!m_exp_index_transform_log2_type)
332 return false;
334 /* Check that each case label corresponds only to one value
335 (no case 1..3). */
336 unsigned i;
337 for (i = 1; i < num_labels; i++)
339 tree label = gimple_switch_label (swtch, i);
340 if (CASE_HIGH (label))
341 return false;
344 /* Check that each label is nonnegative and a power of 2. */
345 for (i = 1; i < num_labels; i++)
347 tree label = gimple_switch_label (swtch, i);
348 wide_int label_wi = wi::to_wide (CASE_LOW (label));
349 if (!wi::ge_p (label_wi, 0, TYPE_SIGN (index_type)))
350 return false;
351 if (wi::exact_log2 (label_wi) == -1)
352 return false;
355 if (dump_file)
356 fprintf (dump_file, "Exponential index transform viable\n");
358 return true;
361 /* Perform the "exponential index transform".
363 Assume that cases of SWTCH are powers of 2. The transformation replaces the
364 cases by their exponents (2^k -> k). It also inserts a statement that
365 computes the exponent of the original index variable (basically taking the
366 logarithm) and then sets the result as the new index variable.
368 The transformation also inserts a conditional statement checking that the
369 incoming original index variable is a power of 2 with the false edge leading
370 to the default case.
372 The exponential index transform shrinks the range of case numbers which
373 helps switch conversion convert switches it otherwise could not.
375 Consider for example:
377 switch (i)
379 case (1 << 0): return 0;
380 case (1 << 1): return 1;
381 case (1 << 2): return 2;
383 case (1 << 30): return 30;
384 default: return 31;
387 First, exponential index transform gets applied. Since each case becomes
388 case x: return x;, the rest of switch conversion is then able to get rid of
389 the switch statement.
391 if (i is power of 2)
392 return log2 (i);
393 else
394 return 31;
398 void
399 switch_conversion::exp_index_transform (gswitch *swtch)
401 if (dump_file)
402 fprintf (dump_file, "Applying exponential index transform\n");
404 tree index = gimple_switch_index (swtch);
405 tree index_type = TREE_TYPE (index);
406 basic_block swtch_bb = gimple_bb (swtch);
407 unsigned num_labels = gimple_switch_num_labels (swtch);
409 /* Insert a cond stmt that checks if the index variable is a power of 2. */
/* Split the block just before the switch: the original block becomes the
   cond block, the new block (still) holds the switch.  The fallthru edge
   of the split becomes the true edge of the power-of-2 test.  */
410 gimple_stmt_iterator gsi = gsi_for_stmt (swtch);
411 gsi_prev (&gsi);
412 gimple *foo = gsi_stmt (gsi);
413 edge new_edge1 = split_block (swtch_bb, foo);
415 swtch_bb = new_edge1->dest;
416 basic_block cond_bb = new_edge1->src;
417 new_edge1->flags |= EDGE_TRUE_VALUE;
418 new_edge1->flags &= ~EDGE_FALLTHRU;
419 new_edge1->probability = profile_probability::even ();
/* Non-powers-of-2 go straight to the default case.  */
421 basic_block default_bb = gimple_switch_default_bb (cfun, swtch);
422 edge new_edge2 = make_edge (cond_bb, default_bb, EDGE_FALSE_VALUE);
423 new_edge2->probability = profile_probability::even ();
425 tree tmp;
426 gimple_seq stmts = gen_pow2p (index, UNKNOWN_LOCATION, &tmp);
427 gsi = gsi_last_bb (cond_bb);
428 gsi_insert_seq_after (&gsi, stmts, GSI_LAST_NEW_STMT);
429 gcond *stmt_cond = gimple_build_cond (NE_EXPR, tmp, boolean_false_node,
430 NULL, NULL);
431 gsi_insert_after (&gsi, stmt_cond, GSI_NEW_STMT);
433 /* We just added an edge going to default bb so fix PHI nodes in that bb:
434 For each PHI add new PHI arg. It will be the same arg as when comming to
435 the default bb from the switch bb. */
436 edge default_edge = find_edge (swtch_bb, default_bb);
437 for (gphi_iterator gsi = gsi_start_phis (default_bb);
438 !gsi_end_p (gsi); gsi_next (&gsi))
440 gphi *phi = gsi.phi ();
441 tree arg = PHI_ARG_DEF_FROM_EDGE (phi, default_edge);
442 location_t loc = gimple_phi_arg_location_from_edge (phi, default_edge);
443 add_phi_arg (phi, arg, new_edge2, loc);
446 /* Insert a sequence of stmts that takes the log of the index variable. */
447 stmts = gen_log2 (index, UNKNOWN_LOCATION, &tmp,
448 m_exp_index_transform_log2_type);
449 gsi = gsi_after_labels (swtch_bb);
450 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
452 /* Use the result of the logarithm as the new index variable. */
453 gimple_switch_set_index (swtch, tmp);
454 update_stmt (swtch);
456 /* Replace each case number with its logarithm. */
457 unsigned i;
458 for (i = 1; i < num_labels; i++)
460 tree label = gimple_switch_label (swtch, i);
461 CASE_LOW (label) = build_int_cst (index_type,
462 tree_log2 (CASE_LOW (label)));
465 /* Fix the dominator tree, if it is available. */
466 if (dom_info_available_p (CDI_DOMINATORS))
468 /* Analysis of how dominators should look after we add the edge E going
469 from the cond block to the default block.
471 1 For the blocks between the switch block and the final block
472 (excluding the final block itself): They had the switch block as
473 their immediate dominator. That shouldn't change.
475 2 The final block may now have the switch block or the cond block as
476 its immediate dominator. There's no easy way of knowing (consider
477 two cases where in both m_default_case_nonstandard = true, in one a
478 path through default intersects the final block and in one all paths
479 through default avoid the final block but intersect a successor of the
480 final block).
482 3 Other blocks that had the switch block as their immediate dominator
483 should now have the cond block as their immediate dominator.
485 4 Immediate dominators of the rest of the blocks shouldn't change.
487 Reasoning for 3 and 4:
489 We'll only consider blocks that do not fall into 1 or 2.
491 Consider a block X whose original imm dom was the switch block. All
492 paths to X must also intersect the cond block since it's the only
493 pred of the switch block. The final block doesn't dominate X so at
494 least one path P must lead through the default block. Let P' be P but
495 instead of going through the switch block, take E. The switch block
496 doesn't dominate X so its imm dom must now be the cond block.
498 Consider a block X whose original imm dom was Y != the switch block.
499 We only added an edge so all original paths to X are still present.
500 So X gained no new dominators. Observe that Y still dominates X.
501 There would have to be a path that avoids Y otherwise. But any block
502 we can avoid now except for the switch block we were able to avoid
503 before adding E. */
505 redirect_immediate_dominators (CDI_DOMINATORS, swtch_bb, cond_bb);
/* Re-point case 1 above: successors of the switch block other than the
   final and default blocks keep the switch block as imm dom.  */
507 edge e;
508 edge_iterator ei;
509 FOR_EACH_EDGE (e, ei, swtch_bb->succs)
511 basic_block bb = e->dest;
512 if (bb == m_final_bb || bb == default_bb)
513 continue;
514 set_immediate_dominator (CDI_DOMINATORS, bb, swtch_bb);
/* Case 2 above: let the generic machinery recompute the final block.  */
517 vec<basic_block> v;
518 v.create (1);
519 v.quick_push (m_final_bb);
520 iterate_fix_dominators (CDI_DOMINATORS, v, true);
523 /* Update information about the switch statement. */
524 tree first_label = gimple_switch_label (swtch, 1);
525 tree last_label = gimple_switch_label (swtch, num_labels - 1);
527 m_range_min = CASE_LOW (first_label);
528 m_range_max = CASE_LOW (last_label);
529 m_index_expr = gimple_switch_index (swtch);
530 m_switch_bb = swtch_bb;
532 m_range_size = int_const_binop (MINUS_EXPR, m_range_max, m_range_min);
534 m_cfg_altered = true;
/* Recompute contiguity: the exponents may be contiguous even when the
   original powers of two were not.  */
536 m_contiguous_range = true;
537 wide_int last_wi = wi::to_wide (CASE_LOW (first_label));
538 for (i = 2; i < num_labels; i++)
540 tree label = gimple_switch_label (swtch, i);
541 wide_int label_wi = wi::to_wide (CASE_LOW (label));
542 m_contiguous_range &= wi::eq_p (wi::add (last_wi, 1), label_wi);
543 last_wi = label_wi;
546 m_exp_index_transform_applied = true;
549 /* Checks whether the range given by individual case statements of the switch
550 switch statement isn't too big and whether the number of branches actually
551 satisfies the size of the new array. */
553 bool
554 switch_conversion::check_range ()
556 gcc_assert (m_range_size);
557 if (!tree_fits_uhwi_p (m_range_size))
559 m_reason = "index range way too large or otherwise unusable";
560 return false;
563 if (tree_to_uhwi (m_range_size)
564 > ((unsigned) m_count * param_switch_conversion_branch_ratio))
566 m_reason = "the maximum range-branch ratio exceeded";
567 return false;
570 return true;
573 /* Checks whether all but the final BB basic blocks are empty. */
575 bool
576 switch_conversion::check_all_empty_except_final ()
578 edge e, e_default = find_edge (m_switch_bb, m_default_bb);
579 edge_iterator ei;
581 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
583 if (e->dest == m_final_bb)
584 continue;
586 if (!empty_block_p (e->dest))
588 if (m_contiguous_range && e == e_default)
590 m_default_case_nonstandard = true;
591 continue;
594 m_reason = "bad case - a non-final BB not empty";
595 return false;
599 return true;
602 /* This function checks whether all required values in phi nodes in final_bb
603 are constants. Required values are those that correspond to a basic block
604 which is a part of the examined switch statement. It returns true if the
605 phi nodes are OK, otherwise false. */
607 bool
608 switch_conversion::check_final_bb ()
610 gphi_iterator gsi;
612 m_phi_count = 0;
613 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
615 gphi *phi = gsi.phi ();
616 unsigned int i;
618 if (virtual_operand_p (gimple_phi_result (phi)))
619 continue;
621 m_phi_count++;
623 for (i = 0; i < gimple_phi_num_args (phi); i++)
625 basic_block bb = gimple_phi_arg_edge (phi, i)->src;
/* Only arguments arriving from the switch itself (directly or through a
   forwarder block hanging off the switch block) need to be constants;
   other predecessors of the final block are unrelated to the switch.  */
627 if (bb == m_switch_bb
628 || (single_pred_p (bb)
629 && single_pred (bb) == m_switch_bb
630 && (!m_default_case_nonstandard
631 || empty_block_p (bb))))
633 tree reloc, val;
634 const char *reason = NULL;
636 val = gimple_phi_arg_def (phi, i);
637 if (!is_gimple_ip_invariant (val))
638 reason = "non-invariant value from a case";
639 else
/* The value must also be usable as a static initializer: with -fpic it
   may not need any relocation, without it any valid reloc is fine.  */
641 reloc = initializer_constant_valid_p (val, TREE_TYPE (val));
642 if ((flag_pic && reloc != null_pointer_node)
643 || (!flag_pic && reloc == NULL_TREE))
645 if (reloc)
646 reason
647 = "value from a case would need runtime relocations";
648 else
649 reason
650 = "value from a case is not a valid initializer";
653 if (reason)
655 /* For contiguous range, we can allow non-constant
656 or one that needs relocation, as long as it is
657 only reachable from the default case. */
658 if (bb == m_switch_bb)
659 bb = m_final_bb;
660 if (!m_contiguous_range || bb != m_default_bb)
662 m_reason = reason;
663 return false;
/* The offending value comes from the default block; make sure no case
   label also targets that block, else we still cannot allow it.  */
666 unsigned int branch_num = gimple_switch_num_labels (m_switch);
667 for (unsigned int i = 1; i < branch_num; i++)
669 if (gimple_switch_label_bb (cfun, m_switch, i) == bb)
671 m_reason = reason;
672 return false;
675 m_default_case_nonstandard = true;
681 return true;
684 /* The following function allocates default_values, target_{in,out}_names and
685 constructors arrays. The last one is also populated with pointers to
686 vectors that will become constructors of new arrays. */
688 void
689 switch_conversion::create_temp_arrays ()
691 int i;
693 m_default_values = XCNEWVEC (tree, m_phi_count * 3);
694 /* ??? Macros do not support multi argument templates in their
695 argument list. We create a typedef to work around that problem. */
696 typedef vec<constructor_elt, va_gc> *vec_constructor_elt_gc;
697 m_constructors = XCNEWVEC (vec_constructor_elt_gc, m_phi_count);
698 m_target_inbound_names = m_default_values + m_phi_count;
699 m_target_outbound_names = m_target_inbound_names + m_phi_count;
700 for (i = 0; i < m_phi_count; i++)
701 vec_alloc (m_constructors[i], tree_to_uhwi (m_range_size) + 1);
704 /* Populate the array of default values in the order of phi nodes.
705 DEFAULT_CASE is the CASE_LABEL_EXPR for the default switch branch
706 if the range is non-contiguous or the default case has standard
707 structure, otherwise it is the first non-default case instead. */
709 void
710 switch_conversion::gather_default_values (tree default_case)
712 gphi_iterator gsi;
713 basic_block bb = label_to_block (cfun, CASE_LABEL (default_case));
714 edge e;
715 int i = 0;
717 gcc_assert (CASE_LOW (default_case) == NULL_TREE
718 || m_default_case_nonstandard);
720 if (bb == m_final_bb)
721 e = find_edge (m_switch_bb, bb);
722 else
723 e = single_succ_edge (bb);
725 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
727 gphi *phi = gsi.phi ();
728 if (virtual_operand_p (gimple_phi_result (phi)))
729 continue;
730 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
731 gcc_assert (val);
732 m_default_values[i++] = val;
736 /* The following function populates the vectors in the constructors array with
737 future contents of the static arrays. The vectors are populated in the
738 order of phi nodes. */
740 void
741 switch_conversion::build_constructors ()
743 unsigned i, branch_num = gimple_switch_num_labels (m_switch);
/* POS walks the case range one value at a time.  */
744 tree pos = m_range_min;
745 tree pos_one = build_int_cst (TREE_TYPE (pos), 1);
747 for (i = 1; i < branch_num; i++)
749 tree cs = gimple_switch_label (m_switch, i);
750 basic_block bb = label_to_block (cfun, CASE_LABEL (cs));
751 edge e;
752 tree high;
753 gphi_iterator gsi;
754 int j;
/* E is the edge over which this case's values reach the final block,
   either directly or through a forwarder block.  */
756 if (bb == m_final_bb)
757 e = find_edge (m_switch_bb, bb);
758 else
759 e = single_succ_edge (bb);
760 gcc_assert (e);
/* Fill any gap before this case with copies of the default values.  */
762 while (tree_int_cst_lt (pos, CASE_LOW (cs)))
764 int k;
765 for (k = 0; k < m_phi_count; k++)
767 constructor_elt elt;
769 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
770 if (TYPE_PRECISION (TREE_TYPE (elt.index))
771 > TYPE_PRECISION (sizetype))
772 elt.index = fold_convert (sizetype, elt.index);
773 elt.value
774 = unshare_expr_without_location (m_default_values[k]);
775 m_constructors[k]->quick_push (elt);
778 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
780 gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));
/* Now push, per PHI node (index J), this case's value for every position
   the case covers (CASE_LOW up to CASE_HIGH for ranges).  */
782 j = 0;
783 if (CASE_HIGH (cs))
784 high = CASE_HIGH (cs);
785 else
786 high = CASE_LOW (cs);
787 for (gsi = gsi_start_phis (m_final_bb);
788 !gsi_end_p (gsi); gsi_next (&gsi))
790 gphi *phi = gsi.phi ();
791 if (virtual_operand_p (gimple_phi_result (phi)))
792 continue;
793 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
794 tree low = CASE_LOW (cs);
795 pos = CASE_LOW (cs);
799 constructor_elt elt;
801 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
802 if (TYPE_PRECISION (TREE_TYPE (elt.index))
803 > TYPE_PRECISION (sizetype))
804 elt.index = fold_convert (sizetype, elt.index);
805 elt.value = unshare_expr_without_location (val);
806 m_constructors[j]->quick_push (elt);
808 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
/* The LOW < POS guard stops the walk if POS wrapped around.  */
809 } while (!tree_int_cst_lt (high, pos)
810 && tree_int_cst_lt (low, pos));
811 j++;
816 /* If all values in the constructor vector are products of a linear function
817 a * x + b, then return true. When true, COEFF_A and COEFF_B and
818 coefficients of the linear function. Note that equal values are special
819 case of a linear function with a and b equal to zero. */
821 bool
822 switch_conversion::contains_linear_function_p (vec<constructor_elt, va_gc> *vec,
823 wide_int *coeff_a,
824 wide_int *coeff_b)
826 unsigned int i;
827 constructor_elt *elt;
829 gcc_assert (vec->length () >= 2);
831 /* Let's try to find any linear function a * x + y that can apply to
832 given values. 'a' can be calculated as follows:
834 a = (y2 - y1) / (x2 - x1) where x2 - x1 = 1 (consecutive case indices)
835 a = y2 - y1
839 b = y2 - a * x2
843 tree elt0 = (*vec)[0].value;
844 tree elt1 = (*vec)[1].value;
846 if (TREE_CODE (elt0) != INTEGER_CST || TREE_CODE (elt1) != INTEGER_CST)
847 return false;
849 wide_int range_min
850 = wide_int::from (wi::to_wide (m_range_min),
851 TYPE_PRECISION (TREE_TYPE (elt0)),
852 TYPE_SIGN (TREE_TYPE (m_range_min)));
853 wide_int y1 = wi::to_wide (elt0);
854 wide_int y2 = wi::to_wide (elt1);
855 wide_int a = y2 - y1;
856 wide_int b = y2 - a * (range_min + 1);
858 /* Verify that all values fulfill the linear function. */
859 FOR_EACH_VEC_SAFE_ELT (vec, i, elt)
861 if (TREE_CODE (elt->value) != INTEGER_CST)
862 return false;
864 wide_int value = wi::to_wide (elt->value);
865 if (a * range_min + b != value)
866 return false;
868 ++range_min;
871 *coeff_a = a;
872 *coeff_b = b;
874 return true;
877 /* Return type which should be used for array elements, either TYPE's
878 main variant or, for integral types, some smaller integral type
879 that can still hold all the constants. */
881 tree
882 switch_conversion::array_value_type (tree type, int num)
884 unsigned int i, len = vec_safe_length (m_constructors[num]);
885 constructor_elt *elt;
/* SIGN tracks what representations have fit so far: 0 both signed and
   unsigned, 1 unsigned only, -1 signed only.  */
886 int sign = 0;
887 tree smaller_type;
889 /* Types with alignments greater than their size can reach here, e.g. out of
890 SRA. We couldn't use these as an array component type so get back to the
891 main variant first, which, for our purposes, is fine for other types as
892 well. */
894 type = TYPE_MAIN_VARIANT (type);
/* Only narrow ordinary integral types; large / BLKmode bit-precise
   integers are kept as-is.  */
896 if (!INTEGRAL_TYPE_P (type)
897 || (TREE_CODE (type) == BITINT_TYPE
898 && (TYPE_PRECISION (type) > MAX_FIXED_MODE_SIZE
899 || TYPE_MODE (type) == BLKmode)))
900 return type;
902 scalar_int_mode type_mode = SCALAR_INT_TYPE_MODE (type);
903 scalar_int_mode mode = get_narrowest_mode (type_mode);
904 if (GET_MODE_SIZE (type_mode) <= GET_MODE_SIZE (mode))
905 return type;
/* Narrowing only pays off for arrays of some size; be stricter when not
   optimizing this block for size.  */
907 if (len < (optimize_bb_for_size_p (gimple_bb (m_switch)) ? 2 : 32))
908 return type;
/* For each constant, widen MODE until the constant fits, keeping SIGN
   consistent across all constants seen so far.  */
910 FOR_EACH_VEC_SAFE_ELT (m_constructors[num], i, elt)
912 wide_int cst;
914 if (TREE_CODE (elt->value) != INTEGER_CST)
915 return type;
917 cst = wi::to_wide (elt->value);
918 while (1)
920 unsigned int prec = GET_MODE_BITSIZE (mode);
921 if (prec > HOST_BITS_PER_WIDE_INT)
922 return type;
924 if (sign >= 0 && cst == wi::zext (cst, prec))
926 if (sign == 0 && cst == wi::sext (cst, prec))
927 break;
928 sign = 1;
929 break;
931 if (sign <= 0 && cst == wi::sext (cst, prec))
933 sign = -1;
934 break;
937 if (sign == 1)
938 sign = 0;
940 if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
941 || GET_MODE_SIZE (mode) >= GET_MODE_SIZE (type_mode))
942 return type;
/* If both representations still fit, fall back to TYPE's own sign.  */
946 if (sign == 0)
947 sign = TYPE_UNSIGNED (type) ? 1 : -1;
948 smaller_type = lang_hooks.types.type_for_mode (mode, sign >= 0);
949 if (GET_MODE_SIZE (type_mode)
950 <= GET_MODE_SIZE (SCALAR_INT_TYPE_MODE (smaller_type)))
951 return type;
953 return smaller_type;
956 /* Create an appropriate array type and declaration and assemble a static
957 array variable. Also create a load statement that initializes
958 the variable in question with a value from the static array. SWTCH is
959 the switch statement being converted, NUM is the index to
960 arrays of constructors, default values and target SSA names
961 for this particular array. ARR_INDEX_TYPE is the type of the index
962 of the new array, PHI is the phi node of the final BB that corresponds
963 to the value that will be loaded from the created array. TIDX
964 is an ssa name of a temporary variable holding the index for loads from the
965 new array. */
967 void
968 switch_conversion::build_one_array (int num, tree arr_index_type,
969 gphi *phi, tree tidx)
971 tree name;
972 gimple *load;
973 gimple_stmt_iterator gsi = gsi_for_stmt (m_switch);
974 location_t loc = gimple_location (m_switch);
976 gcc_assert (m_default_values[num]);
978 name = copy_ssa_name (PHI_RESULT (phi));
979 m_target_inbound_names[num] = name;
/* If the values are a linear function a * index + b of the case index,
   compute them directly instead of emitting a lookup table.  */
981 vec<constructor_elt, va_gc> *constructor = m_constructors[num];
982 wide_int coeff_a, coeff_b;
983 bool linear_p = contains_linear_function_p (constructor, &coeff_a, &coeff_b);
984 tree type;
985 if (linear_p
986 && (type = range_check_type (TREE_TYPE ((*constructor)[0].value))))
988 if (dump_file && coeff_a.to_uhwi () > 0)
989 fprintf (dump_file, "Linear transformation with A = %" PRId64
990 " and B = %" PRId64 "\n", coeff_a.to_shwi (),
991 coeff_b.to_shwi ());
993 /* We must use type of constructor values. */
994 gimple_seq seq = NULL;
995 tree tmp = gimple_convert (&seq, type, m_index_expr);
996 tree tmp2 = gimple_build (&seq, MULT_EXPR, type,
997 wide_int_to_tree (type, coeff_a), tmp);
998 tree tmp3 = gimple_build (&seq, PLUS_EXPR, type, tmp2,
999 wide_int_to_tree (type, coeff_b));
1000 tree tmp4 = gimple_convert (&seq, TREE_TYPE (name), tmp3);
1001 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
1002 load = gimple_build_assign (name, tmp4);
1004 else
/* Otherwise materialize a static constant array (CSWTCH) and load the
   value from it, possibly narrowing the element type first.  */
1006 tree array_type, ctor, decl, value_type, fetch, default_type;
1008 default_type = TREE_TYPE (m_default_values[num]);
1009 value_type = array_value_type (default_type, num);
1010 array_type = build_array_type (value_type, arr_index_type);
1011 if (default_type != value_type)
1013 unsigned int i;
1014 constructor_elt *elt;
1016 FOR_EACH_VEC_SAFE_ELT (constructor, i, elt)
1017 elt->value = fold_convert (value_type, elt->value);
1019 ctor = build_constructor (array_type, constructor);
1020 TREE_CONSTANT (ctor) = true;
1021 TREE_STATIC (ctor) = true;
1023 decl = build_decl (loc, VAR_DECL, NULL_TREE, array_type);
1024 TREE_STATIC (decl) = 1;
1025 DECL_INITIAL (decl) = ctor;
1027 DECL_NAME (decl) = create_tmp_var_name ("CSWTCH");
1028 DECL_ARTIFICIAL (decl) = 1;
1029 DECL_IGNORED_P (decl) = 1;
1030 TREE_CONSTANT (decl) = 1;
1031 TREE_READONLY (decl) = 1;
1032 DECL_IGNORED_P (decl) = 1;
/* Keep the table usable when this function is offloaded (OpenMP/OpenACC).  */
1033 if (offloading_function_p (cfun->decl))
1034 DECL_ATTRIBUTES (decl)
1035 = tree_cons (get_identifier ("omp declare target"), NULL_TREE,
1036 NULL_TREE);
1037 varpool_node::finalize_decl (decl);
1039 fetch = build4 (ARRAY_REF, value_type, decl, tidx, NULL_TREE,
1040 NULL_TREE);
1041 if (default_type != value_type)
/* Widen the loaded value back to the type the PHI expects.  */
1043 fetch = fold_convert (default_type, fetch);
1044 fetch = force_gimple_operand_gsi (&gsi, fetch, true, NULL_TREE,
1045 true, GSI_SAME_STMT);
1047 load = gimple_build_assign (name, fetch);
1050 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
1051 update_stmt (load);
1052 m_arr_ref_last = load;
/* Builds and initializes static arrays initialized with values gathered from
   the switch statement.  Also creates statements that load values from
   them.

   Emits, before the switch, the computation of the zero-based array index
   TIDX = (index_expr - range_min) and then one array load per non-virtual
   PHI in the final block (via build_one_array).  Records the first emitted
   statement in m_arr_ref_first; the virtual PHI argument reaching the final
   block is cached in m_target_vop.  */

void
switch_conversion::build_arrays ()
{
  tree arr_index_type;
  tree tidx, sub, utype, tidxtype;
  gimple *stmt;
  gimple_stmt_iterator gsi;
  gphi_iterator gpi;
  int i;
  location_t loc = gimple_location (m_switch);

  gsi = gsi_for_stmt (m_switch);

  /* Make sure we do not generate arithmetics in a subrange.  */
  utype = TREE_TYPE (m_index_expr);
  if (TREE_TYPE (utype))
    /* A subrange type: do the arithmetic in its unsigned base type.  */
    utype = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (utype)), 1);
  else if (TREE_CODE (utype) == BITINT_TYPE
	   && (TYPE_PRECISION (utype) > MAX_FIXED_MODE_SIZE
	       || TYPE_MODE (utype) == BLKmode))
    /* Large/huge _BitInt has no scalar integer mode to query; just use the
       unsigned variant of the type itself.  */
    utype = unsigned_type_for (utype);
  else
    utype = lang_hooks.types.type_for_mode (TYPE_MODE (utype), 1);

  /* The array index itself must fit in sizetype.  */
  if (TYPE_PRECISION (utype) > TYPE_PRECISION (sizetype))
    tidxtype = sizetype;
  else
    tidxtype = utype;

  arr_index_type = build_index_type (m_range_size);
  tidx = make_ssa_name (tidxtype);
  sub = fold_build2_loc (loc, MINUS_EXPR, utype,
			 fold_convert_loc (loc, utype, m_index_expr),
			 fold_convert_loc (loc, utype, m_range_min));
  sub = fold_convert (tidxtype, sub);
  sub = force_gimple_operand_gsi (&gsi, sub,
				  false, NULL, true, GSI_SAME_STMT);
  stmt = gimple_build_assign (tidx, sub);

  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
  update_stmt (stmt);
  /* Remember the first statement of the converted sequence; later phases
     (gen_inbound_check) insert relative to it.  */
  m_arr_ref_first = stmt;

  for (gpi = gsi_start_phis (m_final_bb), i = 0;
       !gsi_end_p (gpi); gsi_next (&gpi))
    {
      gphi *phi = gpi.phi ();
      if (!virtual_operand_p (gimple_phi_result (phi)))
	build_one_array (i++, arr_index_type, phi, tidx);
      else
	{
	  /* For the virtual PHI, find an edge into the final block coming
	     from the switch (directly or through one empty case block) and
	     remember the VOP flowing along it.  */
	  edge e;
	  edge_iterator ei;
	  FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
	    {
	      if (e->dest == m_final_bb)
		break;
	      if (!m_default_case_nonstandard
		  || e->dest != m_default_bb)
		{
		  e = single_succ_edge (e->dest);
		  break;
		}
	    }
	  gcc_assert (e && e->dest == m_final_bb);
	  m_target_vop = PHI_ARG_DEF_FROM_EDGE (phi, e);
	}
    }
}
1128 /* Generates and appropriately inserts loads of default values at the position
1129 given by GSI. Returns the last inserted statement. */
1131 gassign *
1132 switch_conversion::gen_def_assigns (gimple_stmt_iterator *gsi)
1134 int i;
1135 gassign *assign = NULL;
1137 for (i = 0; i < m_phi_count; i++)
1139 tree name = copy_ssa_name (m_target_inbound_names[i]);
1140 m_target_outbound_names[i] = name;
1141 assign = gimple_build_assign (name, m_default_values[i]);
1142 gsi_insert_before (gsi, assign, GSI_SAME_STMT);
1143 update_stmt (assign);
1145 return assign;
1148 /* Deletes the unused bbs and edges that now contain the switch statement and
1149 its empty branch bbs. BBD is the now dead BB containing
1150 the original switch statement, FINAL is the last BB of the converted
1151 switch statement (in terms of succession). */
1153 void
1154 switch_conversion::prune_bbs (basic_block bbd, basic_block final,
1155 basic_block default_bb)
1157 edge_iterator ei;
1158 edge e;
1160 for (ei = ei_start (bbd->succs); (e = ei_safe_edge (ei)); )
1162 basic_block bb;
1163 bb = e->dest;
1164 remove_edge (e);
1165 if (bb != final && bb != default_bb)
1166 delete_basic_block (bb);
1168 delete_basic_block (bbd);
1171 /* Add values to phi nodes in final_bb for the two new edges. E1F is the edge
1172 from the basic block loading values from an array and E2F from the basic
1173 block loading default values. BBF is the last switch basic block (see the
1174 bbf description in the comment below). */
1176 void
1177 switch_conversion::fix_phi_nodes (edge e1f, edge e2f, basic_block bbf)
1179 gphi_iterator gsi;
1180 int i;
1182 for (gsi = gsi_start_phis (bbf), i = 0;
1183 !gsi_end_p (gsi); gsi_next (&gsi))
1185 gphi *phi = gsi.phi ();
1186 tree inbound, outbound;
1187 if (virtual_operand_p (gimple_phi_result (phi)))
1188 inbound = outbound = m_target_vop;
1189 else
1191 inbound = m_target_inbound_names[i];
1192 outbound = m_target_outbound_names[i++];
1194 add_phi_arg (phi, inbound, e1f, UNKNOWN_LOCATION);
1195 if (!m_default_case_nonstandard)
1196 add_phi_arg (phi, outbound, e2f, UNKNOWN_LOCATION);
/* Creates a check whether the switch expression value actually falls into the
   range given by all the cases.  If it does not, the temporaries are loaded
   with default values instead.

   Builds the diamond
       bb0:  tidx = index - min;  if (tidx <= range) goto bb1; else goto bb2;
       bb1:  array loads (built earlier by build_arrays)
       bb2:  default-value assignments (or the preexisting default block
	     when m_default_case_nonstandard)
       bbF:  PHIs merging bb1/bb2 results
   and then repairs edge flags, probabilities, block counts and the
   dominator tree.  */

void
switch_conversion::gen_inbound_check ()
{
  tree label_decl1 = create_artificial_label (UNKNOWN_LOCATION);
  tree label_decl2 = create_artificial_label (UNKNOWN_LOCATION);
  tree label_decl3 = create_artificial_label (UNKNOWN_LOCATION);
  glabel *label1, *label2, *label3;
  tree utype, tidx;
  tree bound;

  gcond *cond_stmt;

  gassign *last_assign = NULL;
  gimple_stmt_iterator gsi;
  basic_block bb0, bb1, bb2, bbf, bbd;
  edge e01 = NULL, e02, e21, e1d, e1f, e2f;
  location_t loc = gimple_location (m_switch);

  gcc_assert (m_default_values);

  bb0 = gimple_bb (m_switch);

  /* TIDX is the zero-based index computed by build_arrays.  */
  tidx = gimple_assign_lhs (m_arr_ref_first);
  utype = TREE_TYPE (tidx);

  /* (end of) block 0 */
  gsi = gsi_for_stmt (m_arr_ref_first);
  gsi_next (&gsi);

  bound = fold_convert_loc (loc, utype, m_range_size);
  cond_stmt = gimple_build_cond (LE_EXPR, tidx, bound, NULL_TREE, NULL_TREE);
  gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
  update_stmt (cond_stmt);

  /* block 2 */
  if (!m_default_case_nonstandard)
    {
      label2 = gimple_build_label (label_decl2);
      gsi_insert_before (&gsi, label2, GSI_SAME_STMT);
      last_assign = gen_def_assigns (&gsi);
    }

  /* block 1 */
  label1 = gimple_build_label (label_decl1);
  gsi_insert_before (&gsi, label1, GSI_SAME_STMT);

  /* block F */
  gsi = gsi_start_bb (m_final_bb);
  label3 = gimple_build_label (label_decl3);
  gsi_insert_before (&gsi, label3, GSI_SAME_STMT);

  /* cfg fix */
  e02 = split_block (bb0, cond_stmt);
  bb2 = e02->dest;

  if (m_default_case_nonstandard)
    {
      /* The default block already exists; re-use it as bb2 and copy its
	 PHI arguments onto the new out-of-range edge.  */
      bb1 = bb2;
      bb2 = m_default_bb;
      e01 = e02;
      e01->flags = EDGE_TRUE_VALUE;
      e02 = make_edge (bb0, bb2, EDGE_FALSE_VALUE);
      edge e_default = find_edge (bb1, bb2);
      for (gphi_iterator gsi = gsi_start_phis (bb2);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e_default);
	  add_phi_arg (phi, arg, e02,
		       gimple_phi_arg_location_from_edge (phi, e_default));
	}
      /* Partially fix the dominator tree, if it is available.  */
      if (dom_info_available_p (CDI_DOMINATORS))
	redirect_immediate_dominators (CDI_DOMINATORS, bb1, bb0);
    }
  else
    {
      e21 = split_block (bb2, last_assign);
      bb1 = e21->dest;
      remove_edge (e21);
    }

  /* Split off the dead remainder (the original switch) into bbD.  */
  e1d = split_block (bb1, m_arr_ref_last);
  bbd = e1d->dest;
  remove_edge (e1d);

  /* Flags and profiles of the edge for in-range values.  */
  if (!m_default_case_nonstandard)
    e01 = make_edge (bb0, bb1, EDGE_TRUE_VALUE);
  e01->probability = m_default_prob.invert ();

  /* Flags and profiles of the edge taking care of out-of-range values.  */
  e02->flags &= ~EDGE_FALLTHRU;
  e02->flags |= EDGE_FALSE_VALUE;
  e02->probability = m_default_prob;

  bbf = m_final_bb;

  e1f = make_edge (bb1, bbf, EDGE_FALLTHRU);
  e1f->probability = profile_probability::always ();

  if (m_default_case_nonstandard)
    e2f = NULL;
  else
    {
      e2f = make_edge (bb2, bbf, EDGE_FALLTHRU);
      e2f->probability = profile_probability::always ();
    }

  /* frequencies of the new BBs */
  bb1->count = e01->count ();
  bb2->count = e02->count ();
  if (!m_default_case_nonstandard)
    bbf->count = e1f->count () + e2f->count ();

  /* Tidy blocks that have become unreachable.  */
  bool prune_default_bb = !m_default_case_nonstandard
    && !m_exp_index_transform_applied;
  prune_bbs (bbd, m_final_bb, prune_default_bb ? NULL : m_default_bb);

  /* Fixup the PHI nodes in bbF.  */
  fix_phi_nodes (e1f, e2f, bbf);

  /* Fix the dominator tree, if it is available.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      vec<basic_block> bbs_to_fix_dom;

      set_immediate_dominator (CDI_DOMINATORS, bb1, bb0);
      if (!m_default_case_nonstandard)
	set_immediate_dominator (CDI_DOMINATORS, bb2, bb0);
      if (! get_immediate_dominator (CDI_DOMINATORS, bbf))
	/* If bbD was the immediate dominator ...  */
	set_immediate_dominator (CDI_DOMINATORS, bbf, bb0);

      bbs_to_fix_dom.create (3 + (bb2 != bbf));
      bbs_to_fix_dom.quick_push (bb0);
      bbs_to_fix_dom.quick_push (bb1);
      if (bb2 != bbf)
	bbs_to_fix_dom.quick_push (bb2);
      bbs_to_fix_dom.quick_push (bbf);

      iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
      bbs_to_fix_dom.release ();
    }
}
/* The following function is invoked on every switch statement (the current
   one is given in SWTCH) and runs the individual phases of switch
   conversion on it one after another until one fails or the conversion
   is completed.  On success, NULL is in m_reason, otherwise points
   to a string with the reason why the conversion failed.  */

void
switch_conversion::expand (gswitch *swtch)
{
  /* Group case labels so that we get the right results from the heuristics
     that decide on the code generation approach for this switch.  */
  m_cfg_altered |= group_case_labels_stmt (swtch);

  /* If this switch is now a degenerate case with only a default label,
     there is nothing left for us to do.  */
  if (gimple_switch_num_labels (swtch) < 2)
    {
      m_reason = "switch is a degenerate case";
      return;
    }

  /* Gather the statistics (range, case count, unique targets, default
     block, final block, ...) the checks below rely on.  */
  collect (swtch);

  /* No error markers should reach here (they should be filtered out
     during gimplification).  */
  gcc_checking_assert (TREE_TYPE (m_index_expr) != error_mark_node);

  /* Prefer bit test if possible.  */
  if (tree_fits_uhwi_p (m_range_size)
      && bit_test_cluster::can_be_handled (tree_to_uhwi (m_range_size), m_uniq)
      && bit_test_cluster::is_beneficial (m_count, m_uniq))
    {
      m_reason = "expanding as bit test is preferable";
      return;
    }

  if (m_uniq <= 2)
    {
      /* This will be expanded as a decision tree .  */
      m_reason = "expanding as jumps is preferable";
      return;
    }

  /* If there is no common successor, we cannot do the transformation.  */
  if (!m_final_bb)
    {
      m_reason = "no common successor to all case label target blocks found";
      return;
    }

  /* Sometimes it is possible to use the "exponential index transform" to help
     switch conversion convert switches which it otherwise could not convert.
     However, we want to do this transform only when we know that switch
     conversion will then really be able to convert the switch.  So we first
     check if the transformation is applicable and then maybe later do the
     transformation.  */
  bool exp_transform_viable = is_exp_index_transform_viable (swtch);

  /* Check the case label values are within reasonable range.

     If we will be doing exponential index transform, the range will be always
     reasonable.  */
  if (!exp_transform_viable && !check_range ())
    {
      gcc_assert (m_reason);
      return;
    }

  /* For all the cases, see whether they are empty, the assignments they
     represent constant and so on...  */
  if (!check_all_empty_except_final ())
    {
      gcc_assert (m_reason);
      return;
    }
  if (!check_final_bb ())
    {
      gcc_assert (m_reason);
      return;
    }

  /* At this point all checks have passed and we can proceed with the
     transformation.  */

  if (exp_transform_viable)
    exp_index_transform (swtch);

  create_temp_arrays ();
  gather_default_values (m_default_case_nonstandard
			 ? gimple_switch_label (swtch, 1)
			 : gimple_switch_default_label (swtch));
  build_constructors ();

  build_arrays (); /* Build the static arrays and assignments.  */
  gen_inbound_check ();	/* Build the bounds check.  */

  m_cfg_altered = true;
}
1450 /* Destructor. */
1452 switch_conversion::~switch_conversion ()
1454 XDELETEVEC (m_constructors);
1455 XDELETEVEC (m_default_values);
1458 /* Constructor. */
1460 group_cluster::group_cluster (vec<cluster *> &clusters,
1461 unsigned start, unsigned end)
1463 gcc_checking_assert (end - start + 1 >= 1);
1464 m_prob = profile_probability::never ();
1465 m_cases.create (end - start + 1);
1466 for (unsigned i = start; i <= end; i++)
1468 m_cases.quick_push (static_cast<simple_cluster *> (clusters[i]));
1469 m_prob += clusters[i]->m_prob;
1471 m_subtree_prob = m_prob;
1474 /* Destructor. */
1476 group_cluster::~group_cluster ()
1478 for (unsigned i = 0; i < m_cases.length (); i++)
1479 delete m_cases[i];
1481 m_cases.release ();
1484 /* Dump content of a cluster. */
1486 void
1487 group_cluster::dump (FILE *f, bool details)
1489 unsigned total_values = 0;
1490 for (unsigned i = 0; i < m_cases.length (); i++)
1491 total_values += m_cases[i]->get_range (m_cases[i]->get_low (),
1492 m_cases[i]->get_high ());
1494 unsigned comparison_count = 0;
1495 for (unsigned i = 0; i < m_cases.length (); i++)
1497 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1498 comparison_count += sc->get_comparison_count ();
1501 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1502 fprintf (f, "%s", get_type () == JUMP_TABLE ? "JT" : "BT");
1504 if (details)
1505 fprintf (f, "(values:%d comparisons:%d range:" HOST_WIDE_INT_PRINT_DEC
1506 " density: %.2f%%)", total_values, comparison_count, range,
1507 100.0f * comparison_count / range);
1509 fprintf (f, ":");
1510 PRINT_CASE (f, get_low ());
1511 fprintf (f, "-");
1512 PRINT_CASE (f, get_high ());
1513 fprintf (f, " ");
/* Emit GIMPLE code to handle the cluster.

   Builds a fresh gswitch covering just this cluster's cases in M_CASE_BB
   (later pass lowering turns it into an actual jump table), wires up edges
   to the case blocks and DEFAULT_BB, and distributes edge probabilities
   proportionally to the number of values each case covers.  */

void
jump_table_cluster::emit (tree index_expr, tree,
			  tree default_label_expr, basic_block default_bb,
			  location_t loc)
{
  tree low = get_low ();
  unsigned HOST_WIDE_INT range = get_range (low, get_high ());
  unsigned HOST_WIDE_INT nondefault_range = 0;
  bool bitint = false;
  gimple_stmt_iterator gsi = gsi_start_bb (m_case_bb);

  /* For large/huge _BitInt, subtract low from index_expr, cast to unsigned
     DImode type (get_range doesn't support ranges larger than 64-bits)
     and subtract low from all case values as well.  */
  if (TREE_CODE (TREE_TYPE (index_expr)) == BITINT_TYPE
      && TYPE_PRECISION (TREE_TYPE (index_expr)) > GET_MODE_PRECISION (DImode))
    {
      bitint = true;
      tree this_low = low, type;
      gimple *g;
      gimple_seq seq = NULL;
      if (!TYPE_OVERFLOW_WRAPS (TREE_TYPE (index_expr)))
	{
	  /* Wrap-around arithmetic is needed for the subtraction below.  */
	  type = unsigned_type_for (TREE_TYPE (index_expr));
	  index_expr = gimple_convert (&seq, type, index_expr);
	  this_low = fold_convert (type, this_low);
	}
      this_low = const_unop (NEGATE_EXPR, TREE_TYPE (this_low), this_low);
      index_expr = gimple_build (&seq, PLUS_EXPR, TREE_TYPE (index_expr),
				 index_expr, this_low);
      /* Values above the unsigned DImode maximum cannot match any case;
	 branch them straight to the default block.  */
      type = build_nonstandard_integer_type (GET_MODE_PRECISION (DImode), 1);
      g = gimple_build_cond (GT_EXPR, index_expr,
			     fold_convert (TREE_TYPE (index_expr),
					   TYPE_MAX_VALUE (type)),
			     NULL_TREE, NULL_TREE);
      gimple_seq_add_stmt (&seq, g);
      gimple_seq_set_location (seq, loc);
      gsi_insert_seq_after (&gsi, seq, GSI_NEW_STMT);
      edge e1 = split_block (m_case_bb, g);
      e1->flags = EDGE_FALSE_VALUE;
      e1->probability = profile_probability::likely ();
      edge e2 = make_edge (e1->src, default_bb, EDGE_TRUE_VALUE);
      e2->probability = e1->probability.invert ();
      gsi = gsi_start_bb (e1->dest);
      seq = NULL;
      index_expr = gimple_convert (&seq, type, index_expr);
      gimple_seq_set_location (seq, loc);
      gsi_insert_seq_after (&gsi, seq, GSI_NEW_STMT);
    }

  /* For jump table we just emit a new gswitch statement that will
     be latter lowered to jump table.  */
  auto_vec <tree> labels;
  labels.create (m_cases.length ());

  basic_block case_bb = gsi_bb (gsi);
  make_edge (case_bb, default_bb, 0);
  for (unsigned i = 0; i < m_cases.length (); i++)
    {
      tree lab = unshare_expr (m_cases[i]->m_case_label_expr);
      if (bitint)
	{
	  /* Rebase the case values by LOW to match the adjusted index.  */
	  CASE_LOW (lab)
	    = fold_convert (TREE_TYPE (index_expr),
			    const_binop (MINUS_EXPR,
					 TREE_TYPE (CASE_LOW (lab)),
					 CASE_LOW (lab), low));
	  if (CASE_HIGH (lab))
	    CASE_HIGH (lab)
	      = fold_convert (TREE_TYPE (index_expr),
			      const_binop (MINUS_EXPR,
					   TREE_TYPE (CASE_HIGH (lab)),
					   CASE_HIGH (lab), low));
	}
      labels.quick_push (lab);
      make_edge (case_bb, m_cases[i]->m_case_bb, 0);
    }

  gswitch *s = gimple_build_switch (index_expr,
				    unshare_expr (default_label_expr), labels);
  gimple_set_location (s, loc);
  gsi_insert_after (&gsi, s, GSI_NEW_STMT);

  /* Set up even probabilities for all cases.  */
  for (unsigned i = 0; i < m_cases.length (); i++)
    {
      simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
      edge case_edge = find_edge (case_bb, sc->m_case_bb);
      unsigned HOST_WIDE_INT case_range
	= sc->get_range (sc->get_low (), sc->get_high ());
      nondefault_range += case_range;

      /* case_edge->aux is number of values in a jump-table that are covered
	 by the case_edge.  */
      case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + case_range);
    }

  edge default_edge = gimple_switch_default_edge (cfun, s);
  default_edge->probability = profile_probability::never ();

  for (unsigned i = 0; i < m_cases.length (); i++)
    {
      simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
      edge case_edge = find_edge (case_bb, sc->m_case_bb);
      case_edge->probability
	= profile_probability::always ().apply_scale ((intptr_t)case_edge->aux,
						      range);
    }

  /* Number of non-default values is probability of default edge.  */
  default_edge->probability
    += profile_probability::always ().apply_scale (nondefault_range,
						   range).invert ();

  switch_decision_tree::reset_out_edges_aux (s);
}
/* Find jump tables of given CLUSTERS, where all members of the vector
   are of type simple_cluster.  New clusters are returned.

   Uses dynamic programming: min[i] records the cheapest partitioning of the
   first I clusters (cluster count, start of its last group, and number of
   cases left outside jump tables), then the optimal grouping is rebuilt
   back-to-front from the m_start links.  */

vec<cluster *>
jump_table_cluster::find_jump_tables (vec<cluster *> &clusters)
{
  if (!is_enabled ())
    return clusters.copy ();

  unsigned l = clusters.length ();

  auto_vec<min_cluster_item> min;
  min.reserve (l + 1);

  /* min[0] is the trivial solution for the empty prefix.  */
  min.quick_push (min_cluster_item (0, 0, 0));

  unsigned HOST_WIDE_INT max_ratio
    = (optimize_insn_for_size_p ()
       ? param_jump_table_max_growth_ratio_for_size
       : param_jump_table_max_growth_ratio_for_speed);

  for (unsigned i = 1; i <= l; i++)
    {
      /* Set minimal # of clusters with i-th item to infinite.  */
      min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));

      /* Pre-calculate number of comparisons for the clusters.  */
      HOST_WIDE_INT comparison_count = 0;
      for (unsigned k = 0; k <= i - 1; k++)
	{
	  simple_cluster *sc = static_cast<simple_cluster *> (clusters[k]);
	  comparison_count += sc->get_comparison_count ();
	}

      /* Try every possible start J for a jump table ending at I - 1;
	 COMPARISON_COUNT is decremented as J advances so that it always
	 covers exactly clusters J .. I - 1.  */
      for (unsigned j = 0; j < i; j++)
	{
	  unsigned HOST_WIDE_INT s = min[j].m_non_jt_cases;
	  if (i - j < case_values_threshold ())
	    s += i - j;

	  /* Prefer clusters with smaller number of numbers covered.  */
	  if ((min[j].m_count + 1 < min[i].m_count
	       || (min[j].m_count + 1 == min[i].m_count
		   && s < min[i].m_non_jt_cases))
	      && can_be_handled (clusters, j, i - 1, max_ratio,
				 comparison_count))
	    min[i] = min_cluster_item (min[j].m_count + 1, j, s);

	  simple_cluster *sc = static_cast<simple_cluster *> (clusters[j]);
	  comparison_count -= sc->get_comparison_count ();
	}

      gcc_checking_assert (comparison_count == 0);
      gcc_checking_assert (min[i].m_count != INT_MAX);
    }

  /* No result.  */
  if (min[l].m_count == l)
    return clusters.copy ();

  vec<cluster *> output;
  output.create (4);

  /* Find and build the clusters.  */
  for (unsigned int end = l;;)
    {
      int start = min[end].m_start;

      /* Do not allow clusters with small number of cases.  */
      if (is_beneficial (clusters, start, end - 1))
	output.safe_push (new jump_table_cluster (clusters, start, end - 1));
      else
	for (int i = end - 1; i >= start; i--)
	  output.safe_push (clusters[i]);

      end = start;

      if (start <= 0)
	break;
    }

  /* Groups were pushed back-to-front; restore the original order.  */
  output.reverse ();
  return output;
}
1720 /* Return true when cluster starting at START and ending at END (inclusive)
1721 can build a jump-table. */
1723 bool
1724 jump_table_cluster::can_be_handled (const vec<cluster *> &clusters,
1725 unsigned start, unsigned end,
1726 unsigned HOST_WIDE_INT max_ratio,
1727 unsigned HOST_WIDE_INT comparison_count)
1729 /* If the switch is relatively small such that the cost of one
1730 indirect jump on the target are higher than the cost of a
1731 decision tree, go with the decision tree.
1733 If range of values is much bigger than number of values,
1734 or if it is too large to represent in a HOST_WIDE_INT,
1735 make a sequence of conditional branches instead of a dispatch.
1737 The definition of "much bigger" depends on whether we are
1738 optimizing for size or for speed.
1740 For algorithm correctness, jump table for a single case must return
1741 true. We bail out in is_beneficial if it's called just for
1742 a single case. */
1743 if (start == end)
1744 return true;
1746 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1747 clusters[end]->get_high ());
1748 /* Check overflow. */
1749 if (range == 0)
1750 return false;
1752 if (range > HOST_WIDE_INT_M1U / 100)
1753 return false;
1755 unsigned HOST_WIDE_INT lhs = 100 * range;
1756 if (lhs < range)
1757 return false;
1759 return lhs <= max_ratio * comparison_count;
1762 /* Return true if cluster starting at START and ending at END (inclusive)
1763 is profitable transformation. */
1765 bool
1766 jump_table_cluster::is_beneficial (const vec<cluster *> &,
1767 unsigned start, unsigned end)
1769 /* Single case bail out. */
1770 if (start == end)
1771 return false;
1773 return end - start + 1 >= case_values_threshold ();
/* Find bit tests of given CLUSTERS, where all members of the vector are of
   type simple_cluster.  Use a fast algorithm that might not find the optimal
   solution (minimal number of clusters on the output).  New clusters are
   returned.

   You should call find_bit_tests () instead of calling this function
   directly.  */

vec<cluster *>
bit_test_cluster::find_bit_tests_fast (vec<cluster *> &clusters)
{
  unsigned l = clusters.length ();
  vec<cluster *> output;

  output.create (l);

  /* Look at sliding BITS_PER_WORD sized windows in the switch value space
     and determine if they are suitable for a bit test cluster.  Worst case
     this can examine every value BITS_PER_WORD-1 times.  */
  unsigned k;
  for (unsigned i = 0; i < l; i += k)
    {
      hash_set<basic_block> targets;
      cluster *start_cluster = clusters[i];

      /* Find the biggest k such that clusters i to i+k-1 can be turned into a
	 one big bit test cluster.  */
      k = 0;
      while (i + k < l)
	{
	  cluster *end_cluster = clusters[i + k];

	  /* Does value range fit into the BITS_PER_WORD window?  */
	  HOST_WIDE_INT w = cluster::get_range (start_cluster->get_low (),
						end_cluster->get_high ());
	  if (w == 0 || w > BITS_PER_WORD)
	    break;

	  /* Check for max # of targets.  */
	  if (targets.elements () == m_max_case_bit_tests
	      && !targets.contains (end_cluster->m_case_bb))
	    break;

	  targets.add (end_cluster->m_case_bb);
	  k++;
	}

      if (is_beneficial (k, targets.elements ()))
	{
	  output.safe_push (new bit_test_cluster (clusters, i, i + k - 1,
						  i == 0 && k == l));
	}
      else
	{
	  /* Not worth it: emit the first cluster unchanged and retry the
	     window starting at the next one.  */
	  output.safe_push (clusters[i]);
	  /* ??? Might be able to skip more.  */
	  k = 1;
	}
    }

  return output;
}
/* Find bit tests of given CLUSTERS, where all members of the vector
   are of type simple_cluster.  Use a slow (quadratic) algorithm that always
   finds the optimal solution (minimal number of clusters on the output).  New
   clusters are returned.

   You should call find_bit_tests () instead of calling this function
   directly.  */

vec<cluster *>
bit_test_cluster::find_bit_tests_slow (vec<cluster *> &clusters)
{
  unsigned l = clusters.length ();
  auto_vec<min_cluster_item> min;
  min.reserve (l + 1);

  /* min[i] is the cheapest partitioning of the first I clusters;
     min[0] covers the trivial empty prefix.  */
  min.quick_push (min_cluster_item (0, 0, 0));

  for (unsigned i = 1; i <= l; i++)
    {
      /* Set minimal # of clusters with i-th item to infinite.  */
      min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));

      /* Try ending the last group at I - 1 with every possible start J.  */
      for (unsigned j = 0; j < i; j++)
	{
	  if (min[j].m_count + 1 < min[i].m_count
	      && can_be_handled (clusters, j, i - 1))
	    min[i] = min_cluster_item (min[j].m_count + 1, j, INT_MAX);
	}

      gcc_checking_assert (min[i].m_count != INT_MAX);
    }

  /* No result.  */
  if (min[l].m_count == l)
    return clusters.copy ();

  vec<cluster *> output;
  output.create (4);

  /* Find and build the clusters.  */
  for (unsigned end = l;;)
    {
      int start = min[end].m_start;

      if (is_beneficial (clusters, start, end - 1))
	{
	  bool entire = start == 0 && end == clusters.length ();
	  output.safe_push (new bit_test_cluster (clusters, start, end - 1,
						  entire));
	}
      else
	for (int i = end - 1; i >= start; i--)
	  output.safe_push (clusters[i]);

      end = start;

      if (start <= 0)
	break;
    }

  /* Groups were pushed back-to-front; restore the original order.  */
  output.reverse ();
  return output;
}
1903 /* Find bit tests of given CLUSTERS, where all members of the vector
1904 are of type simple_cluster. MAX_C is the approx max number of cases per
1905 label. New clusters are returned. */
1907 vec<cluster *>
1908 bit_test_cluster::find_bit_tests (vec<cluster *> &clusters, int max_c)
1910 if (!is_enabled () || max_c == 1)
1911 return clusters.copy ();
1913 unsigned l = clusters.length ();
1915 /* Note: l + 1 is the number of cases of the switch. */
1916 if (l + 1 > (unsigned) param_switch_lower_slow_alg_max_cases)
1917 return find_bit_tests_fast (clusters);
1918 else
1919 return find_bit_tests_slow (clusters);
1922 /* Return true when RANGE of case values with UNIQ labels
1923 can build a bit test. */
1925 bool
1926 bit_test_cluster::can_be_handled (unsigned HOST_WIDE_INT range,
1927 unsigned int uniq)
1929 /* Check overflow. */
1930 if (range == 0)
1931 return false;
1933 if (range >= GET_MODE_BITSIZE (word_mode))
1934 return false;
1936 return uniq <= m_max_case_bit_tests;
1939 /* Return true when cluster starting at START and ending at END (inclusive)
1940 can build a bit test. */
1942 bool
1943 bit_test_cluster::can_be_handled (const vec<cluster *> &clusters,
1944 unsigned start, unsigned end)
1946 auto_vec<int, m_max_case_bit_tests> dest_bbs;
1947 /* For algorithm correctness, bit test for a single case must return
1948 true. We bail out in is_beneficial if it's called just for
1949 a single case. */
1950 if (start == end)
1951 return true;
1953 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1954 clusters[end]->get_high ());
1956 /* Make a guess first. */
1957 if (!can_be_handled (range, m_max_case_bit_tests))
1958 return false;
1960 for (unsigned i = start; i <= end; i++)
1962 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1963 /* m_max_case_bit_tests is very small integer, thus the operation
1964 is constant. */
1965 if (!dest_bbs.contains (sc->m_case_bb->index))
1967 if (dest_bbs.length () >= m_max_case_bit_tests)
1968 return false;
1969 dest_bbs.quick_push (sc->m_case_bb->index);
1973 return true;
1976 /* Return true when COUNT of cases of UNIQ labels is beneficial for bit test
1977 transformation. */
1979 bool
1980 bit_test_cluster::is_beneficial (unsigned count, unsigned uniq)
1982 return (((uniq == 1 && count >= 3)
1983 || (uniq == 2 && count >= 5)
1984 || (uniq == 3 && count >= 6)));
1987 /* Return true if cluster starting at START and ending at END (inclusive)
1988 is profitable transformation. */
1990 bool
1991 bit_test_cluster::is_beneficial (const vec<cluster *> &clusters,
1992 unsigned start, unsigned end)
1994 /* Single case bail out. */
1995 if (start == end)
1996 return false;
1998 auto_bitmap dest_bbs;
2000 for (unsigned i = start; i <= end; i++)
2002 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
2003 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
2006 unsigned uniq = bitmap_count_bits (dest_bbs);
2007 unsigned count = end - start + 1;
2008 return is_beneficial (count, uniq);
2011 /* Comparison function for qsort to order bit tests by decreasing
2012 probability of execution. */
2015 case_bit_test::cmp (const void *p1, const void *p2)
2017 const case_bit_test *const d1 = (const case_bit_test *) p1;
2018 const case_bit_test *const d2 = (const case_bit_test *) p2;
2020 if (d2->bits != d1->bits)
2021 return d2->bits - d1->bits;
2023 /* Stabilize the sort. */
2024 return (LABEL_DECL_UID (CASE_LABEL (d2->label))
2025 - LABEL_DECL_UID (CASE_LABEL (d1->label)));
2028 /* Expand a switch statement by a short sequence of bit-wise
2029 comparisons. "switch(x)" is effectively converted into
2030 "if ((1 << (x-MINVAL)) & CST)" where CST and MINVAL are
2031 integer constants.
2033 INDEX_EXPR is the value being switched on.
2035 MINVAL is the lowest case value of in the case nodes,
2036 and RANGE is highest value minus MINVAL. MINVAL and RANGE
2037 are not guaranteed to be of the same type as INDEX_EXPR
2038 (the gimplifier doesn't change the type of case label values,
2039 and MINVAL and RANGE are derived from those values).
2040 MAXVAL is MINVAL + RANGE.
2042 There *MUST* be max_case_bit_tests or less unique case
2043 node targets. */
2045 void
bit_test_cluster::emit (tree index_expr, tree index_type,
			tree, basic_block default_bb, location_t loc)
{
  /* One entry per distinct target block; each holds the word-mode bit mask
     of the (rebased) case values that branch there.  */
  case_bit_test test[m_max_case_bit_tests] = { {} };
  unsigned int i, j, k;
  unsigned int count;

  tree unsigned_index_type = range_check_type (index_type);

  gimple_stmt_iterator gsi;
  gassign *shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);
  int prec = TYPE_PRECISION (word_type_node);
  wide_int wone = wi::one (prec);

  tree minval = get_low ();
  tree maxval = get_high ();

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.  */
  count = 0;
  for (i = 0; i < m_cases.length (); i++)
    {
      unsigned int lo, hi;
      simple_cluster *n = static_cast<simple_cluster *> (m_cases[i]);
      /* Reuse an existing entry if this target block was seen before.  */
      for (k = 0; k < count; k++)
	if (n->m_case_bb == test[k].target_bb)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < m_max_case_bit_tests);
	  test[k].mask = wi::zero (prec);
	  test[k].target_bb = n->m_case_bb;
	  test[k].label = n->m_case_label_expr;
	  test[k].bits = 0;
	  test[k].prob = profile_probability::never ();
	  count++;
	}

      test[k].bits += n->get_range (n->get_low (), n->get_high ());
      test[k].prob += n->m_prob;

      /* Set one mask bit for every case value, rebased to MINVAL.  */
      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_low (), minval));
      if (n->get_high () == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_high (),
					    minval));

      for (j = lo; j <= hi; j++)
	test[k].mask |= wi::lshift (wone, j);
    }

  qsort (test, count, sizeof (*test), case_bit_test::cmp);

  /* If every possible relative value of the index expression is a valid shift
     amount, then we can merge the entry test in the bit test.  */
  bool entry_test_needed;
  int_range_max r;
  if (TREE_CODE (index_expr) == SSA_NAME
      && get_range_query (cfun)->range_of_expr (r, index_expr)
      && !r.undefined_p ()
      && !r.varying_p ()
      && wi::leu_p (r.upper_bound () - r.lower_bound (), prec - 1))
    {
      wide_int min = r.lower_bound ();
      wide_int max = r.upper_bound ();
      tree index_type = TREE_TYPE (index_expr);
      minval = fold_convert (index_type, minval);
      wide_int iminval = wi::to_wide (minval);
      /* Rebase the masks from MINVAL to the range minimum MIN, so that
	 IDX - MIN can be used directly as the shift amount.  */
      if (wi::lt_p (min, iminval, TYPE_SIGN (index_type)))
	{
	  minval = wide_int_to_tree (index_type, min);
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (test[i].mask, iminval - min);
	}
      else if (wi::gt_p (min, iminval, TYPE_SIGN (index_type)))
	{
	  minval = wide_int_to_tree (index_type, min);
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lrshift (test[i].mask, min - iminval);
	}
      maxval = wide_int_to_tree (index_type, max);
      entry_test_needed = false;
    }
  else
    entry_test_needed = true;

  /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
     the minval subtractions, but it might make the mask constants more
     expensive.  So, compare the costs.  */
  if (compare_tree_int (minval, 0) > 0 && compare_tree_int (maxval, prec) < 0)
    {
      int cost_diff;
      HOST_WIDE_INT m = tree_to_uhwi (minval);
      /* A scratch hard register number is enough for rtx costing.  */
      rtx reg = gen_raw_REG (word_mode, 10000);
      bool speed_p = optimize_insn_for_speed_p ();
      /* Cost saved: the subtraction of MINVAL we could drop.  */
      cost_diff = set_src_cost (gen_rtx_PLUS (word_mode, reg,
					      GEN_INT (-m)),
				word_mode, speed_p);
      for (i = 0; i < count; i++)
	{
	  /* Cost added: the pre-shifted masks may be more expensive
	     constants than the rebased ones.  */
	  rtx r = immed_wide_int_const (test[i].mask, word_mode);
	  cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	  r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
	  cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	}
      if (cost_diff > 0)
	{
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (test[i].mask, m);
	  minval = build_zero_cst (TREE_TYPE (minval));
	}
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (m_case_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert_loc (loc, unsigned_index_type, index_expr);
  idx = fold_build2_loc (loc, MINUS_EXPR, unsigned_index_type, idx,
			 fold_convert_loc (loc, unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  profile_probability subtree_prob = m_subtree_prob;
  profile_probability default_prob = m_default_prob;
  if (!default_prob.initialized_p ())
    default_prob = m_subtree_prob.invert ();

  if (m_handles_entire_switch && entry_test_needed)
    {
      tree range = int_const_binop (MINUS_EXPR, maxval, minval);
      /* if (idx > range) goto default */
      range
	= force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
      tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
      default_prob = default_prob / 2;
      basic_block new_bb
	= hoist_edge_and_branch_if_true (&gsi, tmp, default_bb,
					 default_prob, loc);
      gsi = gsi_last_bb (new_bb);
    }

  tmp = fold_build2_loc (loc, LSHIFT_EXPR, word_type_node, word_mode_one,
			 fold_convert_loc (loc, word_type_node, idx));

  /* csui = (1 << (word_mode) idx) */
  if (count > 1)
    {
      /* Several tests will reuse the shifted value; materialize it once.  */
      csui = make_ssa_name (word_type_node);
      tmp = force_gimple_operand_gsi (&gsi, tmp,
				      /*simple=*/false, NULL_TREE,
				      /*before=*/true, GSI_SAME_STMT);
      shift_stmt = gimple_build_assign (csui, tmp);
      gsi_insert_before (&gsi, shift_stmt, GSI_SAME_STMT);
      update_stmt (shift_stmt);
    }
  else
    csui = tmp;

  /* for each unique set of cases:
       if (const & csui) goto target  */
  for (k = 0; k < count; k++)
    {
      profile_probability prob = test[k].prob / (subtree_prob + default_prob);
      subtree_prob -= test[k].prob;
      tmp = wide_int_to_tree (word_type_node, test[k].mask);
      tmp = fold_build2_loc (loc, BIT_AND_EXPR, word_type_node, csui, tmp);
      tmp = fold_build2_loc (loc, NE_EXPR, boolean_type_node,
			     tmp, word_mode_zero);
      tmp = force_gimple_operand_gsi (&gsi, tmp,
				      /*simple=*/true, NULL_TREE,
				      /*before=*/true, GSI_SAME_STMT);
      basic_block new_bb
	= hoist_edge_and_branch_if_true (&gsi, tmp, test[k].target_bb,
					 prob, loc);
      gsi = gsi_last_bb (new_bb);
    }

  /* We should have removed all edges now.  */
  gcc_assert (EDGE_COUNT (gsi_bb (gsi)->succs) == 0);

  /* If nothing matched, go to the default label.  */
  edge e = make_edge (gsi_bb (gsi), default_bb, EDGE_FALLTHRU);
  e->probability = profile_probability::always ();
}
/* Split the basic block at the statement pointed to by GSIP, and insert
   a branch to the target basic block of E_TRUE conditional on tree
   expression COND.

   It is assumed that there is already an edge from the to-be-split
   basic block to E_TRUE->dest block.  This edge is removed, and the
   profile information on the edge is re-used for the new conditional
   jump.

   The CFG is updated.  The dominator tree will not be valid after
   this transformation.

   Returns the newly created basic block.  */

basic_block
bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
						 tree cond, basic_block case_bb,
						 profile_probability prob,
						 location_t loc)
{
  tree tmp;
  gcond *cond_stmt;
  edge e_false;
  basic_block new_bb, split_bb = gsi_bb (*gsip);

  /* The true edge is made first so the split below can move the original
     fall-through successor to the new block.  */
  edge e_true = make_edge (split_bb, case_bb, EDGE_TRUE_VALUE);
  e_true->probability = prob;
  gcc_assert (e_true->src == split_bb);

  /* Gimplify COND before splitting so its setup statements stay in
     SPLIT_BB, ahead of the conditional jump.  */
  tmp = force_gimple_operand_gsi (gsip, cond, /*simple=*/true, NULL,
				  /*before=*/true, GSI_SAME_STMT);
  cond_stmt = gimple_build_cond_from_tree (tmp, NULL_TREE, NULL_TREE);
  gimple_set_location (cond_stmt, loc);
  gsi_insert_before (gsip, cond_stmt, GSI_SAME_STMT);

  e_false = split_block (split_bb, cond_stmt);
  new_bb = e_false->dest;
  /* split_block moved all successors to NEW_BB; pull the true edge back
     to the block that now ends in COND_STMT.  */
  redirect_edge_pred (e_true, split_bb);

  e_false->flags &= ~EDGE_FALLTHRU;
  e_false->flags |= EDGE_FALSE_VALUE;
  e_false->probability = e_true->probability.invert ();
  new_bb->count = e_false->count ();

  return new_bb;
}
2294 /* Compute the number of case labels that correspond to each outgoing edge of
2295 switch statement. Record this information in the aux field of the edge.
2296 Return the approx max number of cases per edge. */
2299 switch_decision_tree::compute_cases_per_edge ()
2301 int max_c = 0;
2302 reset_out_edges_aux (m_switch);
2303 int ncases = gimple_switch_num_labels (m_switch);
2304 for (int i = ncases - 1; i >= 1; --i)
2306 edge case_edge = gimple_switch_edge (cfun, m_switch, i);
2307 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + 1);
2308 /* For a range case add one extra. That's enough for the bit
2309 cluster heuristic. */
2310 if ((intptr_t)case_edge->aux > max_c)
2311 max_c = (intptr_t)case_edge->aux +
2312 !!CASE_HIGH (gimple_switch_label (m_switch, i));
2314 return max_c;
/* Analyze switch statement and return true when the statement is expanded
   as decision tree.  */

bool
switch_decision_tree::analyze_switch_statement ()
{
  unsigned l = gimple_switch_num_labels (m_switch);
  basic_block bb = gimple_bb (m_switch);
  auto_vec<cluster *> clusters;
  clusters.create (l - 1);

  basic_block default_bb = gimple_switch_default_bb (cfun, m_switch);
  m_case_bbs.reserve (l);
  m_case_bbs.quick_push (default_bb);

  int max_c = compute_cases_per_edge ();

  /* Build one simple_cluster per case label (label 0 is the default).
     The edge probability is split evenly between the labels sharing
     that edge, using the counts left in aux by compute_cases_per_edge.  */
  for (unsigned i = 1; i < l; i++)
    {
      tree elt = gimple_switch_label (m_switch, i);
      tree lab = CASE_LABEL (elt);
      basic_block case_bb = label_to_block (cfun, lab);
      edge case_edge = find_edge (bb, case_bb);
      tree low = CASE_LOW (elt);
      tree high = CASE_HIGH (elt);

      profile_probability p
	= case_edge->probability / ((intptr_t) (case_edge->aux));
      clusters.quick_push (new simple_cluster (low, high, elt, case_edge->dest,
					       p));
      m_case_bbs.quick_push (case_edge->dest);
    }

  reset_out_edges_aux (m_switch);

  if (l > (unsigned) param_switch_lower_slow_alg_max_cases)
    warning_at (gimple_location (m_switch), OPT_Wdisabled_optimization,
		"Using faster switch lowering algorithms. "
		"Number of switch cases (%d) exceeds "
		"%<--param=switch-lower-slow-alg-max-cases=%d%> limit.",
		l, param_switch_lower_slow_alg_max_cases);

  /* Find bit-test clusters.  */
  vec<cluster *> output = bit_test_cluster::find_bit_tests (clusters, max_c);

  /* Find jump table clusters.  We are looking for these in the sequences of
     simple clusters which we didn't manage to convert into bit-test
     clusters.  */
  vec<cluster *> output2;
  auto_vec<cluster *> tmp;
  output2.create (1);
  tmp.create (1);

  for (unsigned i = 0; i < output.length (); i++)
    {
      cluster *c = output[i];
      if (c->get_type () != SIMPLE_CASE)
	{
	  /* A non-simple cluster ends the current run of simple clusters;
	     try to turn that run into jump tables first.  */
	  if (!tmp.is_empty ())
	    {
	      vec<cluster *> n = jump_table_cluster::find_jump_tables (tmp);
	      output2.safe_splice (n);
	      n.release ();
	      tmp.truncate (0);
	    }
	  output2.safe_push (c);
	}
      else
	tmp.safe_push (c);
    }

  /* We still can have a temporary vector to test.  */
  if (!tmp.is_empty ())
    {
      vec<cluster *> n = jump_table_cluster::find_jump_tables (tmp);
      output2.safe_splice (n);
      n.release ();
    }

  if (dump_file)
    {
      fprintf (dump_file, ";; GIMPLE switch case clusters: ");
      for (unsigned i = 0; i < output2.length (); i++)
	output2[i]->dump (dump_file, dump_flags & TDF_DETAILS);
      fprintf (dump_file, "\n");
    }

  output.release ();

  bool expanded = try_switch_expansion (output2);
  release_clusters (output2);
  return expanded;
}
/* Attempt to expand CLUSTERS as a decision tree.  Return true when
   expanded.  */

bool
switch_decision_tree::try_switch_expansion (vec<cluster *> &clusters)
{
  tree index_expr = gimple_switch_index (m_switch);
  tree index_type = TREE_TYPE (index_expr);
  basic_block bb = gimple_bb (m_switch);

  /* Bail out on a default-only switch or an index type for which no
     type suitable for range checks exists.  */
  if (gimple_switch_num_labels (m_switch) == 1
      || range_check_type (index_type) == NULL_TREE)
    return false;

  /* Find the default case target label.  */
  edge default_edge = gimple_switch_default_edge (cfun, m_switch);
  m_default_bb = default_edge->dest;

  /* Do the insertion of a case label into m_case_list.  The labels are
     fed to us in descending order from the sorted vector of case labels used
     in the tree part of the middle end.  So the list we construct is
     sorted in ascending order.  */

  for (int i = clusters.length () - 1; i >= 0; i--)
    {
      case_tree_node *r = m_case_list;
      m_case_list = m_case_node_pool.allocate ();
      m_case_list->m_right = r;
      m_case_list->m_c = clusters[i];
    }

  record_phi_operand_mapping ();

  /* Split basic block that contains the gswitch statement.  */
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  edge e;
  if (gsi_end_p (gsi))
    e = split_block_after_labels (bb);
  else
    {
      /* Split after the statement preceding the gswitch, so the gswitch
	 itself ends up alone in the second half.  */
      gsi_prev (&gsi);
      e = split_block (bb, gsi_stmt (gsi));
    }
  bb = split_edge (e);

  /* Create new basic blocks for non-case clusters where specific expansion
     needs to happen.  */
  for (unsigned i = 0; i < clusters.length (); i++)
    if (clusters[i]->get_type () != SIMPLE_CASE)
      {
	clusters[i]->m_case_bb = create_empty_bb (bb);
	clusters[i]->m_case_bb->count = bb->count;
	clusters[i]->m_case_bb->loop_father = bb->loop_father;
      }

  /* Do not do an extra work for a single cluster.  */
  if (clusters.length () == 1
      && clusters[0]->get_type () != SIMPLE_CASE)
    {
      cluster *c = clusters[0];
      c->emit (index_expr, index_type,
	       gimple_switch_default_label (m_switch), m_default_bb,
	       gimple_location (m_switch));
      redirect_edge_succ (single_succ_edge (bb), c->m_case_bb);
    }
  else
    {
      emit (bb, index_expr, default_edge->probability, index_type);

      /* Emit cluster-specific switch handling.  */
      for (unsigned i = 0; i < clusters.length (); i++)
	if (clusters[i]->get_type () != SIMPLE_CASE)
	  {
	    edge e = single_pred_edge (clusters[i]->m_case_bb);
	    e->dest->count = e->src->count.apply_probability (e->probability);
	    clusters[i]->emit (index_expr, index_type,
			       gimple_switch_default_label (m_switch),
			       m_default_bb, gimple_location (m_switch));
	  }
    }

  fix_phi_operands_for_edges ();

  return true;
}
2497 /* Before switch transformation, record all SSA_NAMEs defined in switch BB
2498 and used in a label basic block. */
2500 void
2501 switch_decision_tree::record_phi_operand_mapping ()
2503 basic_block switch_bb = gimple_bb (m_switch);
2504 /* Record all PHI nodes that have to be fixed after conversion. */
2505 for (unsigned i = 0; i < m_case_bbs.length (); i++)
2507 gphi_iterator gsi;
2508 basic_block bb = m_case_bbs[i];
2509 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2511 gphi *phi = gsi.phi ();
2513 for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
2515 basic_block phi_src_bb = gimple_phi_arg_edge (phi, i)->src;
2516 if (phi_src_bb == switch_bb)
2518 tree def = gimple_phi_arg_def (phi, i);
2519 tree result = gimple_phi_result (phi);
2520 m_phi_mapping.put (result, def);
2521 break;
2528 /* Append new operands to PHI statements that were introduced due to
2529 addition of new edges to case labels. */
2531 void
2532 switch_decision_tree::fix_phi_operands_for_edges ()
2534 gphi_iterator gsi;
2536 for (unsigned i = 0; i < m_case_bbs.length (); i++)
2538 basic_block bb = m_case_bbs[i];
2539 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2541 gphi *phi = gsi.phi ();
2542 for (unsigned j = 0; j < gimple_phi_num_args (phi); j++)
2544 tree def = gimple_phi_arg_def (phi, j);
2545 if (def == NULL_TREE)
2547 edge e = gimple_phi_arg_edge (phi, j);
2548 tree *definition
2549 = m_phi_mapping.get (gimple_phi_result (phi));
2550 gcc_assert (definition);
2551 add_phi_arg (phi, *definition, e, UNKNOWN_LOCATION);
/* Generate a decision tree, switching on INDEX_EXPR and jumping to
   one of the labels in CASE_LIST or to the DEFAULT_LABEL.

   We generate a binary decision tree to select the appropriate target
   code.  */

void
switch_decision_tree::emit (basic_block bb, tree index_expr,
			    profile_probability default_prob, tree index_type)
{
  balance_case_nodes (&m_case_list, NULL);

  if (dump_file)
    dump_function_to_file (current_function_decl, dump_file, dump_flags);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      int indent_step = ceil_log2 (TYPE_PRECISION (index_type)) + 2;
      fprintf (dump_file, ";; Expanding GIMPLE switch as decision tree:\n");
      gcc_assert (m_case_list != NULL);
      dump_case_nodes (dump_file, m_case_list, indent_step, 0);
    }

  bb = emit_case_nodes (bb, index_expr, m_case_list, default_prob, index_type,
			gimple_location (m_switch));

  /* A non-NULL BB means the tree fell through; route it to default.  */
  if (bb)
    emit_jump (bb, m_default_bb);

  /* Remove all edges and do just an edge that will reach default_bb.  */
  bb = gimple_bb (m_switch);
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gsi_remove (&gsi, true);

  delete_basic_block (bb);
}
/* Take an ordered list of case nodes
   and transform them into a near optimal binary tree,
   on the assumption that any target code selection value is as
   likely as any other.

   The transformation is performed by splitting the ordered
   list into two equal sections plus a pivot.  The parts are
   then attached to the pivot as left and right branches.  Each
   branch is then transformed recursively.  */

void
switch_decision_tree::balance_case_nodes (case_tree_node **head,
					  case_tree_node *parent)
{
  case_tree_node *np;

  np = *head;
  if (np)
    {
      int i = 0;
      case_tree_node **npp;
      case_tree_node *left;
      profile_probability prob = profile_probability::never ();

      /* Count the number of entries on branch.  */

      while (np)
	{
	  i++;
	  prob += np->m_c->m_prob;
	  np = np->m_right;
	}

      if (i > 2)
	{
	  /* Split this list if it is long enough for that to help.  */
	  npp = head;
	  left = *npp;
	  profile_probability pivot_prob = prob / 2;

	  /* Find the place in the list that bisects the list's total cost
	     by probability.  */
	  while (1)
	    {
	      /* Skip nodes while their probability does not reach
		 that amount.  */
	      prob -= (*npp)->m_c->m_prob;
	      if ((prob.initialized_p () && prob < pivot_prob)
		  || ! (*npp)->m_right)
		break;
	      npp = &(*npp)->m_right;
	    }

	  /* NP becomes the pivot: everything before it becomes the left
	     branch, everything after it stays as the right branch.  */
	  np = *npp;
	  *npp = 0;
	  *head = np;
	  np->m_parent = parent;
	  np->m_left = left == np ? NULL : left;

	  /* Optimize each of the two split parts.  */
	  balance_case_nodes (&np->m_left, np);
	  balance_case_nodes (&np->m_right, np);
	  /* Accumulate the subtree probability from both children.  */
	  np->m_c->m_subtree_prob = np->m_c->m_prob;
	  if (np->m_left)
	    np->m_c->m_subtree_prob += np->m_left->m_c->m_subtree_prob;
	  if (np->m_right)
	    np->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
	}
      else
	{
	  /* Else leave this branch as one level,
	     but fill in `parent' fields.  */
	  np = *head;
	  np->m_parent = parent;
	  np->m_c->m_subtree_prob = np->m_c->m_prob;
	  for (; np->m_right; np = np->m_right)
	    {
	      np->m_right->m_parent = np;
	      (*head)->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
	    }
	}
    }
}
2678 /* Dump ROOT, a list or tree of case nodes, to file. */
2680 void
2681 switch_decision_tree::dump_case_nodes (FILE *f, case_tree_node *root,
2682 int indent_step, int indent_level)
2684 if (root == 0)
2685 return;
2686 indent_level++;
2688 dump_case_nodes (f, root->m_left, indent_step, indent_level);
2690 fputs (";; ", f);
2691 fprintf (f, "%*s", indent_step * indent_level, "");
2692 root->m_c->dump (f);
2693 root->m_c->m_prob.dump (f);
2694 fputs (" subtree: ", f);
2695 root->m_c->m_subtree_prob.dump (f);
2696 fputs (")\n", f);
2698 dump_case_nodes (f, root->m_right, indent_step, indent_level);
2702 /* Add an unconditional jump to CASE_BB that happens in basic block BB. */
2704 void
2705 switch_decision_tree::emit_jump (basic_block bb, basic_block case_bb)
2707 edge e = single_succ_edge (bb);
2708 redirect_edge_succ (e, case_bb);
/* Generate code to compare OP0 with OP1 so that the condition codes are
   set and to jump to LABEL_BB if the condition is true.
   COMPARISON is the GIMPLE comparison (EQ, NE, GT, etc.).
   PROB is the probability of jumping to LABEL_BB.
   Returns the basic block reached when the condition is false.  */

basic_block
switch_decision_tree::emit_cmp_and_jump_insns (basic_block bb, tree op0,
					       tree op1, tree_code comparison,
					       basic_block label_bb,
					       profile_probability prob,
					       location_t loc)
{
  // TODO: it's once called with lhs != index.
  op1 = fold_convert (TREE_TYPE (op0), op1);

  gcond *cond = gimple_build_cond (comparison, op0, op1, NULL_TREE, NULL_TREE);
  gimple_set_location (cond, loc);
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  /* Append the condition as the new last statement of BB.  */
  gsi_insert_after (&gsi, cond, GSI_NEW_STMT);

  gcc_assert (single_succ_p (bb));

  /* Make a new basic block where false branch will take place.  */
  edge false_edge = split_block (bb, cond);
  false_edge->flags = EDGE_FALSE_VALUE;
  false_edge->probability = prob.invert ();
  false_edge->dest->count = bb->count.apply_probability (prob.invert ());

  edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
  true_edge->probability = prob;

  return false_edge->dest;
}
/* Generate code to jump to LABEL_BB if OP0 and OP1 are equal.
   PROB is the probability of jumping to LABEL_BB.
   BB is a basic block where the new condition will be placed.
   Returns the basic block reached when the values are not equal.  */

basic_block
switch_decision_tree::do_jump_if_equal (basic_block bb, tree op0, tree op1,
					basic_block label_bb,
					profile_probability prob,
					location_t loc)
{
  op1 = fold_convert (TREE_TYPE (op0), op1);

  gcond *cond = gimple_build_cond (EQ_EXPR, op0, op1, NULL_TREE, NULL_TREE);
  gimple_set_location (cond, loc);
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  /* Unlike emit_cmp_and_jump_insns, insert before the current last
     statement (if any) rather than appending after it.  */
  gsi_insert_before (&gsi, cond, GSI_SAME_STMT);

  gcc_assert (single_succ_p (bb));

  /* Make a new basic block where false branch will take place.  */
  edge false_edge = split_block (bb, cond);
  false_edge->flags = EDGE_FALSE_VALUE;
  false_edge->probability = prob.invert ();
  false_edge->dest->count = bb->count.apply_probability (prob.invert ());

  edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
  true_edge->probability = prob;

  return false_edge->dest;
}
/* Emit step-by-step code to select a case for the value of INDEX.
   The thus generated decision tree follows the form of the
   case-node binary tree NODE, whose nodes represent test conditions.
   DEFAULT_PROB is probability of cases leading to default BB.
   INDEX_TYPE is the type of the index of the switch.
   Returns the fall-through basic block, or NULL when control cannot
   fall through.  */

basic_block
switch_decision_tree::emit_case_nodes (basic_block bb, tree index,
				       case_tree_node *node,
				       profile_probability default_prob,
				       tree index_type, location_t loc)
{
  profile_probability p;

  /* If node is null, we are done.  */
  if (node == NULL)
    return bb;

  /* Single value case.  */
  if (node->m_c->is_single_value_p ())
    {
      /* Node is single valued.  First see if the index expression matches
	 this node and then check our children, if any.  */
      p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
      bb = do_jump_if_equal (bb, index, node->m_c->get_low (),
			     node->m_c->m_case_bb, p, loc);
      /* Since this case is taken at this point, reduce its weight from
	 subtree_weight.  */
      node->m_c->m_subtree_prob -= node->m_c->m_prob;

      if (node->m_left != NULL && node->m_right != NULL)
	{
	  /* 1) the node has both children

	     If both children are single-valued cases with no
	     children, finish up all the work.  This way, we can save
	     one ordered comparison.  */

	  if (!node->m_left->has_child ()
	      && node->m_left->m_c->is_single_value_p ()
	      && !node->m_right->has_child ()
	      && node->m_right->m_c->is_single_value_p ())
	    {
	      p = (node->m_right->m_c->m_prob
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
				     node->m_right->m_c->m_case_bb, p, loc);
	      node->m_c->m_subtree_prob -= node->m_right->m_c->m_prob;

	      p = (node->m_left->m_c->m_prob
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
				     node->m_left->m_c->m_case_bb, p, loc);
	    }
	  else
	    {
	      /* Branch to a label where we will handle it later.  */
	      basic_block test_bb = split_edge (single_succ_edge (bb));
	      redirect_edge_succ (single_pred_edge (test_bb),
				  single_succ_edge (bb)->dest);

	      p = ((node->m_right->m_c->m_subtree_prob + default_prob / 2)
		   / (node->m_c->m_subtree_prob + default_prob));
	      test_bb->count = bb->count.apply_probability (p);
	      bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
					    GT_EXPR, test_bb, p, loc);
	      default_prob /= 2;

	      /* Handle the left-hand subtree.  */
	      bb = emit_case_nodes (bb, index, node->m_left,
				    default_prob, index_type, loc);

	      /* If the left-hand subtree fell through,
		 don't let it fall into the right-hand subtree.  */
	      if (bb && m_default_bb)
		emit_jump (bb, m_default_bb);

	      bb = emit_case_nodes (test_bb, index, node->m_right,
				    default_prob, index_type, loc);
	    }
	}
      else if (node->m_left == NULL && node->m_right != NULL)
	{
	  /* 2) the node has only right child.  */

	  /* Here we have a right child but no left so we issue a conditional
	     branch to default and process the right child.

	     Omit the conditional branch to default if the right child
	     does not have any children and is single valued; it would
	     cost too much space to save so little time.  */

	  if (node->m_right->has_child ()
	      || !node->m_right->m_c->is_single_value_p ())
	    {
	      p = ((default_prob / 2)
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
					    LT_EXPR, m_default_bb, p, loc);
	      default_prob /= 2;

	      bb = emit_case_nodes (bb, index, node->m_right, default_prob,
				    index_type, loc);
	    }
	  else
	    {
	      /* We cannot process node->right normally
		 since we haven't ruled out the numbers less than
		 this node's value.  So handle node->right explicitly.  */
	      p = (node->m_right->m_c->m_subtree_prob
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
				     node->m_right->m_c->m_case_bb, p, loc);
	    }
	}
      else if (node->m_left != NULL && node->m_right == NULL)
	{
	  /* 3) just one subtree, on the left.  Similar case as previous.  */

	  if (node->m_left->has_child ()
	      || !node->m_left->m_c->is_single_value_p ())
	    {
	      p = ((default_prob / 2)
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
					    GT_EXPR, m_default_bb, p, loc);
	      default_prob /= 2;

	      bb = emit_case_nodes (bb, index, node->m_left, default_prob,
				    index_type, loc);
	    }
	  else
	    {
	      /* We cannot process node->left normally
		 since we haven't ruled out the numbers less than
		 this node's value.  So handle node->left explicitly.  */
	      p = (node->m_left->m_c->m_subtree_prob
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
				     node->m_left->m_c->m_case_bb, p, loc);
	    }
	}
    }
  else
    {
      /* Node is a range.  These cases are very similar to those for a single
	 value, except that we do not start by testing whether this node
	 is the one to branch to.  */
      if (node->has_child () || node->m_c->get_type () != SIMPLE_CASE)
	{
	  /* A bit-test cluster also consumes a share of the default
	     probability, hence three parts instead of two.  */
	  bool is_bt = node->m_c->get_type () == BIT_TEST;
	  int parts = is_bt ? 3 : 2;

	  /* Branch to a label where we will handle it later.  */
	  basic_block test_bb = split_edge (single_succ_edge (bb));
	  redirect_edge_succ (single_pred_edge (test_bb),
			      single_succ_edge (bb)->dest);

	  profile_probability right_prob = profile_probability::never ();
	  if (node->m_right)
	    right_prob = node->m_right->m_c->m_subtree_prob;
	  p = ((right_prob + default_prob / parts)
	       / (node->m_c->m_subtree_prob + default_prob));
	  test_bb->count = bb->count.apply_probability (p);

	  bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
					GT_EXPR, test_bb, p, loc);

	  default_prob /= parts;
	  node->m_c->m_subtree_prob -= right_prob;
	  if (is_bt)
	    node->m_c->m_default_prob = default_prob;

	  /* Value belongs to this node or to the left-hand subtree.  */
	  p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
	  bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
					GE_EXPR, node->m_c->m_case_bb, p, loc);

	  /* Handle the left-hand subtree.  */
	  bb = emit_case_nodes (bb, index, node->m_left, default_prob,
				index_type, loc);

	  /* If the left-hand subtree fell through,
	     don't let it fall into the right-hand subtree.  */
	  if (bb && m_default_bb)
	    emit_jump (bb, m_default_bb);

	  bb = emit_case_nodes (test_bb, index, node->m_right, default_prob,
				index_type, loc);
	}
      else
	{
	  /* Node has no children so we check low and high bounds to remove
	     redundant tests.  Only one of the bounds can exist,
	     since otherwise this node is bounded--a case tested already.  */
	  tree lhs, rhs;
	  generate_range_test (bb, index, node->m_c->get_low (),
			       node->m_c->get_high (), &lhs, &rhs);
	  p = default_prob / (node->m_c->m_subtree_prob + default_prob);

	  bb = emit_cmp_and_jump_insns (bb, lhs, rhs, GT_EXPR,
					m_default_bb, p, loc);

	  emit_jump (bb, node->m_c->m_case_bb);
	  return NULL;
	}
    }

  return bb;
}
/* The switch-conversion pass: scans the function for GIMPLE_SWITCH
   statements and invokes switch_conversion::expand on each of them.  */

namespace {

const pass_data pass_data_convert_switch =
{
  GIMPLE_PASS, /* type */
  "switchconv", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SWITCH_CONVERSION, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_convert_switch : public gimple_opt_pass
{
public:
  pass_convert_switch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_convert_switch, ctxt)
  {}

  /* opt_pass methods: */
  /* Run only when -ftree-switch-conversion is enabled.  */
  bool gate (function *) final override
  {
    return flag_tree_switch_conversion != 0;
  }
  unsigned int execute (function *) final override;

}; // class pass_convert_switch
3021 unsigned int
3022 pass_convert_switch::execute (function *fun)
3024 basic_block bb;
3025 bool cfg_altered = false;
3027 FOR_EACH_BB_FN (bb, fun)
3029 if (gswitch *stmt = safe_dyn_cast <gswitch *> (*gsi_last_bb (bb)))
3031 if (dump_file)
3033 expanded_location loc = expand_location (gimple_location (stmt));
3035 fprintf (dump_file, "beginning to process the following "
3036 "SWITCH statement (%s:%d) : ------- \n",
3037 loc.file, loc.line);
3038 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3039 putc ('\n', dump_file);
3042 switch_conversion sconv;
3043 sconv.expand (stmt);
3044 cfg_altered |= sconv.m_cfg_altered;
3045 if (!sconv.m_reason)
3047 if (dump_file)
3049 fputs ("Switch converted\n", dump_file);
3050 fputs ("--------------------------------\n", dump_file);
3053 /* Make no effort to update the post-dominator tree.
3054 It is actually not that hard for the transformations
3055 we have performed, but it is not supported
3056 by iterate_fix_dominators. */
3057 free_dominance_info (CDI_POST_DOMINATORS);
3059 else
3061 if (dump_file)
3063 fputs ("Bailing out - ", dump_file);
3064 fputs (sconv.m_reason, dump_file);
3065 fputs ("\n--------------------------------\n", dump_file);
3071 return cfg_altered ? TODO_cleanup_cfg : 0;;
3074 } // anon namespace
/* Pass-manager factory for the switchconv pass.  */

gimple_opt_pass *
make_pass_convert_switch (gcc::context *ctxt)
{
  return new pass_convert_switch (ctxt);
}
/* The switch-lowering pass: scans the function for GIMPLE_SWITCH
   statements and lowers each of them via switch_decision_tree.  It is
   instantiated twice, once for -O0 and once for optimizing compilation.  */

namespace {

/* O0 selects the -O0 variant of the pass (see gate below).  */

template <bool O0> class pass_lower_switch: public gimple_opt_pass
{
public:
  pass_lower_switch (gcc::context *ctxt) : gimple_opt_pass (data, ctxt) {}

  static const pass_data data;
  opt_pass *
  clone () final override
  {
    return new pass_lower_switch<O0> (m_ctxt);
  }

  bool
  gate (function *) final override
  {
    /* The O0 instance runs only when not optimizing; the other
       instance always runs.  */
    return !O0 || !optimize;
  }

  unsigned int execute (function *fun) final override;
}; // class pass_lower_switch

template <bool O0>
const pass_data pass_lower_switch<O0>::data = {
  GIMPLE_PASS, /* type */
  O0 ? "switchlower_O0" : "switchlower", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SWITCH_LOWERING, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa | TODO_cleanup_cfg, /* todo_flags_finish */
};
/* Collect every block-terminating GIMPLE_SWITCH in FUN, then expand each
   one as a decision tree.  Collection happens first because expansion
   rewrites the CFG under the iteration.  */

template <bool O0>
unsigned int
pass_lower_switch<O0>::execute (function *fun)
{
  basic_block bb;
  bool expanded = false;

  auto_vec<gimple *> switch_statements;
  switch_statements.create (1);

  FOR_EACH_BB_FN (bb, fun)
    {
      if (gswitch *swtch = safe_dyn_cast <gswitch *> (*gsi_last_bb (bb)))
	{
	  /* Merging adjacent labels is skipped at -O0.  */
	  if (!O0)
	    group_case_labels_stmt (swtch);
	  switch_statements.safe_push (swtch);
	}
    }

  for (unsigned i = 0; i < switch_statements.length (); i++)
    {
      gimple *stmt = switch_statements[i];
      if (dump_file)
	{
	  expanded_location loc = expand_location (gimple_location (stmt));

	  fprintf (dump_file, "beginning to process the following "
		   "SWITCH statement (%s:%d) : ------- \n",
		   loc.file, loc.line);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	  putc ('\n', dump_file);
	}

      gswitch *swtch = dyn_cast<gswitch *> (stmt);
      if (swtch)
	{
	  switch_decision_tree dt (swtch);
	  expanded |= dt.analyze_switch_statement ();
	}
    }

  if (expanded)
    {
      /* Expansion invalidated dominators and virtual operands.  */
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
      mark_virtual_operands_for_renaming (cfun);
    }

  return 0;
}
3173 } // anon namespace
/* Pass-manager factory for the -O0 switch-lowering pass.  */

gimple_opt_pass *
make_pass_lower_switch_O0 (gcc::context *ctxt)
{
  return new pass_lower_switch<true> (ctxt);
}
/* Pass-manager factory for the optimizing switch-lowering pass.  */

gimple_opt_pass *
make_pass_lower_switch (gcc::context *ctxt)
{
  return new pass_lower_switch<false> (ctxt);