gcc/tree-vectorizer.cc
1 /* Vectorizer
2 Copyright (C) 2003-2024 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Loop and basic block vectorizer.
23 This file contains drivers for the three vectorizers:
24 (1) loop vectorizer (inter-iteration parallelism),
25 (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
26 vectorizer)
27 (3) BB vectorizer (out-of-loops), aka SLP
29 The rest of the vectorizer's code is organized as follows:
30 - tree-vect-loop.cc - loop specific parts such as reductions, etc. These are
31 used by drivers (1) and (2).
32 - tree-vect-loop-manip.cc - vectorizer's loop control-flow utilities, used by
33 drivers (1) and (2).
34 - tree-vect-slp.cc - BB vectorization specific analysis and transformation,
35 used by drivers (2) and (3).
36 - tree-vect-stmts.cc - statements analysis and transformation (used by all).
37 - tree-vect-data-refs.cc - vectorizer specific data-refs analysis and
38 manipulations (used by all).
39 - tree-vect-patterns.cc - vectorizable code patterns detector (used by all)
41 Here's a poor attempt at illustrating that:
43 tree-vectorizer.cc:
44 loop_vect() loop_aware_slp() slp_vect()
45 | / \ /
46 | / \ /
47 tree-vect-loop.cc tree-vect-slp.cc
48 | \ \ / / |
49 | \ \/ / |
50 | \ /\ / |
51 | \ / \ / |
52 tree-vect-stmts.cc tree-vect-data-refs.cc
53 \ /
54 tree-vect-patterns.cc
57 #include "config.h"
58 #define INCLUDE_MEMORY
59 #include "system.h"
60 #include "coretypes.h"
61 #include "backend.h"
62 #include "tree.h"
63 #include "gimple.h"
64 #include "predict.h"
65 #include "tree-pass.h"
66 #include "ssa.h"
67 #include "cgraph.h"
68 #include "fold-const.h"
69 #include "stor-layout.h"
70 #include "gimple-iterator.h"
71 #include "gimple-walk.h"
72 #include "tree-ssa-loop-manip.h"
73 #include "tree-ssa-loop-niter.h"
74 #include "tree-cfg.h"
75 #include "cfgloop.h"
76 #include "tree-vectorizer.h"
77 #include "tree-ssa-propagate.h"
78 #include "dbgcnt.h"
79 #include "tree-scalar-evolution.h"
80 #include "stringpool.h"
81 #include "attribs.h"
82 #include "gimple-pretty-print.h"
83 #include "opt-problem.h"
84 #include "internal-fn.h"
85 #include "tree-ssa-sccvn.h"
86 #include "tree-into-ssa.h"
88 /* Loop or bb location, with hotness information. */
89 dump_user_location_t vect_location;
91 /* auto_purge_vect_location's dtor: reset the vect_location
92 global, to avoid stale location_t values that could reference
93 GC-ed blocks. */
95 auto_purge_vect_location::~auto_purge_vect_location ()
97 vect_location = dump_user_location_t ();
100 /* Dump to F a cost entry described by the remaining arguments. */
102 void
103 dump_stmt_cost (FILE *f, int count, enum vect_cost_for_stmt kind,
104 stmt_vec_info stmt_info, slp_tree node, tree,
105 int misalign, unsigned cost,
106 enum vect_cost_model_location where)
108 if (stmt_info)
110 print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
111 fprintf (f, " ");
113 else if (node)
114 fprintf (f, "node %p ", (void *)node);
115 else
116 fprintf (f, "<unknown> ");
117 fprintf (f, "%d times ", count);
118 const char *ks = "unknown";
119 switch (kind)
121 case scalar_stmt:
122 ks = "scalar_stmt";
123 break;
124 case scalar_load:
125 ks = "scalar_load";
126 break;
127 case scalar_store:
128 ks = "scalar_store";
129 break;
130 case vector_stmt:
131 ks = "vector_stmt";
132 break;
133 case vector_load:
134 ks = "vector_load";
135 break;
136 case vector_gather_load:
137 ks = "vector_gather_load";
138 break;
139 case unaligned_load:
140 ks = "unaligned_load";
141 break;
142 case unaligned_store:
143 ks = "unaligned_store";
144 break;
145 case vector_store:
146 ks = "vector_store";
147 break;
148 case vector_scatter_store:
149 ks = "vector_scatter_store";
150 break;
151 case vec_to_scalar:
152 ks = "vec_to_scalar";
153 break;
154 case scalar_to_vec:
155 ks = "scalar_to_vec";
156 break;
157 case cond_branch_not_taken:
158 ks = "cond_branch_not_taken";
159 break;
160 case cond_branch_taken:
161 ks = "cond_branch_taken";
162 break;
163 case vec_perm:
164 ks = "vec_perm";
165 break;
166 case vec_promote_demote:
167 ks = "vec_promote_demote";
168 break;
169 case vec_construct:
170 ks = "vec_construct";
171 break;
173 fprintf (f, "%s ", ks);
174 if (kind == unaligned_load || kind == unaligned_store)
175 fprintf (f, "(misalign %d) ", misalign);
176 fprintf (f, "costs %u ", cost);
177 const char *ws = "unknown";
178 switch (where)
180 case vect_prologue:
181 ws = "prologue";
182 break;
183 case vect_body:
184 ws = "body";
185 break;
186 case vect_epilogue:
187 ws = "epilogue";
188 break;
190 fprintf (f, "in %s\n", ws);
193 /* For mapping simduid to vectorization factor. */
195 class simduid_to_vf : public free_ptr_hash<simduid_to_vf>
197 public:
198 unsigned int simduid;
199 poly_uint64 vf;
201 /* hash_table support. */
202 static inline hashval_t hash (const simduid_to_vf *);
203 static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
206 inline hashval_t
207 simduid_to_vf::hash (const simduid_to_vf *p)
209 return p->simduid;
212 inline int
213 simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
215 return p1->simduid == p2->simduid;
218 /* This hash maps the OMP simd array to the corresponding simduid used
219 to index into it. Like so:
221 _7 = GOMP_SIMD_LANE (simduid.0)
224 D.1737[_7] = stuff;
227 This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
228 simduid.0. */
230 struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
232 tree decl;
233 unsigned int simduid;
235 /* hash_table support. */
236 static inline hashval_t hash (const simd_array_to_simduid *);
237 static inline int equal (const simd_array_to_simduid *,
238 const simd_array_to_simduid *);
241 inline hashval_t
242 simd_array_to_simduid::hash (const simd_array_to_simduid *p)
244 return DECL_UID (p->decl);
247 inline int
248 simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
249 const simd_array_to_simduid *p2)
251 return p1->decl == p2->decl;
254 /* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF and IFN_GOMP_SIMD_LAST_LANE
255 into their corresponding constants and remove
256 IFN_GOMP_SIMD_ORDERED_{START,END}. */
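/* For example, after a simd loop has been vectorized with a vectorization
   factor of, say, 8, a call _5 = GOMP_SIMD_VF (simduid.0) is folded to 8 and
   _7 = GOMP_SIMD_LANE (simduid.0) to 0; when no vectorization factor was
   recorded the calls fold to the scalar values (VF 1).  */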
258 static void
259 adjust_simduid_builtins (hash_table<simduid_to_vf> *htab, function *fun)
261 basic_block bb;
263 FOR_EACH_BB_FN (bb, fun)
265 gimple_stmt_iterator i;
267 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
269 poly_uint64 vf = 1;
270 enum internal_fn ifn;
271 gimple *stmt = gsi_stmt (i);
272 tree t;
273 if (!is_gimple_call (stmt)
274 || !gimple_call_internal_p (stmt))
276 gsi_next (&i);
277 continue;
279 ifn = gimple_call_internal_fn (stmt);
280 switch (ifn)
282 case IFN_GOMP_SIMD_LANE:
283 case IFN_GOMP_SIMD_VF:
284 case IFN_GOMP_SIMD_LAST_LANE:
285 break;
286 case IFN_GOMP_SIMD_ORDERED_START:
287 case IFN_GOMP_SIMD_ORDERED_END:
288 if (integer_onep (gimple_call_arg (stmt, 0)))
290 enum built_in_function bcode
291 = (ifn == IFN_GOMP_SIMD_ORDERED_START
292 ? BUILT_IN_GOMP_ORDERED_START
293 : BUILT_IN_GOMP_ORDERED_END);
294 gimple *g
295 = gimple_build_call (builtin_decl_explicit (bcode), 0);
296 gimple_move_vops (g, stmt);
297 gsi_replace (&i, g, true);
298 continue;
300 gsi_remove (&i, true);
301 unlink_stmt_vdef (stmt);
302 continue;
303 default:
304 gsi_next (&i);
305 continue;
307 tree arg = gimple_call_arg (stmt, 0);
308 gcc_assert (arg != NULL_TREE);
309 gcc_assert (TREE_CODE (arg) == SSA_NAME);
310 simduid_to_vf *p = NULL, data;
311 data.simduid = DECL_UID (SSA_NAME_VAR (arg));
312 /* Need to nullify the loop safelen field since its value is not
313 valid after the transformation. */
314 if (bb->loop_father && bb->loop_father->safelen > 0)
315 bb->loop_father->safelen = 0;
316 if (htab)
318 p = htab->find (&data);
319 if (p)
320 vf = p->vf;
322 switch (ifn)
324 case IFN_GOMP_SIMD_VF:
325 t = build_int_cst (unsigned_type_node, vf);
326 break;
327 case IFN_GOMP_SIMD_LANE:
328 t = build_int_cst (unsigned_type_node, 0);
329 break;
330 case IFN_GOMP_SIMD_LAST_LANE:
331 t = gimple_call_arg (stmt, 1);
332 break;
333 default:
334 gcc_unreachable ();
336 tree lhs = gimple_call_lhs (stmt);
337 if (lhs)
338 replace_uses_by (lhs, t);
339 release_defs (stmt);
340 gsi_remove (&i, true);
345 /* Helper structure for note_simd_array_uses. */
347 struct note_simd_array_uses_struct
349 hash_table<simd_array_to_simduid> **htab;
350 unsigned int simduid;
353 /* Callback for note_simd_array_uses, called through walk_gimple_op. */
355 static tree
356 note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
358 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
359 struct note_simd_array_uses_struct *ns
360 = (struct note_simd_array_uses_struct *) wi->info;
362 if (TYPE_P (*tp))
363 *walk_subtrees = 0;
364 else if (VAR_P (*tp)
365 && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
366 && DECL_CONTEXT (*tp) == current_function_decl)
368 simd_array_to_simduid data;
369 if (!*ns->htab)
370 *ns->htab = new hash_table<simd_array_to_simduid> (15);
371 data.decl = *tp;
372 data.simduid = ns->simduid;
373 simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
374 if (*slot == NULL)
376 simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
377 *p = data;
378 *slot = p;
380 else if ((*slot)->simduid != ns->simduid)
381 (*slot)->simduid = -1U;
382 *walk_subtrees = 0;
384 return NULL_TREE;
387 /* Find "omp simd array" temporaries and map them to corresponding
388 simduid. */
390 static void
391 note_simd_array_uses (hash_table<simd_array_to_simduid> **htab, function *fun)
393 basic_block bb;
394 gimple_stmt_iterator gsi;
395 struct walk_stmt_info wi;
396 struct note_simd_array_uses_struct ns;
398 memset (&wi, 0, sizeof (wi));
399 wi.info = &ns;
400 ns.htab = htab;
402 FOR_EACH_BB_FN (bb, fun)
403 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
405 gimple *stmt = gsi_stmt (gsi);
406 if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
407 continue;
408 switch (gimple_call_internal_fn (stmt))
410 case IFN_GOMP_SIMD_LANE:
411 case IFN_GOMP_SIMD_VF:
412 case IFN_GOMP_SIMD_LAST_LANE:
413 break;
414 default:
415 continue;
417 tree lhs = gimple_call_lhs (stmt);
418 if (lhs == NULL_TREE)
419 continue;
420 imm_use_iterator use_iter;
421 gimple *use_stmt;
422 ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
423 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
424 if (!is_gimple_debug (use_stmt))
425 walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
429 /* Shrink arrays with "omp simd array" attribute to the corresponding
430 vectorization factor. */
432 static void
433 shrink_simd_arrays
434 (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
435 hash_table<simduid_to_vf> *simduid_to_vf_htab)
437 for (hash_table<simd_array_to_simduid>::iterator iter
438 = simd_array_to_simduid_htab->begin ();
439 iter != simd_array_to_simduid_htab->end (); ++iter)
440 if ((*iter)->simduid != -1U)
442 tree decl = (*iter)->decl;
443 poly_uint64 vf = 1;
444 if (simduid_to_vf_htab)
446 simduid_to_vf *p = NULL, data;
447 data.simduid = (*iter)->simduid;
448 p = simduid_to_vf_htab->find (&data);
449 if (p)
450 vf = p->vf;
452 tree atype
453 = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
454 TREE_TYPE (decl) = atype;
455 relayout_decl (decl);
458 delete simd_array_to_simduid_htab;
461 /* Initialize the vec_info with kind KIND_IN and shared analysis data
462 SHARED_. */
464 vec_info::vec_info (vec_info::vec_kind kind_in, vec_info_shared *shared_)
465 : kind (kind_in),
466 shared (shared_),
467 stmt_vec_info_ro (false),
468 bbs (NULL),
469 nbbs (0)
471 stmt_vec_infos.create (50);
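/* Free a vec_info: release the SLP instances and all statement infos
   recorded for the region.  */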
474 vec_info::~vec_info ()
476 for (slp_instance &instance : slp_instances)
477 vect_free_slp_instance (instance);
479 free_stmt_vec_infos ();
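/* Construct an empty vec_info_shared with no statements, data references
   or dependence relations recorded yet.  */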
482 vec_info_shared::vec_info_shared ()
483 : n_stmts (0),
484 datarefs (vNULL),
485 datarefs_copy (vNULL),
486 ddrs (vNULL)
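/* Release the shared data references, the dependence relations and the
   checking copy of the data references.  */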
490 vec_info_shared::~vec_info_shared ()
492 free_data_refs (datarefs);
493 free_dependence_relations (ddrs);
494 datarefs_copy.release ();
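/* With checking enabled, snapshot the current data references so that
   check_datarefs can later verify that analysis did not modify them.  */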
497 void
498 vec_info_shared::save_datarefs ()
500 if (!flag_checking)
501 return;
502 datarefs_copy.reserve_exact (datarefs.length ());
503 for (unsigned i = 0; i < datarefs.length (); ++i)
504 datarefs_copy.quick_push (*datarefs[i]);
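/* Verify (checking builds only) that the data references are unchanged
   relative to the snapshot taken by save_datarefs.  */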
507 void
508 vec_info_shared::check_datarefs ()
510 if (!flag_checking)
511 return;
512 gcc_assert (datarefs.length () == datarefs_copy.length ());
513 for (unsigned i = 0; i < datarefs.length (); ++i)
514 if (memcmp (&datarefs_copy[i], datarefs[i],
515 offsetof (data_reference, alt_indices)) != 0)
516 gcc_unreachable ();
519 /* Record that STMT belongs to the vectorizable region. Create and return
520 an associated stmt_vec_info. */
522 stmt_vec_info
523 vec_info::add_stmt (gimple *stmt)
525 stmt_vec_info res = new_stmt_vec_info (stmt);
526 set_vinfo_for_stmt (stmt, res);
527 return res;
530 /* Record that STMT belongs to the vectorizable region. Create a new
531 stmt_vec_info for it, set its related statement to STMT_INFO and
532 return the new stmt_vec_info. */
534 stmt_vec_info
535 vec_info::add_pattern_stmt (gimple *stmt, stmt_vec_info stmt_info)
537 stmt_vec_info res = new_stmt_vec_info (stmt);
538 set_vinfo_for_stmt (stmt, res, false);
539 STMT_VINFO_RELATED_STMT (res) = stmt_info;
540 return res;
543 /* If STMT has an associated stmt_vec_info, return that vec_info, otherwise
544 return null. It is safe to call this function on any statement, even if
545 it might not be part of the vectorizable region. */
547 stmt_vec_info
548 vec_info::lookup_stmt (gimple *stmt)
550 unsigned int uid = gimple_uid (stmt);
551 if (uid > 0 && uid - 1 < stmt_vec_infos.length ())
553 stmt_vec_info res = stmt_vec_infos[uid - 1];
554 if (res && res->stmt == stmt)
555 return res;
557 return NULL;
560 /* If NAME is an SSA_NAME and its definition has an associated stmt_vec_info,
561 return that stmt_vec_info, otherwise return null. It is safe to call
562 this on arbitrary operands. */
564 stmt_vec_info
565 vec_info::lookup_def (tree name)
567 if (TREE_CODE (name) == SSA_NAME
568 && !SSA_NAME_IS_DEFAULT_DEF (name))
569 return lookup_stmt (SSA_NAME_DEF_STMT (name));
570 return NULL;
573 /* See whether there is a single non-debug statement that uses LHS and
574 whether that statement has an associated stmt_vec_info. Return the
575 stmt_vec_info if so, otherwise return null. */
577 stmt_vec_info
578 vec_info::lookup_single_use (tree lhs)
580 use_operand_p dummy;
581 gimple *use_stmt;
582 if (single_imm_use (lhs, &dummy, &use_stmt))
583 return lookup_stmt (use_stmt);
584 return NULL;
587 /* Return vectorization information about DR. */
589 dr_vec_info *
590 vec_info::lookup_dr (data_reference *dr)
592 stmt_vec_info stmt_info = lookup_stmt (DR_STMT (dr));
593 /* DR_STMT should never refer to a stmt in a pattern replacement. */
594 gcc_checking_assert (!is_pattern_stmt_p (stmt_info));
595 return STMT_VINFO_DR_INFO (stmt_info->dr_aux.stmt);
598 /* Record that NEW_STMT_INFO now implements the same data reference
599 as OLD_STMT_INFO. */
601 void
602 vec_info::move_dr (stmt_vec_info new_stmt_info, stmt_vec_info old_stmt_info)
604 gcc_assert (!is_pattern_stmt_p (old_stmt_info));
605 STMT_VINFO_DR_INFO (old_stmt_info)->stmt = new_stmt_info;
606 new_stmt_info->dr_aux = old_stmt_info->dr_aux;
607 STMT_VINFO_DR_WRT_VEC_LOOP (new_stmt_info)
608 = STMT_VINFO_DR_WRT_VEC_LOOP (old_stmt_info);
609 STMT_VINFO_GATHER_SCATTER_P (new_stmt_info)
610 = STMT_VINFO_GATHER_SCATTER_P (old_stmt_info);
613 /* Permanently remove the statement described by STMT_INFO from the
614 function. */
616 void
617 vec_info::remove_stmt (stmt_vec_info stmt_info)
619 gcc_assert (!stmt_info->pattern_stmt_p);
620 set_vinfo_for_stmt (stmt_info->stmt, NULL);
621 unlink_stmt_vdef (stmt_info->stmt);
622 gimple_stmt_iterator si = gsi_for_stmt (stmt_info->stmt);
623 gsi_remove (&si, true);
624 release_defs (stmt_info->stmt);
625 free_stmt_vec_info (stmt_info);
628 /* Replace the statement at GSI by NEW_STMT, updating both the vectorization
629 information and the function itself. STMT_INFO describes the statement
630 at GSI. */
632 void
633 vec_info::replace_stmt (gimple_stmt_iterator *gsi, stmt_vec_info stmt_info,
634 gimple *new_stmt)
636 gimple *old_stmt = stmt_info->stmt;
637 gcc_assert (!stmt_info->pattern_stmt_p && old_stmt == gsi_stmt (*gsi));
638 gimple_set_uid (new_stmt, gimple_uid (old_stmt));
639 stmt_info->stmt = new_stmt;
640 gsi_replace (gsi, new_stmt, true);
643 /* Insert stmts in SEQ on the VEC_INFO region entry. If CONTEXT is
644 not NULL, it specifies whether to use the sub-region entry
645 determined by it, currently used for loop vectorization to insert
646 on the inner loop entry vs. the outer loop entry. */
648 void
649 vec_info::insert_seq_on_entry (stmt_vec_info context, gimple_seq seq)
651 if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (this))
653 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
654 basic_block new_bb;
655 edge pe;
657 if (context && nested_in_vect_loop_p (loop, context))
658 loop = loop->inner;
660 pe = loop_preheader_edge (loop);
661 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
662 gcc_assert (!new_bb);
664 else
666 gimple_stmt_iterator gsi_region_begin
667 = gsi_after_labels (bbs[0]);
668 gsi_insert_seq_before (&gsi_region_begin, seq, GSI_SAME_STMT);
672 /* Like insert_seq_on_entry but just inserts the single stmt NEW_STMT. */
674 void
675 vec_info::insert_on_entry (stmt_vec_info context, gimple *new_stmt)
677 gimple_seq seq = NULL;
678 gimple_stmt_iterator gsi = gsi_start (seq);
679 gsi_insert_before_without_update (&gsi, new_stmt, GSI_SAME_STMT);
680 insert_seq_on_entry (context, seq);
683 /* Create and initialize a new stmt_vec_info struct for STMT. */
685 stmt_vec_info
686 vec_info::new_stmt_vec_info (gimple *stmt)
688 stmt_vec_info res = XCNEW (class _stmt_vec_info);
689 res->stmt = stmt;
691 STMT_VINFO_TYPE (res) = undef_vec_info_type;
692 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
693 STMT_VINFO_VECTORIZABLE (res) = true;
694 STMT_VINFO_REDUC_TYPE (res) = TREE_CODE_REDUCTION;
695 STMT_VINFO_REDUC_CODE (res) = ERROR_MARK;
696 STMT_VINFO_REDUC_FN (res) = IFN_LAST;
697 STMT_VINFO_REDUC_IDX (res) = -1;
698 STMT_VINFO_SLP_VECT_ONLY (res) = false;
699 STMT_VINFO_SLP_VECT_ONLY_PATTERN (res) = false;
700 STMT_VINFO_VEC_STMTS (res) = vNULL;
701 res->reduc_initial_values = vNULL;
702 res->reduc_scalar_results = vNULL;
704 if (is_a <loop_vec_info> (this)
705 && gimple_code (stmt) == GIMPLE_PHI
706 && is_loop_header_bb_p (gimple_bb (stmt)))
707 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
708 else
709 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
711 STMT_SLP_TYPE (res) = loop_vect;
713 /* This is really "uninitialized" until vect_compute_data_ref_alignment. */
714 res->dr_aux.misalignment = DR_MISALIGNMENT_UNINITIALIZED;
716 return res;
719 /* Associate STMT with INFO. */
721 void
722 vec_info::set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info, bool check_ro)
724 unsigned int uid = gimple_uid (stmt);
725 if (uid == 0)
727 gcc_assert (!check_ro || !stmt_vec_info_ro);
728 gcc_checking_assert (info);
729 uid = stmt_vec_infos.length () + 1;
730 gimple_set_uid (stmt, uid);
731 stmt_vec_infos.safe_push (info);
733 else
735 gcc_checking_assert (info == NULL);
736 stmt_vec_infos[uid - 1] = info;
740 /* Free the contents of stmt_vec_infos. */
742 void
743 vec_info::free_stmt_vec_infos (void)
745 for (stmt_vec_info &info : stmt_vec_infos)
746 if (info != NULL)
747 free_stmt_vec_info (info);
748 stmt_vec_infos.release ();
751 /* Free STMT_INFO. */
753 void
754 vec_info::free_stmt_vec_info (stmt_vec_info stmt_info)
756 if (stmt_info->pattern_stmt_p)
758 gimple_set_bb (stmt_info->stmt, NULL);
759 tree lhs = gimple_get_lhs (stmt_info->stmt);
760 if (lhs && TREE_CODE (lhs) == SSA_NAME)
761 release_ssa_name (lhs);
764 stmt_info->reduc_initial_values.release ();
765 stmt_info->reduc_scalar_results.release ();
766 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
767 STMT_VINFO_VEC_STMTS (stmt_info).release ();
768 free (stmt_info);
771 /* Returns true if S1 dominates S2. */
773 bool
774 vect_stmt_dominates_stmt_p (gimple *s1, gimple *s2)
776 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
778 /* If bb1 is NULL, it should be the GIMPLE_NOP def stmt of a
779 default-definition (D) SSA_NAME. Assume it lives at the beginning of
780 the function and thus dominates everything. */
781 if (!bb1 || s1 == s2)
782 return true;
784 /* If bb2 is NULL, it doesn't dominate any stmt with a bb. */
785 if (!bb2)
786 return false;
788 if (bb1 != bb2)
789 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
791 /* PHIs in the same basic block are assumed to be
792 executed all in parallel; if only one stmt is a PHI,
793 it dominates the other stmt in the same basic block. */
794 if (gimple_code (s1) == GIMPLE_PHI)
795 return true;
797 if (gimple_code (s2) == GIMPLE_PHI)
798 return false;
800 /* Inserted vectorized stmts all have UID 0 while the original stmts
801 in the IL have UID increasing within a BB. Walk from both sides
802 until we find the other stmt or a stmt with UID != 0. */
803 gimple_stmt_iterator gsi1 = gsi_for_stmt (s1);
804 while (gimple_uid (gsi_stmt (gsi1)) == 0)
806 gsi_next (&gsi1);
807 if (gsi_end_p (gsi1))
808 return false;
809 if (gsi_stmt (gsi1) == s2)
810 return true;
812 if (gimple_uid (gsi_stmt (gsi1)) == -1u)
813 return false;
815 gimple_stmt_iterator gsi2 = gsi_for_stmt (s2);
816 while (gimple_uid (gsi_stmt (gsi2)) == 0)
818 gsi_prev (&gsi2);
819 if (gsi_end_p (gsi2))
820 return false;
821 if (gsi_stmt (gsi2) == s1)
822 return true;
824 if (gimple_uid (gsi_stmt (gsi2)) == -1u)
825 return false;
827 if (gimple_uid (gsi_stmt (gsi1)) <= gimple_uid (gsi_stmt (gsi2)))
828 return true;
829 return false;
832 /* A helper function to free scev and LOOP niter information, as well as
833 clear loop constraint LOOP_C_FINITE. */
835 void
836 vect_free_loop_info_assumptions (class loop *loop)
838 scev_reset_htab ();
839 /* We need to explicitly reset the upper bound information since it is
840 used even after free_numbers_of_iterations_estimates. */
841 loop->any_upper_bound = false;
842 loop->any_likely_upper_bound = false;
843 free_numbers_of_iterations_estimates (loop);
844 loop_constraint_clear (loop, LOOP_C_FINITE);
847 /* If LOOP has been versioned during ifcvt, return the internal call
848 guarding it. */
850 gimple *
851 vect_loop_vectorized_call (class loop *loop, gcond **cond)
853 basic_block bb = loop_preheader_edge (loop)->src;
854 gimple *g;
857 g = *gsi_last_bb (bb);
858 if ((g && gimple_code (g) == GIMPLE_COND)
859 || !single_succ_p (bb))
860 break;
861 if (!single_pred_p (bb))
862 break;
863 bb = single_pred (bb);
865 while (1);
866 if (g && gimple_code (g) == GIMPLE_COND)
868 if (cond)
869 *cond = as_a <gcond *> (g);
870 gimple_stmt_iterator gsi = gsi_for_stmt (g);
871 gsi_prev (&gsi);
872 if (!gsi_end_p (gsi))
874 g = gsi_stmt (gsi);
875 if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
876 && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
877 || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
878 return g;
881 return NULL;
884 /* If LOOP has been versioned during loop distribution, return the guarding
885 internal call. */
887 static gimple *
888 vect_loop_dist_alias_call (class loop *loop, function *fun)
890 basic_block bb;
891 basic_block entry;
892 class loop *outer, *orig;
894 if (loop->orig_loop_num == 0)
895 return NULL;
897 orig = get_loop (fun, loop->orig_loop_num);
898 if (orig == NULL)
900 /* The original loop is somehow destroyed. Clear the information. */
901 loop->orig_loop_num = 0;
902 return NULL;
905 if (loop != orig)
906 bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
907 else
908 bb = loop_preheader_edge (loop)->src;
910 outer = bb->loop_father;
911 entry = ENTRY_BLOCK_PTR_FOR_FN (fun);
913 /* Look upward in dominance tree. */
914 for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
915 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
917 gimple_stmt_iterator gsi = gsi_last_bb (bb);
918 if (!safe_is_a <gcond *> (*gsi))
919 continue;
921 gsi_prev (&gsi);
922 if (gsi_end_p (gsi))
923 continue;
925 gimple *g = gsi_stmt (gsi);
926 /* The guarding internal function call must have the same distribution
927 alias id. */
928 if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
929 && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
930 return g;
932 return NULL;
935 /* Set the uids of all the statements in the basic blocks inside the loop
936 represented by LOOP_VINFO. LOOP_VECTORIZED_CALL is the internal
937 call guarding the loop which has been if-converted. */
938 static void
939 set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call,
940 function *fun)
942 tree arg = gimple_call_arg (loop_vectorized_call, 1);
943 basic_block *bbs;
944 unsigned int i;
945 class loop *scalar_loop = get_loop (fun, tree_to_shwi (arg));
947 LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
948 LOOP_VINFO_SCALAR_IV_EXIT (loop_vinfo)
949 = vec_init_loop_exit_info (scalar_loop);
950 gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
951 == loop_vectorized_call);
952 /* If we are going to vectorize the outer loop, prevent vectorization
953 of the inner loop in the scalar loop - either the scalar loop is
954 thrown away, so it is wasted work, or it is used only for
955 a few iterations. */
956 if (scalar_loop->inner)
958 gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
959 if (g)
961 arg = gimple_call_arg (g, 0);
962 get_loop (fun, tree_to_shwi (arg))->dont_vectorize = true;
963 fold_loop_internal_call (g, boolean_false_node);
966 bbs = get_loop_body (scalar_loop);
967 for (i = 0; i < scalar_loop->num_nodes; i++)
969 basic_block bb = bbs[i];
970 gimple_stmt_iterator gsi;
971 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
973 gimple *phi = gsi_stmt (gsi);
974 gimple_set_uid (phi, 0);
976 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
978 gimple *stmt = gsi_stmt (gsi);
979 gimple_set_uid (stmt, 0);
982 free (bbs);
985 /* Generate vectorized code for LOOP and its epilogues. */
987 static unsigned
988 vect_transform_loops (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
989 loop_p loop, gimple *loop_vectorized_call,
990 function *fun)
992 loop_vec_info loop_vinfo = loop_vec_info_for_loop (loop);
994 if (loop_vectorized_call)
995 set_uid_loop_bbs (loop_vinfo, loop_vectorized_call, fun);
997 unsigned HOST_WIDE_INT bytes;
998 if (dump_enabled_p ())
1000 if (GET_MODE_SIZE (loop_vinfo->vector_mode).is_constant (&bytes))
1001 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
1002 "loop vectorized using %wu byte vectors\n", bytes);
1003 else
1004 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
1005 "loop vectorized using variable length vectors\n");
1008 loop_p new_loop = vect_transform_loop (loop_vinfo,
1009 loop_vectorized_call);
1010 /* Now that the loop has been vectorized, allow it to be unrolled
1011 etc. */
1012 loop->force_vectorize = false;
1014 if (loop->simduid)
1016 simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
1017 if (!simduid_to_vf_htab)
1018 simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
1019 simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
1020 simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
1021 *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
1022 = simduid_to_vf_data;
1025 /* We should not have to update virtual SSA form here but some
1026 transforms involve creating new virtual definitions which makes
1027 updating difficult.
1028 We delay the actual update to the end of the pass but avoid
1029 confusing ourselves by forcing need_ssa_update_p () to false. */
1030 unsigned todo = 0;
1031 if (need_ssa_update_p (cfun))
1033 gcc_assert (loop_vinfo->any_known_not_updated_vssa);
1034 fun->gimple_df->ssa_renaming_needed = false;
1035 todo |= TODO_update_ssa_only_virtuals;
1037 gcc_assert (!need_ssa_update_p (cfun));
1039 /* Epilogue of vectorized loop must be vectorized too. */
1040 if (new_loop)
1041 todo |= vect_transform_loops (simduid_to_vf_htab, new_loop, NULL, fun);
1043 return todo;
1046 /* Try to vectorize LOOP. */
1048 static unsigned
1049 try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
1050 unsigned *num_vectorized_loops, loop_p loop,
1051 gimple *loop_vectorized_call,
1052 gimple *loop_dist_alias_call,
1053 function *fun)
1055 unsigned ret = 0;
1056 vec_info_shared shared;
1057 auto_purge_vect_location sentinel;
1058 vect_location = find_loop_location (loop);
1060 if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION
1061 && dump_enabled_p ())
1062 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
1063 "\nAnalyzing loop at %s:%d\n",
1064 LOCATION_FILE (vect_location.get_location_t ()),
1065 LOCATION_LINE (vect_location.get_location_t ()));
1067 /* Try to analyze the loop, retaining an opt_problem if dump_enabled_p. */
1068 opt_loop_vec_info loop_vinfo = vect_analyze_loop (loop, &shared);
1069 loop->aux = loop_vinfo;
1071 if (!loop_vinfo)
1072 if (dump_enabled_p ())
1073 if (opt_problem *problem = loop_vinfo.get_problem ())
1075 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1076 "couldn't vectorize loop\n");
1077 problem->emit_and_clear ();
1080 if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
1082 /* Free existing information if loop is analyzed with some
1083 assumptions. */
1084 if (loop_constraint_set_p (loop, LOOP_C_FINITE))
1085 vect_free_loop_info_assumptions (loop);
1087 /* If we applied if-conversion then try to vectorize the
1088 BB of innermost loops.
1089 ??? Ideally BB vectorization would learn to vectorize
1090 control flow by applying if-conversion on-the-fly; the
1091 following retains the if-converted loop body even when
1092 only non-if-converted parts took part in BB vectorization. */
1093 if (flag_tree_slp_vectorize != 0
1094 && loop_vectorized_call
1095 && ! loop->inner)
1097 basic_block bb = loop->header;
1098 bool require_loop_vectorize = false;
1099 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
1100 !gsi_end_p (gsi); gsi_next (&gsi))
1102 gimple *stmt = gsi_stmt (gsi);
1103 gcall *call = dyn_cast <gcall *> (stmt);
1104 if (call && gimple_call_internal_p (call))
1106 internal_fn ifn = gimple_call_internal_fn (call);
1107 if (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE
1108 /* Don't keep the if-converted parts when the ifn with
1109 specific type is not supported by the backend. */
1110 || (direct_internal_fn_p (ifn)
1111 && !direct_internal_fn_supported_p
1112 (call, OPTIMIZE_FOR_SPEED)))
1114 require_loop_vectorize = true;
1115 break;
1118 gimple_set_uid (stmt, -1);
1119 gimple_set_visited (stmt, false);
1121 if (!require_loop_vectorize)
1123 tree arg = gimple_call_arg (loop_vectorized_call, 1);
1124 class loop *scalar_loop = get_loop (fun, tree_to_shwi (arg));
1125 if (vect_slp_if_converted_bb (bb, scalar_loop))
1127 fold_loop_internal_call (loop_vectorized_call,
1128 boolean_true_node);
1129 loop_vectorized_call = NULL;
1130 ret |= TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
1134 /* If outer loop vectorization fails for LOOP_VECTORIZED guarded
1135 loop, don't vectorize its inner loop; we'll attempt to
1136 vectorize LOOP_VECTORIZED guarded inner loop of the scalar
1137 loop version. */
1138 if (loop_vectorized_call && loop->inner)
1139 loop->inner->dont_vectorize = true;
1140 return ret;
1143 if (!dbg_cnt (vect_loop))
1145 /* Free existing information if loop is analyzed with some
1146 assumptions. */
1147 if (loop_constraint_set_p (loop, LOOP_C_FINITE))
1148 vect_free_loop_info_assumptions (loop);
1149 return ret;
1152 (*num_vectorized_loops)++;
1153 /* Transform LOOP and its epilogues. */
1154 ret |= vect_transform_loops (simduid_to_vf_htab, loop,
1155 loop_vectorized_call, fun);
1157 if (loop_vectorized_call)
1159 fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
1160 ret |= TODO_cleanup_cfg;
1162 if (loop_dist_alias_call)
1164 tree value = gimple_call_arg (loop_dist_alias_call, 1);
1165 fold_loop_internal_call (loop_dist_alias_call, value);
1166 ret |= TODO_cleanup_cfg;
1169 return ret;
1172 /* Try to vectorize LOOP. */
1174 static unsigned
1175 try_vectorize_loop (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
1176 unsigned *num_vectorized_loops, loop_p loop,
1177 function *fun)
1179 if (!((flag_tree_loop_vectorize
1180 && optimize_loop_nest_for_speed_p (loop))
1181 || loop->force_vectorize))
1182 return 0;
1184 return try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops, loop,
1185 vect_loop_vectorized_call (loop),
1186 vect_loop_dist_alias_call (loop, fun), fun);
1190 /* Loop autovectorization. */
1192 namespace {
1194 const pass_data pass_data_vectorize =
1196 GIMPLE_PASS, /* type */
1197 "vect", /* name */
1198 OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
1199 TV_TREE_VECTORIZATION, /* tv_id */
1200 ( PROP_cfg | PROP_ssa ), /* properties_required */
1201 0, /* properties_provided */
1202 0, /* properties_destroyed */
1203 0, /* todo_flags_start */
1204 0, /* todo_flags_finish */
1207 class pass_vectorize : public gimple_opt_pass
1209 public:
1210 pass_vectorize (gcc::context *ctxt)
1211 : gimple_opt_pass (pass_data_vectorize, ctxt)
1214 /* opt_pass methods: */
1215 bool gate (function *fun) final override
1217 return flag_tree_loop_vectorize || fun->has_force_vectorize_loops;
1220 unsigned int execute (function *) final override;
1222 }; // class pass_vectorize
1224 /* Function pass_vectorize::execute.
1226 Entry point to the loop vectorization phase. */
1228 unsigned
1229 pass_vectorize::execute (function *fun)
1231 unsigned int i;
1232 unsigned int num_vectorized_loops = 0;
1233 unsigned int vect_loops_num;
1234 hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
1235 hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
1236 bool any_ifcvt_loops = false;
1237 unsigned ret = 0;
1239 vect_loops_num = number_of_loops (fun);
1241 /* Bail out if there are no loops. */
1242 if (vect_loops_num <= 1)
1243 return 0;
1245 vect_slp_init ();
1247 if (fun->has_simduid_loops)
1248 note_simd_array_uses (&simd_array_to_simduid_htab, fun);
1250 /* ----------- Analyze loops. ----------- */
1252 /* If some loop was duplicated, it gets a bigger number
1253 than all previously defined loops. This fact allows us to run
1254 only over the initial loops, skipping newly generated ones. */
1255 for (auto loop : loops_list (fun, 0))
1256 if (loop->dont_vectorize)
1258 any_ifcvt_loops = true;
1259 /* If-conversion sometimes versions both the outer loop
1260 (for the case when outer loop vectorization might be
1261 desirable) as well as the inner loop in the scalar version
1262 of the loop. So we have:
1263 if (LOOP_VECTORIZED (1, 3))
1265 loop1
1266 loop2
1268 else
1269 loop3 (copy of loop1)
1270 if (LOOP_VECTORIZED (4, 5))
1271 loop4 (copy of loop2)
1272 else
1273 loop5 (copy of loop4)
1274 If iterating over the loops gives us loop3 first (which has
1275 dont_vectorize set), make sure to process loop1 before loop4,
1276 so that we can prevent vectorization of loop4 if loop1
1277 is successfully vectorized. */
1278 if (loop->inner)
1280 gimple *loop_vectorized_call
1281 = vect_loop_vectorized_call (loop);
1282 if (loop_vectorized_call
1283 && vect_loop_vectorized_call (loop->inner))
1285 tree arg = gimple_call_arg (loop_vectorized_call, 0);
1286 class loop *vector_loop
1287 = get_loop (fun, tree_to_shwi (arg));
1288 if (vector_loop && vector_loop != loop)
1290 /* Make sure we don't vectorize it twice. */
1291 vector_loop->dont_vectorize = true;
1292 ret |= try_vectorize_loop (simduid_to_vf_htab,
1293 &num_vectorized_loops,
1294 vector_loop, fun);
1299 else
1300 ret |= try_vectorize_loop (simduid_to_vf_htab, &num_vectorized_loops,
1301 loop, fun);
1303 vect_location = dump_user_location_t ();
1305 statistics_counter_event (fun, "Vectorized loops", num_vectorized_loops);
1306 if (dump_enabled_p ()
1307 || (num_vectorized_loops > 0 && dump_enabled_p ()))
1308 dump_printf_loc (MSG_NOTE, vect_location,
1309 "vectorized %u loops in function.\n",
1310 num_vectorized_loops);
1312 /* ----------- Finalize. ----------- */
1314 if (any_ifcvt_loops)
1315 for (i = 1; i < number_of_loops (fun); i++)
1317 class loop *loop = get_loop (fun, i);
1318 if (loop && loop->dont_vectorize)
1320 gimple *g = vect_loop_vectorized_call (loop);
1321 if (g)
1323 fold_loop_internal_call (g, boolean_false_node);
1324 ret |= TODO_cleanup_cfg;
1325 g = NULL;
1327 else
1328 g = vect_loop_dist_alias_call (loop, fun);
1330 if (g)
1332 fold_loop_internal_call (g, boolean_false_node);
1333 ret |= TODO_cleanup_cfg;
1338 /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins. */
1339 if (fun->has_simduid_loops)
1341 adjust_simduid_builtins (simduid_to_vf_htab, fun);
1342 /* Avoid stale SCEV cache entries for the SIMD_LANE defs. */
1343 scev_reset ();
1345 /* Shrink any "omp simd array" temporary arrays to the
1346 actual vectorization factors. */
1347 if (simd_array_to_simduid_htab)
1348 shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
1349 delete simduid_to_vf_htab;
1350 fun->has_simduid_loops = false;
1352 if (num_vectorized_loops > 0)
1354 /* We are collecting some corner cases where we need to update
1355 virtual SSA form via the TODO but delete the queued update-SSA
1356 state. Force renaming if we think that might be necessary. */
1357 if (ret & TODO_update_ssa_only_virtuals)
1358 mark_virtual_operands_for_renaming (cfun);
1359 /* If we vectorized any loop, only virtual SSA form needs to be updated.
1360 ??? Also while we try hard to update loop-closed SSA form we fail
1361 to properly do this in some corner-cases (see PR56286). */
1362 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
1363 ret |= TODO_cleanup_cfg;
1366 for (i = 1; i < number_of_loops (fun); i++)
1368 loop_vec_info loop_vinfo;
1369 bool has_mask_store;
1371 class loop *loop = get_loop (fun, i);
1372 if (!loop || !loop->aux)
1373 continue;
1374 loop_vinfo = (loop_vec_info) loop->aux;
1375 has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
1376 delete loop_vinfo;
1377 if (has_mask_store
1378 && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
1379 optimize_mask_stores (loop);
1381 auto_bitmap exit_bbs;
1382 /* Perform local CSE; this especially helps because we emit code for
1383 predicates that need to be shared for optimal predicate usage.
1384 However reassoc will re-order them and prevent CSE from working
1385 as it should. CSE only the loop body, not the entry. */
1386 auto_vec<edge> exits = get_loop_exit_edges (loop);
1387 for (edge exit : exits)
1388 bitmap_set_bit (exit_bbs, exit->dest->index);
1390 edge entry = EDGE_PRED (loop_preheader_edge (loop)->src, 0);
1391 do_rpo_vn (fun, entry, exit_bbs);
1393 loop->aux = NULL;
1396 vect_slp_fini ();
1398 return ret;
1401 } // anon namespace
1403 gimple_opt_pass *
1404 make_pass_vectorize (gcc::context *ctxt)
1406 return new pass_vectorize (ctxt);
1409 /* Entry point to the simduid cleanup pass. */
1411 namespace {
1413 const pass_data pass_data_simduid_cleanup =
1415 GIMPLE_PASS, /* type */
1416 "simduid", /* name */
1417 OPTGROUP_NONE, /* optinfo_flags */
1418 TV_NONE, /* tv_id */
1419 ( PROP_ssa | PROP_cfg ), /* properties_required */
1420 0, /* properties_provided */
1421 0, /* properties_destroyed */
1422 0, /* todo_flags_start */
1423 0, /* todo_flags_finish */
1426 class pass_simduid_cleanup : public gimple_opt_pass
1428 public:
1429 pass_simduid_cleanup (gcc::context *ctxt)
1430 : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
1433 /* opt_pass methods: */
1434 opt_pass * clone () final override
1436 return new pass_simduid_cleanup (m_ctxt);
1438 bool gate (function *fun) final override { return fun->has_simduid_loops; }
1439 unsigned int execute (function *) final override;
1441 }; // class pass_simduid_cleanup
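/* Entry point of the simduid cleanup pass: fold the IFN_GOMP_SIMD_* calls
   (assuming a vectorization factor of 1, since no loop was vectorized here)
   and shrink any "omp simd array" temporaries accordingly.  */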
1443 unsigned int
1444 pass_simduid_cleanup::execute (function *fun)
1446 hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
1448 note_simd_array_uses (&simd_array_to_simduid_htab, fun);
1450 /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins. */
1451 adjust_simduid_builtins (NULL, fun);
1453 /* Shrink any "omp simd array" temporary arrays to the
1454 actual vectorization factors. */
1455 if (simd_array_to_simduid_htab)
1456 shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
1457 fun->has_simduid_loops = false;
1458 return 0;
1461 } // anon namespace
1463 gimple_opt_pass *
1464 make_pass_simduid_cleanup (gcc::context *ctxt)
1466 return new pass_simduid_cleanup (ctxt);
1470 /* Entry point to basic block SLP phase. */
1472 namespace {
1474 const pass_data pass_data_slp_vectorize =
1476 GIMPLE_PASS, /* type */
1477 "slp", /* name */
1478 OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
1479 TV_TREE_SLP_VECTORIZATION, /* tv_id */
1480 ( PROP_ssa | PROP_cfg ), /* properties_required */
1481 0, /* properties_provided */
1482 0, /* properties_destroyed */
1483 0, /* todo_flags_start */
1484 TODO_update_ssa, /* todo_flags_finish */
1487 class pass_slp_vectorize : public gimple_opt_pass
1489 public:
1490 pass_slp_vectorize (gcc::context *ctxt)
1491 : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
1494 /* opt_pass methods: */
1495 opt_pass * clone () final override { return new pass_slp_vectorize (m_ctxt); }
1496 bool gate (function *) final override { return flag_tree_slp_vectorize != 0; }
1497 unsigned int execute (function *) final override;
1499 }; // class pass_slp_vectorize
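/* Entry point of the BB SLP pass: run basic-block SLP vectorization over
   the whole function.  */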
1501 unsigned int
1502 pass_slp_vectorize::execute (function *fun)
1504 auto_purge_vect_location sentinel;
1505 basic_block bb;
1507 bool in_loop_pipeline = scev_initialized_p ();
1508 if (!in_loop_pipeline)
1510 loop_optimizer_init (LOOPS_NORMAL);
1511 scev_initialize ();
1514 /* Mark all stmts as not belonging to the current region and unvisited. */
1515 FOR_EACH_BB_FN (bb, fun)
1517 for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
1518 gsi_next (&gsi))
1520 gphi *stmt = gsi.phi ();
1521 gimple_set_uid (stmt, -1);
1522 gimple_set_visited (stmt, false);
1524 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
1525 gsi_next (&gsi))
1527 gimple *stmt = gsi_stmt (gsi);
1528 gimple_set_uid (stmt, -1);
1529 gimple_set_visited (stmt, false);
1533 vect_slp_init ();
1535 vect_slp_function (fun);
1537 vect_slp_fini ();
1539 if (!in_loop_pipeline)
1541 scev_finalize ();
1542 loop_optimizer_finalize ();
1545 return 0;
1548 } // anon namespace
1550 gimple_opt_pass *
1551 make_pass_slp_vectorize (gcc::context *ctxt)
1553 return new pass_slp_vectorize (ctxt);
1557 /* Increase alignment of global arrays to improve vectorization potential.
1558 TODO:
1559 - Consider also structs that have an array field.
1560 - Use ipa analysis to prune arrays that can't be vectorized?
1561 This should involve global alignment analysis and in the future also
1562 array padding. */
1564 static unsigned get_vec_alignment_for_type (tree);
1565 static hash_map<tree, unsigned> *type_align_map;
1567 /* Return the alignment of the vector type corresponding to the array's
1568 scalar type, or 0 if no suitable vector type exists. */
1569 static unsigned
1570 get_vec_alignment_for_array_type (tree type)
1572 gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
1573 poly_uint64 array_size, vector_size;
1575 tree scalar_type = strip_array_types (type);
1576 tree vectype = get_related_vectype_for_scalar_type (VOIDmode, scalar_type);
1577 if (!vectype
1578 || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
1579 || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
1580 || maybe_lt (array_size, vector_size))
1581 return 0;
1583 return TYPE_ALIGN (vectype);
1586 /* Return the maximum alignment over the record's fields of the vector type
1587 corresponding to each field's scalar type. For now, we only consider fields
1588 whose offset is a multiple of their vector alignment.
1589 Return 0 if no suitable field is found. */
1590 static unsigned
1591 get_vec_alignment_for_record_type (tree type)
1593 gcc_assert (TREE_CODE (type) == RECORD_TYPE);
1595 unsigned max_align = 0, alignment;
1596 HOST_WIDE_INT offset;
1597 tree offset_tree;
1599 if (TYPE_PACKED (type))
1600 return 0;
1602 unsigned *slot = type_align_map->get (type);
1603 if (slot)
1604 return *slot;
1606 for (tree field = first_field (type);
1607 field != NULL_TREE;
1608 field = DECL_CHAIN (field))
1610 /* Skip if not FIELD_DECL or if alignment is set by user. */
1611 if (TREE_CODE (field) != FIELD_DECL
1612 || DECL_USER_ALIGN (field)
1613 || DECL_ARTIFICIAL (field))
1614 continue;
1616 /* We don't need to process the type further if offset is variable,
1617 since the offsets of remaining members will also be variable. */
1618 if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
1619 || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
1620 break;
1622 /* Similarly stop processing the type if offset_tree
1623 does not fit in unsigned HOST_WIDE_INT. */
1624 offset_tree = bit_position (field);
1625 if (!tree_fits_uhwi_p (offset_tree))
1626 break;
1628 offset = tree_to_uhwi (offset_tree);
1629 alignment = get_vec_alignment_for_type (TREE_TYPE (field));
1631 /* Get maximum alignment of vectorized field/array among those members
1632 whose offset is a multiple of the vector alignment. */
1633 if (alignment
1634 && (offset % alignment == 0)
1635 && (alignment > max_align))
1636 max_align = alignment;
1639 type_align_map->put (type, max_align);
1640 return max_align;
1643 /* Return alignment of vector type corresponding to decl's scalar type
1644 or 0 if it doesn't exist or the vector alignment is less than
1645 decl's alignment. */
1646 static unsigned
1647 get_vec_alignment_for_type (tree type)
1649 if (type == NULL_TREE)
1650 return 0;
1652 gcc_assert (TYPE_P (type));
1654 static unsigned alignment = 0;
1655 switch (TREE_CODE (type))
1657 case ARRAY_TYPE:
1658 alignment = get_vec_alignment_for_array_type (type);
1659 break;
1660 case RECORD_TYPE:
1661 alignment = get_vec_alignment_for_record_type (type);
1662 break;
1663 default:
1664 alignment = 0;
1665 break;
1668 return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
1671 /* Entry point to increase_alignment pass. */
1672 static unsigned int
1673 increase_alignment (void)
1675 varpool_node *vnode;
1677 vect_location = dump_user_location_t ();
1678 type_align_map = new hash_map<tree, unsigned>;
1680 /* Increase the alignment of all global arrays for vectorization. */
1681 FOR_EACH_DEFINED_VARIABLE (vnode)
1683 tree decl = vnode->decl;
1684 unsigned int alignment;
1686 if ((decl_in_symtab_p (decl)
1687 && !symtab_node::get (decl)->can_increase_alignment_p ())
1688 || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
1689 continue;
1691 alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
1692 if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
1694 vnode->increase_alignment (alignment);
1695 if (dump_enabled_p ())
1696 dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl);
1700 delete type_align_map;
1701 return 0;
1705 namespace {
1707 const pass_data pass_data_ipa_increase_alignment =
1709 SIMPLE_IPA_PASS, /* type */
1710 "increase_alignment", /* name */
1711 OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
1712 TV_IPA_OPT, /* tv_id */
1713 0, /* properties_required */
1714 0, /* properties_provided */
1715 0, /* properties_destroyed */
1716 0, /* todo_flags_start */
1717 0, /* todo_flags_finish */
1720 class pass_ipa_increase_alignment : public simple_ipa_opt_pass
1722 public:
1723 pass_ipa_increase_alignment (gcc::context *ctxt)
1724 : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
1727 /* opt_pass methods: */
1728 bool gate (function *) final override
1730 return flag_section_anchors && flag_tree_loop_vectorize;
1733 unsigned int execute (function *) final override
1735 return increase_alignment ();
1738 }; // class pass_ipa_increase_alignment
1740 } // anon namespace
1742 simple_ipa_opt_pass *
1743 make_pass_ipa_increase_alignment (gcc::context *ctxt)
1745 return new pass_ipa_increase_alignment (ctxt);
1748 /* If the condition represented by T is a comparison or the SSA name
1749 result of a comparison, extract the comparison's operands. Represent
1750 T as NE_EXPR <T, 0> otherwise. */
1752 void
1753 scalar_cond_masked_key::get_cond_ops_from_tree (tree t)
1755 if (TREE_CODE_CLASS (TREE_CODE (t)) == tcc_comparison)
1757 this->code = TREE_CODE (t);
1758 this->op0 = TREE_OPERAND (t, 0);
1759 this->op1 = TREE_OPERAND (t, 1);
1760 this->inverted_p = false;
1761 return;
1764 if (TREE_CODE (t) == SSA_NAME)
1765 if (gassign *stmt = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (t)))
1767 tree_code code = gimple_assign_rhs_code (stmt);
1768 if (TREE_CODE_CLASS (code) == tcc_comparison)
1770 this->code = code;
1771 this->op0 = gimple_assign_rhs1 (stmt);
1772 this->op1 = gimple_assign_rhs2 (stmt);
1773 this->inverted_p = false;
1774 return;
1776 else if (code == BIT_NOT_EXPR)
1778 tree n_op = gimple_assign_rhs1 (stmt);
1779 if ((stmt = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (n_op))))
1781 code = gimple_assign_rhs_code (stmt);
1782 if (TREE_CODE_CLASS (code) == tcc_comparison)
1784 this->code = code;
1785 this->op0 = gimple_assign_rhs1 (stmt);
1786 this->op1 = gimple_assign_rhs2 (stmt);
1787 this->inverted_p = true;
1788 return;
1794 this->code = NE_EXPR;
1795 this->op0 = t;
1796 this->op1 = build_zero_cst (TREE_TYPE (t));
1797 this->inverted_p = false;
1800 /* See the comment above the declaration for details. */
1802 unsigned int
1803 vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
1804 stmt_vec_info stmt_info, slp_tree,
1805 tree vectype, int misalign,
1806 vect_cost_model_location where)
1808 unsigned int cost
1809 = builtin_vectorization_cost (kind, vectype, misalign) * count;
1810 return record_stmt_cost (stmt_info, where, cost);
1813 /* See the comment above the declaration for details. */
1815 void
1816 vector_costs::finish_cost (const vector_costs *)
1818 gcc_assert (!m_finished);
1819 m_finished = true;
1822 /* Record a base cost of COST units against WHERE. If STMT_INFO is
1823 nonnull, use it to adjust the cost based on execution frequency
1824 (where appropriate). */
1826 unsigned int
1827 vector_costs::record_stmt_cost (stmt_vec_info stmt_info,
1828 vect_cost_model_location where,
1829 unsigned int cost)
1831 cost = adjust_cost_for_freq (stmt_info, where, cost);
1832 m_costs[where] += cost;
1833 return cost;
1836 /* COST is the base cost we have calculated for an operation in location WHERE.
1837 If STMT_INFO is nonnull, use it to adjust the cost based on execution
1838 frequency (where appropriate). Return the adjusted cost. */
1840 unsigned int
1841 vector_costs::adjust_cost_for_freq (stmt_vec_info stmt_info,
1842 vect_cost_model_location where,
1843 unsigned int cost)
1845 /* Statements in an inner loop relative to the loop being
1846 vectorized are weighted more heavily. The value here is
1847 arbitrary and could potentially be improved with analysis. */
1848 if (where == vect_body
1849 && stmt_info
1850 && stmt_in_inner_loop_p (m_vinfo, stmt_info))
1852 loop_vec_info loop_vinfo = as_a<loop_vec_info> (m_vinfo);
1853 cost *= LOOP_VINFO_INNER_LOOP_COST_FACTOR (loop_vinfo);
1855 return cost;
1858 /* See the comment above the declaration for details. */
1860 bool
1861 vector_costs::better_main_loop_than_p (const vector_costs *other) const
1863 int diff = compare_inside_loop_cost (other);
1864 if (diff != 0)
1865 return diff < 0;
1867 /* If there's nothing to choose between the loop bodies, see whether
1868 there's a difference in the prologue and epilogue costs. */
1869 diff = compare_outside_loop_cost (other);
1870 if (diff != 0)
1871 return diff < 0;
1873 return false;
1877 /* See the comment above the declaration for details. */
1879 bool
1880 vector_costs::better_epilogue_loop_than_p (const vector_costs *other,
1881 loop_vec_info main_loop) const
1883 loop_vec_info this_loop_vinfo = as_a<loop_vec_info> (this->m_vinfo);
1884 loop_vec_info other_loop_vinfo = as_a<loop_vec_info> (other->m_vinfo);
1886 poly_int64 this_vf = LOOP_VINFO_VECT_FACTOR (this_loop_vinfo);
1887 poly_int64 other_vf = LOOP_VINFO_VECT_FACTOR (other_loop_vinfo);
1889 poly_uint64 main_poly_vf = LOOP_VINFO_VECT_FACTOR (main_loop);
1890 unsigned HOST_WIDE_INT main_vf;
1891 unsigned HOST_WIDE_INT other_factor, this_factor, other_cost, this_cost;
1892 /* If we can determine how many iterations are left for the epilogue
1893 loop, that is if both the main loop's vectorization factor and number
1894 of iterations are constant, then we use them to calculate the cost of
1895 the epilogue loop together with a 'likely value' for the epilogue's
1896 vectorization factor. Otherwise we use the main loop's vectorization
1897 factor and the maximum poly value for the epilogue's. If the target
1898 has not provided a sensible upper bound, poly vectorization
1899 factors are likely to be favored over constant ones. */
1900 if (main_poly_vf.is_constant (&main_vf)
1901 && LOOP_VINFO_NITERS_KNOWN_P (main_loop))
1903 unsigned HOST_WIDE_INT niters
1904 = LOOP_VINFO_INT_NITERS (main_loop) % main_vf;
1905 HOST_WIDE_INT other_likely_vf
1906 = estimated_poly_value (other_vf, POLY_VALUE_LIKELY);
1907 HOST_WIDE_INT this_likely_vf
1908 = estimated_poly_value (this_vf, POLY_VALUE_LIKELY);
1910 /* If the epilogue is using partial vectors we account for the
1911 partial iteration here too. */
1912 other_factor = niters / other_likely_vf;
1913 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (other_loop_vinfo)
1914 && niters % other_likely_vf != 0)
1915 other_factor++;
1917 this_factor = niters / this_likely_vf;
1918 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (this_loop_vinfo)
1919 && niters % this_likely_vf != 0)
1920 this_factor++;
1922 else
1924 unsigned HOST_WIDE_INT main_vf_max
1925 = estimated_poly_value (main_poly_vf, POLY_VALUE_MAX);
1926 unsigned HOST_WIDE_INT other_vf_max
1927 = estimated_poly_value (other_vf, POLY_VALUE_MAX);
1928 unsigned HOST_WIDE_INT this_vf_max
1929 = estimated_poly_value (this_vf, POLY_VALUE_MAX);
1931 other_factor = CEIL (main_vf_max, other_vf_max);
1932 this_factor = CEIL (main_vf_max, this_vf_max);
1934 /* If the loop is not using partial vectors then it will iterate one
1935 time less than one that does. It is safe to subtract one here,
1936 because the main loop's vf is always at least 2x bigger than that
1937 of an epilogue. */
1938 if (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (other_loop_vinfo))
1939 other_factor -= 1;
1940 if (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (this_loop_vinfo))
1941 this_factor -= 1;
1944 /* Compute the costs by multiplying the inside costs by the factor and
1945 adding the outside costs for a more complete picture. The factor is the
1946 number of times we expect this epilogue to iterate. */
1947 other_cost = other->body_cost () * other_factor;
1948 this_cost = this->body_cost () * this_factor;
1949 other_cost += other->outside_cost ();
1950 this_cost += this->outside_cost ();
1951 return this_cost < other_cost;
1954 /* A <=>-style subroutine of better_main_loop_than_p. Check whether we can
1955 determine the return value of better_main_loop_than_p by comparing the
1956 inside (loop body) costs of THIS and OTHER. Return:
1958 * -1 if better_main_loop_than_p should return true.
1959 * 1 if better_main_loop_than_p should return false.
1960 * 0 if we can't decide. */
1963 vector_costs::compare_inside_loop_cost (const vector_costs *other) const
1965 loop_vec_info this_loop_vinfo = as_a<loop_vec_info> (this->m_vinfo);
1966 loop_vec_info other_loop_vinfo = as_a<loop_vec_info> (other->m_vinfo);
1968 struct loop *loop = LOOP_VINFO_LOOP (this_loop_vinfo);
1969 gcc_assert (LOOP_VINFO_LOOP (other_loop_vinfo) == loop);
1971 poly_int64 this_vf = LOOP_VINFO_VECT_FACTOR (this_loop_vinfo);
1972 poly_int64 other_vf = LOOP_VINFO_VECT_FACTOR (other_loop_vinfo);
1974 /* Limit the VFs to what is likely to be the maximum number of iterations,
1975 to handle cases in which at least one loop_vinfo is fully-masked. */
1976 HOST_WIDE_INT estimated_max_niter = likely_max_stmt_executions_int (loop);
1977 if (estimated_max_niter != -1)
1979 if (estimated_poly_value (this_vf, POLY_VALUE_MIN)
1980 >= estimated_max_niter)
1981 this_vf = estimated_max_niter;
1982 if (estimated_poly_value (other_vf, POLY_VALUE_MIN)
1983 >= estimated_max_niter)
1984 other_vf = estimated_max_niter;
1987 /* Check whether the (fractional) cost per scalar iteration is lower or
1988 higher: this_inside_cost / this_vf vs. other_inside_cost / other_vf. */
1989 poly_int64 rel_this = this_loop_vinfo->vector_costs->body_cost () * other_vf;
1990 poly_int64 rel_other
1991 = other_loop_vinfo->vector_costs->body_cost () * this_vf;
1993 HOST_WIDE_INT est_rel_this_min
1994 = estimated_poly_value (rel_this, POLY_VALUE_MIN);
1995 HOST_WIDE_INT est_rel_this_max
1996 = estimated_poly_value (rel_this, POLY_VALUE_MAX);
1998 HOST_WIDE_INT est_rel_other_min
1999 = estimated_poly_value (rel_other, POLY_VALUE_MIN);
2000 HOST_WIDE_INT est_rel_other_max
2001 = estimated_poly_value (rel_other, POLY_VALUE_MAX);
2003 /* Check first if we can make out an unambiguous total order from the minimum
2004 and maximum estimates. */
2005 if (est_rel_this_min < est_rel_other_min
2006 && est_rel_this_max < est_rel_other_max)
2007 return -1;
2009 if (est_rel_other_min < est_rel_this_min
2010 && est_rel_other_max < est_rel_this_max)
2011 return 1;
2013 /* When other_loop_vinfo uses a variable vectorization factor,
2014 we know that it has a lower cost for at least one runtime VF.
2015 However, we don't know how likely that VF is.
2017 One option would be to compare the costs for the estimated VFs.
2018 The problem is that that can put too much pressure on the cost
2019 model. E.g. if the estimated VF is also the lowest possible VF,
2020 and if other_loop_vinfo is 1 unit worse than this_loop_vinfo
2021 for the estimated VF, we'd then choose this_loop_vinfo even
2022 though (a) this_loop_vinfo might not actually be better than
2023 other_loop_vinfo for that VF and (b) it would be significantly
2024 worse at larger VFs.
2026 Here we go for a hacky compromise: pick this_loop_vinfo if it is
2027 no more expensive than other_loop_vinfo even after doubling the
2028 estimated other_loop_vinfo VF. For all but trivial loops, this
2029 ensures that we only pick this_loop_vinfo if it is significantly
2030 better than other_loop_vinfo at the estimated VF. */
2031 if (est_rel_other_min != est_rel_this_min
2032 || est_rel_other_max != est_rel_this_max)
2034 HOST_WIDE_INT est_rel_this_likely
2035 = estimated_poly_value (rel_this, POLY_VALUE_LIKELY);
2036 HOST_WIDE_INT est_rel_other_likely
2037 = estimated_poly_value (rel_other, POLY_VALUE_LIKELY);
2039 return est_rel_this_likely * 2 <= est_rel_other_likely ? -1 : 1;
2042 return 0;
2045 /* A <=>-style subroutine of better_main_loop_than_p, used when there is
2046 nothing to choose between the inside (loop body) costs of THIS and OTHER.
2047 Check whether we can determine the return value of better_main_loop_than_p
2048 by comparing the outside (prologue and epilogue) costs of THIS and OTHER.
2049 Return:
2051 * -1 if better_main_loop_than_p should return true.
2052 * 1 if better_main_loop_than_p should return false.
2053 * 0 if we can't decide. */
2056 vector_costs::compare_outside_loop_cost (const vector_costs *other) const
2058 auto this_outside_cost = this->outside_cost ();
2059 auto other_outside_cost = other->outside_cost ();
2060 if (this_outside_cost != other_outside_cost)
2061 return this_outside_cost < other_outside_cost ? -1 : 1;
2063 return 0;