gcc/gimple-range-cache.cc
1 /* Gimple ranger SSA cache implementation.
2 Copyright (C) 2017-2025 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "gimple-range.h"
31 #include "value-range-storage.h"
32 #include "tree-cfg.h"
33 #include "target.h"
34 #include "attribs.h"
35 #include "gimple-iterator.h"
36 #include "gimple-walk.h"
37 #include "cfganal.h"
39 #define DEBUG_RANGE_CACHE (dump_file \
40 && (param_ranger_debug & RANGER_DEBUG_CACHE))
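// (Cache debugging output is normally enabled with a dump file plus
// --param=ranger-debug=cache or --param=ranger-debug=all.)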
42 // This class represents the API into a cache of ranges for an SSA_NAME.
43 // Routines must be implemented to set, get, and query if a value is set.
45 class ssa_block_ranges
47 public:
48 ssa_block_ranges (tree t) : m_type (t) { }
49 virtual bool set_bb_range (const_basic_block bb, const vrange &r) = 0;
50 virtual bool get_bb_range (vrange &r, const_basic_block bb) = 0;
51 virtual bool bb_range_p (const_basic_block bb) = 0;
53 void dump(FILE *f);
54 private:
55 tree m_type;
58 // Print the list of known ranges for file F in a nice format.
60 void
61 ssa_block_ranges::dump (FILE *f)
63 basic_block bb;
64 value_range r (m_type);
66 FOR_EACH_BB_FN (bb, cfun)
67 if (get_bb_range (r, bb))
69 fprintf (f, "BB%d -> ", bb->index);
70 r.dump (f);
71 fprintf (f, "\n");
75 // This class implements the range cache as a linear vector, indexed by BB.
76 // It caches a varying and undefined range which are used instead of
77 // allocating new ones each time.
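// Storing a varying or undefined range therefore reuses the shared cached
// copy and performs no new allocation.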
79 class sbr_vector : public ssa_block_ranges
81 public:
82 sbr_vector (tree t, vrange_allocator *allocator, bool zero_p = true);
84 virtual bool set_bb_range (const_basic_block bb, const vrange &r) override;
85 virtual bool get_bb_range (vrange &r, const_basic_block bb) override;
86 virtual bool bb_range_p (const_basic_block bb) override;
87 protected:
88 vrange_storage **m_tab; // Non growing vector.
89 int m_tab_size;
90 vrange_storage *m_varying;
91 vrange_storage *m_undefined;
92 tree m_type;
93 vrange_allocator *m_range_allocator;
94 bool m_zero_p;
95 void grow ();
99 // Initialize a block cache for an ssa_name of type T.
101 sbr_vector::sbr_vector (tree t, vrange_allocator *allocator, bool zero_p)
102 : ssa_block_ranges (t)
104 gcc_checking_assert (TYPE_P (t));
105 m_type = t;
106 m_zero_p = zero_p;
107 m_range_allocator = allocator;
108 m_tab_size = last_basic_block_for_fn (cfun) + 1;
109 m_tab = static_cast <vrange_storage **>
110 (allocator->alloc (m_tab_size * sizeof (vrange_storage *)));
111 if (zero_p)
112 memset (m_tab, 0, m_tab_size * sizeof (vrange *));
114 // Create the cached type range.
115 m_varying = m_range_allocator->clone_varying (t);
116 m_undefined = m_range_allocator->clone_undefined (t);
119 // Grow the vector when the CFG has increased in size.
121 void
122 sbr_vector::grow ()
124 int curr_bb_size = last_basic_block_for_fn (cfun);
125 gcc_checking_assert (curr_bb_size > m_tab_size);
127 // Increase by the max of a) 128, b) the needed increase * 2, c) 10% of current size.
128 int inc = MAX ((curr_bb_size - m_tab_size) * 2, 128);
129 inc = MAX (inc, curr_bb_size / 10);
130 int new_size = inc + curr_bb_size;
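// For example, growing a 100 entry table to cover 105 blocks computes
// inc = MAX (MAX ((105 - 100) * 2, 128), 105 / 10) = 128, for a new size
// of 233 entries.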
132 // Allocate new memory, copy the old vector and clear the new space.
133 vrange_storage **t = static_cast <vrange_storage **>
134 (m_range_allocator->alloc (new_size * sizeof (vrange_storage *)));
135 memcpy (t, m_tab, m_tab_size * sizeof (vrange_storage *));
136 if (m_zero_p)
137 memset (t + m_tab_size, 0, (new_size - m_tab_size) * sizeof (vrange_storage *));
139 m_tab = t;
140 m_tab_size = new_size;
143 // Set the range for block BB to be R.
145 bool
146 sbr_vector::set_bb_range (const_basic_block bb, const vrange &r)
148 vrange_storage *m;
149 if (bb->index >= m_tab_size)
150 grow ();
151 if (r.varying_p ())
152 m = m_varying;
153 else if (r.undefined_p ())
154 m = m_undefined;
155 else
156 m = m_range_allocator->clone (r);
157 m_tab[bb->index] = m;
158 return true;
161 // Return the range associated with block BB in R. Return false if
162 // there is no range.
164 bool
165 sbr_vector::get_bb_range (vrange &r, const_basic_block bb)
167 if (bb->index >= m_tab_size)
168 return false;
169 vrange_storage *m = m_tab[bb->index];
170 if (m)
172 m->get_vrange (r, m_type);
173 return true;
175 return false;
178 // Return true if a range is present.
180 bool
181 sbr_vector::bb_range_p (const_basic_block bb)
183 if (bb->index < m_tab_size)
184 return m_tab[bb->index] != NULL;
185 return false;
188 // Like an sbr_vector, except it uses a bitmap to manage whether a value is set
189 // or not rather than cleared memory.
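// The underlying vector is created with zero_p = false, so its memory is
// never cleared; the m_has_value bitmap records which entries are valid.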
191 class sbr_lazy_vector : public sbr_vector
193 public:
194 sbr_lazy_vector (tree t, vrange_allocator *allocator, bitmap_obstack *bm);
196 virtual bool set_bb_range (const_basic_block bb, const vrange &r) override;
197 virtual bool get_bb_range (vrange &r, const_basic_block bb) override;
198 virtual bool bb_range_p (const_basic_block bb) override;
199 protected:
200 bitmap m_has_value;
203 sbr_lazy_vector::sbr_lazy_vector (tree t, vrange_allocator *allocator,
204 bitmap_obstack *bm)
205 : sbr_vector (t, allocator, false)
207 m_has_value = BITMAP_ALLOC (bm);
210 bool
211 sbr_lazy_vector::set_bb_range (const_basic_block bb, const vrange &r)
213 sbr_vector::set_bb_range (bb, r);
214 bitmap_set_bit (m_has_value, bb->index);
215 return true;
218 bool
219 sbr_lazy_vector::get_bb_range (vrange &r, const_basic_block bb)
221 if (bitmap_bit_p (m_has_value, bb->index))
222 return sbr_vector::get_bb_range (r, bb);
223 return false;
226 bool
227 sbr_lazy_vector::bb_range_p (const_basic_block bb)
229 return bitmap_bit_p (m_has_value, bb->index);
232 // This class implements the on entry cache via a sparse bitmap.
233 // It uses the quad bit routines to access 4 bits at a time.
234 // A value of 0 (the default) means there is no entry, and a value of
235 // 1 thru SBR_NUM represents an element in the m_range vector.
236 // Varying is given the first value (1) and pre-cached.
237 // SBR_NUM + 1 represents the value of UNDEFINED, and is never stored.
238 // SBR_NUM is the number of values that can be cached.
239 // Indexes are 1..SBR_NUM and are stored locally at m_range[0..SBR_NUM-1]
241 #define SBR_NUM 14
242 #define SBR_UNDEF SBR_NUM + 1
243 #define SBR_VARYING 1
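// With SBR_NUM == 14, a stored quad value of 0 means "no entry", 1
// (SBR_VARYING) selects m_range[0], 2 thru 14 select m_range[1] thru
// m_range[13], and 15 (SBR_UNDEF) means UNDEFINED and has no m_range slot.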
245 class sbr_sparse_bitmap : public ssa_block_ranges
247 public:
248 sbr_sparse_bitmap (tree t, vrange_allocator *allocator, bitmap_obstack *bm);
249 virtual bool set_bb_range (const_basic_block bb, const vrange &r) override;
250 virtual bool get_bb_range (vrange &r, const_basic_block bb) override;
251 virtual bool bb_range_p (const_basic_block bb) override;
252 private:
253 void bitmap_set_quad (bitmap head, int quad, int quad_value);
254 int bitmap_get_quad (const_bitmap head, int quad);
255 vrange_allocator *m_range_allocator;
256 vrange_storage *m_range[SBR_NUM];
257 bitmap_head bitvec;
258 tree m_type;
261 // Initialize a block cache for an ssa_name of type T.
263 sbr_sparse_bitmap::sbr_sparse_bitmap (tree t, vrange_allocator *allocator,
264 bitmap_obstack *bm)
265 : ssa_block_ranges (t)
267 gcc_checking_assert (TYPE_P (t));
268 m_type = t;
269 bitmap_initialize (&bitvec, bm);
270 bitmap_tree_view (&bitvec);
271 m_range_allocator = allocator;
272 // Pre-cache varying.
273 m_range[0] = m_range_allocator->clone_varying (t);
274 // Pre-cache zero and non-zero values for pointers.
275 if (POINTER_TYPE_P (t))
277 prange nonzero;
278 nonzero.set_nonzero (t);
279 m_range[1] = m_range_allocator->clone (nonzero);
280 prange zero;
281 zero.set_zero (t);
282 m_range[2] = m_range_allocator->clone (zero);
284 else
285 m_range[1] = m_range[2] = NULL;
286 // Clear SBR_NUM entries.
287 for (int x = 3; x < SBR_NUM; x++)
288 m_range[x] = 0;
291 // Set 4 bit values in a sparse bitmap. This allows a bitmap to
292 // function as a sparse array of 4 bit values.
293 // QUAD is the index, QUAD_VALUE is the 4 bit value to set.
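// Each quad is an aligned 4 bit chunk, so quad Q occupies the bits
// starting at bit 4 * Q of the bitmap.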
295 inline void
296 sbr_sparse_bitmap::bitmap_set_quad (bitmap head, int quad, int quad_value)
298 bitmap_set_aligned_chunk (head, quad, 4, (BITMAP_WORD) quad_value);
301 // Get a 4 bit value from a sparse bitmap. This allows a bitmap to
302 // function as a sparse array of 4 bit values.
303 // QUAD is the index.
304 inline int
305 sbr_sparse_bitmap::bitmap_get_quad (const_bitmap head, int quad)
307 return (int) bitmap_get_aligned_chunk (head, quad, 4);
310 // Set the range on entry to basic block BB to R.
312 bool
313 sbr_sparse_bitmap::set_bb_range (const_basic_block bb, const vrange &r)
315 if (r.undefined_p ())
317 bitmap_set_quad (&bitvec, bb->index, SBR_UNDEF);
318 return true;
321 // Loop thru the values to see if R is already present.
322 for (int x = 0; x < SBR_NUM; x++)
323 if (!m_range[x] || m_range[x]->equal_p (r))
325 if (!m_range[x])
326 m_range[x] = m_range_allocator->clone (r);
327 bitmap_set_quad (&bitvec, bb->index, x + 1);
328 return true;
330 // All values are taken, default to VARYING.
331 bitmap_set_quad (&bitvec, bb->index, SBR_VARYING);
332 return false;
335 // Return the range associated with block BB in R. Return false if
336 // there is no range.
338 bool
339 sbr_sparse_bitmap::get_bb_range (vrange &r, const_basic_block bb)
341 int value = bitmap_get_quad (&bitvec, bb->index);
343 if (!value)
344 return false;
346 gcc_checking_assert (value <= SBR_UNDEF);
347 if (value == SBR_UNDEF)
348 r.set_undefined ();
349 else
350 m_range[value - 1]->get_vrange (r, m_type);
351 return true;
354 // Return true if a range is present.
356 bool
357 sbr_sparse_bitmap::bb_range_p (const_basic_block bb)
359 return (bitmap_get_quad (&bitvec, bb->index) != 0);
362 // -------------------------------------------------------------------------
364 // Initialize the block cache.
366 block_range_cache::block_range_cache ()
368 bitmap_obstack_initialize (&m_bitmaps);
369 m_ssa_ranges.create (0);
370 m_ssa_ranges.safe_grow_cleared (num_ssa_names);
371 m_range_allocator = new vrange_allocator;
374 // Remove any m_block_caches which have been created.
376 block_range_cache::~block_range_cache ()
378 delete m_range_allocator;
379 // Release the vector itself.
380 m_ssa_ranges.release ();
381 bitmap_obstack_release (&m_bitmaps);
384 // Set the range for NAME on entry to block BB to R.
385 // If it has not been accessed yet, allocate it first.
387 bool
388 block_range_cache::set_bb_range (tree name, const_basic_block bb,
389 const vrange &r)
391 unsigned v = SSA_NAME_VERSION (name);
392 if (v >= m_ssa_ranges.length ())
393 m_ssa_ranges.safe_grow_cleared (num_ssa_names);
395 if (!m_ssa_ranges[v])
397 // Use sparse bitmap representation if there are too many basic blocks.
398 if (last_basic_block_for_fn (cfun) > param_vrp_sparse_threshold)
400 void *r = m_range_allocator->alloc (sizeof (sbr_sparse_bitmap));
401 m_ssa_ranges[v] = new (r) sbr_sparse_bitmap (TREE_TYPE (name),
402 m_range_allocator,
403 &m_bitmaps);
405 else if (last_basic_block_for_fn (cfun) < param_vrp_vector_threshold)
407 // For small CFGs use the basic vector implementation.
408 void *r = m_range_allocator->alloc (sizeof (sbr_vector));
409 m_ssa_ranges[v] = new (r) sbr_vector (TREE_TYPE (name),
410 m_range_allocator);
412 else
414 // Otherwise use the lazy vector implementation.
415 void *r = m_range_allocator->alloc (sizeof (sbr_lazy_vector));
416 m_ssa_ranges[v] = new (r) sbr_lazy_vector (TREE_TYPE (name),
417 m_range_allocator,
418 &m_bitmaps);
421 return m_ssa_ranges[v]->set_bb_range (bb, r);
425 // Return a pointer to the ssa_block_cache for NAME. If it has not been
426 // accessed yet, return NULL.
428 inline ssa_block_ranges *
429 block_range_cache::query_block_ranges (tree name)
431 unsigned v = SSA_NAME_VERSION (name);
432 if (v >= m_ssa_ranges.length () || !m_ssa_ranges[v])
433 return NULL;
434 return m_ssa_ranges[v];
439 // Return the range for NAME on entry to BB in R. Return true if there
440 // is one.
442 bool
443 block_range_cache::get_bb_range (vrange &r, tree name, const_basic_block bb)
445 ssa_block_ranges *ptr = query_block_ranges (name);
446 if (ptr)
447 return ptr->get_bb_range (r, bb);
448 return false;
451 // Return true if NAME has a range set in block BB.
453 bool
454 block_range_cache::bb_range_p (tree name, const_basic_block bb)
456 ssa_block_ranges *ptr = query_block_ranges (name);
457 if (ptr)
458 return ptr->bb_range_p (bb);
459 return false;
462 // Print all known block caches to file F.
464 void
465 block_range_cache::dump (FILE *f)
467 unsigned x;
468 for (x = 1; x < m_ssa_ranges.length (); ++x)
470 if (m_ssa_ranges[x])
472 fprintf (f, " Ranges for ");
473 print_generic_expr (f, ssa_name (x), TDF_NONE);
474 fprintf (f, ":\n");
475 m_ssa_ranges[x]->dump (f);
476 fprintf (f, "\n");
481 // Print all known ranges on entry to block BB to file F.
483 void
484 block_range_cache::dump (FILE *f, basic_block bb, bool print_varying)
486 unsigned x;
487 bool summarize_varying = false;
488 for (x = 1; x < m_ssa_ranges.length (); ++x)
490 if (!m_ssa_ranges[x])
491 continue;
493 if (!gimple_range_ssa_p (ssa_name (x)))
494 continue;
496 value_range r (TREE_TYPE (ssa_name (x)));
497 if (m_ssa_ranges[x]->get_bb_range (r, bb))
499 if (!print_varying && r.varying_p ())
501 summarize_varying = true;
502 continue;
504 print_generic_expr (f, ssa_name (x), TDF_NONE);
505 fprintf (f, "\t");
506 r.dump(f);
507 fprintf (f, "\n");
510 // If there were any varying entries, lump them all together.
511 if (summarize_varying)
513 fprintf (f, "VARYING_P on entry : ");
514 for (x = 1; x < m_ssa_ranges.length (); ++x)
516 if (!m_ssa_ranges[x])
517 continue;
519 if (!gimple_range_ssa_p (ssa_name (x)))
520 continue;
522 value_range r (TREE_TYPE (ssa_name (x)));
523 if (m_ssa_ranges[x]->get_bb_range (r, bb))
525 if (r.varying_p ())
527 print_generic_expr (f, ssa_name (x), TDF_NONE);
528 fprintf (f, " ");
532 fprintf (f, "\n");
536 // -------------------------------------------------------------------------
538 // Initialize an ssa cache.
540 ssa_cache::ssa_cache ()
542 m_tab.create (0);
543 m_range_allocator = new vrange_allocator;
546 // Deconstruct an ssa cache.
548 ssa_cache::~ssa_cache ()
550 m_tab.release ();
551 delete m_range_allocator;
554 // Enable a query to evaluate statements/ranges based on picking up ranges
555 // from just an ssa-cache.
557 bool
558 ssa_cache::range_of_expr (vrange &r, tree expr, gimple *stmt)
560 if (!gimple_range_ssa_p (expr))
561 return get_tree_range (r, expr, stmt);
563 if (!get_range (r, expr))
564 gimple_range_global (r, expr, cfun);
565 return true;
568 // Return TRUE if the global range of NAME has a cache entry.
570 bool
571 ssa_cache::has_range (tree name) const
573 unsigned v = SSA_NAME_VERSION (name);
574 if (v >= m_tab.length ())
575 return false;
576 return m_tab[v] != NULL;
579 // Retrieve the global range of NAME from cache memory if it exists.
580 // Return the value in R.
582 bool
583 ssa_cache::get_range (vrange &r, tree name) const
585 unsigned v = SSA_NAME_VERSION (name);
586 if (v >= m_tab.length ())
587 return false;
589 vrange_storage *stow = m_tab[v];
590 if (!stow)
591 return false;
592 stow->get_vrange (r, TREE_TYPE (name));
593 return true;
596 // Set the range for NAME to R in the ssa cache.
597 // Return TRUE if there was already a range set, otherwise false.
599 bool
600 ssa_cache::set_range (tree name, const vrange &r)
602 unsigned v = SSA_NAME_VERSION (name);
603 if (v >= m_tab.length ())
604 m_tab.safe_grow_cleared (num_ssa_names + 1);
606 vrange_storage *m = m_tab[v];
607 if (m && m->fits_p (r))
608 m->set_vrange (r);
609 else
610 m_tab[v] = m_range_allocator->clone (r);
611 return m != NULL;
614 // If NAME has a range, intersect it with R, otherwise set it to R.
615 // Return TRUE if the range is new or changes.
617 bool
618 ssa_cache::merge_range (tree name, const vrange &r)
620 unsigned v = SSA_NAME_VERSION (name);
621 if (v >= m_tab.length ())
622 m_tab.safe_grow_cleared (num_ssa_names + 1);
624 vrange_storage *m = m_tab[v];
625 // Check if this is a new value.
626 if (!m)
627 m_tab[v] = m_range_allocator->clone (r);
628 else
630 value_range curr (TREE_TYPE (name));
631 m->get_vrange (curr, TREE_TYPE (name));
632 // If there is no change, return false.
633 if (!curr.intersect (r))
634 return false;
636 if (m->fits_p (curr))
637 m->set_vrange (curr);
638 else
639 m_tab[v] = m_range_allocator->clone (curr);
641 return true;
644 // Set the range for NAME to R in the ssa cache.
646 void
647 ssa_cache::clear_range (tree name)
649 unsigned v = SSA_NAME_VERSION (name);
650 if (v >= m_tab.length ())
651 return;
652 m_tab[v] = NULL;
655 // Clear the ssa cache.
657 void
658 ssa_cache::clear ()
660 if (m_tab.address ())
661 memset (m_tab.address(), 0, m_tab.length () * sizeof (vrange *));
664 // Dump the contents of the ssa cache to F.
666 void
667 ssa_cache::dump (FILE *f)
669 for (unsigned x = 1; x < num_ssa_names; x++)
671 if (!gimple_range_ssa_p (ssa_name (x)))
672 continue;
673 value_range r (TREE_TYPE (ssa_name (x)));
674 // Dump all non-varying ranges.
675 if (get_range (r, ssa_name (x)) && !r.varying_p ())
677 print_generic_expr (f, ssa_name (x), TDF_NONE);
678 fprintf (f, " : ");
679 r.dump (f);
680 fprintf (f, "\n");
686 // Construct an ssa_lazy_cache. If OB is specified, use it, otherwise use
687 // a local bitmap obstack.
689 ssa_lazy_cache::ssa_lazy_cache (bitmap_obstack *ob)
691 if (!ob)
693 bitmap_obstack_initialize (&m_bitmaps);
694 m_ob = &m_bitmaps;
696 else
697 m_ob = ob;
698 active_p = BITMAP_ALLOC (m_ob);
701 // Destruct an ssa_lazy_cache. Free the bitmap if it came from a different
702 // obstack, or release the obstack if it was a local one.
704 ssa_lazy_cache::~ssa_lazy_cache ()
706 if (m_ob == &m_bitmaps)
707 bitmap_obstack_release (&m_bitmaps);
708 else
709 BITMAP_FREE (active_p);
712 // Return true if NAME has an active range in the cache.
714 bool
715 ssa_lazy_cache::has_range (tree name) const
717 return bitmap_bit_p (active_p, SSA_NAME_VERSION (name));
720 // Set range of NAME to R in a lazy cache. Return FALSE if it did not already
721 // have a range.
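// Note bitmap_set_bit returns true only when the bit was previously clear,
// so a false result below indicates an entry already exists.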
723 bool
724 ssa_lazy_cache::set_range (tree name, const vrange &r)
726 unsigned v = SSA_NAME_VERSION (name);
727 if (!bitmap_set_bit (active_p, v))
729 // There is already an entry, simply set it.
730 gcc_checking_assert (v < m_tab.length ());
731 return ssa_cache::set_range (name, r);
733 if (v >= m_tab.length ())
734 m_tab.safe_grow (num_ssa_names + 1);
735 m_tab[v] = m_range_allocator->clone (r);
736 return false;
739 // If NAME has a range, intersect it with R, otherwise set it to R.
740 // Return TRUE if the range is new or changes.
742 bool
743 ssa_lazy_cache::merge_range (tree name, const vrange &r)
745 unsigned v = SSA_NAME_VERSION (name);
746 if (!bitmap_set_bit (active_p, v))
748 // There is already an entry, simply merge it.
749 gcc_checking_assert (v < m_tab.length ());
750 return ssa_cache::merge_range (name, r);
752 if (v >= m_tab.length ())
753 m_tab.safe_grow (num_ssa_names + 1);
754 m_tab[v] = m_range_allocator->clone (r);
755 return true;
758 // Merge all elements of CACHE with this cache.
759 // Any names in CACHE that are not in this one are added.
760 // Any names in both are merged via merge_range.
762 void
763 ssa_lazy_cache::merge (const ssa_lazy_cache &cache)
765 unsigned x;
766 bitmap_iterator bi;
767 EXECUTE_IF_SET_IN_BITMAP (cache.active_p, 0, x, bi)
769 tree name = ssa_name (x);
770 value_range r(TREE_TYPE (name));
771 cache.get_range (r, name);
772 merge_range (ssa_name (x), r);
776 // Return TRUE if NAME has a range, and return it in R.
778 bool
779 ssa_lazy_cache::get_range (vrange &r, tree name) const
781 if (!bitmap_bit_p (active_p, SSA_NAME_VERSION (name)))
782 return false;
783 return ssa_cache::get_range (r, name);
786 // Remove NAME from the active range list.
788 void
789 ssa_lazy_cache::clear_range (tree name)
791 bitmap_clear_bit (active_p, SSA_NAME_VERSION (name));
794 // Remove all ranges from the active range list.
796 void
797 ssa_lazy_cache::clear ()
799 bitmap_clear (active_p);
802 // --------------------------------------------------------------------------
805 // This class will manage the timestamps for each ssa_name.
806 // When a value is calculated, the timestamp is set to the current time.
807 // Current time is then incremented. Any dependencies will already have
808 // been calculated, and will thus have older timestamps.
809 // If one of those values is ever calculated again, it will get a newer
810 // timestamp, and the "current_p" check will fail.
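// For example, if c_3 = a_1 + b_2 is evaluated at time 10 while a_1 and
// b_2 carry timestamps 7 and 8, c_3 is current. Recalculating a_1 later
// gives it a newer timestamp than 10, so c_3 is then considered stale.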
812 class temporal_cache
814 public:
815 temporal_cache ();
816 ~temporal_cache ();
817 bool current_p (tree name, tree dep1, tree dep2) const;
818 void set_timestamp (tree name);
819 void set_always_current (tree name, bool value);
820 bool always_current_p (tree name) const;
821 private:
822 int temporal_value (unsigned ssa) const;
823 int m_current_time;
824 vec <int> m_timestamp;
827 inline
828 temporal_cache::temporal_cache ()
830 m_current_time = 1;
831 m_timestamp.create (0);
832 m_timestamp.safe_grow_cleared (num_ssa_names);
835 inline
836 temporal_cache::~temporal_cache ()
838 m_timestamp.release ();
841 // Return the timestamp value for SSA, or 0 if there isn't one.
843 inline int
844 temporal_cache::temporal_value (unsigned ssa) const
846 if (ssa >= m_timestamp.length ())
847 return 0;
848 return abs (m_timestamp[ssa]);
851 // Return TRUE if the timestamp for NAME is newer than any of its dependencies.
852 // Up to 2 dependencies can be checked.
854 bool
855 temporal_cache::current_p (tree name, tree dep1, tree dep2) const
857 if (always_current_p (name))
858 return true;
860 // Any non-registered dependencies will have a value of 0 and thus be older.
861 // Return true if time is newer than either dependent.
862 int ts = temporal_value (SSA_NAME_VERSION (name));
863 if (dep1 && ts < temporal_value (SSA_NAME_VERSION (dep1)))
864 return false;
865 if (dep2 && ts < temporal_value (SSA_NAME_VERSION (dep2)))
866 return false;
868 return true;
871 // This increments the global timer and sets the timestamp for NAME.
873 inline void
874 temporal_cache::set_timestamp (tree name)
876 unsigned v = SSA_NAME_VERSION (name);
877 if (v >= m_timestamp.length ())
878 m_timestamp.safe_grow_cleared (num_ssa_names + 20);
879 m_timestamp[v] = ++m_current_time;
884 // Negate NAME's timestamp to mark it as always current when VALUE is true, or make it positive again when VALUE is false.
884 inline void
885 temporal_cache::set_always_current (tree name, bool value)
887 unsigned v = SSA_NAME_VERSION (name);
888 if (v >= m_timestamp.length ())
889 m_timestamp.safe_grow_cleared (num_ssa_names + 20);
891 int ts = abs (m_timestamp[v]);
892 // If this does not have a timestamp, create one.
893 if (ts == 0)
894 ts = ++m_current_time;
895 m_timestamp[v] = value ? -ts : ts;
898 // Return true if NAME is always current.
900 inline bool
901 temporal_cache::always_current_p (tree name) const
903 unsigned v = SSA_NAME_VERSION (name);
904 if (v >= m_timestamp.length ())
905 return false;
906 return m_timestamp[v] <= 0;
909 // --------------------------------------------------------------------------
911 // This class provides an abstraction of a list of blocks to be updated
912 // by the cache. It is currently a stack but could be changed. It also
913 // maintains a list of blocks which have failed propagation, and does not
914 // enter any of those blocks into the list.
916 // A vector over the BBs is maintained, and an entry of 0 means it is not in
917 // a list. Otherwise, the entry is the next block in the list. -1 terminates
918 // the list. m_head points to the top of the list, -1 if the list is empty.
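// For example, after add (5) followed by add (9) the list is 9 -> 5:
// m_update_head is 9, m_update_list[9] is 5, and m_update_list[5] is -1.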
920 class update_list
922 public:
923 update_list ();
924 ~update_list ();
925 void add (basic_block bb);
926 basic_block pop ();
927 inline bool empty_p () { return m_update_head == -1; }
928 inline void clear_failures () { bitmap_clear (m_propfail); }
929 inline void propagation_failed (basic_block bb)
930 { bitmap_set_bit (m_propfail, bb->index); }
931 private:
932 vec<int> m_update_list;
933 int m_update_head;
934 bitmap m_propfail;
935 bitmap_obstack m_bitmaps;
938 // Create an update list.
940 update_list::update_list ()
942 m_update_list.create (0);
943 m_update_list.safe_grow_cleared (last_basic_block_for_fn (cfun) + 64);
944 m_update_head = -1;
945 bitmap_obstack_initialize (&m_bitmaps);
946 m_propfail = BITMAP_ALLOC (&m_bitmaps);
949 // Destroy an update list.
951 update_list::~update_list ()
953 m_update_list.release ();
954 bitmap_obstack_release (&m_bitmaps);
957 // Add BB to the list of blocks to update, unless it's already in the list.
959 void
960 update_list::add (basic_block bb)
962 int i = bb->index;
963 // If propagation has failed for BB, or it's already in the list, don't
964 // add it again.
965 if ((unsigned)i >= m_update_list.length ())
966 m_update_list.safe_grow_cleared (i + 64);
967 if (!m_update_list[i] && !bitmap_bit_p (m_propfail, i))
969 if (empty_p ())
971 m_update_head = i;
972 m_update_list[i] = -1;
974 else
976 gcc_checking_assert (m_update_head > 0);
977 m_update_list[i] = m_update_head;
978 m_update_head = i;
983 // Remove a block from the list.
985 basic_block
986 update_list::pop ()
988 gcc_checking_assert (!empty_p ());
989 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, m_update_head);
990 int pop = m_update_head;
991 m_update_head = m_update_list[pop];
992 m_update_list[pop] = 0;
993 return bb;
996 // --------------------------------------------------------------------------
998 ranger_cache::ranger_cache (int not_executable_flag, bool use_imm_uses)
1000 m_workback = vNULL;
1001 m_temporal = new temporal_cache;
1003 // If DOM info is available, spawn an oracle as well.
1004 create_relation_oracle ();
1005 // Create an infer oracle using this cache as the range query. The cache
1006 // version acts as a read-only query, and will spawn no additional lookups.
1007 // It just uses what is already known.
1008 create_infer_oracle (this, use_imm_uses);
1009 create_gori (not_executable_flag, param_vrp_switch_limit);
1011 unsigned x, lim = last_basic_block_for_fn (cfun);
1012 // Calculate outgoing range info upfront. This will fully populate the
1013 // m_maybe_variant bitmap which will help eliminate processing of names
1014 // which never have their ranges adjusted.
1015 for (x = 0; x < lim ; x++)
1017 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, x);
1018 if (bb)
1019 gori_ssa ()->exports (bb);
1021 m_update = new update_list ();
1024 ranger_cache::~ranger_cache ()
1026 delete m_update;
1027 destroy_infer_oracle ();
1028 destroy_relation_oracle ();
1029 delete m_temporal;
1030 m_workback.release ();
1033 // Dump the global caches to file F.
1036 void
1037 ranger_cache::dump (FILE *f)
1039 fprintf (f, "Non-varying global ranges:\n");
1040 fprintf (f, "=========================:\n");
1041 m_globals.dump (f);
1042 fprintf (f, "\n");
1045 // Dump the caches for basic block BB to file F.
1047 void
1048 ranger_cache::dump_bb (FILE *f, basic_block bb)
1050 gori_ssa ()->dump (f, bb, false);
1051 m_on_entry.dump (f, bb);
1052 m_relation->dump (f, bb);
1055 // Get the global range for NAME, and return in R. Return false if the
1056 // global range is not set, and return the legacy global value in R.
1058 bool
1059 ranger_cache::get_global_range (vrange &r, tree name) const
1061 if (m_globals.get_range (r, name))
1062 return true;
1063 gimple_range_global (r, name);
1064 return false;
1067 // Get the global range for NAME, and return in R. Return false if the
1068 // global range is not set, and R will contain the legacy global value.
1069 // CURRENT_P is set to true if the value was in cache and not stale.
1070 // Otherwise, set CURRENT_P to false and mark it as always current.
1071 // If the global cache did not have a value, initialize it as well.
1072 // After this call, the global cache will have a value.
1074 bool
1075 ranger_cache::get_global_range (vrange &r, tree name, bool &current_p)
1077 bool had_global = get_global_range (r, name);
1079 // If there was a global value, set current flag, otherwise set a value.
1080 current_p = false;
1081 if (had_global)
1082 current_p = r.singleton_p ()
1083 || m_temporal->current_p (name, gori_ssa ()->depend1 (name),
1084 gori_ssa ()->depend2 (name));
1085 else
1087 // If no global value has been set and value is VARYING, fold the stmt
1088 // using just global ranges to get a better initial value.
1089 // After inlining we tend to decide some things are constant, so
1090 // do not do this evaluation after inlining.
1091 if (r.varying_p () && !cfun->after_inlining)
1093 gimple *s = SSA_NAME_DEF_STMT (name);
1094 // Do not process PHIs as SCEV may be in use and it can
1095 // spawn cyclic lookups.
1096 if (gimple_get_lhs (s) == name && !is_a<gphi *> (s))
1098 if (!fold_range (r, s, get_global_range_query ()))
1099 gimple_range_global (r, name);
1102 m_globals.set_range (name, r);
1105 // If the existing value was not current, mark it as always current.
1106 if (!current_p)
1107 m_temporal->set_always_current (name, true);
1108 return had_global;
1111 // Set the global range of NAME to R and give it a timestamp.
1113 void
1114 ranger_cache::set_global_range (tree name, const vrange &r, bool changed)
1116 // Setting a range always clears the always_current flag.
1117 m_temporal->set_always_current (name, false);
1118 if (!changed)
1120 // If there are dependencies, make sure this is not out of date.
1121 if (!m_temporal->current_p (name, gori_ssa ()->depend1 (name),
1122 gori_ssa ()->depend2 (name)))
1123 m_temporal->set_timestamp (name);
1124 return;
1126 if (m_globals.set_range (name, r))
1128 // If there was already a range set, propagate the new value.
1129 basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (name));
1130 if (!bb)
1131 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1133 if (DEBUG_RANGE_CACHE)
1134 fprintf (dump_file, " GLOBAL :");
1136 propagate_updated_value (name, bb);
1138 // Constants no longer need to be tracked. Any further refinement has to be
1139 // undefined. Propagation works better with constants. PR 100512.
1140 // Pointers which resolve to non-zero also do not need
1141 // tracking in the cache as they will never change. See PR 98866.
1142 // Timestamp must always be updated, or dependent calculations may
1143 // not include this latest value. PR 100774.
1145 if (r.singleton_p ()
1146 || (POINTER_TYPE_P (TREE_TYPE (name)) && r.nonzero_p ()))
1147 gori_ssa ()->set_range_invariant (name);
1148 m_temporal->set_timestamp (name);
1151 // Provide lookup for the gori-computes class to access the best known range
1152 // of an ssa_name in any given basic block. Note, this does no additional
1153 // lookups, just accesses the data that is already known.
1155 // Get the range of NAME when the def occurs in block BB. If BB is NULL
1156 // get the best global value available.
1158 void
1159 ranger_cache::range_of_def (vrange &r, tree name, basic_block bb)
1161 gcc_checking_assert (gimple_range_ssa_p (name));
1162 gcc_checking_assert (!bb || bb == gimple_bb (SSA_NAME_DEF_STMT (name)));
1164 // Pick up the best global range available.
1165 if (!m_globals.get_range (r, name))
1167 // If that fails, try to calculate the range using just global values.
1168 gimple *s = SSA_NAME_DEF_STMT (name);
1169 if (gimple_get_lhs (s) == name)
1170 fold_range (r, s, get_global_range_query ());
1171 else
1172 gimple_range_global (r, name);
1176 // Get the range of NAME as it occurs on entry to block BB. Use MODE for
1177 // lookups.
1179 void
1180 ranger_cache::entry_range (vrange &r, tree name, basic_block bb,
1181 enum rfd_mode mode)
1183 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1185 gimple_range_global (r, name);
1186 return;
1189 // If NAME is invariant, simply return the defining range.
1190 if (!gori ().has_edge_range_p (name))
1192 range_of_def (r, name);
1193 return;
1196 // Look for the on-entry value of name in BB from the cache.
1197 // Otherwise pick up the best available global value.
1198 if (!m_on_entry.get_bb_range (r, name, bb))
1199 if (!range_from_dom (r, name, bb, mode))
1200 range_of_def (r, name);
1203 // Get the range of NAME as it occurs on exit from block BB. Use MODE for
1204 // lookups.
1206 void
1207 ranger_cache::exit_range (vrange &r, tree name, basic_block bb,
1208 enum rfd_mode mode)
1210 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1212 gimple_range_global (r, name);
1213 return;
1216 gimple *s = SSA_NAME_DEF_STMT (name);
1217 basic_block def_bb = gimple_bb (s);
1218 if (def_bb == bb)
1219 range_of_def (r, name, bb);
1220 else
1221 entry_range (r, name, bb, mode);
1224 // Get the range of NAME on edge E using MODE, return the result in R.
1225 // Always returns a range and true.
1227 bool
1228 ranger_cache::edge_range (vrange &r, edge e, tree name, enum rfd_mode mode)
1230 exit_range (r, name, e->src, mode);
1231 // If this is not an abnormal edge, check for inferred ranges on exit.
1232 if ((e->flags & (EDGE_EH | EDGE_ABNORMAL)) == 0)
1233 infer_oracle ().maybe_adjust_range (r, name, e->src);
1234 value_range er (TREE_TYPE (name));
1235 if (gori ().edge_range_p (er, e, name, *this))
1236 r.intersect (er);
1237 return true;
1242 // Implement range_of_expr.
1244 bool
1245 ranger_cache::range_of_expr (vrange &r, tree name, gimple *stmt)
1247 if (!gimple_range_ssa_p (name))
1249 get_tree_range (r, name, stmt);
1250 return true;
1253 basic_block bb = gimple_bb (stmt);
1254 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1255 basic_block def_bb = gimple_bb (def_stmt);
1257 if (bb == def_bb)
1258 range_of_def (r, name, bb);
1259 else
1260 entry_range (r, name, bb, RFD_NONE);
1261 return true;
1265 // Implement range_on_edge. Always return the best available range using
1266 // the current cache values.
1268 bool
1269 ranger_cache::range_on_edge (vrange &r, edge e, tree expr)
1271 if (gimple_range_ssa_p (expr))
1272 return edge_range (r, e, expr, RFD_NONE);
1273 return get_tree_range (r, expr, NULL);
1276 // Return a static range for NAME on entry to basic block BB in R. If
1277 // calc is true, fill any cache entries required between BB and the
1278 // def block for NAME. Otherwise, return false if the cache is empty.
1280 bool
1281 ranger_cache::block_range (vrange &r, basic_block bb, tree name, bool calc)
1283 gcc_checking_assert (gimple_range_ssa_p (name));
1285 // If there are no range calculations anywhere in the IL, global range
1286 // applies everywhere, so don't bother caching it.
1287 if (!gori ().has_edge_range_p (name))
1288 return false;
1290 if (calc)
1292 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1293 basic_block def_bb = NULL;
1294 if (def_stmt)
1295 def_bb = gimple_bb (def_stmt);
1296 if (!def_bb)
1298 // If we get to the entry block, this better be a default def
1299 // or range_on_entry was called for a block not dominated by
1300 // the def. But it could be also SSA_NAME defined by a statement
1301 // not yet in the IL (such as queued edge insertion), in that case
1302 // just punt.
1303 if (!SSA_NAME_IS_DEFAULT_DEF (name))
1304 return false;
1305 def_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1308 // There is no range on entry for the definition block.
1309 if (def_bb == bb)
1310 return false;
1312 // Otherwise, go figure out what is known in predecessor blocks.
1313 fill_block_cache (name, bb, def_bb);
1314 gcc_checking_assert (m_on_entry.bb_range_p (name, bb));
1316 return m_on_entry.get_bb_range (r, name, bb);
1319 // If there is anything in the propagation update_list, continue
1320 // processing NAME until the list of blocks is empty.
1322 void
1323 ranger_cache::propagate_cache (tree name)
1325 basic_block bb;
1326 edge_iterator ei;
1327 edge e;
1328 tree type = TREE_TYPE (name);
1329 value_range new_range (type);
1330 value_range current_range (type);
1331 value_range e_range (type);
1333 // Process each block by seeing if its calculated range on entry is
1334 // the same as its cached value. If there is a difference, update
1335 // the cache to reflect the new value, and check to see if any
1336 // successors have cache entries which may need to be checked for
1337 // updates.
1339 while (!m_update->empty_p ())
1341 bb = m_update->pop ();
1342 gcc_checking_assert (m_on_entry.bb_range_p (name, bb));
1343 m_on_entry.get_bb_range (current_range, name, bb);
1345 if (DEBUG_RANGE_CACHE)
1347 fprintf (dump_file, "FWD visiting block %d for ", bb->index);
1348 print_generic_expr (dump_file, name, TDF_SLIM);
1349 fprintf (dump_file, " starting range : ");
1350 current_range.dump (dump_file);
1351 fprintf (dump_file, "\n");
1354 // Calculate the "new" range on entry by unioning the pred edges.
1355 new_range.set_undefined ();
1356 FOR_EACH_EDGE (e, ei, bb->preds)
1358 edge_range (e_range, e, name, RFD_READ_ONLY);
1359 if (DEBUG_RANGE_CACHE)
1361 fprintf (dump_file, " edge %d->%d :", e->src->index, bb->index);
1362 e_range.dump (dump_file);
1363 fprintf (dump_file, "\n");
1365 new_range.union_ (e_range);
1366 if (new_range.varying_p ())
1367 break;
1370 // If the range on entry has changed, update it.
1371 if (new_range != current_range)
1373 bool ok_p = m_on_entry.set_bb_range (name, bb, new_range);
1374 // If the cache couldn't set the value, mark it as failed.
1375 if (!ok_p)
1376 m_update->propagation_failed (bb);
1377 if (DEBUG_RANGE_CACHE)
1379 if (!ok_p)
1381 fprintf (dump_file, " Cache failure to store value:");
1382 print_generic_expr (dump_file, name, TDF_SLIM);
1383 fprintf (dump_file, " ");
1385 else
1387 fprintf (dump_file, " Updating range to ");
1388 new_range.dump (dump_file);
1390 fprintf (dump_file, "\n Updating blocks :");
1392 // Mark each successor that has a range to re-check its range
1393 FOR_EACH_EDGE (e, ei, bb->succs)
1394 if (m_on_entry.bb_range_p (name, e->dest))
1396 if (DEBUG_RANGE_CACHE)
1397 fprintf (dump_file, " bb%d",e->dest->index);
1398 m_update->add (e->dest);
1400 if (DEBUG_RANGE_CACHE)
1401 fprintf (dump_file, "\n");
1404 if (DEBUG_RANGE_CACHE)
1406 fprintf (dump_file, "DONE visiting blocks for ");
1407 print_generic_expr (dump_file, name, TDF_SLIM);
1408 fprintf (dump_file, "\n");
1410 m_update->clear_failures ();
1413 // Check to see if an update to the value for NAME in BB has any effect
1414 // on values already in the on-entry cache for successor blocks.
1415 // If it does, update them. Don't visit any blocks which don't have a cache
1416 // entry.
1418 void
1419 ranger_cache::propagate_updated_value (tree name, basic_block bb)
1421 edge e;
1422 edge_iterator ei;
1424 // The update work list should be empty at this point.
1425 gcc_checking_assert (m_update->empty_p ());
1426 gcc_checking_assert (bb);
1428 if (DEBUG_RANGE_CACHE)
1430 fprintf (dump_file, " UPDATE cache for ");
1431 print_generic_expr (dump_file, name, TDF_SLIM);
1432 fprintf (dump_file, " in BB %d : successors : ", bb->index);
1434 FOR_EACH_EDGE (e, ei, bb->succs)
1436 // Only update active cache entries.
1437 if (m_on_entry.bb_range_p (name, e->dest))
1439 m_update->add (e->dest);
1440 if (DEBUG_RANGE_CACHE)
1441 fprintf (dump_file, " UPDATE: bb%d", e->dest->index);
1444 if (!m_update->empty_p ())
1446 if (DEBUG_RANGE_CACHE)
1447 fprintf (dump_file, "\n");
1448 propagate_cache (name);
1450 else
1452 if (DEBUG_RANGE_CACHE)
1453 fprintf (dump_file, " : No updates!\n");
1457 // Make sure that the range-on-entry cache for NAME is set for block BB.
1458 // Work back through the CFG to DEF_BB ensuring the range is calculated
1459 // on the block/edges leading back to that point.
1461 void
1462 ranger_cache::fill_block_cache (tree name, basic_block bb, basic_block def_bb)
1464 edge_iterator ei;
1465 edge e;
1466 tree type = TREE_TYPE (name);
1467 value_range block_result (type);
1468 value_range undefined (type);
1470 // At this point we shouldn't be looking at the def, entry block.
1471 gcc_checking_assert (bb != def_bb && bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));
1472 unsigned start_length = m_workback.length ();
1474 // If the block cache is set, then we've already visited this block.
1475 if (m_on_entry.bb_range_p (name, bb))
1476 return;
1478 if (DEBUG_RANGE_CACHE)
1480 fprintf (dump_file, "\n");
1481 print_generic_expr (dump_file, name, TDF_SLIM);
1482 fprintf (dump_file, " : ");
1485 // Check if a dominator can supply the range.
1486 if (range_from_dom (block_result, name, bb, RFD_FILL))
1488 if (DEBUG_RANGE_CACHE)
1490 fprintf (dump_file, "Filled from dominator! : ");
1491 block_result.dump (dump_file);
1492 fprintf (dump_file, "\n");
1494 // See if any equivalences can refine it.
1495 // PR 109462, like 108139 below, a one way equivalence introduced
1496 // by a PHI node can also be through the definition side. Disallow it.
1497 tree equiv_name;
1498 relation_kind rel;
1499 int prec = TYPE_PRECISION (type);
1500 // If there are too many basic blocks, do not attempt to process
1501 // equivalencies.
1502 if (last_basic_block_for_fn (cfun) > param_vrp_sparse_threshold)
1504 m_on_entry.set_bb_range (name, bb, block_result);
1505 gcc_checking_assert (m_workback.length () == start_length);
1506 return;
1508 FOR_EACH_PARTIAL_AND_FULL_EQUIV (m_relation, bb, name, equiv_name, rel)
1510 basic_block equiv_bb = gimple_bb (SSA_NAME_DEF_STMT (equiv_name));
1512 // Ignore partial equivs that are smaller than this object.
1513 if (rel != VREL_EQ && prec > pe_to_bits (rel))
1514 continue;
1516 // Check if the equiv has any ranges calculated.
1517 if (!gori ().has_edge_range_p (equiv_name))
1518 continue;
1520 // Check if the equiv definition dominates this block
1521 if (equiv_bb == bb ||
1522 (equiv_bb && !dominated_by_p (CDI_DOMINATORS, bb, equiv_bb)))
1523 continue;
1525 if (DEBUG_RANGE_CACHE)
1527 if (rel == VREL_EQ)
1528 fprintf (dump_file, "Checking Equivalence (");
1529 else
1530 fprintf (dump_file, "Checking Partial equiv (");
1531 print_relation (dump_file, rel);
1532 fprintf (dump_file, ") ");
1533 print_generic_expr (dump_file, equiv_name, TDF_SLIM);
1534 fprintf (dump_file, "\n");
1536 value_range equiv_range (TREE_TYPE (equiv_name));
1537 if (range_from_dom (equiv_range, equiv_name, bb, RFD_READ_ONLY))
1539 if (rel != VREL_EQ)
1540 range_cast (equiv_range, type);
1541 else
1542 adjust_equivalence_range (equiv_range);
1544 if (block_result.intersect (equiv_range))
1546 if (DEBUG_RANGE_CACHE)
1548 if (rel == VREL_EQ)
1549 fprintf (dump_file, "Equivalence update! : ");
1550 else
1551 fprintf (dump_file, "Partial equiv update! : ");
1552 print_generic_expr (dump_file, equiv_name, TDF_SLIM);
1553 fprintf (dump_file, " has range : ");
1554 equiv_range.dump (dump_file);
1555 fprintf (dump_file, " refining range to :");
1556 block_result.dump (dump_file);
1557 fprintf (dump_file, "\n");
1563 m_on_entry.set_bb_range (name, bb, block_result);
1564 gcc_checking_assert (m_workback.length () == start_length);
1565 return;
1568 // Visit each block back to the DEF. Initialize each one to UNDEFINED.
1569 // m_visited at the end will contain all the blocks that we needed to set
1570 // the range_on_entry cache for.
1571 m_workback.safe_push (bb);
1572 undefined.set_undefined ();
1573 m_on_entry.set_bb_range (name, bb, undefined);
1574 gcc_checking_assert (m_update->empty_p ());
1576 while (m_workback.length () > start_length)
1578 basic_block node = m_workback.pop ();
1579 if (DEBUG_RANGE_CACHE)
1581 fprintf (dump_file, "BACK visiting block %d for ", node->index);
1582 print_generic_expr (dump_file, name, TDF_SLIM);
1583 fprintf (dump_file, "\n");
1586 FOR_EACH_EDGE (e, ei, node->preds)
1588 basic_block pred = e->src;
1589 value_range r (TREE_TYPE (name));
1591 if (DEBUG_RANGE_CACHE)
1592 fprintf (dump_file, " %d->%d ",e->src->index, e->dest->index);
1594 // If the pred block is the def block add this BB to update list.
1595 if (pred == def_bb)
1597 m_update->add (node);
1598 continue;
1601 // If the pred is entry but NOT def, then it is used before
1602 // defined, it'll get set to [] and no need to update it.
1603 if (pred == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1605 if (DEBUG_RANGE_CACHE)
1606 fprintf (dump_file, "entry: bail.");
1607 continue;
1610 // Regardless of whether we have visited pred or not, if the
1611 // pred has inferred ranges, revisit this block.
1612 // Don't search the DOM tree.
1613 if (infer_oracle ().has_range_p (pred, name))
1615 if (DEBUG_RANGE_CACHE)
1616 fprintf (dump_file, "Inferred range: update ");
1617 m_update->add (node);
1620 // If the pred block already has a range, or if it can contribute
1621 // something new. Ie, the edge generates a range of some sort.
1622 if (m_on_entry.get_bb_range (r, name, pred))
1624 if (DEBUG_RANGE_CACHE)
1626 fprintf (dump_file, "has cache, ");
1627 r.dump (dump_file);
1628 fprintf (dump_file, ", ");
1630 if (!r.undefined_p () || gori ().has_edge_range_p (name, e))
1632 m_update->add (node);
1633 if (DEBUG_RANGE_CACHE)
1634 fprintf (dump_file, "update. ");
1636 continue;
1639 if (DEBUG_RANGE_CACHE)
1640 fprintf (dump_file, "pushing undefined pred block.\n");
1641 // If the pred hasn't been visited (has no range), add it to
1642 // the list.
1643 gcc_checking_assert (!m_on_entry.bb_range_p (name, pred));
1644 m_on_entry.set_bb_range (name, pred, undefined);
1645 m_workback.safe_push (pred);
1649 if (DEBUG_RANGE_CACHE)
1650 fprintf (dump_file, "\n");
1652 // Now fill in the marked blocks with values.
1653 propagate_cache (name);
1654 if (DEBUG_RANGE_CACHE)
1655 fprintf (dump_file, " Propagation update done.\n");
1658 // Resolve the range of BB if the dominator's range is R by calculating incoming
1659 // edges to this block. All lead back to the dominator so should be cheap.
1660 // The range for BB is set and returned in R.
1662 void
1663 ranger_cache::resolve_dom (vrange &r, tree name, basic_block bb)
1665 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (name));
1666 basic_block dom_bb = get_immediate_dominator (CDI_DOMINATORS, bb);
1668 // If it doesn't already have a value, store the incoming range.
1669 if (!m_on_entry.bb_range_p (name, dom_bb) && def_bb != dom_bb)
1671 // If the range can't be stored, don't try to accumulate
1672 // the range in PREV_BB due to excessive recalculations.
1673 if (!m_on_entry.set_bb_range (name, dom_bb, r))
1674 return;
1676 // With the dominator set, we should be able to cheaply query
1677 // each incoming edge now and accumulate the results.
1678 r.set_undefined ();
1679 edge e;
1680 edge_iterator ei;
1681 value_range er (TREE_TYPE (name));
1682 FOR_EACH_EDGE (e, ei, bb->preds)
1684 // If the predecessor is dominated by this block, then there is a back
1685 // edge, and won't provide anything useful. We'll actually end up with
1686 // VARYING as we will not resolve this node.
1687 if (dominated_by_p (CDI_DOMINATORS, e->src, bb))
1688 continue;
1689 edge_range (er, e, name, RFD_READ_ONLY);
1690 r.union_ (er);
1692 // Set the cache in PREV_BB so it is not calculated again.
1693 m_on_entry.set_bb_range (name, bb, r);
1696 // Get the range of NAME from dominators of BB and return it in R. Search the
1697 // dominator tree based on MODE.
1699 bool
1700 ranger_cache::range_from_dom (vrange &r, tree name, basic_block start_bb,
1701 enum rfd_mode mode)
1703 if (mode == RFD_NONE || !dom_info_available_p (CDI_DOMINATORS))
1704 return false;
1706 // Search back to the definition block or entry block.
1707 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (name));
1708 if (def_bb == NULL)
1709 def_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1711 basic_block bb;
1712 basic_block prev_bb = start_bb;
1714 // Track any inferred ranges seen.
1715 value_range infer (TREE_TYPE (name));
1716 infer.set_varying (TREE_TYPE (name));
1718 // Range on entry to the DEF block should not be queried.
1719 gcc_checking_assert (start_bb != def_bb);
1720 unsigned start_limit = m_workback.length ();
1722 // Default value is global range.
1723 get_global_range (r, name);
1725 // The dominator of EXIT_BLOCK doesn't seem to be set, so at least handle
1726 // the common single exit cases.
1727 if (start_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) && single_pred_p (start_bb))
1728 bb = single_pred_edge (start_bb)->src;
1729 else
1730 bb = get_immediate_dominator (CDI_DOMINATORS, start_bb);
1732 // Search until a value is found, pushing blocks which may need calculating.
1733 for ( ; bb; prev_bb = bb, bb = get_immediate_dominator (CDI_DOMINATORS, bb))
1735 // Accumulate any block exit inferred ranges.
1736 infer_oracle ().maybe_adjust_range (infer, name, bb);
1738 // This block has an outgoing range.
1739 if (gori ().has_edge_range_p (name, bb))
1740 m_workback.safe_push (prev_bb);
1741 else
1743 // Normally join blocks don't carry any new range information on
1744 // incoming edges. If the first incoming edge to this block does
1745 // generate a range, calculate the ranges if all incoming edges
1746 // are also dominated by the dominator. (Avoids backedges which
1747 // will break the rule of moving only upward in the dominator tree).
1748 // If the first pred does not generate a range, then we will be
1749 // using the dominator range anyway, so that's all the check needed.
1750 if (EDGE_COUNT (prev_bb->preds) > 1
1751 && gori ().has_edge_range_p (name, EDGE_PRED (prev_bb, 0)->src))
1753 edge e;
1754 edge_iterator ei;
1755 bool all_dom = true;
1756 FOR_EACH_EDGE (e, ei, prev_bb->preds)
1757 if (e->src != bb
1758 && !dominated_by_p (CDI_DOMINATORS, e->src, bb))
1760 all_dom = false;
1761 break;
1763 if (all_dom)
1764 m_workback.safe_push (prev_bb);
1768 if (def_bb == bb)
1769 break;
1771 if (m_on_entry.get_bb_range (r, name, bb))
1772 break;
1775 if (DEBUG_RANGE_CACHE)
1777 fprintf (dump_file, "CACHE: BB %d DOM query for ", start_bb->index);
1778 print_generic_expr (dump_file, name, TDF_SLIM);
1779 fprintf (dump_file, ", found ");
1780 r.dump (dump_file);
1781 if (bb)
1782 fprintf (dump_file, " at BB%d\n", bb->index);
1783 else
1784 fprintf (dump_file, " at function top\n");
1787 // Now process any blocks with incoming edges that may have adjustments.
1788 while (m_workback.length () > start_limit)
1790 value_range er (TREE_TYPE (name));
1791 prev_bb = m_workback.pop ();
1792 if (!single_pred_p (prev_bb))
1794 // Non single pred means we need to cache a value in the dominator
1795 // so we can cheaply calculate incoming edges to this block, and
1796 // then store the resulting value. If processing mode is not
1797 // RFD_FILL, then the cache can't be stored to, so don't try.
1798 // Otherwise this becomes a quadratic timed calculation.
1799 if (mode == RFD_FILL)
1800 resolve_dom (r, name, prev_bb);
1801 continue;
1804 edge e = single_pred_edge (prev_bb);
1805 bb = e->src;
1806 if (gori ().edge_range_p (er, e, name, *this))
1808 r.intersect (er);
1809 // If this is a normal edge, apply any inferred ranges.
1810 if ((e->flags & (EDGE_EH | EDGE_ABNORMAL)) == 0)
1811 infer_oracle ().maybe_adjust_range (r, name, bb);
1813 if (DEBUG_RANGE_CACHE)
1815 fprintf (dump_file, "CACHE: Adjusted edge range for %d->%d : ",
1816 bb->index, prev_bb->index);
1817 r.dump (dump_file);
1818 fprintf (dump_file, "\n");
1823 // Apply non-null if appropriate.
1824 if (!has_abnormal_call_or_eh_pred_edge_p (start_bb))
1825 r.intersect (infer);
1827 if (DEBUG_RANGE_CACHE)
1829 fprintf (dump_file, "CACHE: Range for DOM returns : ");
1830 r.dump (dump_file);
1831 fprintf (dump_file, "\n");
1833 return true;
1836 // This routine will register an inferred value in block BB, and possibly
1837 // update the on-entry cache if appropriate.
1839 void
1840 ranger_cache::register_inferred_value (const vrange &ir, tree name,
1841 basic_block bb)
1843 value_range r (TREE_TYPE (name));
1844 if (!m_on_entry.get_bb_range (r, name, bb))
1845 exit_range (r, name, bb, RFD_READ_ONLY);
1846 if (r.intersect (ir))
1848 m_on_entry.set_bb_range (name, bb, r);
1849 // If this range was invariant before, remove invariant.
1850 if (!gori ().has_edge_range_p (name))
1851 gori_ssa ()->set_range_invariant (name, false);
1855 // This routine is used during a block walk to adjust any inferred ranges
1856 // of operands on stmt S.
1858 void
1859 ranger_cache::apply_inferred_ranges (gimple *s)
1861 bool update = true;
1863 basic_block bb = gimple_bb (s);
1864 gimple_infer_range infer(s);
1865 if (infer.num () == 0)
1866 return;
1868 // Do not update the on-entry cache for block ending stmts.
1869 if (stmt_ends_bb_p (s))
1871 edge_iterator ei;
1872 edge e;
1873 FOR_EACH_EDGE (e, ei, gimple_bb (s)->succs)
1874 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
1875 break;
1876 if (e == NULL)
1877 update = false;
1880 infer_oracle ().add_ranges (s, infer);
1881 if (update)
1882 for (unsigned x = 0; x < infer.num (); x++)
1883 register_inferred_value (infer.range (x), infer.name (x), bb);