//===- LiveInterval.cpp - Live Interval Representation --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveRange and LiveInterval classes. Given some
// numbering of the machine instructions, an interval [i, j) is said to be a
// live range for register v if there is no instruction with number j' >= j
// such that v is live at j', and there is no instruction with number i' < i
// such that v is live at i'. In this implementation ranges can have holes,
// i.e. a range might look like [1,20), [50,65), [1000,1001). Each individual
// segment is represented as an instance of LiveRange::Segment, and the whole
// range is represented as an instance of LiveRange.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveInterval.h"
#include "LiveRangeUtils.h"
#include "RegisterCoalescer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <utility>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Implementation of various methods necessary for calculation of live ranges.
// The implementation of these methods abstracts over the concrete type of the
// segment collection.
//
// The implementation follows the Template Method design pattern, realized with
// the curiously recurring template pattern. The base class contains the
// generic algorithms, which call collection-specific methods provided by the
// concrete subclasses. To avoid virtual calls, these methods are bound by
// means of C++ template instantiation: the base class reaches the subclass
// through impl(), which casts the 'this' pointer to the subclass type.
//===----------------------------------------------------------------------===//
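//
// A minimal sketch of that dispatch scheme (illustrative only; the names
// UtilBase and UtilVector below are hypothetical, the real classes start with
// CalcLiveRangeUtilBase further down):
//
//   template <typename ImplT> struct UtilBase {
//     void run() { impl().step(); }                        // statically bound
//     ImplT &impl() { return *static_cast<ImplT *>(this); }
//   };
//   struct UtilVector : UtilBase<UtilVector> {
//     void step() { /* vector-specific work */ }
//   };
//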

template <typename ImplT, typename IteratorT, typename CollectionT>
class CalcLiveRangeUtilBase {
protected:
  LiveRange *LR;

  CalcLiveRangeUtilBase(LiveRange *LR) : LR(LR) {}

public:
  using Segment = LiveRange::Segment;
  using iterator = IteratorT;

  /// A counterpart of LiveRange::createDeadDef: Make sure the range has a
  /// value defined at @p Def.
  /// If @p ForVNI is null, and there is no value defined at @p Def, a new
  /// value will be allocated using @p VNInfoAllocator.
  /// If @p ForVNI is null, the return value is the value defined at @p Def,
  /// either a pre-existing one, or the one newly created.
  /// If @p ForVNI is not null, then @p Def should be the location where
  /// @p ForVNI is defined. If the range does not have a value defined at
  /// @p Def, the value @p ForVNI will be used instead of allocating a new
  /// one. If the range already has a value defined at @p Def, it must be the
  /// same as @p ForVNI. In either case, @p ForVNI will be the return value.
  VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator *VNInfoAllocator,
                        VNInfo *ForVNI) {
    assert(!Def.isDead() && "Cannot define a value at the dead slot");
    assert((!ForVNI || ForVNI->def == Def) &&
           "If ForVNI is specified, it must match Def");
    iterator I = impl().find(Def);
    if (I == segments().end()) {
      VNInfo *VNI = ForVNI ? ForVNI : LR->getNextValue(Def, *VNInfoAllocator);
      impl().insertAtEnd(Segment(Def, Def.getDeadSlot(), VNI));
      return VNI;
    }

    Segment *S = segmentAt(I);
    if (SlotIndex::isSameInstr(Def, S->start)) {
      assert((!ForVNI || ForVNI == S->valno) && "Value number mismatch");
      assert(S->valno->def == S->start && "Inconsistent existing value def");

      // It is possible to have both normal and early-clobber defs of the same
      // register on an instruction. It doesn't make a lot of sense, but it is
      // possible to specify in inline assembly.
      //
      // Just convert everything to early-clobber.
      Def = std::min(Def, S->start);
      if (Def != S->start)
        S->start = S->valno->def = Def;
      return S->valno;
    }
    assert(SlotIndex::isEarlierInstr(Def, S->start) && "Already live at def");
    VNInfo *VNI = ForVNI ? ForVNI : LR->getNextValue(Def, *VNInfoAllocator);
    segments().insert(I, Segment(Def, Def.getDeadSlot(), VNI));
    return VNI;
  }

  VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Use) {
    if (segments().empty())
      return nullptr;
    iterator I =
        impl().findInsertPos(Segment(Use.getPrevSlot(), Use, nullptr));
    if (I == segments().begin())
      return nullptr;
    --I;
    if (I->end <= StartIdx)
      return nullptr;
    if (I->end < Use)
      extendSegmentEndTo(I, Use);
    return I->valno;
  }

  std::pair<VNInfo*,bool> extendInBlock(ArrayRef<SlotIndex> Undefs,
                                        SlotIndex StartIdx, SlotIndex Use) {
    if (segments().empty())
      return std::make_pair(nullptr, false);
    SlotIndex BeforeUse = Use.getPrevSlot();
    iterator I = impl().findInsertPos(Segment(BeforeUse, Use, nullptr));
    if (I == segments().begin())
      return std::make_pair(nullptr, LR->isUndefIn(Undefs, StartIdx, BeforeUse));
    --I;
    if (I->end <= StartIdx)
      return std::make_pair(nullptr, LR->isUndefIn(Undefs, StartIdx, BeforeUse));
    if (I->end < Use) {
      if (LR->isUndefIn(Undefs, I->end, BeforeUse))
        return std::make_pair(nullptr, true);
      extendSegmentEndTo(I, Use);
    }
    return std::make_pair(I->valno, false);
  }

  /// This method is used when we want to extend the segment specified
  /// by I to end at the specified endpoint. To do this, we should
  /// merge and eliminate all segments that this will overlap
  /// with. The iterator is not invalidated.
  void extendSegmentEndTo(iterator I, SlotIndex NewEnd) {
    assert(I != segments().end() && "Not a valid segment!");
    Segment *S = segmentAt(I);
    VNInfo *ValNo = I->valno;

    // Search for the first segment that we can't merge with.
    iterator MergeTo = std::next(I);
    for (; MergeTo != segments().end() && NewEnd >= MergeTo->end; ++MergeTo)
      assert(MergeTo->valno == ValNo && "Cannot merge with differing values!");

    // If NewEnd was in the middle of a segment, make sure to get its endpoint.
    S->end = std::max(NewEnd, std::prev(MergeTo)->end);

    // If the newly formed segment now touches the segment after it and if they
    // have the same value number, merge the two segments into one segment.
    if (MergeTo != segments().end() && MergeTo->start <= I->end &&
        MergeTo->valno == ValNo) {
      S->end = MergeTo->end;
      ++MergeTo;
    }

    // Erase any dead segments.
    segments().erase(std::next(I), MergeTo);
  }

  /// This method is used when we want to extend the segment specified
  /// by I to start at the specified endpoint. To do this, we should
  /// merge and eliminate all segments that this will overlap with.
  iterator extendSegmentStartTo(iterator I, SlotIndex NewStart) {
    assert(I != segments().end() && "Not a valid segment!");
    Segment *S = segmentAt(I);
    VNInfo *ValNo = I->valno;

    // Search for the first segment that we can't merge with.
    iterator MergeTo = I;
    do {
      if (MergeTo == segments().begin()) {
        S->start = NewStart;
        segments().erase(MergeTo, I);
        return I;
      }
      assert(MergeTo->valno == ValNo && "Cannot merge with differing values!");
      --MergeTo;
    } while (NewStart <= MergeTo->start);

    // If we start in the middle of another segment, just delete a range and
    // extend that segment.
    if (MergeTo->end >= NewStart && MergeTo->valno == ValNo) {
      segmentAt(MergeTo)->end = S->end;
    } else {
      // Otherwise, extend the segment right after.
      ++MergeTo;
      Segment *MergeToSeg = segmentAt(MergeTo);
      MergeToSeg->start = NewStart;
      MergeToSeg->end = S->end;
    }

    segments().erase(std::next(MergeTo), std::next(I));
    return MergeTo;
  }

  iterator addSegment(Segment S) {
    SlotIndex Start = S.start, End = S.end;
    iterator I = impl().findInsertPos(S);

    // If the inserted segment starts in the middle or right at the end of
    // another segment, just extend that segment to contain the segment of S.
    if (I != segments().begin()) {
      iterator B = std::prev(I);
      if (S.valno == B->valno) {
        if (B->start <= Start && B->end >= Start) {
          extendSegmentEndTo(B, End);
          return B;
        }
      } else {
        // Check to make sure that we are not overlapping two live segments
        // with different valno's.
        assert(B->end <= Start &&
               "Cannot overlap two segments with differing ValID's"
               " (did you def the same reg twice in a MachineInstr?)");
      }
    }

    // Otherwise, if this segment ends in the middle of, or right next
    // to, another segment, merge it into that segment.
    if (I != segments().end()) {
      if (S.valno == I->valno) {
        if (I->start <= End) {
          I = extendSegmentStartTo(I, Start);

          // If S is a complete superset of a segment, we may need to grow its
          // endpoint as well.
          if (End > I->end)
            extendSegmentEndTo(I, End);
          return I;
        }
      } else {
        // Check to make sure that we are not overlapping two live segments
        // with different valno's.
        assert(I->start >= End &&
               "Cannot overlap two segments with differing ValID's");
      }
    }

    // Otherwise, this is just a new segment that doesn't interact with
    // anything. Insert it.
    return segments().insert(I, S);
  }

private:
  ImplT &impl() { return *static_cast<ImplT *>(this); }

  CollectionT &segments() { return impl().segmentsColl(); }

  Segment *segmentAt(iterator I) { return const_cast<Segment *>(&(*I)); }
};

//===----------------------------------------------------------------------===//
// Instantiation of the methods for calculation of live ranges
// based on a segment vector.
//===----------------------------------------------------------------------===//

class CalcLiveRangeUtilVector;
using CalcLiveRangeUtilVectorBase =
    CalcLiveRangeUtilBase<CalcLiveRangeUtilVector, LiveRange::iterator,
                          LiveRange::Segments>;

class CalcLiveRangeUtilVector : public CalcLiveRangeUtilVectorBase {
public:
  CalcLiveRangeUtilVector(LiveRange *LR) : CalcLiveRangeUtilVectorBase(LR) {}

private:
  friend CalcLiveRangeUtilVectorBase;

  LiveRange::Segments &segmentsColl() { return LR->segments; }

  void insertAtEnd(const Segment &S) { LR->segments.push_back(S); }

  iterator find(SlotIndex Pos) { return LR->find(Pos); }

  iterator findInsertPos(Segment S) { return llvm::upper_bound(*LR, S.start); }
};

//===----------------------------------------------------------------------===//
// Instantiation of the methods for calculation of live ranges
// based on a segment set.
//===----------------------------------------------------------------------===//

class CalcLiveRangeUtilSet;
using CalcLiveRangeUtilSetBase =
    CalcLiveRangeUtilBase<CalcLiveRangeUtilSet, LiveRange::SegmentSet::iterator,
                          LiveRange::SegmentSet>;

class CalcLiveRangeUtilSet : public CalcLiveRangeUtilSetBase {
public:
  CalcLiveRangeUtilSet(LiveRange *LR) : CalcLiveRangeUtilSetBase(LR) {}

private:
  friend CalcLiveRangeUtilSetBase;

  LiveRange::SegmentSet &segmentsColl() { return *LR->segmentSet; }

  void insertAtEnd(const Segment &S) {
    LR->segmentSet->insert(LR->segmentSet->end(), S);
  }

  iterator find(SlotIndex Pos) {
    iterator I =
        LR->segmentSet->upper_bound(Segment(Pos, Pos.getNextSlot(), nullptr));
    if (I == LR->segmentSet->begin())
      return I;
    iterator PrevI = std::prev(I);
    if (Pos < (*PrevI).end)
      return PrevI;
    return I;
  }

  iterator findInsertPos(Segment S) {
    iterator I = LR->segmentSet->upper_bound(S);
    if (I != LR->segmentSet->end() && !(S.start < *I))
      ++I;
    return I;
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//   LiveRange methods
//===----------------------------------------------------------------------===//

LiveRange::iterator LiveRange::find(SlotIndex Pos) {
  return llvm::partition_point(*this,
                               [&](const Segment &X) { return X.end <= Pos; });
}
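
// Example (sketch, with hypothetical locals LR and Idx): find() returns the
// first segment that ends after Pos, so a caller that wants a containment
// check still has to test the segment it gets back:
//
//   LiveRange::const_iterator I = LR.find(Idx);
//   bool LiveAtIdx = I != LR.end() && I->contains(Idx);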

VNInfo *LiveRange::createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc) {
  // Use the segment set, if it is available.
  if (segmentSet != nullptr)
    return CalcLiveRangeUtilSet(this).createDeadDef(Def, &VNIAlloc, nullptr);
  // Otherwise use the segment vector.
  return CalcLiveRangeUtilVector(this).createDeadDef(Def, &VNIAlloc, nullptr);
}

VNInfo *LiveRange::createDeadDef(VNInfo *VNI) {
  // Use the segment set, if it is available.
  if (segmentSet != nullptr)
    return CalcLiveRangeUtilSet(this).createDeadDef(VNI->def, nullptr, VNI);
  // Otherwise use the segment vector.
  return CalcLiveRangeUtilVector(this).createDeadDef(VNI->def, nullptr, VNI);
}
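
// Example (sketch, with hypothetical locals Indexes, MI, LR and Alloc): a
// client that wants a dead def for the value written by MI would typically do
//
//   SlotIndex DefIdx = Indexes.getInstructionIndex(MI).getRegSlot();
//   VNInfo *VNI = LR.createDeadDef(DefIdx, Alloc);
//
// which, when no value is live there yet, leaves LR with a segment
// [DefIdx, DefIdx.getDeadSlot()) owned by VNI.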

// overlaps - Return true if the intersection of the two live ranges is
// not empty.
//
// An example for overlaps():
//
// 0: A = ...
// 4: B = ...
// 8: C = A + B ;; last use of A
//
// The live ranges should look like:
//
// A = [3, 11)
// B = [7, x)
// C = [11, y)
//
// A->overlaps(C) should return false since we want to be able to join
// A and C.
//
bool LiveRange::overlapsFrom(const LiveRange& other,
                             const_iterator StartPos) const {
  assert(!empty() && "empty range");
  const_iterator i = begin();
  const_iterator ie = end();
  const_iterator j = StartPos;
  const_iterator je = other.end();

  assert((StartPos->start <= i->start || StartPos == other.begin()) &&
         StartPos != other.end() && "Bogus start position hint!");

  if (i->start < j->start) {
    i = std::upper_bound(i, ie, j->start);
    if (i != begin()) --i;
  } else if (j->start < i->start) {
    ++StartPos;
    if (StartPos != other.end() && StartPos->start <= i->start) {
      assert(StartPos < other.end() && i < end());
      j = std::upper_bound(j, je, i->start);
      if (j != other.begin()) --j;
    }
  } else {
    return true;
  }

  if (j == je) return false;

  while (i != ie) {
    if (i->start > j->start) {
      std::swap(i, j);
      std::swap(ie, je);
    }

    if (i->end > j->start)
      return true;
    ++i;
  }

  return false;
}

bool LiveRange::overlaps(const LiveRange &Other, const CoalescerPair &CP,
                         const SlotIndexes &Indexes) const {
  assert(!empty() && "empty range");
  if (Other.empty())
    return false;

  // Use binary searches to find initial positions.
  const_iterator I = find(Other.beginIndex());
  const_iterator IE = end();
  if (I == IE)
    return false;
  const_iterator J = Other.find(I->start);
  const_iterator JE = Other.end();
  if (J == JE)
    return false;

  while (true) {
    // J has just been advanced to satisfy:
    assert(J->end > I->start);
    // Check for an overlap.
    if (J->start < I->end) {
      // I and J are overlapping. Find the later start.
      SlotIndex Def = std::max(I->start, J->start);
      // Allow the overlap if Def is a coalescable copy.
      if (Def.isBlock() ||
          !CP.isCoalescable(Indexes.getInstructionFromIndex(Def)))
        return true;
    }
    // Advance the iterator that ends first to check for more overlaps.
    if (J->end > I->end) {
      std::swap(I, J);
      std::swap(IE, JE);
    }

    // Advance J until J->end > I->start.
    do
      if (++J == JE)
        return false;
    while (J->end <= I->start);
  }
}

/// overlaps - Return true if the live range overlaps an interval specified
/// by [Start, End).
bool LiveRange::overlaps(SlotIndex Start, SlotIndex End) const {
  assert(Start < End && "Invalid range");
  const_iterator I = lower_bound(*this, End);
  return I != begin() && (--I)->end > Start;
}
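
// Example (sketch): for a range whose segments are [4,12) and [20,28), a query
// overlaps(12, 20) returns false because the whole interval falls in the hole,
// while overlaps(10, 14) returns true because [4,12) extends past the query
// start.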

bool LiveRange::covers(const LiveRange &Other) const {
  if (empty())
    return Other.empty();

  const_iterator I = begin();
  for (const Segment &O : Other.segments) {
    I = advanceTo(I, O.start);
    if (I == end() || I->start > O.start)
      return false;

    // Check adjacent live segments and see if we can get behind O.end.
    while (I->end < O.end) {
      const_iterator Last = I;
      // Get next segment and abort if it was not adjacent.
      ++I;
      if (I == end() || Last->end != I->start)
        return false;
    }
  }
  return true;
}

/// ValNo is dead, remove it. If it is the largest value number, just nuke it
/// (and any other deleted values neighboring it), otherwise mark it as ~1U so
/// it can be nuked later.
void LiveRange::markValNoForDeletion(VNInfo *ValNo) {
  if (ValNo->id == getNumValNums()-1) {
    do {
      valnos.pop_back();
    } while (!valnos.empty() && valnos.back()->isUnused());
  } else {
    ValNo->markUnused();
  }
}

/// RenumberValues - Renumber all values in order of appearance and delete the
/// remaining unused values.
void LiveRange::RenumberValues() {
  SmallPtrSet<VNInfo*, 8> Seen;
  valnos.clear();
  for (const Segment &S : segments) {
    VNInfo *VNI = S.valno;
    if (!Seen.insert(VNI).second)
      continue;
    assert(!VNI->isUnused() && "Unused valno used by live segment");
    VNI->id = (unsigned)valnos.size();
    valnos.push_back(VNI);
  }
}

void LiveRange::addSegmentToSet(Segment S) {
  CalcLiveRangeUtilSet(this).addSegment(S);
}

LiveRange::iterator LiveRange::addSegment(Segment S) {
  // Use the segment set, if it is available.
  if (segmentSet != nullptr) {
    addSegmentToSet(S);
    return end();
  }
  // Otherwise use the segment vector.
  return CalcLiveRangeUtilVector(this).addSegment(S);
}

void LiveRange::append(const Segment S) {
  // Check that the segment belongs to the back of the list.
  assert(segments.empty() || segments.back().end <= S.start);
  segments.push_back(S);
}
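
// Example (sketch, with hypothetical SlotIndex locals Start and End and a
// VNInfo *VNI): addSegment() is the general entry point and merges the new
// segment with any neighbours that carry the same value number:
//
//   LR.addSegment(LiveRange::Segment(Start, End, VNI));
//
// append() is the cheap path for callers that already produce segments in
// order; it requires the new segment to start at or after segments.back().end.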

std::pair<VNInfo*,bool> LiveRange::extendInBlock(ArrayRef<SlotIndex> Undefs,
                                                 SlotIndex StartIdx,
                                                 SlotIndex Kill) {
  // Use the segment set, if it is available.
  if (segmentSet != nullptr)
    return CalcLiveRangeUtilSet(this).extendInBlock(Undefs, StartIdx, Kill);
  // Otherwise use the segment vector.
  return CalcLiveRangeUtilVector(this).extendInBlock(Undefs, StartIdx, Kill);
}

VNInfo *LiveRange::extendInBlock(SlotIndex StartIdx, SlotIndex Kill) {
  // Use the segment set, if it is available.
  if (segmentSet != nullptr)
    return CalcLiveRangeUtilSet(this).extendInBlock(StartIdx, Kill);
  // Otherwise use the segment vector.
  return CalcLiveRangeUtilVector(this).extendInBlock(StartIdx, Kill);
}
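
// Example (sketch, with hypothetical locals BBStart and UseIdx): extendInBlock
// grows liveness inside a single basic block. If the range already has a
// segment that is live after BBStart and reaches up to UseIdx's vicinity, that
// segment's end is pulled forward to UseIdx and its value number is returned;
// otherwise nullptr comes back and the caller has to look at predecessors:
//
//   if (VNInfo *VNI = LR.extendInBlock(BBStart, UseIdx))
//     (void)VNI; // LR now covers up to UseIdx with VNI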

void LiveRange::removeSegment(SlotIndex Start, SlotIndex End,
                              bool RemoveDeadValNo) {
  // Find the Segment containing this span.
  iterator I = find(Start);

  // No Segment found, so nothing to do.
  if (I == end() || I->start > Start)
    return;

  assert(I->containsInterval(Start, End)
         && "Segment is not entirely in range!");

  // If the span we are removing is at the start of the Segment, adjust it.
  VNInfo *ValNo = I->valno;
  if (I->start == Start) {
    if (I->end == End) {
      segments.erase(I);  // Removed the whole Segment.

      if (RemoveDeadValNo)
        removeValNoIfDead(ValNo);
    } else
      I->start = End;
    return;
  }

  // Otherwise if the span we are removing is at the end of the Segment,
  // adjust the other way.
  if (I->end == End) {
    I->end = Start;
    return;
  }

  // Otherwise, we are splitting the Segment into two pieces.
  SlotIndex OldEnd = I->end;
  I->end = Start;   // Trim the old segment.

  // Insert the new one.
  segments.insert(std::next(I), Segment(End, OldEnd, ValNo));
}
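
// Example (sketch): removing the middle of a segment splits it. If LR holds a
// single segment [8,40:0), then a hypothetical call
//
//   LR.removeSegment(Idx16, Idx24);   // SlotIndexes for 16 and 24
//
// leaves LR with the two segments [8,16:0) and [24,40:0), both still owned by
// value number 0.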

LiveRange::iterator LiveRange::removeSegment(iterator I, bool RemoveDeadValNo) {
  VNInfo *ValNo = I->valno;
  I = segments.erase(I);
  if (RemoveDeadValNo)
    removeValNoIfDead(ValNo);
  return I;
}

void LiveRange::removeValNoIfDead(VNInfo *ValNo) {
  if (none_of(*this, [=](const Segment &S) { return S.valno == ValNo; }))
    markValNoForDeletion(ValNo);
}

/// removeValNo - Remove all the segments defined by the specified value#.
/// Also remove the value# from value# list.
void LiveRange::removeValNo(VNInfo *ValNo) {
  if (empty())
    return;
  llvm::erase_if(segments,
                 [ValNo](const Segment &S) { return S.valno == ValNo; });
  // Now that ValNo is dead, remove it.
  markValNoForDeletion(ValNo);
}

void LiveRange::join(LiveRange &Other,
                     const int *LHSValNoAssignments,
                     const int *RHSValNoAssignments,
                     SmallVectorImpl<VNInfo *> &NewVNInfo) {
  // Determine if any of our values are mapped. This is uncommon, so we want
  // to avoid the range scan if not.
  bool MustMapCurValNos = false;
  unsigned NumVals = getNumValNums();
  unsigned NumNewVals = NewVNInfo.size();
  for (unsigned i = 0; i != NumVals; ++i) {
    unsigned LHSValID = LHSValNoAssignments[i];
    if (i != LHSValID ||
        (NewVNInfo[LHSValID] && NewVNInfo[LHSValID] != getValNumInfo(i))) {
      MustMapCurValNos = true;
      break;
    }
  }

  // If we have to apply a mapping to our base range assignment, rewrite it now.
  if (MustMapCurValNos && !empty()) {
    // Map the first live range.
    iterator OutIt = begin();
    OutIt->valno = NewVNInfo[LHSValNoAssignments[OutIt->valno->id]];
    for (iterator I = std::next(OutIt), E = end(); I != E; ++I) {
      VNInfo* nextValNo = NewVNInfo[LHSValNoAssignments[I->valno->id]];
      assert(nextValNo && "Huh?");

      // If this live range has the same value # as its immediate predecessor,
      // and if they are neighbors, remove one Segment. This happens when we
      // have [0,4:0)[4,7:1) and map 0/1 onto the same value #.
      if (OutIt->valno == nextValNo && OutIt->end == I->start) {
        OutIt->end = I->end;
      } else {
        // Didn't merge. Move OutIt to the next segment,
        // then copy this segment into it.
        ++OutIt;
        OutIt->valno = nextValNo;
        if (OutIt != I) {
          OutIt->start = I->start;
          OutIt->end = I->end;
        }
      }
    }
    // If we merge some segments, chop off the end.
    ++OutIt;
    segments.erase(OutIt, end());
  }

  // Rewrite Other values before changing the VNInfo ids.
  // This can leave Other in an invalid state because we're not coalescing
  // touching segments that now have identical values. That's OK since Other is
  // not supposed to be valid after calling join().
  for (Segment &S : Other.segments)
    S.valno = NewVNInfo[RHSValNoAssignments[S.valno->id]];

  // Update val# info. Renumber them and make sure they all belong to this
  // LiveRange now. Also remove dead val#'s.
  unsigned NumValNos = 0;
  for (unsigned i = 0; i < NumNewVals; ++i) {
    VNInfo *VNI = NewVNInfo[i];
    if (VNI) {
      if (NumValNos >= NumVals)
        valnos.push_back(VNI);
      else
        valnos[NumValNos] = VNI;
      VNI->id = NumValNos++; // Renumber val#.
    }
  }
  if (NumNewVals < NumVals)
    valnos.resize(NumNewVals); // shrinkify

  // Okay, now insert the RHS live segments into the LHS.
  LiveRangeUpdater Updater(this);
  for (Segment &S : Other.segments)
    Updater.add(S);
}

/// Merge all of the segments in RHS into this live range as the specified
/// value number. The segments in RHS are allowed to overlap with segments in
/// the current range, but only if the overlapping segments have the
/// specified value number.
void LiveRange::MergeSegmentsInAsValue(const LiveRange &RHS,
                                       VNInfo *LHSValNo) {
  LiveRangeUpdater Updater(this);
  for (const Segment &S : RHS.segments)
    Updater.add(S.start, S.end, LHSValNo);
}

/// MergeValueInAsValue - Merge all of the live segments of a specific val#
/// in RHS into this live range as the specified value number.
/// The segments in RHS are allowed to overlap with segments in the
/// current range; it will replace the value numbers of the overlapped
/// segments with the specified value number.
void LiveRange::MergeValueInAsValue(const LiveRange &RHS,
                                    const VNInfo *RHSValNo,
                                    VNInfo *LHSValNo) {
  LiveRangeUpdater Updater(this);
  for (const Segment &S : RHS.segments)
    if (S.valno == RHSValNo)
      Updater.add(S.start, S.end, LHSValNo);
}

/// MergeValueNumberInto - This method is called when two value numbers
/// are found to be equivalent. This eliminates V1, replacing all
/// segments with the V1 value number with the V2 value number. This can
/// cause merging of V1/V2 value numbers and compaction of the value space.
VNInfo *LiveRange::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) {
  assert(V1 != V2 && "Identical value#'s are always equivalent!");

  // This code actually merges the (numerically) larger value number into the
  // smaller value number, which is likely to allow us to compactify the value
  // space. The only thing we have to be careful of is to preserve the
  // instruction that defines the result value.

  // Make sure V2 is smaller than V1.
  if (V1->id < V2->id) {
    V1->copyFrom(*V2);
    std::swap(V1, V2);
  }

  // Merge V1 segments into V2.
  for (iterator I = begin(); I != end(); ) {
    iterator S = I++;
    if (S->valno != V1) continue; // Not a V1 Segment.

    // Okay, we found a V1 live range. If it had a previous, touching, V2 live
    // range, extend it.
    if (S != begin()) {
      iterator Prev = std::prev(S);
      if (Prev->valno == V2 && Prev->end == S->start) {
        Prev->end = S->end;

        // Erase this live-range.
        segments.erase(S);
        I = Prev + 1;
        S = Prev;
      }
    }

    // Okay, now we have a V1 or V2 live range that is maximally merged forward.
    // Ensure that it is a V2 live-range.
    S->valno = V2;

    // If we can merge it into later V2 segments, do so now. We ignore any
    // following V1 segments, as they will be merged in subsequent iterations
    // of the loop.
    if (I != end()) {
      if (I->start == S->end && I->valno == V2) {
        S->end = I->end;
        segments.erase(I);
        I = S + 1;
      }
    }
  }

  // Now that V1 is dead, remove it.
  markValNoForDeletion(V1);

  return V2;
}

void LiveRange::flushSegmentSet() {
  assert(segmentSet != nullptr && "segment set must have been created");
  assert(
      segments.empty() &&
      "segment set can be used only initially before switching to the array");
  segments.append(segmentSet->begin(), segmentSet->end());
  segmentSet = nullptr;
  verify();
}

bool LiveRange::isLiveAtIndexes(ArrayRef<SlotIndex> Slots) const {
  ArrayRef<SlotIndex>::iterator SlotI = Slots.begin();
  ArrayRef<SlotIndex>::iterator SlotE = Slots.end();

  // If there are no regmask slots, we have nothing to search.
  if (SlotI == SlotE)
    return false;

  // Start our search at the first segment that ends after the first slot.
  const_iterator SegmentI = find(*SlotI);
  const_iterator SegmentE = end();

  // If there are no segments that end after the first slot, we're done.
  if (SegmentI == SegmentE)
    return false;

  // Look for each slot in the live range.
  for ( ; SlotI != SlotE; ++SlotI) {
    // Go to the next segment that ends after the current slot.
    // The slot may be within a hole in the range.
    SegmentI = advanceTo(SegmentI, *SlotI);
    if (SegmentI == SegmentE)
      return false;

    // If this segment contains the slot, we're done.
    if (SegmentI->contains(*SlotI))
      return true;
    // Otherwise, look for the next slot.
  }

  // We didn't find a segment containing any of the slots.
  return false;
}
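
// Example (sketch, with a hypothetical LiveRange covering [8,16) and [32,48)):
// passing the slot list {20, 36} returns true because the second slot lands
// inside [32,48); passing {20, 24} returns false since both slots fall into
// the hole between the two segments.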

void LiveInterval::freeSubRange(SubRange *S) {
  S->~SubRange();
  // Memory was allocated with BumpPtr allocator and is not freed here.
}

void LiveInterval::removeEmptySubRanges() {
  SubRange **NextPtr = &SubRanges;
  SubRange *I = *NextPtr;
  while (I != nullptr) {
    if (!I->empty()) {
      NextPtr = &I->Next;
      I = *NextPtr;
      continue;
    }
    // Skip empty subranges until we find the first nonempty one.
    do {
      SubRange *Next = I->Next;
      freeSubRange(I);
      I = Next;
    } while (I != nullptr && I->empty());
    *NextPtr = I;
  }
}

void LiveInterval::clearSubRanges() {
  for (SubRange *I = SubRanges, *Next; I != nullptr; I = Next) {
    Next = I->Next;
    freeSubRange(I);
  }
  SubRanges = nullptr;
}

/// For each VNI in \p SR, check whether or not that value defines part
/// of the mask described by \p LaneMask and if not, remove that value
/// from \p SR.
static void stripValuesNotDefiningMask(unsigned Reg, LiveInterval::SubRange &SR,
                                       LaneBitmask LaneMask,
                                       const SlotIndexes &Indexes,
                                       const TargetRegisterInfo &TRI,
                                       unsigned ComposeSubRegIdx) {
  // Phys reg should not be tracked at subreg level.
  // Same for noreg (Reg == 0).
  if (!Register::isVirtualRegister(Reg) || !Reg)
    return;
  // Remove the values that don't define those lanes.
  SmallVector<VNInfo *, 8> ToBeRemoved;
  for (VNInfo *VNI : SR.valnos) {
    if (VNI->isUnused())
      continue;
    // PHI definitions don't have MI attached, so there is nothing
    // we can use to strip the VNI.
    if (VNI->isPHIDef())
      continue;
    const MachineInstr *MI = Indexes.getInstructionFromIndex(VNI->def);
    assert(MI && "Cannot find the definition of a value");
    bool hasDef = false;
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || !MOI->isDef())
        continue;
      if (MOI->getReg() != Reg)
        continue;
      LaneBitmask OrigMask = TRI.getSubRegIndexLaneMask(MOI->getSubReg());
      LaneBitmask ExpectedDefMask =
          ComposeSubRegIdx
              ? TRI.composeSubRegIndexLaneMask(ComposeSubRegIdx, OrigMask)
              : OrigMask;
      if ((ExpectedDefMask & LaneMask).none())
        continue;
      hasDef = true;
      break;
    }

    if (!hasDef)
      ToBeRemoved.push_back(VNI);
  }
  for (VNInfo *VNI : ToBeRemoved)
    SR.removeValNo(VNI);

  // If the subrange is empty at this point, the MIR is invalid. Do not assert
  // and let the verifier catch this case.
}

void LiveInterval::refineSubRanges(
    BumpPtrAllocator &Allocator, LaneBitmask LaneMask,
    std::function<void(LiveInterval::SubRange &)> Apply,
    const SlotIndexes &Indexes, const TargetRegisterInfo &TRI,
    unsigned ComposeSubRegIdx) {
  LaneBitmask ToApply = LaneMask;
  for (SubRange &SR : subranges()) {
    LaneBitmask SRMask = SR.LaneMask;
    LaneBitmask Matching = SRMask & LaneMask;
    if (Matching.none())
      continue;

    SubRange *MatchingRange;
    if (SRMask == Matching) {
      // The subrange fits (it does not cover bits outside \p LaneMask).
      MatchingRange = &SR;
    } else {
      // We have to split the subrange into a matching and non-matching part.
      // Reduce lanemask of existing lane to non-matching part.
      SR.LaneMask = SRMask & ~Matching;
      // Create a new subrange for the matching part.
      MatchingRange = createSubRangeFrom(Allocator, Matching, SR);
      // Now that the subrange is split in half, make sure we
      // only keep in the subranges the VNIs that touch the related half.
      stripValuesNotDefiningMask(reg(), *MatchingRange, Matching, Indexes, TRI,
                                 ComposeSubRegIdx);
      stripValuesNotDefiningMask(reg(), SR, SR.LaneMask, Indexes, TRI,
                                 ComposeSubRegIdx);
    }
    Apply(*MatchingRange);
    ToApply &= ~Matching;
  }
  // Create a new subrange if there are uncovered bits left.
  if (ToApply.any()) {
    SubRange *NewRange = createSubRange(Allocator, ToApply);
    Apply(*NewRange);
  }
}

unsigned LiveInterval::getSize() const {
  unsigned Sum = 0;
  for (const Segment &S : segments)
    Sum += S.start.distance(S.end);
  return Sum;
}
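
// Example (sketch): for segments [8,16) and [32,48), getSize() returns
// (16 - 8) + (48 - 32) = 24 slot-index units; holes between the segments do
// not contribute to the size.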

void LiveInterval::computeSubRangeUndefs(SmallVectorImpl<SlotIndex> &Undefs,
                                         LaneBitmask LaneMask,
                                         const MachineRegisterInfo &MRI,
                                         const SlotIndexes &Indexes) const {
  assert(reg().isVirtual());
  LaneBitmask VRegMask = MRI.getMaxLaneMaskForVReg(reg());
  assert((VRegMask & LaneMask).any());
  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
  for (const MachineOperand &MO : MRI.def_operands(reg())) {
    if (!MO.isUndef())
      continue;
    unsigned SubReg = MO.getSubReg();
    assert(SubReg != 0 && "Undef should only be set on subreg defs");
    LaneBitmask DefMask = TRI.getSubRegIndexLaneMask(SubReg);
    LaneBitmask UndefMask = VRegMask & ~DefMask;
    if ((UndefMask & LaneMask).any()) {
      const MachineInstr &MI = *MO.getParent();
      bool EarlyClobber = MO.isEarlyClobber();
      SlotIndex Pos = Indexes.getInstructionIndex(MI).getRegSlot(EarlyClobber);
      Undefs.push_back(Pos);
    }
  }
}

raw_ostream& llvm::operator<<(raw_ostream& OS, const LiveRange::Segment &S) {
  return OS << '[' << S.start << ',' << S.end << ':' << S.valno->id << ')';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LiveRange::Segment::dump() const {
  dbgs() << *this << '\n';
}
#endif
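
// Example (sketch): a segment covering register slots 16r through 32r for
// value number 2 prints as "[16r,32r:2)": start, end, then the value number
// id, using the usual half-open bracket convention.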

void LiveRange::print(raw_ostream &OS) const {
  if (empty())
    OS << "EMPTY";
  else {
    for (const Segment &S : segments) {
      OS << S;
      assert(S.valno == getValNumInfo(S.valno->id) && "Bad VNInfo");
    }
  }

  // Print value number info.
  if (getNumValNums()) {
    OS << "  ";
    unsigned vnum = 0;
    for (const_vni_iterator i = vni_begin(), e = vni_end(); i != e;
         ++i, ++vnum) {
      const VNInfo *vni = *i;
      if (vnum) OS << ' ';
      OS << vnum << '@';
      if (vni->isUnused()) {
        OS << 'x';
      } else {
        OS << vni->def;
        if (vni->isPHIDef())
          OS << "-phi";
      }
    }
  }
}

void LiveInterval::SubRange::print(raw_ostream &OS) const {
  OS << " L" << PrintLaneMask(LaneMask) << ' '
     << static_cast<const LiveRange &>(*this);
}

void LiveInterval::print(raw_ostream &OS) const {
  OS << printReg(reg()) << ' ';
  LiveRange::print(OS);
  // Print subranges.
  for (const SubRange &SR : subranges())
    SR.print(OS);
  OS << " weight:" << Weight;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LiveRange::dump() const {
  dbgs() << *this << '\n';
}

LLVM_DUMP_METHOD void LiveInterval::SubRange::dump() const {
  dbgs() << *this << '\n';
}

LLVM_DUMP_METHOD void LiveInterval::dump() const {
  dbgs() << *this << '\n';
}
#endif

#ifndef NDEBUG
void LiveRange::verify() const {
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    assert(I->start.isValid());
    assert(I->end.isValid());
    assert(I->start < I->end);
    assert(I->valno != nullptr);
    assert(I->valno->id < valnos.size());
    assert(I->valno == valnos[I->valno->id]);
    if (std::next(I) != E) {
      assert(I->end <= std::next(I)->start);
      if (I->end == std::next(I)->start)
        assert(I->valno != std::next(I)->valno);
    }
  }
}

void LiveInterval::verify(const MachineRegisterInfo *MRI) const {
  LiveRange::verify();

  // Make sure SubRanges are fine and LaneMasks are disjunct.
  LaneBitmask Mask;
  LaneBitmask MaxMask = MRI != nullptr ? MRI->getMaxLaneMaskForVReg(reg())
                                       : LaneBitmask::getAll();
  for (const SubRange &SR : subranges()) {
    // Subrange lanemask should be disjunct to any previous subrange masks.
    assert((Mask & SR.LaneMask).none());
    Mask |= SR.LaneMask;

    // Subrange masks should not contain bits outside the maximum lane mask
    // for the vreg.
    assert((Mask & ~MaxMask).none());
    // Empty subranges must be removed.
    assert(!SR.empty());

    SR.verify();
    // Main liverange should cover subrange.
    assert(covers(SR));
  }
}
#endif

//===----------------------------------------------------------------------===//
//                           LiveRangeUpdater class
//===----------------------------------------------------------------------===//
//
// The LiveRangeUpdater class always maintains these invariants:
//
// - When LastStart is invalid, Spills is empty and the iterators are invalid.
//   This is the initial state, and the state created by flush().
//   In this state, isDirty() returns false.
//
// Otherwise, segments are kept in three separate areas:
//
// 1. [begin; WriteI) at the front of LR.
// 2. [ReadI; end) at the back of LR.
// 3. Spills.
//
// - LR.begin() <= WriteI <= ReadI <= LR.end().
// - Segments in all three areas are fully ordered and coalesced.
// - Segments in area 1 precede and can't coalesce with segments in area 2.
// - Segments in Spills precede and can't coalesce with segments in area 2.
// - No coalescing is possible between segments in Spills and segments in area
//   1, and there are no overlapping segments.
//
// The segments in Spills are not ordered with respect to the segments in area
// 1. They need to be merged.
//
// When they exist, Spills.back().start <= LastStart,
//                 and WriteI[-1].start <= LastStart.
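
// Example (sketch, with hypothetical locals LR, Start, End and VNI): the
// expected calling pattern is to construct one updater, feed it segments in
// roughly increasing start order, and let flush() (called explicitly or by the
// destructor) write the merged result back into the live range:
//
//   LiveRangeUpdater Updater(&LR);
//   Updater.add(Start, End, VNI);     // repeat for each segment to merge in
//   Updater.flush();                  // LR is ordered and coalesced again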

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveRangeUpdater::print(raw_ostream &OS) const {
  if (!isDirty()) {
    if (LR)
      OS << "Clean updater: " << *LR << '\n';
    else
      OS << "Null updater.\n";
    return;
  }
  assert(LR && "Can't have null LR in dirty updater.");
  OS << " updater with gap = " << (ReadI - WriteI)
     << ", last start = " << LastStart
     << ":\n  Area 1:";
  for (const auto &S : make_range(LR->begin(), WriteI))
    OS << ' ' << S;
  OS << "\n  Spills:";
  for (unsigned I = 0, E = Spills.size(); I != E; ++I)
    OS << ' ' << Spills[I];
  OS << "\n  Area 2:";
  for (const auto &S : make_range(ReadI, LR->end()))
    OS << ' ' << S;
  OS << '\n';
}

LLVM_DUMP_METHOD void LiveRangeUpdater::dump() const {
  print(errs());
}
#endif

// Determine if A and B should be coalesced.
static inline bool coalescable(const LiveRange::Segment &A,
                               const LiveRange::Segment &B) {
  assert(A.start <= B.start && "Unordered live segments.");
  if (A.end == B.start)
    return A.valno == B.valno;
  if (A.end < B.start)
    return false;
  assert(A.valno == B.valno && "Cannot overlap different values");
  return true;
}

void LiveRangeUpdater::add(LiveRange::Segment Seg) {
  assert(LR && "Cannot add to a null destination");

  // Fall back to the regular add method if the live range
  // is using the segment set instead of the segment vector.
  if (LR->segmentSet != nullptr) {
    LR->addSegmentToSet(Seg);
    return;
  }

  // Flush the state if Start moves backwards.
  if (!LastStart.isValid() || LastStart > Seg.start) {
    if (isDirty())
      flush();
    // This brings us to an uninitialized state. Reinitialize.
    assert(Spills.empty() && "Leftover spilled segments");
    WriteI = ReadI = LR->begin();
  }

  // Remember start for next time.
  LastStart = Seg.start;

  // Advance ReadI until it ends after Seg.start.
  LiveRange::iterator E = LR->end();
  if (ReadI != E && ReadI->end <= Seg.start) {
    // First try to close the gap between WriteI and ReadI with spills.
    if (ReadI != WriteI)
      mergeSpills();
    // Then advance ReadI.
    if (ReadI == WriteI)
      ReadI = WriteI = LR->find(Seg.start);
    else
      while (ReadI != E && ReadI->end <= Seg.start)
        *WriteI++ = *ReadI++;
  }

  assert(ReadI == E || ReadI->end > Seg.start);

  // Check if the ReadI segment begins early.
  if (ReadI != E && ReadI->start <= Seg.start) {
    assert(ReadI->valno == Seg.valno && "Cannot overlap different values");
    // Bail if Seg is completely contained in ReadI.
    if (ReadI->end >= Seg.end)
      return;
    // Coalesce into Seg.
    Seg.start = ReadI->start;
    ++ReadI;
  }

  // Coalesce as much as possible from ReadI into Seg.
  while (ReadI != E && coalescable(Seg, *ReadI)) {
    Seg.end = std::max(Seg.end, ReadI->end);
    ++ReadI;
  }

  // Try coalescing Spills.back() into Seg.
  if (!Spills.empty() && coalescable(Spills.back(), Seg)) {
    Seg.start = Spills.back().start;
    Seg.end = std::max(Spills.back().end, Seg.end);
    Spills.pop_back();
  }

  // Try coalescing Seg into WriteI[-1].
  if (WriteI != LR->begin() && coalescable(WriteI[-1], Seg)) {
    WriteI[-1].end = std::max(WriteI[-1].end, Seg.end);
    return;
  }

  // Seg doesn't coalesce with anything, and needs to be inserted somewhere.
  if (WriteI != ReadI) {
    *WriteI++ = Seg;
    return;
  }

  // Finally, append to LR or Spills.
  if (ReadI == E) {
    LR->segments.push_back(Seg);
    WriteI = ReadI = LR->end();
  } else
    Spills.push_back(Seg);
}

// Merge as many spilled segments as possible into the gap between WriteI
// and ReadI. Advance WriteI to reflect the inserted segments.
void LiveRangeUpdater::mergeSpills() {
  // Perform a backwards merge of Spills and [SpillI;WriteI).
  size_t GapSize = ReadI - WriteI;
  size_t NumMoved = std::min(Spills.size(), GapSize);
  LiveRange::iterator Src = WriteI;
  LiveRange::iterator Dst = Src + NumMoved;
  LiveRange::iterator SpillSrc = Spills.end();
  LiveRange::iterator B = LR->begin();

  // This is the new WriteI position after merging spills.
  WriteI = Dst;

  // Now merge Src and Spills backwards.
  while (Src != Dst) {
    if (Src != B && Src[-1].start > SpillSrc[-1].start)
      *--Dst = *--Src;
    else
      *--Dst = *--SpillSrc;
  }
  assert(NumMoved == size_t(Spills.end() - SpillSrc));
  Spills.erase(SpillSrc, Spills.end());
}

void LiveRangeUpdater::flush() {
  if (!isDirty())
    return;
  // Clear the dirty state.
  LastStart = SlotIndex();

  assert(LR && "Cannot add to a null destination");

  // Nothing to merge?
  if (Spills.empty()) {
    LR->segments.erase(WriteI, ReadI);
    LR->verify();
    return;
  }

  // Resize the WriteI - ReadI gap to match Spills.
  size_t GapSize = ReadI - WriteI;
  if (GapSize < Spills.size()) {
    // The gap is too small. Make some room.
    size_t WritePos = WriteI - LR->begin();
    LR->segments.insert(ReadI, Spills.size() - GapSize, LiveRange::Segment());
    // This also invalidated ReadI, but it is recomputed below.
    WriteI = LR->begin() + WritePos;
  } else {
    // Shrink the gap if necessary.
    LR->segments.erase(WriteI + Spills.size(), ReadI);
  }
  ReadI = WriteI + Spills.size();
  mergeSpills();
  LR->verify();
}

unsigned ConnectedVNInfoEqClasses::Classify(const LiveRange &LR) {
  // Create initial equivalence classes.
  EqClass.clear();
  EqClass.grow(LR.getNumValNums());

  const VNInfo *used = nullptr, *unused = nullptr;

  // Determine connections.
  for (const VNInfo *VNI : LR.valnos) {
    // Group all unused values into one class.
    if (VNI->isUnused()) {
      if (unused)
        EqClass.join(unused->id, VNI->id);
      unused = VNI;
      continue;
    }
    used = VNI;
    if (VNI->isPHIDef()) {
      const MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      assert(MBB && "Phi-def has no defining MBB");
      // Connect to values live out of predecessors.
      for (MachineBasicBlock *Pred : MBB->predecessors())
        if (const VNInfo *PVNI = LR.getVNInfoBefore(LIS.getMBBEndIdx(Pred)))
          EqClass.join(VNI->id, PVNI->id);
    } else {
      // Normal value defined by an instruction. Check for two-addr redef.
      // FIXME: This could be coincidental. Should we really check for a tied
      // operand constraint?
      // Note that VNI->def may be a use slot for an early clobber def.
      if (const VNInfo *UVNI = LR.getVNInfoBefore(VNI->def))
        EqClass.join(VNI->id, UVNI->id);
    }
  }

  // Lump all the unused values in with the last used value.
  if (used && unused)
    EqClass.join(used->id, unused->id);

  EqClass.compress();
  return EqClass.getNumClasses();
}
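
// Example (sketch, with hypothetical locals LIS, MRI, LI and a register class
// RC): the intended use is to split a virtual register whose values form
// several disconnected components into one interval per component, roughly
// what LiveIntervals::splitSeparateComponents does:
//
//   ConnectedVNInfoEqClasses ConEQ(LIS);
//   unsigned NumComp = ConEQ.Classify(LI);
//   if (NumComp > 1) {
//     SmallVector<LiveInterval *, 8> NewLIs;
//     for (unsigned I = 1; I < NumComp; ++I)
//       NewLIs.push_back(&LIS.createEmptyInterval(
//           MRI.createVirtualRegister(RC)));      // one per extra class
//     ConEQ.Distribute(LI, NewLIs.data(), MRI);
//   }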

void ConnectedVNInfoEqClasses::Distribute(LiveInterval &LI, LiveInterval *LIV[],
                                          MachineRegisterInfo &MRI) {
  // Rewrite instructions.
  for (MachineOperand &MO :
       llvm::make_early_inc_range(MRI.reg_operands(LI.reg()))) {
    MachineInstr *MI = MO.getParent();
    const VNInfo *VNI;
    if (MI->isDebugValue()) {
      // DBG_VALUE instructions don't have slot indexes, so get the index of
      // the instruction before them. The value is defined there too.
      SlotIndex Idx = LIS.getSlotIndexes()->getIndexBefore(*MI);
      VNI = LI.Query(Idx).valueOut();
    } else {
      SlotIndex Idx = LIS.getInstructionIndex(*MI);
      LiveQueryResult LRQ = LI.Query(Idx);
      VNI = MO.readsReg() ? LRQ.valueIn() : LRQ.valueDefined();
    }
    // In the case of an <undef> use that isn't tied to any def, VNI will be
    // NULL. If the use is tied to a def, VNI will be the defined value.
    if (!VNI)
      continue;
    if (unsigned EqClass = getEqClass(VNI))
      MO.setReg(LIV[EqClass - 1]->reg());
  }

  // Distribute subregister liveranges.
  if (LI.hasSubRanges()) {
    unsigned NumComponents = EqClass.getNumClasses();
    SmallVector<unsigned, 8> VNIMapping;
    SmallVector<LiveInterval::SubRange*, 8> SubRanges;
    BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
    for (LiveInterval::SubRange &SR : LI.subranges()) {
      // Create new subranges in the split intervals and construct a mapping
      // for the VNInfos in the subrange.
      unsigned NumValNos = SR.valnos.size();
      VNIMapping.clear();
      VNIMapping.reserve(NumValNos);
      SubRanges.clear();
      SubRanges.resize(NumComponents-1, nullptr);
      for (unsigned I = 0; I < NumValNos; ++I) {
        const VNInfo &VNI = *SR.valnos[I];
        unsigned ComponentNum;
        if (VNI.isUnused()) {
          ComponentNum = 0;
        } else {
          const VNInfo *MainRangeVNI = LI.getVNInfoAt(VNI.def);
          assert(MainRangeVNI != nullptr
                 && "SubRange def must have corresponding main range def");
          ComponentNum = getEqClass(MainRangeVNI);
          if (ComponentNum > 0 && SubRanges[ComponentNum-1] == nullptr) {
            SubRanges[ComponentNum-1]
              = LIV[ComponentNum-1]->createSubRange(Allocator, SR.LaneMask);
          }
        }
        VNIMapping.push_back(ComponentNum);
      }
      DistributeRange(SR, SubRanges.data(), VNIMapping);
    }
    LI.removeEmptySubRanges();
  }

  // Distribute main liverange.
  DistributeRange(LI, LIV, EqClass);
}