//===- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "Spiller.h"
#include "SplitKit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalUnion.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cstdint>
#include <memory>
#include <queue>
#include <tuple>
#include <utility>

using namespace llvm;
#define DEBUG_TYPE "regalloc"
STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");
static cl::opt<SplitEditor::ComplementSpillMode> SplitSpillMode(
    "split-spill-mode", cl::Hidden,
    cl::desc("Spill mode for splitting live ranges"),
    cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
               clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"),
               clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed")),
    cl::init(SplitEditor::SM_Speed));
static cl::opt<unsigned>
LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
                             cl::desc("Last chance recoloring max depth"),
                             cl::init(5));
static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
    "lcr-max-interf", cl::Hidden,
    cl::desc("Last chance recoloring maximum number of considered"
             " interference at a time"),
    cl::init(8));
static cl::opt<bool> ExhaustiveSearch(
    "exhaustive-register-search", cl::NotHidden,
    cl::desc("Exhaustive Search for registers bypassing the depth "
             "and interference cutoffs of last chance recoloring"),
    cl::Hidden);
static cl::opt<bool> EnableLocalReassignment(
    "enable-local-reassign", cl::Hidden,
    cl::desc("Local reassignment can yield better allocation decisions, but "
             "may be compile time intensive"),
    cl::init(false));
static cl::opt<bool> EnableDeferredSpilling(
    "enable-deferred-spilling", cl::Hidden,
    cl::desc("Instead of spilling a variable right away, defer the actual "
             "code insertion to the end of the allocation. That way the "
             "allocator might still find a suitable coloring for this "
             "variable because of other evicted variables."),
    cl::init(false));
static cl::opt<unsigned>
    HugeSizeForSplit("huge-size-for-split", cl::Hidden,
                     cl::desc("A threshold of live range size which may cause "
                              "high compile time cost in global splitting."),
                     cl::init(5000));
// FIXME: Find a good default for this flag and remove the flag.
static cl::opt<unsigned>
CSRFirstTimeCost("regalloc-csr-first-time-cost",
                 cl::desc("Cost for first time use of callee-saved register."),
                 cl::init(0), cl::Hidden);
static cl::opt<bool> ConsiderLocalIntervalCost(
    "consider-local-interval-cost", cl::Hidden,
    cl::desc("Consider the cost of local intervals created by a split "
             "candidate when choosing the best split candidate."),
    cl::init(false));
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);
namespace {

class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
  // Convenient shortcuts.
  using PQueue = std::priority_queue<std::pair<unsigned, unsigned>>;
  using SmallLISet = SmallPtrSet<LiveInterval *, 4>;
  using SmallVirtRegSet = SmallSet<unsigned, 16>;
  // Shortcuts to some useful interface.
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  RegisterClassInfo RCI;

  SlotIndexes *Indexes;
  MachineBlockFrequencyInfo *MBFI;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  MachineOptimizationRemarkEmitter *ORE;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  std::unique_ptr<Spiller> SpillerInstance;
  PQueue Queue;
  unsigned NextCascade;
  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// Live range is in memory. Because of other evictions, it might get moved
    /// in a register in the end.
    RS_Memory,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };
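
  // For illustration: a range that resists allocation only ever moves forward
  // through these stages, e.g.
  //   RS_New -> RS_Assign -> RS_Split -> RS_Split2 -> RS_Spill -> RS_Done,
  // and a requeued range never moves backward, which is what guarantees that
  // the split/spill loop terminates.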
  // Enum CutOffStage to keep track of whether the register allocation failed
  // because of the cutoffs encountered in last chance recoloring.
  // Note: This is used as bitmask. New value should be next power of 2.
  enum CutOffStage {
    // No cutoffs encountered
    CO_None = 0,

    // lcr-max-depth cutoff encountered
    CO_Depth = 1,

    // lcr-max-interf cutoff encountered
    CO_Interf = 2
  };

  uint8_t CutOffInfo;

#ifndef NDEBUG
  static const char *const StageName[];
#endif
  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage = RS_New;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade = 0;

    RegInfo() = default;
  };
  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }
  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (; Begin != End; ++Begin) {
      unsigned Reg = *Begin;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }
  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints = 0; ///< Total number of broken hints.
    float MaxWeight = 0;      ///< Maximum spill weight evicted.

    EvictionCost() = default;

    bool isMax() const { return BrokenHints == ~0u; }

    void setMax() { BrokenHints = ~0u; }

    void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }

    bool operator<(const EvictionCost &O) const {
      return std::tie(BrokenHints, MaxWeight) <
             std::tie(O.BrokenHints, O.MaxWeight);
    }
  };
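
  // For illustration: the comparison above is lexicographic, so any number of
  // broken hints outweighs spill weight.
  //   EvictionCost A;                // 0 broken hints
  //   A.MaxWeight = 10.0f;
  //   EvictionCost B;
  //   B.setBrokenHints(1);           // 1 broken hint
  //   B.MaxWeight = 0.1f;
  //   assert(A < B);                 // A is cheaper despite its larger weight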
  /// EvictionTrack - Keeps track of past evictions in order to optimize region
  /// split decision.
  class EvictionTrack {

  public:
    using EvictorInfo =
        std::pair<unsigned /* evictor */, unsigned /* physreg */>;
    using EvicteeInfo = llvm::DenseMap<unsigned /* evictee */, EvictorInfo>;

  private:
    /// Each Vreg that has been evicted in the last stage of selectOrSplit will
    /// be mapped to the evictor Vreg and the PhysReg it was evicted from.
    EvicteeInfo Evictees;

  public:
    /// Clear all eviction information.
    void clear() { Evictees.clear(); }

    /// Clear eviction information for the given evictee Vreg.
    /// E.g. when Vreg gets a new allocation, the old eviction info is no
    /// longer relevant.
    /// \param Evictee The evictee Vreg for whom we want to clear collected
    /// eviction info.
    void clearEvicteeInfo(unsigned Evictee) { Evictees.erase(Evictee); }

    /// Track new eviction.
    /// The Evictor vreg has evicted the Evictee vreg from Physreg.
    /// \param PhysReg The physical register Evictee was evicted from.
    /// \param Evictor The evictor Vreg that evicted Evictee.
    /// \param Evictee The evictee Vreg.
    void addEviction(unsigned PhysReg, unsigned Evictor, unsigned Evictee) {
      Evictees[Evictee].first = Evictor;
      Evictees[Evictee].second = PhysReg;
    }

    /// Return the Evictor Vreg which evicted Evictee Vreg from PhysReg.
    /// \param Evictee The evictee vreg.
    /// \return The Evictor vreg which evicted Evictee vreg from PhysReg. 0 if
    /// nobody has evicted Evictee from PhysReg.
    EvictorInfo getEvictor(unsigned Evictee) {
      if (Evictees.count(Evictee)) {
        return Evictees[Evictee];
      }

      return EvictorInfo(0, 0);
    }
  };
  // Keeps track of past evictions in order to optimize region split decision.
  EvictionTrack LastEvicted;

  std::unique_ptr<SplitAnalysis> SA;
  std::unique_ptr<SplitEditor> SE;
  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (unsigned i : LiveBundles.set_bits())
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };
  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum : unsigned { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

  /// Callee-save register cost, calculated once per machine function.
  BlockFrequency CSRCost;

  /// Run or not the local reassignment heuristic. This information is
  /// obtained from the TargetSubtargetInfo.
  bool EnableLocalReassign;

  /// Enable or not the consideration of the cost of local intervals created
  /// by a split candidate when choosing the best split candidate.
  bool EnableAdvancedRASplitCost;

  /// Set of broken hints that may be reconciled later because of eviction.
  SmallSetVector<LiveInterval *, 8> SetOfBrokenHints;
public:
  RAGreedy();

  /// Return the pass name.
  StringRef getPassName() const override { return "Greedy Register Allocator"; }

  /// RAGreedy analysis usage.
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override;
  Spiller &spiller() override { return *SpillerInstance; }
  void enqueue(LiveInterval *LI) override;
  LiveInterval *dequeue() override;
  unsigned selectOrSplit(LiveInterval &, SmallVectorImpl<unsigned> &) override;
  void aboutToRemoveInterval(LiveInterval &) override;

  /// Perform register allocation.
  bool runOnMachineFunction(MachineFunction &mf) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoPHIs);
  }

  static char ID;
private:
  unsigned selectOrSplitImpl(LiveInterval &, SmallVectorImpl<unsigned> &,
                             SmallVirtRegSet &, unsigned = 0);
  bool LRE_CanEraseVirtReg(unsigned) override;
  void LRE_WillShrinkVirtReg(unsigned) override;
  void LRE_DidCloneVirtReg(unsigned, unsigned) override;
  void enqueue(PQueue &CurQueue, LiveInterval *LI);
  LiveInterval *dequeue(PQueue &CurQueue);
  BlockFrequency calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency &);
  bool addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  bool growRegion(GlobalSplitCandidate &Cand);
  bool splitCanCauseEvictionChain(unsigned Evictee, GlobalSplitCandidate &Cand,
                                  unsigned BBNumber,
                                  const AllocationOrder &Order);
  bool splitCanCauseLocalSpill(unsigned VirtRegToSplit,
                               GlobalSplitCandidate &Cand, unsigned BBNumber,
                               const AllocationOrder &Order);
  BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate &,
                                     const AllocationOrder &Order,
                                     bool *CanCauseEvictionChain);
  bool calcCompactRegion(GlobalSplitCandidate &);
  void splitAroundRegion(LiveRangeEdit &, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float> &);
  unsigned canReassign(LiveInterval &VirtReg, unsigned PrevReg);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval &, unsigned, bool, EvictionCost &,
                            const SmallVirtRegSet &);
  bool canEvictInterferenceInRange(LiveInterval &VirtReg, unsigned PhysReg,
                                   SlotIndex Start, SlotIndex End,
                                   EvictionCost &MaxCost);
  unsigned getCheapestEvicteeWeight(const AllocationOrder &Order,
                                    LiveInterval &VirtReg, SlotIndex Start,
                                    SlotIndex End, float *BestEvictWeight);
  void evictInterference(LiveInterval &, unsigned,
                         SmallVectorImpl<unsigned> &);
  bool mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
                                  SmallLISet &RecoloringCandidates,
                                  const SmallVirtRegSet &FixedRegisters);

  unsigned tryAssign(LiveInterval &, AllocationOrder &,
                     SmallVectorImpl<unsigned> &,
                     const SmallVirtRegSet &);
  unsigned tryEvict(LiveInterval &, AllocationOrder &,
                    SmallVectorImpl<unsigned> &, unsigned,
                    const SmallVirtRegSet &);
  unsigned tryRegionSplit(LiveInterval &, AllocationOrder &,
                          SmallVectorImpl<unsigned> &);
  unsigned isSplitBenefitWorthCost(LiveInterval &VirtReg);
  /// Calculate cost of region splitting.
  unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
                                    AllocationOrder &Order,
                                    BlockFrequency &BestCost,
                                    unsigned &NumCands, bool IgnoreCSR,
                                    bool *CanCauseEvictionChain = nullptr);
  /// Perform region splitting.
  unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
                         bool HasCompact,
                         SmallVectorImpl<unsigned> &NewVRegs);
  /// Check other options before using a callee-saved register for the first
  /// time.
  unsigned tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order,
                                 unsigned PhysReg, unsigned &CostPerUseLimit,
                                 SmallVectorImpl<unsigned> &NewVRegs);
  void initializeCSRCost();
  unsigned tryBlockSplit(LiveInterval &, AllocationOrder &,
                         SmallVectorImpl<unsigned> &);
  unsigned tryInstructionSplit(LiveInterval &, AllocationOrder &,
                               SmallVectorImpl<unsigned> &);
  unsigned tryLocalSplit(LiveInterval &, AllocationOrder &,
                         SmallVectorImpl<unsigned> &);
  unsigned trySplit(LiveInterval &, AllocationOrder &,
                    SmallVectorImpl<unsigned> &,
                    const SmallVirtRegSet &);
  unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
                                   SmallVectorImpl<unsigned> &,
                                   SmallVirtRegSet &, unsigned);
  bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<unsigned> &,
                               SmallVirtRegSet &, unsigned);
  void tryHintRecoloring(LiveInterval &);
  void tryHintsRecoloring();
  /// Model the information carried by one end of a copy.
  struct HintInfo {
    /// The frequency of the copy.
    BlockFrequency Freq;

    /// The virtual register or physical register.
    unsigned Reg;

    /// Its currently assigned register.
    /// In case of a physical register Reg == PhysReg.
    unsigned PhysReg;

    HintInfo(BlockFrequency Freq, unsigned Reg, unsigned PhysReg)
        : Freq(Freq), Reg(Reg), PhysReg(PhysReg) {}
  };
  using HintsInfo = SmallVector<HintInfo, 4>;

  BlockFrequency getBrokenHintFreq(const HintsInfo &, unsigned);
  void collectHintInfo(unsigned, HintsInfo &);
  bool isUnusedCalleeSavedReg(unsigned PhysReg) const;
  /// Compute and report the number of spills and reloads for a loop.
  void reportNumberOfSpillsReloads(MachineLoop *L, unsigned &Reloads,
                                   unsigned &FoldedReloads, unsigned &Spills,
                                   unsigned &FoldedSpills);

  /// Report the number of spills and reloads for each loop.
  void reportNumberOfSpillsReloads() {
    for (MachineLoop *L : *Loops) {
      unsigned Reloads, FoldedReloads, Spills, FoldedSpills;
      reportNumberOfSpillsReloads(L, Reloads, FoldedReloads, Spills,
                                  FoldedSpills);
    }
  }
};

} // end anonymous namespace
char RAGreedy::ID = 0;
char &llvm::RAGreedyID = RAGreedy::ID;
INITIALIZE_PASS_BEGIN(RAGreedy, "greedy",
                      "Greedy Register Allocator", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(RegisterCoalescer)
INITIALIZE_PASS_DEPENDENCY(MachineScheduler)
INITIALIZE_PASS_DEPENDENCY(LiveStacks)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_DEPENDENCY(LiveRegMatrix)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(SpillPlacement)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(RAGreedy, "greedy",
                    "Greedy Register Allocator", false, false)
#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Memory",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = (2007 / 2048.0f); // 0.97998046875
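
// For illustration (an assumption about the typical call sites, not a spec):
// a hysteresis comparison accepts a new candidate only when
//   NewCost < Hysteresis * OldCost,
// i.e. the new candidate must be roughly 2% better before it displaces the
// incumbent, so ties and noise-level differences keep the earlier decision.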
FunctionPass *llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy() : MachineFunctionPass(ID) {
  initializeRAGreedyPass(*PassRegistry::getPassRegistry());
}
void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addPreserved<MachineBlockFrequencyInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//
bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  LiveInterval &LI = LIS->getInterval(VirtReg);
  if (VRM->hasPhys(VirtReg)) {
    Matrix->unassign(LI);
    aboutToRemoveInterval(LI);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  // Nonetheless, clear the live-range so that the debug
  // dump will show the right state for that VirtReg.
  LI.clear();
  return false;
}
void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  enqueue(&LI);
}
void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it to
  // be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned,
  // rather than remaining stuck in the same stage as the parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}
void RAGreedy::releaseMemory() {
  SpillerInstance.reset();
  ExtraRegInfo.clear();
  GlobalCand.clear();
}
void RAGreedy::enqueue(LiveInterval *LI) { enqueue(Queue, LI); }
void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(Register::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else if (ExtraRegInfo[Reg].Stage == RS_Memory) {
    // Memory operands should be considered last.
    // Change the priority such that memory operands are assigned in
    // the reverse order that they came in.
    // TODO: Make this a member variable and probably do something about hints.
    static unsigned MemOp = 0;
    Prio = MemOp++;
  } else {
    // Giant live ranges fall back to the global assignment heuristic, which
    // prevents excessive spilling in pathological cases.
    bool ReverseLocal = TRI->reverseLocalAssignment();
    const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
    bool ForceGlobal = !ReverseLocal &&
      (Size / SlotIndex::InstrDist) > (2 * RC.getNumRegs());

    if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
        LIS->intervalIsInOneMBB(*LI)) {
      // Allocate original local ranges in linear instruction order. Since they
      // are singly defined, this produces optimal coloring in the absence of
      // global interference and other constraints.
      if (!ReverseLocal)
        Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
      else {
        // Allocating bottom up may allow many short LRGs to be assigned first
        // to one of the cheap registers. This could be much faster for very
        // large blocks on targets with many physical registers.
        Prio = Indexes->getZeroIndex().getInstrDistance(LI->endIndex());
      }
      Prio |= RC.AllocationPriority << 24;
    } else {
      // Allocate global and split ranges in long->short order. Long ranges that
      // don't fit should be spilled (or split) ASAP so they don't create
      // interference. Mark a bit to prioritize global above local ranges.
      Prio = (1u << 29) + Size;
    }
    // Mark a higher bit to prioritize global and local above RS_Split.
    Prio |= (1u << 31);

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
  // The virtual register number is a tie breaker for same-sized ranges.
  // Give lower vreg numbers higher priority to assign them first.
  CurQueue.push(std::make_pair(Prio, ~Reg));
}
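
// For illustration of the priority encoding above: the max-heap pops
// global/split work (bit 31) before deferred RS_Split ranges, hinted ranges
// (bit 30) before unhinted ones, and, for equal priorities, ~Reg makes the
// pair with the lowest vreg number compare greatest, so it is dequeued first.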
LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }

LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
  if (CurQueue.empty())
    return nullptr;
  LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
  CurQueue.pop();
  return LI;
}
//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//
/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<unsigned> &NewVRegs,
                             const SmallVirtRegSet &FixedRegisters) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!Matrix->checkInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      LLVM_DEBUG(dbgs() << "missed hint " << printReg(Hint, TRI) << '\n');
      EvictionCost MaxCost;
      MaxCost.setBrokenHints(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost, FixedRegisters)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
      // Record the missed hint, we may be able to recover
      // at the end if the surrounding allocation changed.
      SetOfBrokenHints.insert(&VirtReg);
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is available at cost "
                    << Cost << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost, FixedRegisters);
  return CheapReg ? CheapReg : PhysReg;
}
//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//
unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo, Matrix);
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (PhysReg == PrevReg)
      continue;

    MCRegUnitIterator Units(PhysReg, TRI);
    for (; Units.isValid(); ++Units) {
      // Instantiate a "subquery", not to be confused with the Queries array.
      LiveIntervalUnion::Query subQ(VirtReg, Matrix->getLiveUnions()[*Units]);
      if (subQ.checkInterference())
        break;
    }
    // If no units have interference, break out with the current PhysReg.
    if (!Units.isValid())
      break;
  }
  if (PhysReg)
    LLVM_DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
                      << printReg(PrevReg, TRI) << " to "
                      << printReg(PhysReg, TRI) << '\n');
  return PhysReg;
}
/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  if (A.weight > B.weight) {
    LLVM_DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
    return true;
  }

  return false;
}
/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost,
                                    const SmallVirtRegSet &FixedRegisters) {
  // It is only possible to evict virtual register interference.
  if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
    return false;

  bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);

  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      assert(Register::isVirtualRegister(Intf->reg) &&
             "Only expecting virtual register interference from query");

      // Do not allow eviction of a virtual register if we are in the middle
      // of last-chance recoloring and this virtual register is one that we
      // have scavenged a physical register for.
      if (FixedRegisters.count(Intf->reg))
        return false;

      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      if (Urgent)
        continue;
      // Apply the eviction policy for non-urgent evictions.
      if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
      // If !MaxCost.isMax(), then we're just looking for a cheap register.
      // Evicting another local live range in this case could lead to suboptimal
      // coloring.
      if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
          (!EnableLocalReassign || !canReassign(*Intf, PhysReg))) {
        return false;
      }
    }
  }
  MaxCost = Cost;
  return true;
}
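
// For illustration of the cascade invariant: an eviction is only allowed when
// the evictor's cascade number is strictly greater than the evictee's, and
// evictInterference() stamps every evictee with the evictor's number. A range
// stamped with cascade N can therefore only be displaced by a strictly newer
// cascade, so eviction chains are totally ordered and can never loop back.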
/// Return true if all interferences between VirtReg and PhysReg between
/// Start and End can be evicted.
///
/// \param VirtReg Live range that is about to be assigned.
/// \param PhysReg Desired register for assignment.
/// \param Start   Start of range to look for interferences.
/// \param End     End of range to look for interferences.
/// \param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// \return True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterferenceInRange(LiveInterval &VirtReg,
                                           unsigned PhysReg, SlotIndex Start,
                                           SlotIndex End,
                                           EvictionCost &MaxCost) {
  EvictionCost Cost;

  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];

      // Check if interference overlaps the segment in interest.
      if (!Intf->overlaps(Start, End))
        continue;

      // Cannot evict non virtual reg interference.
      if (!Register::isVirtualRegister(Intf->reg))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;

      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
    }
  }

  if (Cost.MaxWeight == 0)
    return false;

  MaxCost = Cost;
  return true;
}
/// Return the physical register that will be best
/// candidate for eviction by a local split interval that will be created
/// between Start and End.
///
/// \param Order            The allocation order
/// \param VirtReg          Live range that is about to be assigned.
/// \param Start            Start of range to look for interferences
/// \param End              End of range to look for interferences
/// \param BestEvictweight  The eviction cost of that eviction
/// \return The PhysReg which is the best candidate for eviction and the
/// eviction cost in BestEvictweight
unsigned RAGreedy::getCheapestEvicteeWeight(const AllocationOrder &Order,
                                            LiveInterval &VirtReg,
                                            SlotIndex Start, SlotIndex End,
                                            float *BestEvictweight) {
  EvictionCost BestEvictCost;
  BestEvictCost.setMax();
  BestEvictCost.MaxWeight = VirtReg.weight;
  unsigned BestEvicteePhys = 0;

  // Go over all physical registers and find the best candidate for eviction
  for (auto PhysReg : Order.getOrder()) {
    if (!canEvictInterferenceInRange(VirtReg, PhysReg, Start, End,
                                     BestEvictCost))
      continue;

    // Best so far.
    BestEvicteePhys = PhysReg;
  }
  *BestEvictweight = BestEvictCost.MaxWeight;
  return BestEvicteePhys;
}
/// evictInterference - Evict any interferring registers that prevent VirtReg
/// from being assigned to Physreg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  LLVM_DEBUG(dbgs() << "evicting " << printReg(PhysReg, TRI)
                    << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval *, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // We usually have the interfering VRegs cached so collectInterferingVRegs()
    // should be fast. We may need to recalculate when different physregs
    // overlap the same register unit, so we had different SubRanges queried
    // against it.
    Q.collectInterferingVRegs();
    ArrayRef<LiveInterval *> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
    LiveInterval *Intf = Intfs[i];
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg))
      continue;

    LastEvicted.addEviction(PhysReg, VirtReg.reg, Intf->reg);

    Matrix->unassign(*Intf);
    assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf->reg);
  }
}
/// Returns true if the given \p PhysReg is a callee saved register and has not
/// been used for allocation yet.
bool RAGreedy::isUnusedCalleeSavedReg(unsigned PhysReg) const {
  unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
  if (CSR == 0)
    return false;

  return !Matrix->isPhysRegUsed(PhysReg);
}
/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order   Physregs to try.
/// @return        Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs,
                            unsigned CostPerUseLimit,
                            const SmallVirtRegSet &FixedRegisters) {
  NamedRegionTimer T("evict", "Evict", TimerGroupName, TimerGroupDescription,
                     TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost;
  BestCost.setMax();
  unsigned BestPhys = 0;
  unsigned OrderLimit = Order.getOrder().size();

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;

    // Check if any registers in RC are below CostPerUseLimit.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
    unsigned MinCost = RegClassInfo.getMinCost(RC);
    if (MinCost >= CostPerUseLimit) {
      LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = "
                        << MinCost << ", no cheaper registers to be found.\n");
      return 0;
    }

    // It is normal for register classes to have a long tail of registers with
    // the same cost. We don't need to look at them if they're too expensive.
    if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
      OrderLimit = RegClassInfo.getLastCostChange(RC);
      LLVM_DEBUG(dbgs() << "Only trying the first " << OrderLimit
                        << " regs.\n");
    }
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next(OrderLimit)) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
      LLVM_DEBUG(
          dbgs() << printReg(PhysReg, TRI) << " would clobber CSR "
                 << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
                 << '\n');
      continue;
    }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost,
                              FixedRegisters))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint())
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}
//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//
/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   BlockFrequency &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  BlockFrequency StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = (BI.LiveOut &&
               !LIS->getInstructionFromIndex(BI.LastInstr)->isImplicitDef())
                  ? SpillPlacement::PrefReg
                  : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef.isValid();

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) {
        BC.Entry = SpillPlacement::MustSpill;
        ++Ins;
      } else if (Intf.first() < BI.FirstInstr) {
        BC.Entry = SpillPlacement::PrefSpill;
        ++Ins;
      } else if (Intf.first() < BI.LastInstr) {
        ++Ins;
      }

      // Abort if the spill cannot be inserted at the MBB's start
      if (((BC.Entry == SpillPlacement::MustSpill) ||
           (BC.Entry == SpillPlacement::PrefSpill)) &&
          SlotIndex::isEarlierInstr(BI.FirstInstr,
                                    SA->getFirstSplitPoint(BC.Number)))
        return false;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) {
        BC.Exit = SpillPlacement::MustSpill;
        ++Ins;
      } else if (Intf.last() > BI.LastInstr) {
        BC.Exit = SpillPlacement::PrefSpill;
        ++Ins;
      } else if (Intf.last() > BI.FirstInstr) {
        ++Ins;
      }
    }

    // Accumulate the total frequency of inserted spill code.
    while (Ins--)
      StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}
/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
bool RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Abort if the spill cannot be inserted at the MBB's start
    MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
    if (!MBB->empty() &&
        SlotIndex::isEarlierInstr(LIS->getInstructionIndex(MBB->instr_front()),
                                  SA->getFirstSplitPoint(Number)))
      return false;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      SpillPlacer->addConstraints(makeArrayRef(BCS, B));
      B = 0;
    }
  }

  SpillPlacer->addConstraints(makeArrayRef(BCS, B));
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
  return true;
}
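
// For illustration: constraints and links are flushed to SpillPlacer in
// batches of GroupSize (8); the two unconditional calls at the end push
// whatever remains in the partially filled BCS/TBS arrays, relying on
// addConstraints/addLinks accepting an empty ArrayRef when B or T is 0.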
bool RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  while (true) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    auto NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg) {
      if (!addThroughConstraints(Cand.Intf, NewBlocks))
        return false;
    } else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  LLVM_DEBUG(dbgs() << ", v=" << Visited);
  return true;
}
/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  LLVM_DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  BlockFrequency Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    LLVM_DEBUG(dbgs() << ", none.\n");
    return false;
  }

  if (!growRegion(Cand)) {
    LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
    return false;
  }

  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    LLVM_DEBUG(dbgs() << ", none.\n");
    return false;
  }

  LLVM_DEBUG({
    for (int i : Cand.LiveBundles.set_bits())
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}
/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
BlockFrequency RAGreedy::calcSpillCost() {
  BlockFrequency Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}
/// Check if splitting Evictee will create a local split interval in
/// basic block number BBNumber that may cause a bad eviction chain. This is
/// intended to prevent bad eviction sequences like:
/// movl %ebp, 8(%esp)           # 4-byte Spill
/// ...
/// movl 16(%esp), %ebp          # 4-byte Reload
///
/// Such sequences are created in 2 scenarios:
///
/// Scenario #1:
/// %0 is evicted from physreg0 by %1.
/// Evictee %0 is intended for region splitting with split candidate
/// physreg0 (the reg %0 was evicted from).
/// Region splitting creates a local interval because of interference with the
/// evictor %1 (normally region splitting creates 2 intervals, the "by reg"
/// and "by stack" intervals; a local interval is created when interference
/// occurs).
/// One of the split intervals ends up evicting %2 from physreg1.
/// Evictee %2 is intended for region splitting with split candidate
/// physreg1.
/// One of the split intervals ends up evicting %3 from physreg2, etc.
///
/// Scenario #2:
/// %0 is evicted from physreg0 by %1.
/// %2 is evicted from physreg2 by %3 etc.
/// Evictee %0 is intended for region splitting with split candidate
/// physreg1.
/// Region splitting creates a local interval because of interference with the
/// evictor %1.
/// One of the split intervals ends up evicting back original evictor %1
/// from physreg0 (the reg %0 was evicted from).
/// Another evictee %2 is intended for region splitting with split candidate
/// physreg1.
/// One of the split intervals ends up evicting %3 from physreg2, etc.
///
/// \param Evictee  The register considered to be split.
/// \param Cand     The split candidate that determines the physical register
///                 we are splitting for and the interferences.
/// \param BBNumber The number of a BB for which the region split process will
///                 create a local split interval.
/// \param Order    The physical registers that may get evicted by a split
///                 artifact of Evictee.
/// \return True if splitting Evictee may cause a bad eviction chain, false
/// otherwise.
bool RAGreedy::splitCanCauseEvictionChain(unsigned Evictee,
                                          GlobalSplitCandidate &Cand,
                                          unsigned BBNumber,
                                          const AllocationOrder &Order) {
  EvictionTrack::EvictorInfo VregEvictorInfo = LastEvicted.getEvictor(Evictee);
  unsigned Evictor = VregEvictorInfo.first;
  unsigned PhysReg = VregEvictorInfo.second;

  // No actual evictor.
  if (!Evictor || !PhysReg)
    return false;

  float MaxWeight = 0;
  unsigned FutureEvictedPhysReg =
      getCheapestEvicteeWeight(Order, LIS->getInterval(Evictee),
                               Cand.Intf.first(), Cand.Intf.last(), &MaxWeight);

  // The bad eviction chain occurs when either the split candidate is the
  // evicting reg or one of the split artifacts will evict the evicting reg.
  if ((PhysReg != Cand.PhysReg) && (PhysReg != FutureEvictedPhysReg))
    return false;

  Cand.Intf.moveToBlock(BBNumber);

  // Check to see if the Evictor contains interference (with Evictee) in the
  // given BB. If so, this interference caused the eviction of Evictee from
  // PhysReg. This suggests that we will create a local interval during the
  // region split to avoid this interference. This local interval may cause a
  // bad eviction chain.
  if (!LIS->hasInterval(Evictor))
    return false;
  LiveInterval &EvictorLI = LIS->getInterval(Evictor);
  if (EvictorLI.FindSegmentContaining(Cand.Intf.first()) == EvictorLI.end())
    return false;

  // Now, check to see if the local interval we will create is going to be
  // expensive enough to evict somebody. If so, this may cause a bad eviction
  // chain.
  VirtRegAuxInfo VRAI(*MF, *LIS, VRM, getAnalysis<MachineLoopInfo>(), *MBFI);
  float splitArtifactWeight =
      VRAI.futureWeight(LIS->getInterval(Evictee),
                        Cand.Intf.first().getPrevIndex(), Cand.Intf.last());
  if (splitArtifactWeight >= 0 && splitArtifactWeight < MaxWeight)
    return false;

  return true;
}
/// Check if splitting VirtRegToSplit will create a local split interval
/// in basic block number BBNumber that may cause a spill.
///
/// \param VirtRegToSplit The register considered to be split.
/// \param Cand           The split candidate that determines the physical
///                       register we are splitting for and the interferences.
/// \param BBNumber       The number of a BB for which the region split process
///                       will create a local split interval.
/// \param Order          The physical registers that may get evicted by a
///                       split artifact of VirtRegToSplit.
/// \return True if splitting VirtRegToSplit may cause a spill, false
/// otherwise.
bool RAGreedy::splitCanCauseLocalSpill(unsigned VirtRegToSplit,
                                       GlobalSplitCandidate &Cand,
                                       unsigned BBNumber,
                                       const AllocationOrder &Order) {
  Cand.Intf.moveToBlock(BBNumber);

  // Check if the local interval will find a non interfering assignment.
  for (auto PhysReg : Order.getOrder()) {
    if (!Matrix->checkInterference(Cand.Intf.first().getPrevIndex(),
                                   Cand.Intf.last(), PhysReg))
      return false;
  }

  // Check if the local interval will evict a cheaper interval.
  float CheapestEvictWeight = 0;
  unsigned FutureEvictedPhysReg = getCheapestEvicteeWeight(
      Order, LIS->getInterval(VirtRegToSplit), Cand.Intf.first(),
      Cand.Intf.last(), &CheapestEvictWeight);

  // Have we found an interval that can be evicted?
  if (FutureEvictedPhysReg) {
    VirtRegAuxInfo VRAI(*MF, *LIS, VRM, getAnalysis<MachineLoopInfo>(), *MBFI);
    float splitArtifactWeight =
        VRAI.futureWeight(LIS->getInterval(VirtRegToSplit),
                          Cand.Intf.first().getPrevIndex(), Cand.Intf.last());
    // Will the weight of the local interval be higher than the cheapest
    // evictee weight? If so it will evict it and will not cause a spill.
    if (splitArtifactWeight >= 0 && splitArtifactWeight > CheapestEvictWeight)
      return false;
  }

  // The local interval is not able to find a non interfering assignment and
  // not able to evict a less worthy interval, therefore, it can cause a spill.
  return true;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
                                             const AllocationOrder &Order,
                                             bool *CanCauseEvictionChain) {
  BlockFrequency GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  unsigned VirtRegToSplit = SA->getParent().reg;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, false)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, true)];
    unsigned Ins = 0;

    Cand.Intf.moveToBlock(BC.Number);
    // Check whether a local interval is going to be created during the region
    // split. Calculate the advanced split cost (the cost of local intervals)
    // if the option is enabled.
    if (EnableAdvancedRASplitCost && Cand.Intf.hasInterference() && BI.LiveIn &&
        BI.LiveOut && RegIn && RegOut) {

      if (CanCauseEvictionChain &&
          splitCanCauseEvictionChain(VirtRegToSplit, Cand, BC.Number, Order)) {
        // This interference causes our eviction from this assignment, we might
        // evict somebody else and eventually someone will spill, add that cost.
        // See splitCanCauseEvictionChain for detailed description of scenarios.
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);

        *CanCauseEvictionChain = true;

      } else if (splitCanCauseLocalSpill(VirtRegToSplit, Cand, BC.Number,
                                         Order)) {
        // This interference causes the local interval to spill, add that cost.
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
      }
    }

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    while (Ins--)
      GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, false)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, true)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference()) {
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
        GlobalCost += SpillPlacer->getBlockFrequency(Number);

        // Check whether a local interval is going to be created during the
        // region split.
        if (EnableAdvancedRASplitCost && CanCauseEvictionChain &&
            splitCanCauseEvictionChain(VirtRegToSplit, Cand, Number, Order)) {
          // This interference causes our eviction from this assignment, we
          // might evict somebody else, add that cost.
          // See splitCanCauseEvictionChain for detailed description of
          // scenarios.
          GlobalCost += SpillPlacer->getBlockFrequency(Number);
          GlobalCost += SpillPlacer->getBlockFrequency(Number);

          *CanCauseEvictionChain = true;
        }
      }
      continue;
    }
    // live-in / stack-out or stack-in / live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  LLVM_DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs
                    << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = LIS->getInterval(LREdit.get(i));

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        LLVM_DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                          << " blocks as original.\n");
        // Don't allow repeated splitting as a safe guard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

// Global split has a high compile-time cost, especially for a large live
// range. Return false when the potential benefit will never be worth the
// cost.
unsigned RAGreedy::isSplitBenefitWorthCost(LiveInterval &VirtReg) {
  MachineInstr *MI = MRI->getUniqueVRegDef(VirtReg.reg);
  if (MI && TII->isTriviallyReMaterializable(*MI, AA) &&
      VirtReg.size() > HugeSizeForSplit)
    return false;
  return true;
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<unsigned> &NewVRegs) {
  if (!isSplitBenefitWorthCost(VirtReg))
    return 0;
  unsigned NumCands = 0;
  BlockFrequency SpillCost = calcSpillCost();
  BlockFrequency BestCost;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = BlockFrequency::getMaxFrequency();
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = SpillCost;
    LLVM_DEBUG(dbgs() << "Cost of isolating all blocks = ";
               MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
  }

  bool CanCauseEvictionChain = false;
  unsigned BestCand =
      calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands,
                               false /*IgnoreCSR*/, &CanCauseEvictionChain);

  // Split candidates with compact regions can cause a bad eviction sequence.
  // See splitCanCauseEvictionChain for detailed description of scenarios.
  // To avoid it, we need to compare the cost with the spill cost and not the
  // current max frequency.
  if (HasCompact && (BestCost > SpillCost) && (BestCand != NoCand) &&
      CanCauseEvictionChain) {
    return 0;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
}

unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
                                            AllocationOrder &Order,
                                            BlockFrequency &BestCost,
                                            unsigned &NumCands, bool IgnoreCSR,
                                            bool *CanCauseEvictionChain) {
  unsigned BestCand = NoCand;
  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (IgnoreCSR && isUnusedCalleeSavedReg(PhysReg))
      continue;

    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount) {
          Worst = i;
          WorstCount = Count;
        }
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    BlockFrequency Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tstatic = ";
               MBFI->printBlockFreq(dbgs(), Cost));
    if (Cost >= BestCost) {
      LLVM_DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << printReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    if (!growRegion(Cand)) {
      LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
      continue;
    }

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      LLVM_DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    bool HasEvictionChain = false;
    Cost += calcGlobalSplitCost(Cand, Order, &HasEvictionChain);
    LLVM_DEBUG({
      dbgs() << ", total = ";
      MBFI->printBlockFreq(dbgs(), Cost) << " with bundles";
      for (int i : Cand.LiveBundles.set_bits())
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Cost;
      // See splitCanCauseEvictionChain for detailed description of bad
      // eviction chain scenarios.
      if (CanCauseEvictionChain)
        *CanCauseEvictionChain = HasEvictionChain;
    }
    ++NumCands;
  }

  if (CanCauseEvictionChain && BestCand != NoCand) {
    // See splitCanCauseEvictionChain for detailed description of bad
    // eviction chain scenarios.
    LLVM_DEBUG(dbgs() << "Best split candidate of vreg "
                      << printReg(VirtReg.reg, TRI) << " may ");
    if (!(*CanCauseEvictionChain))
      LLVM_DEBUG(dbgs() << "not ");
    LLVM_DEBUG(dbgs() << "cause bad eviction chain\n");
  }

  return BestCand;
}

unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
                                 bool HasCompact,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  SmallVector<unsigned, 8> UsedCands;
  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      LLVM_DEBUG(dbgs() << "Split for " << printReg(Cand.PhysReg, TRI) << " in "
                        << B << " bundles, intv " << Cand.IntvIdx << ".\n");
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      LLVM_DEBUG(dbgs() << "Split for compact region in " << B
                        << " bundles, intv " << Cand.IntvIdx << ".\n");
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}

//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses.
/// This creates a lot of local live ranges that will be split by
/// tryLocalSplit if they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = LIS->getInterval(LREdit.get(i));
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}

//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// Get the number of allocatable registers that match the constraints of \p Reg
/// on \p MI and that are also in \p SuperRC.
static unsigned getNumAllocatableRegsForConstraints(
    const MachineInstr *MI, unsigned Reg, const TargetRegisterClass *SuperRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
    const RegisterClassInfo &RCI) {
  assert(SuperRC && "Invalid register class");

  const TargetRegisterClass *ConstrainedRC =
      MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
                                             /* ExploreBundle */ true);
  if (!ConstrainedRC)
    return 0;
  return RCI.getNumAllocatableRegs(ConstrainedRC);
}

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<unsigned> &NewVRegs) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(CurRC))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  LLVM_DEBUG(dbgs() << "Split around " << Uses.size()
                    << " individual instrs.\n");

  const TargetRegisterClass *SuperRC =
      TRI->getLargestLegalSuperClass(CurRC, *MF);
  unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
  // Split around every non-copy instruction if this split will relax
  // the constraints on the virtual register.
  // Otherwise, splitting just inserts uncoalescable copies that do not help
  // the allocation.
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy() ||
          SuperRCNumAllocatableRegs ==
              getNumAllocatableRegsForConstraints(MI, VirtReg.reg, SuperRC,
                                                  TII, TRI, RCI)) {
        LLVM_DEBUG(dbgs() << "    skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    LLVM_DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}

//===----------------------------------------------------------------------===//
//                               Local Splitting
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
      BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
      BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
             .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI =
        Matrix->getLiveUnions()[*Units].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }

  // Add fixed interference.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    const LiveRange &LR = LIS->getRegUnit(*Units);
    LiveRange::const_iterator I = LR.find(StartIdx);
    LiveRange::const_iterator E = LR.end();

    // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
    for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
      while (Uses[Gap+1].getBoundaryIndex() < I->start)
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = huge_valf;
        if (Uses[Gap+1].getBaseIndex() >= I->end)
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}
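
// Worked example (hypothetical slot indexes and weights): with
// Uses = {16r, 32r, 48r, 64r} there are three gaps. A virtual interference of
// weight 2.0e-3 covering only the instruction at 32r is counted in both
// surrounding gaps, so GapWeight[0] and GapWeight[1] become 2.0e-3 while
// GapWeight[2] stays 0. A fixed reg-unit interference over the last gap would
// set GapWeight[2] to huge_valf, ruling that gap out entirely.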

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // TODO: the function currently only handles a single UseBlock; it should be
  // possible to generalize.
  if (SA->getUseBlocks().size() != 1)
    return 0;

  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  LLVM_DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (Matrix->checkRegMaskInterference(VirtReg)) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    LLVM_DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri =
        llvm::lower_bound(RMS, Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      LLVM_DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-'
                        << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    LLVM_DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
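  // Worked illustration of these rules (hypothetical): a 3-instruction range
  // at a stage below RS_Split2 may be split into a 2-instruction and a
  // 3-instruction piece; the equal-sized piece is tagged RS_Split2. If that
  // piece comes back, rule 2 only admits splits that strictly reduce the
  // instruction count, so the process terminates instead of re-splitting
  // 3 -> 2+3 forever.
  //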
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq =
      SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
      (1.0f / MBFI->getEntryFreq());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = huge_valf;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    while (true) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << ' ' << Uses[SplitBefore]
                        << '-' << Uses[SplitAfter] << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        LLVM_DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < huge_valf) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(
            blockFreq * (NewGaps + 1),
            Uses[SplitBefore].distance(Uses[SplitAfter]) +
                (LiveBefore + LiveAfter) * SlotIndex::InstrDist,
            1);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        LLVM_DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            LLVM_DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          LLVM_DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        LLVM_DEBUG(dbgs() << " end\n");
        break;
      }

      LLVM_DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  LLVM_DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore] << '-'
                    << Uses[BestAfter] << ", " << BestDiff << ", "
                    << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    LLVM_DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
        LLVM_DEBUG(dbgs() << printReg(LREdit.get(i)));
      }
    LLVM_DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}
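
// Note on the hysteresis used above (illustrative): the test
// EstWeight * Hysteresis >= MaxGap, with Hysteresis just below 1.0, requires
// the estimated new weight to beat the interference by a small margin (about
// 2% for the value defined in this file) before eviction is considered, and
// BestDiff is damped the same way, so near-tie candidates do not keep
// displacing each other on later rounds.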

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs,
                            const SmallVirtRegSet &FixedRegisters) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("local_split", "Local Splitting", TimerGroupName,
                       TimerGroupDescription, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("global_split", "Global Splitting", TimerGroupName,
                     TimerGroupDescription, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable, which means
  // that tryRegionSplit won't be making progress. This check should be
  // replaced with an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs, FixedRegisters))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}

//===----------------------------------------------------------------------===//
//                          Last Chance Recoloring
//===----------------------------------------------------------------------===//

/// Return true if \p reg has any tied def operand.
static bool hasTiedDef(MachineRegisterInfo *MRI, unsigned reg) {
  for (const MachineOperand &MO : MRI->def_operands(reg))
    if (MO.isTied())
      return true;

  return false;
}

/// mayRecolorAllInterferences - Check if the virtual registers that
/// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
/// recolored to free \p PhysReg.
/// When true is returned, \p RecoloringCandidates has been augmented with all
/// the live intervals that need to be recolored in order to free \p PhysReg
/// for \p VirtReg.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
bool
RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
                                     SmallLISet &RecoloringCandidates,
                                     const SmallVirtRegSet &FixedRegisters) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);

  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there is LastChanceRecoloringMaxInterference or more interferences,
    // chances are one would not be recolorable.
    if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
            LastChanceRecoloringMaxInterference &&
        !ExhaustiveSearch) {
      LLVM_DEBUG(dbgs() << "Early abort: too many interferences.\n");
      CutOffInfo |= CO_Interf;
      return false;
    }
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      // If Intf is done and sits on the same register class as VirtReg, it
      // would not be recolorable, as it is in the same state as VirtReg.
      // However, if VirtReg has tied defs and Intf doesn't, then
      // there is still a point in examining if it can be recolorable.
      if (((getStage(*Intf) == RS_Done &&
            MRI->getRegClass(Intf->reg) == CurRC) &&
           !(hasTiedDef(MRI, VirtReg.reg) && !hasTiedDef(MRI, Intf->reg))) ||
          FixedRegisters.count(Intf->reg)) {
        LLVM_DEBUG(
            dbgs() << "Early abort: the interference is not recolorable.\n");
        return false;
      }
      RecoloringCandidates.insert(Intf);
    }
  }
  return true;
}

/// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
/// its interferences.
/// Last chance recoloring chooses a color for \p VirtReg and recolors every
/// virtual register that was using it. The recoloring process may recursively
/// use the last chance recoloring. Therefore, when a virtual register has been
/// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
/// be last-chance-recolored again during this recoloring "session".
/// E.g.,
/// Let
/// vA can use {R1, R2    }
/// vB can use {    R2, R3}
/// vC can use {R1        }
/// Where vA, vB, and vC cannot be split anymore (they are reloads for
/// instance) and they all interfere.
///
/// vA is assigned R1
/// vB is assigned R2
/// vC tries to evict vA but vA is already done.
/// Regular register allocation fails.
///
/// Last chance recoloring kicks in:
/// vC does as if vA was evicted => vC uses R1.
/// vC is marked as fixed.
/// vA needs to find a color.
/// None are available.
/// vA cannot evict vC: vC is a fixed virtual register now.
/// vA does as if vB was evicted => vA uses R2.
/// vB needs to find a color.
/// R3 is available.
/// Recoloring => vC = R1, vA = R2, vB = R3
///
/// \p Order defines the preferred allocation order for \p VirtReg.
/// \p NewRegs will contain any new virtual register that have been created
/// (split, spill) during the process and that must be assigned.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
/// \p Depth gives the current depth of the last chance recoloring.
/// \return a physical register that can be used for VirtReg or ~0u if none
/// exists.
unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
                                           AllocationOrder &Order,
                                           SmallVectorImpl<unsigned> &NewVRegs,
                                           SmallVirtRegSet &FixedRegisters,
                                           unsigned Depth) {
  LLVM_DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
  // Ranges must be Done.
  assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
         "Last chance recoloring should really be last chance");
  // Set the max depth to LastChanceRecoloringMaxDepth.
  // We may want to reconsider that if we end up with a too large search space
  // for targets with hundreds of registers.
  // Indeed, in that case we may want to cut the search space earlier.
  if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
    LLVM_DEBUG(dbgs() << "Abort because max depth has been reached.\n");
    CutOffInfo |= CO_Depth;
    return ~0u;
  }

  // Set of Live intervals that will need to be recolored.
  SmallLISet RecoloringCandidates;
  // Record the original mapping of virtual register to physical register in
  // case the recoloring fails.
  DenseMap<unsigned, unsigned> VirtRegToPhysReg;
  // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
  // this recoloring "session".
  assert(!FixedRegisters.count(VirtReg.reg));
  FixedRegisters.insert(VirtReg.reg);
  SmallVector<unsigned, 4> CurrentNewVRegs;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    LLVM_DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
                      << printReg(PhysReg, TRI) << '\n');
    RecoloringCandidates.clear();
    VirtRegToPhysReg.clear();
    CurrentNewVRegs.clear();

    // It is only possible to recolor virtual register interference.
    if (Matrix->checkInterference(VirtReg, PhysReg) >
        LiveRegMatrix::IK_VirtReg) {
      LLVM_DEBUG(
          dbgs() << "Some interferences are not with virtual registers.\n");

      continue;
    }

    // Early give up on this PhysReg if it is obvious we cannot recolor all
    // the interferences.
    if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
                                    FixedRegisters)) {
      LLVM_DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
      continue;
    }

    // RecoloringCandidates contains all the virtual registers that interfere
    // with VirtReg on PhysReg (or one of its aliases).
    // Enqueue them for recoloring and perform the actual recoloring.
    PQueue RecoloringQueue;
    for (SmallLISet::iterator It = RecoloringCandidates.begin(),
                              EndIt = RecoloringCandidates.end();
         It != EndIt; ++It) {
      unsigned ItVirtReg = (*It)->reg;
      enqueue(RecoloringQueue, *It);
      assert(VRM->hasPhys(ItVirtReg) &&
             "Interferences are supposed to be with allocated variables");

      // Record the current allocation.
      VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
      // Unset the related struct.
      Matrix->unassign(**It);
    }

    // Do as if VirtReg was assigned to PhysReg so that the underlying
    // recoloring has the right information about the interferences and
    // available colors.
    Matrix->assign(VirtReg, PhysReg);

    // Save the current recoloring state.
    // If we cannot recolor all the interferences, we will have to start again
    // at this point for the next physical register.
    SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
    if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
                                FixedRegisters, Depth)) {
      // Push the queued vregs into the main queue.
      for (unsigned NewVReg : CurrentNewVRegs)
        NewVRegs.push_back(NewVReg);
      // Do not mess up with the global assignment process.
      // I.e., VirtReg must be unassigned.
      Matrix->unassign(VirtReg);
      return PhysReg;
    }

    LLVM_DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
                      << printReg(PhysReg, TRI) << '\n');

    // The recoloring attempt failed, undo the changes.
    FixedRegisters = SaveFixedRegisters;
    Matrix->unassign(VirtReg);

    // For a newly created vreg which is also in RecoloringCandidates,
    // don't add it to NewVRegs because its physical register will be restored
    // below. Other vregs in CurrentNewVRegs are created by calling
    // selectOrSplit and should be added into NewVRegs.
    for (SmallVectorImpl<unsigned>::iterator Next = CurrentNewVRegs.begin(),
                                             End = CurrentNewVRegs.end();
         Next != End; ++Next) {
      if (RecoloringCandidates.count(&LIS->getInterval(*Next)))
        continue;
      NewVRegs.push_back(*Next);
    }

    for (SmallLISet::iterator It = RecoloringCandidates.begin(),
                              EndIt = RecoloringCandidates.end();
         It != EndIt; ++It) {
      unsigned ItVirtReg = (*It)->reg;
      if (VRM->hasPhys(ItVirtReg))
        Matrix->unassign(**It);
      unsigned ItPhysReg = VirtRegToPhysReg[ItVirtReg];
      Matrix->assign(**It, ItPhysReg);
    }
  }

  // Last chance recoloring did not work either, give up.
  return ~0u;
}

/// tryRecoloringCandidates - Try to assign a new color to every register
/// in \p RecoloringQueue.
/// \p NewRegs will contain any new virtual register created during the
/// recoloring process.
/// \p FixedRegisters[in/out] contains all the registers that have been
/// recolored.
/// \return true if all virtual registers in RecoloringQueue were successfully
/// recolored, false otherwise.
bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
                                       SmallVectorImpl<unsigned> &NewVRegs,
                                       SmallVirtRegSet &FixedRegisters,
                                       unsigned Depth) {
  while (!RecoloringQueue.empty()) {
    LiveInterval *LI = dequeue(RecoloringQueue);
    LLVM_DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
    unsigned PhysReg;
    PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
    // When splitting happens, the live-range may actually be empty.
    // In that case, this is okay to continue the recoloring even
    // if we did not find an alternative color for it. Indeed,
    // there will not be anything to color for LI in the end.
    if (PhysReg == ~0u || (!PhysReg && !LI->empty()))
      return false;

    if (!PhysReg) {
      assert(LI->empty() && "Only empty live-ranges do not require a register");
      LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
                        << " succeeded. Empty LI.\n");
      continue;
    }
    LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
                      << " succeeded with: " << printReg(PhysReg, TRI) << '\n');

    Matrix->assign(*LI, PhysReg);
    FixedRegisters.insert(LI->reg);
  }
  return true;
}

//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  CutOffInfo = CO_None;
  LLVMContext &Ctx = MF->getFunction().getContext();
  SmallVirtRegSet FixedRegisters;
  unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
  if (Reg == ~0U && (CutOffInfo != CO_None)) {
    uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
    if (CutOffEncountered == CO_Depth)
      Ctx.emitError("register allocation failed: maximum depth for recoloring "
                    "reached. Use -fexhaustive-register-search to skip "
                    "cutoffs");
    else if (CutOffEncountered == CO_Interf)
      Ctx.emitError("register allocation failed: maximum interference for "
                    "recoloring reached. Use -fexhaustive-register-search "
                    "to skip cutoffs");
    else if (CutOffEncountered == (CO_Depth | CO_Interf))
      Ctx.emitError("register allocation failed: maximum interference and "
                    "depth for recoloring reached. Use "
                    "-fexhaustive-register-search to skip cutoffs");
  }
  return Reg;
}

/// Using a CSR for the first time has a cost because it causes push|pop
/// to be added to prologue|epilogue. Splitting a cold section of the live
/// range can have lower cost than using the CSR for the first time;
/// Spilling a live range in the cold path can have lower cost than using
/// the CSR for the first time. Returns the physical register if we decide
/// to use the CSR; otherwise return 0.
unsigned RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg,
                                         AllocationOrder &Order,
                                         unsigned PhysReg,
                                         unsigned &CostPerUseLimit,
                                         SmallVectorImpl<unsigned> &NewVRegs) {
  if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
    // We choose spill over using the CSR for the first time if the spill cost
    // is lower than CSRCost.
    SA->analyze(&VirtReg);
    if (calcSpillCost() >= CSRCost)
      return PhysReg;

    // We are going to spill, set CostPerUseLimit to 1 to make sure that
    // we will not use a callee-saved register in tryEvict.
    CostPerUseLimit = 1;
    return 0;
  }
  if (getStage(VirtReg) < RS_Split) {
    // We choose pre-splitting over using the CSR for the first time if
    // the cost of splitting is lower than CSRCost.
    SA->analyze(&VirtReg);
    unsigned NumCands = 0;
    BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
    unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
                                                 NumCands, true /*IgnoreCSR*/);
    if (BestCand == NoCand)
      // Use the CSR if we can't find a region split below CSRCost.
      return PhysReg;

    // Perform the actual pre-splitting.
    doRegionSplit(VirtReg, BestCand, false /*HasCompact*/, NewVRegs);
    return 0;
  }
  return PhysReg;
}

void RAGreedy::aboutToRemoveInterval(LiveInterval &LI) {
  // Do not keep invalid information around.
  SetOfBrokenHints.remove(&LI);
}

void RAGreedy::initializeCSRCost() {
  // We use the larger one out of the command-line option and the value
  // reported by TRI.
  CSRCost = BlockFrequency(
      std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
  if (!CSRCost.getFrequency())
    return;

  // Raw cost is relative to Entry == 2^14; scale it appropriately.
  uint64_t ActualEntry = MBFI->getEntryFreq();
  if (!ActualEntry) {
    CSRCost = 0;
    return;
  }
  uint64_t FixedEntry = 1 << 14;
  if (ActualEntry < FixedEntry)
    CSRCost *= BranchProbability(ActualEntry, FixedEntry);
  else if (ActualEntry <= UINT32_MAX)
    // Invert the fraction and divide.
    CSRCost /= BranchProbability(FixedEntry, ActualEntry);
  else
    // Can't use BranchProbability in general, since it takes 32-bit numbers.
    CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
}
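
// Worked scaling example (hypothetical numbers): with a raw CSR cost of 5 and
// an actual entry frequency of 2^13, half the fixed 2^14 reference, the cost
// is multiplied by 1/2 and ends up around 2; with an entry frequency of 2^15
// it is divided by 1/2 and becomes 10. The point is to keep CSRCost on the
// same scale as the block frequencies it is compared against.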

/// Collect the hint info for \p Reg.
/// The results are stored into \p Out.
/// \p Out is not cleared before being populated.
void RAGreedy::collectHintInfo(unsigned Reg, HintsInfo &Out) {
  for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) {
    if (!Instr.isFullCopy())
      continue;
    // Look for the other end of the copy.
    Register OtherReg = Instr.getOperand(0).getReg();
    if (OtherReg == Reg) {
      OtherReg = Instr.getOperand(1).getReg();
      if (OtherReg == Reg)
        continue;
    }
    // Get the current assignment.
    Register OtherPhysReg = Register::isPhysicalRegister(OtherReg)
                                ? OtherReg
                                : VRM->getPhys(OtherReg);
    // Push the collected information.
    Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
                           OtherPhysReg));
  }
}

/// Using the given \p List, compute the cost of the broken hints if
/// \p PhysReg was used.
/// \return The cost of \p List for \p PhysReg.
BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List,
                                           unsigned PhysReg) {
  BlockFrequency Cost = 0;
  for (const HintInfo &Info : List) {
    if (Info.PhysReg != PhysReg)
      Cost += Info.Freq;
  }
  return Cost;
}
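
// Illustrative example (hypothetical frequencies): if Reg is copy-related to
// ranges assigned R1, R1, and R2 with copy frequencies 10, 5, and 20, then
// getBrokenHintFreq(Info, R1) is 20 (only the R2 copy breaks) while
// getBrokenHintFreq(Info, R2) is 15, so the profitability check in
// tryHintRecoloring would favor recoloring toward R2.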

/// Using the register assigned to \p VirtReg, try to recolor
/// all the live ranges that are copy-related with \p VirtReg.
/// The recoloring is then propagated to all the live-ranges that have
/// been recolored and so on, until no more copies can be coalesced or
/// it is not profitable.
/// For a given live range, profitability is determined by the sum of the
/// frequencies of the non-identity copies it would introduce with the old
/// and new register.
void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
  // We have a broken hint, check if it is possible to fix it by
  // reusing PhysReg for the copy-related live-ranges. Indeed, we evicted
  // some register and PhysReg may be available for the other live-ranges.
  SmallSet<unsigned, 4> Visited;
  SmallVector<unsigned, 2> RecoloringCandidates;
  HintsInfo Info;
  unsigned Reg = VirtReg.reg;
  Register PhysReg = VRM->getPhys(Reg);
  // Start the recoloring algorithm from the input live-interval, then
  // it will propagate to the ones that are copy-related with it.
  Visited.insert(Reg);
  RecoloringCandidates.push_back(Reg);

  LLVM_DEBUG(dbgs() << "Trying to reconcile hints for: " << printReg(Reg, TRI)
                    << '(' << printReg(PhysReg, TRI) << ")\n");

  do {
    Reg = RecoloringCandidates.pop_back_val();

    // We cannot recolor physical register.
    if (Register::isPhysicalRegister(Reg))
      continue;

    assert(VRM->hasPhys(Reg) && "We have an unallocated variable!!");

    // Get the live interval mapped with this virtual register to be able
    // to check for the interference with the new color.
    LiveInterval &LI = LIS->getInterval(Reg);
    Register CurrPhys = VRM->getPhys(Reg);
    // Check that the new color matches the register class constraints and
    // that it is free for this live range.
    if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) ||
                                Matrix->checkInterference(LI, PhysReg)))
      continue;

    LLVM_DEBUG(dbgs() << printReg(Reg, TRI) << '(' << printReg(CurrPhys, TRI)
                      << ") is recolorable.\n");

    // Gather the hint info.
    Info.clear();
    collectHintInfo(Reg, Info);
    // Check if recoloring the live-range will increase the cost of the
    // non-identity copies.
    if (CurrPhys != PhysReg) {
      LLVM_DEBUG(dbgs() << "Checking profitability:\n");
      BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
      BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
      LLVM_DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency()
                        << "\nNew Cost: " << NewCopiesCost.getFrequency()
                        << '\n');
      if (OldCopiesCost < NewCopiesCost) {
        LLVM_DEBUG(dbgs() << "=> Not profitable.\n");
        continue;
      }
      // At this point, the cost is either cheaper or equal. If it is
      // equal, we consider this is profitable because it may expose
      // more recoloring opportunities.
      LLVM_DEBUG(dbgs() << "=> Profitable.\n");
      // Recolor the live-range.
      Matrix->unassign(LI);
      Matrix->assign(LI, PhysReg);
    }
    // Push all copy-related live-ranges to keep reconciling the broken
    // hints.
    for (const HintInfo &HI : Info) {
      if (Visited.insert(HI.Reg).second)
        RecoloringCandidates.push_back(HI.Reg);
    }
  } while (!RecoloringCandidates.empty());
}

/// Try to recolor broken hints.
/// Broken hints may be repaired by recoloring when an evicted variable
/// freed up a register for a larger live-range.
/// Consider the following example:
/// BB1:
///   a =
///   b =
/// BB2:
///   ...
///   = b
///   = a
/// Let us assume b gets split:
/// BB1:
///   a =
///   b =
/// BB2:
///   c = b
///   ...
///   d = c
///   = d
///   = a
/// Because of how the allocation works, b, c, and d may be assigned different
/// colors. Now, if a gets evicted later:
/// BB1:
///   a =
///   st a, SpillSlot
///   b =
/// BB2:
///   c = b
///   ...
///   d = c
///   = d
///   e = ld SpillSlot
///   = e
/// It is likely that we can assign the same register for b, c, and d,
/// getting rid of 2 copies.
void RAGreedy::tryHintsRecoloring() {
  for (LiveInterval *LI : SetOfBrokenHints) {
    assert(Register::isVirtualRegister(LI->reg) &&
           "Recoloring is possible only for virtual registers");
    // Some dead defs may be around (e.g., because of debug uses).
    // Ignore those.
    if (!VRM->hasPhys(LI->reg))
      continue;
    tryHintRecoloring(*LI);
  }
}

unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
                                     SmallVectorImpl<unsigned> &NewVRegs,
                                     SmallVirtRegSet &FixedRegisters,
                                     unsigned Depth) {
  unsigned CostPerUseLimit = ~0u;
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo, Matrix);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs, FixedRegisters)) {
    // If VirtReg got an assignment, the eviction info is no longer relevant.
    LastEvicted.clearEvicteeInfo(VirtReg.reg);
    // When NewVRegs is not empty, we may have made decisions such as evicting
    // a virtual register, go with the earlier decisions and use the physical
    // register.
    if (CSRCost.getFrequency() && isUnusedCalleeSavedReg(PhysReg) &&
        NewVRegs.empty()) {
      unsigned CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
                                              CostPerUseLimit, NewVRegs);
      if (CSRReg || !NewVRegs.empty())
        // Return now if we decide to use a CSR or create new vregs due to
        // pre-splitting.
        return CSRReg;
    } else
      return PhysReg;
  }

  LiveRangeStage Stage = getStage(VirtReg);
  LLVM_DEBUG(dbgs() << StageName[Stage] << " Cascade "
                    << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Split ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg =
            tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit,
                     FixedRegisters)) {
      unsigned Hint = MRI->getSimpleHint(VirtReg.reg);
      // If VirtReg has a hint and that hint is broken, record this
      // virtual register as a recoloring candidate for broken hint.
      // Indeed, since we evicted a variable in its neighborhood it is
      // likely we can at least partially recolor some of the
      // copy-related live-ranges.
      if (Hint && Hint != PhysReg)
        SetOfBrokenHints.insert(&VirtReg);
      // If VirtReg evicted someone, the eviction info for it as an evictee is
      // no longer relevant.
      LastEvicted.clearEvicteeInfo(VirtReg.reg);
      return PhysReg;
    }

  assert((NewVRegs.empty() || Depth) && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    LLVM_DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(VirtReg.reg);
    return 0;
  }

  if (Stage < RS_Spill) {
    // Try splitting VirtReg or interferences.
    unsigned NewVRegSizeBefore = NewVRegs.size();
    unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs, FixedRegisters);
    if (PhysReg || (NewVRegs.size() - NewVRegSizeBefore)) {
      // If VirtReg got split, the eviction info is no longer relevant.
      LastEvicted.clearEvicteeInfo(VirtReg.reg);
      return PhysReg;
    }
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
                                   Depth);

  // Finally spill VirtReg itself.
  if (EnableDeferredSpilling && getStage(VirtReg) < RS_Memory) {
    // TODO: This is experimental and in particular, we do not model
    // the live range splitting done by spilling correctly.
    // We would need a deep integration with the spiller to do the
    // right thing here. Anyway, that is still good for early testing.
    setStage(VirtReg, RS_Memory);
    LLVM_DEBUG(dbgs() << "Do as if this register is in memory\n");
    NewVRegs.push_back(VirtReg.reg);
  } else {
    NamedRegionTimer T("spill", "Spiller", TimerGroupName,
                       TimerGroupDescription, TimePassesIsEnabled);
    LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
    spiller().spill(LRE);
    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

    if (VerifyEnabled)
      MF->verify(this, "After spilling");
  }

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}
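
// Informal summary of the stage machine driven above: a range waits one round
// at RS_Split, may try region splitting (being demoted to RS_Split2 after
// dubious progress), then per-block or per-instruction splitting into
// RS_Spill pieces, and is finally spilled (RS_Memory under deferred spilling,
// RS_Done otherwise). Stages only move forward, which is what bounds the
// amount of work spent on any one original live range.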

void RAGreedy::reportNumberOfSplillsReloads(MachineLoop *L, unsigned &Reloads,
                                            unsigned &FoldedReloads,
                                            unsigned &Spills,
                                            unsigned &FoldedSpills) {
  Reloads = 0;
  FoldedReloads = 0;
  Spills = 0;
  FoldedSpills = 0;

  // Sum up the spills and reloads in subloops.
  for (MachineLoop *SubLoop : *L) {
    unsigned SubReloads;
    unsigned SubFoldedReloads;
    unsigned SubSpills;
    unsigned SubFoldedSpills;

    reportNumberOfSplillsReloads(SubLoop, SubReloads, SubFoldedReloads,
                                 SubSpills, SubFoldedSpills);
    Reloads += SubReloads;
    FoldedReloads += SubFoldedReloads;
    Spills += SubSpills;
    FoldedSpills += SubFoldedSpills;
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  int FI;

  for (MachineBasicBlock *MBB : L->getBlocks())
    // Handle blocks that were not included in subloops.
    if (Loops->getLoopFor(MBB) == L)
      for (MachineInstr &MI : *MBB) {
        SmallVector<const MachineMemOperand *, 2> Accesses;
        auto isSpillSlotAccess = [&MFI](const MachineMemOperand *A) {
          return MFI.isSpillSlotObjectIndex(
              cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
                  ->getFrameIndex());
        };

        if (TII->isLoadFromStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI))
          ++Reloads;
        else if (TII->hasLoadFromStackSlot(MI, Accesses) &&
                 llvm::any_of(Accesses, isSpillSlotAccess))
          ++FoldedReloads;
        else if (TII->isStoreToStackSlot(MI, FI) &&
                 MFI.isSpillSlotObjectIndex(FI))
          ++Spills;
        else if (TII->hasStoreToStackSlot(MI, Accesses) &&
                 llvm::any_of(Accesses, isSpillSlotAccess))
          ++FoldedSpills;
      }

  if (Reloads || FoldedReloads || Spills || FoldedSpills) {
    using namespace ore;

    ORE->emit([&]() {
      MachineOptimizationRemarkMissed R(DEBUG_TYPE, "LoopSpillReload",
                                        L->getStartLoc(), L->getHeader());
      if (Spills)
        R << NV("NumSpills", Spills) << " spills ";
      if (FoldedSpills)
        R << NV("NumFoldedSpills", FoldedSpills) << " folded spills ";
      if (Reloads)
        R << NV("NumReloads", Reloads) << " reloads ";
      if (FoldedReloads)
        R << NV("NumFoldedReloads", FoldedReloads) << " folded reloads ";
      R << "generated in loop";
      return R;
    });
  }
}
3210 bool RAGreedy::runOnMachineFunction(MachineFunction
&mf
) {
3211 LLVM_DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
3212 << "********** Function: " << mf
.getName() << '\n');
3215 TRI
= MF
->getSubtarget().getRegisterInfo();
3216 TII
= MF
->getSubtarget().getInstrInfo();
3217 RCI
.runOnMachineFunction(mf
);
3219 EnableLocalReassign
= EnableLocalReassignment
||
3220 MF
->getSubtarget().enableRALocalReassignment(
3221 MF
->getTarget().getOptLevel());
3223 EnableAdvancedRASplitCost
= ConsiderLocalIntervalCost
||
3224 MF
->getSubtarget().enableAdvancedRASplitCost();
3227 MF
->verify(this, "Before greedy register allocator");
3229 RegAllocBase::init(getAnalysis
<VirtRegMap
>(),
3230 getAnalysis
<LiveIntervals
>(),
3231 getAnalysis
<LiveRegMatrix
>());
3232 Indexes
= &getAnalysis
<SlotIndexes
>();
3233 MBFI
= &getAnalysis
<MachineBlockFrequencyInfo
>();
3234 DomTree
= &getAnalysis
<MachineDominatorTree
>();
3235 ORE
= &getAnalysis
<MachineOptimizationRemarkEmitterPass
>().getORE();
3236 SpillerInstance
.reset(createInlineSpiller(*this, *MF
, *VRM
));
3237 Loops
= &getAnalysis
<MachineLoopInfo
>();
3238 Bundles
= &getAnalysis
<EdgeBundles
>();
3239 SpillPlacer
= &getAnalysis
<SpillPlacement
>();
3240 DebugVars
= &getAnalysis
<LiveDebugVariables
>();
3241 AA
= &getAnalysis
<AAResultsWrapperPass
>().getAAResults();
3243 initializeCSRCost();
3245 calculateSpillWeightsAndHints(*LIS
, mf
, VRM
, *Loops
, *MBFI
);
3247 LLVM_DEBUG(LIS
->dump());
3249 SA
.reset(new SplitAnalysis(*VRM
, *LIS
, *Loops
));
3250 SE
.reset(new SplitEditor(*SA
, *AA
, *LIS
, *VRM
, *DomTree
, *MBFI
));
3251 ExtraRegInfo
.clear();
3252 ExtraRegInfo
.resize(MRI
->getNumVirtRegs());
3254 IntfCache
.init(MF
, Matrix
->getLiveUnions(), Indexes
, LIS
, TRI
);
3255 GlobalCand
.resize(32); // This will grow as needed.
3256 SetOfBrokenHints
.clear();
3257 LastEvicted
.clear();
3260 tryHintsRecoloring();
3262 reportNumberOfSplillsReloads();