//===-- PreAllocSplitting.cpp - Pre-allocation Interval Splitting Pass. --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the machine instruction level pre-register allocation
// live interval splitting pass. It finds live interval barriers, i.e.
// instructions which will kill all physical registers in certain register
// classes, and splits all live intervals which cross the barrier.
//
//===----------------------------------------------------------------------===//
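
// As an illustration (the virtual register numbers and stack slot below are
// made up), a live interval that crosses a barrier call:
//
//   %reg1024 = ...
//   ...
//   CALL foo                  ; barrier: clobbers %reg1024's register class
//   ...
//        ... = %reg1024
//
// is split by spilling before the barrier and restoring after it, with the
// restored value renumbered into a fresh virtual register:
//
//   %reg1024 = ...
//   store %reg1024 -> [ss#0]
//   CALL foo
//   %reg1025 = load [ss#0]
//        ... = %reg1025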

#define DEBUG_TYPE "pre-alloc-split"
#include "VirtRegMap.h"
#include "RegisterCoalescer.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

static cl::opt<int> PreSplitLimit("pre-split-limit", cl::init(-1), cl::Hidden);
static cl::opt<int> DeadSplitLimit("dead-split-limit", cl::init(-1),
                                   cl::Hidden);
static cl::opt<int> RestoreFoldLimit("restore-fold-limit", cl::init(-1),
                                     cl::Hidden);

STATISTIC(NumSplits, "Number of intervals split");
STATISTIC(NumRemats, "Number of intervals split by rematerialization");
STATISTIC(NumFolds, "Number of intervals split with spill folding");
STATISTIC(NumRestoreFolds, "Number of intervals split with restore folding");
STATISTIC(NumRenumbers, "Number of intervals renumbered into new registers");
STATISTIC(NumDeadSpills, "Number of dead spills removed");

namespace {
  class PreAllocSplitting : public MachineFunctionPass {
    MachineFunction       *CurrMF;
    const TargetMachine   *TM;
    const TargetInstrInfo *TII;
    const TargetRegisterInfo* TRI;
    MachineFrameInfo      *MFI;
    MachineRegisterInfo   *MRI;
    SlotIndexes           *SIs;
    LiveIntervals         *LIs;
    LiveStacks            *LSs;
    VirtRegMap            *VRM;

    // Barrier - Current barrier being processed.
    MachineInstr          *Barrier;

    // BarrierMBB - Basic block where the barrier resides.
    MachineBasicBlock     *BarrierMBB;

    // BarrierIdx - Current barrier index.
    SlotIndex             BarrierIdx;

    // CurrLI - Current live interval being split.
    LiveInterval          *CurrLI;

    // CurrSLI - Current stack slot live interval.
    LiveInterval          *CurrSLI;

    // CurrSValNo - Current val# for the stack slot live interval.
    VNInfo                *CurrSValNo;

    // IntervalSSMap - A map from live interval to spill slots.
    DenseMap<unsigned, int> IntervalSSMap;

    // Def2SpillMap - A map from a def instruction index to spill index.
    DenseMap<SlotIndex, SlotIndex> Def2SpillMap;

  public:
    static char ID;
    PreAllocSplitting() : MachineFunctionPass(ID) {
      initializePreAllocSplittingPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<SlotIndexes>();
      AU.addPreserved<SlotIndexes>();
      AU.addRequired<LiveIntervals>();
      AU.addPreserved<LiveIntervals>();
      AU.addRequired<LiveStacks>();
      AU.addPreserved<LiveStacks>();
      AU.addPreserved<RegisterCoalescer>();
      AU.addPreserved<CalculateSpillWeights>();
      AU.addPreservedID(StrongPHIEliminationID);
      AU.addPreservedID(PHIEliminationID);
      AU.addRequired<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addRequired<VirtRegMap>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addPreserved<MachineLoopInfo>();
      AU.addPreserved<VirtRegMap>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    virtual void releaseMemory() {
      IntervalSSMap.clear();
      Def2SpillMap.clear();
    }

    virtual const char *getPassName() const {
      return "Pre-Register Allocation Live Interval Splitting";
    }

    /// print - Implement the dump method.
    virtual void print(raw_ostream &O, const Module* M = 0) const {
      LIs->print(O, M);
    }

  private:

    MachineBasicBlock::iterator
      findSpillPoint(MachineBasicBlock*, MachineInstr*, MachineInstr*,
                     SmallPtrSet<MachineInstr*, 4>&);

    MachineBasicBlock::iterator
      findRestorePoint(MachineBasicBlock*, MachineInstr*, SlotIndex,
                       SmallPtrSet<MachineInstr*, 4>&);

    int CreateSpillStackSlot(unsigned, const TargetRegisterClass *);

    bool IsAvailableInStack(MachineBasicBlock*, unsigned,
                            SlotIndex, SlotIndex,
                            SlotIndex&, int&) const;

    void UpdateSpillSlotInterval(VNInfo*, SlotIndex, SlotIndex);

    bool SplitRegLiveInterval(LiveInterval*);

    bool SplitRegLiveIntervals(const TargetRegisterClass **,
                               SmallPtrSet<LiveInterval*, 8>&);

    bool createsNewJoin(LiveRange* LR, MachineBasicBlock* DefMBB,
                        MachineBasicBlock* BarrierMBB);
    bool Rematerialize(unsigned vreg, VNInfo* ValNo,
                       MachineInstr* DefMI,
                       MachineBasicBlock::iterator RestorePt,
                       SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
    MachineInstr* FoldSpill(unsigned vreg, const TargetRegisterClass* RC,
                            MachineInstr* DefMI,
                            MachineInstr* Barrier,
                            MachineBasicBlock* MBB,
                            int& SS,
                            SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
    MachineInstr* FoldRestore(unsigned vreg,
                              const TargetRegisterClass* RC,
                              MachineInstr* Barrier,
                              MachineBasicBlock* MBB,
                              int SS,
                              SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
    void RenumberValno(VNInfo* VN);
    void ReconstructLiveInterval(LiveInterval* LI);
    bool removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split);
    unsigned getNumberOfNonSpills(SmallPtrSet<MachineInstr*, 4>& MIs,
                                  unsigned Reg, int FrameIndex, bool& TwoAddr);
    VNInfo* PerformPHIConstruction(MachineBasicBlock::iterator Use,
                                   MachineBasicBlock* MBB, LiveInterval* LI,
                                   SmallPtrSet<MachineInstr*, 4>& Visited,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                                   DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                             DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                                DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                                   bool IsTopLevel, bool IsIntraBlock);
    VNInfo* PerformPHIConstructionFallBack(MachineBasicBlock::iterator Use,
                                   MachineBasicBlock* MBB, LiveInterval* LI,
                                   SmallPtrSet<MachineInstr*, 4>& Visited,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                                   DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                             DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                                DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                                   bool IsTopLevel, bool IsIntraBlock);
  };
} // end anonymous namespace

char PreAllocSplitting::ID = 0;

INITIALIZE_PASS_BEGIN(PreAllocSplitting, "pre-alloc-splitting",
                "Pre-Register Allocation Live Interval Splitting",
                false, false)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(LiveStacks)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_END(PreAllocSplitting, "pre-alloc-splitting",
                "Pre-Register Allocation Live Interval Splitting",
                false, false)

char &llvm::PreAllocSplittingID = PreAllocSplitting::ID;

/// findSpillPoint - Find a gap, as far away from the given MI as possible,
/// that's suitable for spilling the current live interval. The index must be
/// before any defs and uses of the live interval register in the mbb. Return
/// begin() if none is found.
MachineBasicBlock::iterator
PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
                                  MachineInstr *DefMI,
                                  SmallPtrSet<MachineInstr*, 4> &RefsInMBB) {
  MachineBasicBlock::iterator Pt = MBB->begin();

  MachineBasicBlock::iterator MII = MI;
  MachineBasicBlock::iterator EndPt = DefMI
    ? MachineBasicBlock::iterator(DefMI) : MBB->begin();

  while (MII != EndPt && !RefsInMBB.count(MII) &&
         MII->getOpcode() != TRI->getCallFrameSetupOpcode())
    --MII;
  if (MII == EndPt || RefsInMBB.count(MII)) return Pt;

  while (MII != EndPt && !RefsInMBB.count(MII)) {
    // We can't insert the spill between the barrier (a call), and its
    // corresponding call frame setup.
    if (MII->getOpcode() == TRI->getCallFrameDestroyOpcode()) {
      while (MII->getOpcode() != TRI->getCallFrameSetupOpcode()) {
        --MII;
        if (MII == EndPt)
          return Pt;
      }
      continue;
    } else {
      Pt = MII;
    }

    if (RefsInMBB.count(MII))
      return Pt;

    --MII;
  }

  return Pt;
}
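
// Note: when the barrier is a call, the window between the call frame setup
// and call frame destroy pseudo-instructions (CALLSEQ_START/CALLSEQ_END on
// most targets) must remain contiguous. That is why both findSpillPoint and
// findRestorePoint skip over that window rather than placing code inside it.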

/// findRestorePoint - Find a gap in the instruction index map that's suitable
/// for restoring the current live interval value. The index must be before any
/// uses of the live interval register in the mbb. Return end() if none is
/// found.
MachineBasicBlock::iterator
PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
                                    SlotIndex LastIdx,
                                    SmallPtrSet<MachineInstr*, 4> &RefsInMBB) {
  // FIXME: Allow the spill to be inserted at the beginning of the mbb. Update
  // the mbb begin index accordingly.
  MachineBasicBlock::iterator Pt = MBB->end();
  MachineBasicBlock::iterator EndPt = MBB->getFirstTerminator();

  // We start at the call, so walk forward until we find the call frame teardown
  // since we can't insert restores before that.  Bail if we encounter a use
  // during this time.
  MachineBasicBlock::iterator MII = MI;
  if (MII == EndPt) return Pt;

  while (MII != EndPt && !RefsInMBB.count(MII) &&
         MII->getOpcode() != TRI->getCallFrameDestroyOpcode())
    ++MII;
  if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
  ++MII;

  // FIXME: Limit the number of instructions to examine to reduce
  // compile time.
  while (MII != EndPt) {
    SlotIndex Index = LIs->getInstructionIndex(MII);
    if (Index > LastIdx)
      break;

    // We can't insert a restore between the barrier (a call) and its
    // corresponding call frame teardown.
    if (MII->getOpcode() == TRI->getCallFrameSetupOpcode()) {
      do {
        if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
        ++MII;
      } while (MII->getOpcode() != TRI->getCallFrameDestroyOpcode());
    } else {
      Pt = MII;
    }

    if (RefsInMBB.count(MII))
      return Pt;

    ++MII;
  }

  return Pt;
}

/// CreateSpillStackSlot - Create a stack slot for the live interval being
/// split. If the live interval was previously split, just reuse the same
/// slot.
int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
                                            const TargetRegisterClass *RC) {
  int SS;
  DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
  if (I != IntervalSSMap.end()) {
    SS = I->second;
  } else {
    SS = MFI->CreateSpillStackObject(RC->getSize(), RC->getAlignment());
    IntervalSSMap[Reg] = SS;
  }

  // Create live interval for stack slot.
  CurrSLI = &LSs->getOrCreateInterval(SS, RC);
  if (CurrSLI->hasAtLeastOneValue())
    CurrSValNo = CurrSLI->getValNumInfo(0);
  else
    CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0,
                                       LSs->getVNInfoAllocator());
  return SS;
}
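
// Reusing one slot per virtual register keeps every split value of that
// register in a single stack location, so repeated splits of the same
// interval extend one stack slot live interval instead of creating new ones.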

/// IsAvailableInStack - Return true if register is available in a split stack
/// slot at the specified index.
bool
PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
                                      unsigned Reg, SlotIndex DefIndex,
                                      SlotIndex RestoreIndex,
                                      SlotIndex &SpillIndex,
                                      int& SS) const {
  if (!DefMBB)
    return false;

  DenseMap<unsigned, int>::const_iterator I = IntervalSSMap.find(Reg);
  if (I == IntervalSSMap.end())
    return false;
  DenseMap<SlotIndex, SlotIndex>::const_iterator
    II = Def2SpillMap.find(DefIndex);
  if (II == Def2SpillMap.end())
    return false;

  // If the last spill of the def is in the same mbb as the barrier mbb (where
  // the restore will be), make sure it's not below the intended restore index.
  // FIXME: Undo the previous spill?
  assert(LIs->getMBBFromIndex(II->second) == DefMBB);
  if (DefMBB == BarrierMBB && II->second >= RestoreIndex)
    return false;

  SS = I->second;
  SpillIndex = II->second;
  return true;
}

/// UpdateSpillSlotInterval - Given the specified val# of the register live
/// interval being split, and the spill and restore indices, update the live
/// interval of the spill stack slot.
void
PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, SlotIndex SpillIndex,
                                           SlotIndex RestoreIndex) {
  assert(LIs->getMBBFromIndex(RestoreIndex) == BarrierMBB &&
         "Expect restore in the barrier mbb");

  MachineBasicBlock *MBB = LIs->getMBBFromIndex(SpillIndex);
  if (MBB == BarrierMBB) {
    // Intra-block spill + restore. We are done.
    LiveRange SLR(SpillIndex, RestoreIndex, CurrSValNo);
    CurrSLI->addRange(SLR);
    return;
  }

  SmallPtrSet<MachineBasicBlock*, 4> Processed;
  SlotIndex EndIdx = LIs->getMBBEndIdx(MBB);
  LiveRange SLR(SpillIndex, EndIdx, CurrSValNo);
  CurrSLI->addRange(SLR);
  Processed.insert(MBB);

  // Start from the spill mbb, figure out the extent of the spill slot's
  // live interval.
  SmallVector<MachineBasicBlock*, 4> WorkList;
  const LiveRange *LR = CurrLI->getLiveRangeContaining(SpillIndex);
  if (LR->end > EndIdx)
    // If the live range extends beyond the end of the mbb, add successors to
    // the work list.
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      WorkList.push_back(*SI);

  while (!WorkList.empty()) {
    MachineBasicBlock *MBB = WorkList.back();
    WorkList.pop_back();
    if (Processed.count(MBB))
      continue;
    SlotIndex Idx = LIs->getMBBStartIdx(MBB);
    LR = CurrLI->getLiveRangeContaining(Idx);
    if (LR && LR->valno == ValNo) {
      EndIdx = LIs->getMBBEndIdx(MBB);
      if (Idx <= RestoreIndex && RestoreIndex < EndIdx) {
        // Spill slot live interval stops at the restore.
        LiveRange SLR(Idx, RestoreIndex, CurrSValNo);
        CurrSLI->addRange(SLR);
      } else if (LR->end > EndIdx) {
        // Live range extends beyond the end of the mbb, process successors.
        LiveRange SLR(Idx, EndIdx.getNextIndex(), CurrSValNo);
        CurrSLI->addRange(SLR);
        for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
               SE = MBB->succ_end(); SI != SE; ++SI)
          WorkList.push_back(*SI);
      } else {
        LiveRange SLR(Idx, LR->end, CurrSValNo);
        CurrSLI->addRange(SLR);
      }
      Processed.insert(MBB);
    }
  }
}
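
// The worklist propagation above handles spills that do not share a block
// with the barrier: the slot stays live from the spill to the end of its
// block and through every successor block the register value reaches, until
// the block containing the restore is found.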

/// PerformPHIConstruction - From properly set up use and def lists, use a PHI
/// construction algorithm to compute the ranges and valnos for an interval.
VNInfo*
PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
                                       MachineBasicBlock* MBB, LiveInterval* LI,
                                       SmallPtrSet<MachineInstr*, 4>& Visited,
             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                                       DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                                 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                                    DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                                       bool IsTopLevel, bool IsIntraBlock) {
  // Return memoized result if it's available.
  if (IsTopLevel && Visited.count(UseI) && NewVNs.count(UseI))
    return NewVNs[UseI];
  else if (!IsTopLevel && IsIntraBlock && NewVNs.count(UseI))
    return NewVNs[UseI];
  else if (!IsIntraBlock && LiveOut.count(MBB))
    return LiveOut[MBB];

  // Check if our block contains any uses or defs.
  bool ContainsDefs = Defs.count(MBB);
  bool ContainsUses = Uses.count(MBB);

  VNInfo* RetVNI = 0;

  // Enumerate the cases of use/def containing blocks.
  if (!ContainsDefs && !ContainsUses) {
    return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs, Uses,
                                          NewVNs, LiveOut, Phis,
                                          IsTopLevel, IsIntraBlock);
  } else if (ContainsDefs && !ContainsUses) {
    SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];

    // Search for the def in this block.  If we don't find it before the
    // instruction we care about, go to the fallback case.  Note that that
    // should never happen: this cannot be intrablock, so use should
    // always be an end() iterator.
    assert(UseI == MBB->end() && "No use marked in intrablock");

    MachineBasicBlock::iterator Walker = UseI;
    --Walker;
    while (Walker != MBB->begin()) {
      if (BlockDefs.count(Walker))
        break;
      --Walker;
    }

    // Once we've found it, extend its VNInfo to our instruction.
    SlotIndex DefIndex = LIs->getInstructionIndex(Walker);
    DefIndex = DefIndex.getDefIndex();
    SlotIndex EndIndex = LIs->getMBBEndIdx(MBB);

    RetVNI = NewVNs[Walker];
    LI->addRange(LiveRange(DefIndex, EndIndex, RetVNI));
  } else if (!ContainsDefs && ContainsUses) {
    SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];

    // Search for the use in this block that precedes the instruction we care
    // about, going to the fallback case if we don't find it.
    MachineBasicBlock::iterator Walker = UseI;
    bool found = false;
    while (Walker != MBB->begin()) {
      --Walker;
      if (BlockUses.count(Walker)) {
        found = true;
        break;
      }
    }

    if (!found)
      return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
                                            Uses, NewVNs, LiveOut, Phis,
                                            IsTopLevel, IsIntraBlock);

    SlotIndex UseIndex = LIs->getInstructionIndex(Walker);
    UseIndex = UseIndex.getUseIndex();
    SlotIndex EndIndex;
    if (IsIntraBlock) {
      EndIndex = LIs->getInstructionIndex(UseI).getDefIndex();
    } else
      EndIndex = LIs->getMBBEndIdx(MBB);

    // Now, recursively phi construct the VNInfo for the use we found,
    // and then extend it to include the instruction we care about.
    RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
                                    NewVNs, LiveOut, Phis, false, true);

    LI->addRange(LiveRange(UseIndex, EndIndex, RetVNI));

    // FIXME: Need to set kills properly for inter-block stuff.
  } else if (ContainsDefs && ContainsUses) {
    SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
    SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];

    // This case is basically a merging of the two preceding cases, with the
    // special note that checking for defs must take precedence over checking
    // for uses, because of two-address instructions.
    MachineBasicBlock::iterator Walker = UseI;
    bool foundDef = false;
    bool foundUse = false;
    while (Walker != MBB->begin()) {
      --Walker;
      if (BlockDefs.count(Walker)) {
        foundDef = true;
        break;
      } else if (BlockUses.count(Walker)) {
        foundUse = true;
        break;
      }
    }

    if (!foundDef && !foundUse)
      return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
                                            Uses, NewVNs, LiveOut, Phis,
                                            IsTopLevel, IsIntraBlock);

    SlotIndex StartIndex = LIs->getInstructionIndex(Walker);
    StartIndex = foundDef ? StartIndex.getDefIndex() : StartIndex.getUseIndex();
    SlotIndex EndIndex;
    if (IsIntraBlock) {
      EndIndex = LIs->getInstructionIndex(UseI).getDefIndex();
    } else
      EndIndex = LIs->getMBBEndIdx(MBB);

    if (foundDef)
      RetVNI = NewVNs[Walker];
    else
      RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
                                      NewVNs, LiveOut, Phis, false, true);

    LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));
  }

  // Memoize results so we don't have to recompute them.
  if (!IsIntraBlock) LiveOut[MBB] = RetVNI;

  if (!NewVNs.count(UseI))
    NewVNs[UseI] = RetVNI;
  Visited.insert(UseI);

  return RetVNI;
}

/// PerformPHIConstructionFallBack - PerformPHIConstruction fall back path.
VNInfo*
PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator UseI,
                                       MachineBasicBlock* MBB, LiveInterval* LI,
                                       SmallPtrSet<MachineInstr*, 4>& Visited,
             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                                       DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                                 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                                    DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                                       bool IsTopLevel, bool IsIntraBlock) {
  // NOTE: Because this is the fallback case from other cases, we do NOT
  // assume that we are not intrablock here.
  if (Phis.count(MBB)) return Phis[MBB];

  SlotIndex StartIndex = LIs->getMBBStartIdx(MBB);
  VNInfo *RetVNI = Phis[MBB] =
    LI->getNextValue(SlotIndex(), /*FIXME*/ 0,
                     LIs->getVNInfoAllocator());

  if (!IsIntraBlock) LiveOut[MBB] = RetVNI;

  // If there are no uses or defs between our starting point and the
  // beginning of the block, then recursively perform phi construction
  // on our predecessors.
  DenseMap<MachineBasicBlock*, VNInfo*> IncomingVNs;
  for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
    VNInfo* Incoming = PerformPHIConstruction((*PI)->end(), *PI, LI,
                                              Visited, Defs, Uses, NewVNs,
                                              LiveOut, Phis, false, false);
    if (Incoming != 0)
      IncomingVNs[*PI] = Incoming;
  }

  if (MBB->pred_size() == 1 && !RetVNI->hasPHIKill()) {
    VNInfo* OldVN = RetVNI;
    VNInfo* NewVN = IncomingVNs.begin()->second;
    VNInfo* MergedVN = LI->MergeValueNumberInto(OldVN, NewVN);
    if (MergedVN == OldVN) std::swap(OldVN, NewVN);

    for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator LOI = LiveOut.begin(),
           LOE = LiveOut.end(); LOI != LOE; ++LOI)
      if (LOI->second == OldVN)
        LOI->second = MergedVN;
    for (DenseMap<MachineInstr*, VNInfo*>::iterator NVI = NewVNs.begin(),
           NVE = NewVNs.end(); NVI != NVE; ++NVI)
      if (NVI->second == OldVN)
        NVI->second = MergedVN;
    for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator PI = Phis.begin(),
           PE = Phis.end(); PI != PE; ++PI)
      if (PI->second == OldVN)
        PI->second = MergedVN;

    RetVNI = MergedVN;
  } else {
    // Otherwise, merge the incoming VNInfos with a phi join.  Create a new
    // VNInfo to represent the joined value.
    for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
           IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
      I->second->setHasPHIKill(true);
    }
  }

  SlotIndex EndIndex;
  if (IsIntraBlock) {
    EndIndex = LIs->getInstructionIndex(UseI).getDefIndex();
  } else
    EndIndex = LIs->getMBBEndIdx(MBB);
  LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));

  // Memoize results so we don't have to recompute them.
  if (!IsIntraBlock)
    LiveOut[MBB] = RetVNI;

  if (!NewVNs.count(UseI))
    NewVNs[UseI] = RetVNI;
  Visited.insert(UseI);

  return RetVNI;
}
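
// Together, PerformPHIConstruction and its fallback implement on-demand SSA
// value numbering: within a block, the nearest preceding def or use supplies
// the value; otherwise a phi value is tentatively created at the block entry
// and either merged away (single predecessor, no PHI kill) or kept as a real
// phi join of the incoming values.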

/// ReconstructLiveInterval - Recompute a live interval from scratch.
void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
  VNInfo::Allocator& Alloc = LIs->getVNInfoAllocator();

  // Clear the old ranges and valnos.
  LI->clear();

  // Cache the uses and defs of the register.
  typedef DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> > RegMap;
  RegMap Defs, Uses;

  // Keep track of the new VNs we're creating.
  DenseMap<MachineInstr*, VNInfo*> NewVNs;
  SmallPtrSet<VNInfo*, 2> PhiVNs;

  // Cache defs, and create a new VNInfo for each def.
  for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
       DE = MRI->def_end(); DI != DE; ++DI) {
    Defs[(*DI).getParent()].insert(&*DI);

    SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
    DefIdx = DefIdx.getDefIndex();

    assert(!DI->isPHI() && "PHI instr in code during pre-alloc splitting.");
    VNInfo* NewVN = LI->getNextValue(DefIdx, 0, Alloc);

    // If the def is a move, set the copy field.
    if (DI->isCopyLike() && DI->getOperand(0).getReg() == LI->reg)
      NewVN->setCopy(&*DI);

    NewVNs[&*DI] = NewVN;
  }

  // Cache uses as a separate pass from actually processing them.
  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
       UE = MRI->use_end(); UI != UE; ++UI)
    Uses[(*UI).getParent()].insert(&*UI);

  // Now, actually process every use and use a phi construction algorithm
  // to walk from it to its reaching definitions, building VNInfos along
  // the way.
  DenseMap<MachineBasicBlock*, VNInfo*> LiveOut;
  DenseMap<MachineBasicBlock*, VNInfo*> Phis;
  SmallPtrSet<MachineInstr*, 4> Visited;
  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
       UE = MRI->use_end(); UI != UE; ++UI) {
    PerformPHIConstruction(&*UI, UI->getParent(), LI, Visited, Defs,
                           Uses, NewVNs, LiveOut, Phis, true, true);
  }

  // Add ranges for dead defs.
  for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
       DE = MRI->def_end(); DI != DE; ++DI) {
    SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
    DefIdx = DefIdx.getDefIndex();

    if (LI->liveAt(DefIdx)) continue;

    VNInfo* DeadVN = NewVNs[&*DI];
    LI->addRange(LiveRange(DefIdx, DefIdx.getNextSlot(), DeadVN));
  }
}
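
// ReconstructLiveInterval is effectively SSA reconstruction scoped to one
// register: pass one creates a value number per def, pass two walks each use
// backwards (via PerformPHIConstruction) to its reaching defs, inserting phi
// value numbers at join blocks as needed.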

/// RenumberValno - Split the given valno out into a new vreg, allowing it to
/// be allocated to a different register. This function creates a new vreg,
/// copies the valno and its live ranges over to the new vreg's interval,
/// removes them from the old interval, and rewrites all uses and defs of
/// the original reg to the new vreg within those ranges.
void PreAllocSplitting::RenumberValno(VNInfo* VN) {
  SmallVector<VNInfo*, 4> Stack;
  SmallVector<VNInfo*, 4> VNsToCopy;
  Stack.push_back(VN);

  // Walk through and copy the valno we care about, and any other valnos
  // that are two-address redefinitions of the one we care about.  These
  // will need to be rewritten as well.  We also check for safety of the
  // renumbering here, by making sure that none of the valnos involved has
  // a PHI kill.
  while (!Stack.empty()) {
    VNInfo* OldVN = Stack.back();
    Stack.pop_back();

    // Bail out if we ever encounter a valno that has a PHI kill.  We can't
    // renumber these.
    if (OldVN->hasPHIKill()) return;

    VNsToCopy.push_back(OldVN);

    // Locate two-address redefinitions.
    for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(CurrLI->reg),
         DE = MRI->def_end(); DI != DE; ++DI) {
      if (!DI->isRegTiedToUseOperand(DI.getOperandNo())) continue;
      SlotIndex DefIdx = LIs->getInstructionIndex(&*DI).getDefIndex();
      VNInfo* NextVN = CurrLI->findDefinedVNInfoForRegInt(DefIdx);
      if (std::find(VNsToCopy.begin(), VNsToCopy.end(), NextVN) !=
          VNsToCopy.end()) continue;
      Stack.push_back(NextVN);
    }
  }

  // Create the new vreg.
  unsigned NewVReg = MRI->createVirtualRegister(MRI->getRegClass(CurrLI->reg));

  // Create the new live interval.
  LiveInterval& NewLI = LIs->getOrCreateInterval(NewVReg);

  for (SmallVector<VNInfo*, 4>::iterator OI = VNsToCopy.begin(), OE =
       VNsToCopy.end(); OI != OE; ++OI) {
    VNInfo* OldVN = *OI;

    // Copy the valno over.
    VNInfo* NewVN = NewLI.createValueCopy(OldVN, LIs->getVNInfoAllocator());
    NewLI.MergeValueInAsValue(*CurrLI, OldVN, NewVN);

    // Remove the valno from the old interval.
    CurrLI->removeValNo(OldVN);
  }

  // Rewrite defs and uses.  This is done in two stages to avoid invalidating
  // the reg_iterator.
  SmallVector<std::pair<MachineInstr*, unsigned>, 8> OpsToChange;

  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
         E = MRI->reg_end(); I != E; ++I) {
    MachineOperand& MO = I.getOperand();
    SlotIndex InstrIdx = LIs->getInstructionIndex(&*I);

    if ((MO.isUse() && NewLI.liveAt(InstrIdx.getUseIndex())) ||
        (MO.isDef() && NewLI.liveAt(InstrIdx.getDefIndex())))
      OpsToChange.push_back(std::make_pair(&*I, I.getOperandNo()));
  }

  for (SmallVector<std::pair<MachineInstr*, unsigned>, 8>::iterator I =
         OpsToChange.begin(), E = OpsToChange.end(); I != E; ++I) {
    MachineInstr* Inst = I->first;
    unsigned OpIdx = I->second;
    MachineOperand& MO = Inst->getOperand(OpIdx);
    MO.setReg(NewVReg);
  }

  // Grow the VirtRegMap, since we've created a new vreg.
  VRM->grow();

  // The renumbered vreg shares a stack slot with the old register.
  if (IntervalSSMap.count(CurrLI->reg))
    IntervalSSMap[NewVReg] = IntervalSSMap[CurrLI->reg];

  ++NumRenumbers;
}

bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
                                      MachineInstr* DefMI,
                                      MachineBasicBlock::iterator RestorePt,
                                    SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
  MachineBasicBlock& MBB = *RestorePt->getParent();

  MachineBasicBlock::iterator KillPt = BarrierMBB->end();
  if (!DefMI || DefMI->getParent() == BarrierMBB)
    KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB);
  else
    KillPt = llvm::next(MachineBasicBlock::iterator(DefMI));

  if (KillPt == DefMI->getParent()->end())
    return false;

  TII->reMaterialize(MBB, RestorePt, VReg, 0, DefMI, *TRI);
  SlotIndex RematIdx = LIs->InsertMachineInstrInMaps(prior(RestorePt));

  ReconstructLiveInterval(CurrLI);
  RematIdx = RematIdx.getDefIndex();
  RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RematIdx));

  ++NumSplits;
  ++NumRemats;
  return true;
}
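
// Rematerialization avoids the stack traffic entirely: instead of a
// store/load pair around the barrier, the defining instruction is cloned at
// the restore point, and the recomputed value is renumbered into a new vreg
// just like a restored one.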

MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
                                           const TargetRegisterClass* RC,
                                           MachineInstr* DefMI,
                                           MachineInstr* Barrier,
                                           MachineBasicBlock* MBB,
                                           int& SS,
                                    SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
  // Go top down if RefsInMBB is empty.
  if (RefsInMBB.empty())
    return 0;

  MachineBasicBlock::iterator FoldPt = Barrier;
  while (&*FoldPt != DefMI && FoldPt != MBB->begin() &&
         !RefsInMBB.count(FoldPt))
    --FoldPt;

  int OpIdx = FoldPt->findRegisterDefOperandIdx(vreg);
  if (OpIdx == -1)
    return 0;

  SmallVector<unsigned, 1> Ops;
  Ops.push_back(OpIdx);

  if (!TII->canFoldMemoryOperand(FoldPt, Ops))
    return 0;

  DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(vreg);
  if (I != IntervalSSMap.end()) {
    SS = I->second;
  } else {
    SS = MFI->CreateSpillStackObject(RC->getSize(), RC->getAlignment());
  }

  MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);

  if (FMI) {
    LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
    FoldPt->eraseFromParent();
    ++NumFolds;

    IntervalSSMap[vreg] = SS;
    CurrSLI = &LSs->getOrCreateInterval(SS, RC);
    if (CurrSLI->hasAtLeastOneValue())
      CurrSValNo = CurrSLI->getValNumInfo(0);
    else
      CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0,
                                         LSs->getVNInfoAllocator());
  }

  return FMI;
}

MachineInstr* PreAllocSplitting::FoldRestore(unsigned vreg,
                                             const TargetRegisterClass* RC,
                                             MachineInstr* Barrier,
                                             MachineBasicBlock* MBB,
                                             int SS,
                                    SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
  if ((int)RestoreFoldLimit != -1 && RestoreFoldLimit == (int)NumRestoreFolds)
    return 0;

  // Go top down if RefsInMBB is empty.
  if (RefsInMBB.empty())
    return 0;

  // Can't fold a restore between a call stack setup and teardown.
  MachineBasicBlock::iterator FoldPt = Barrier;

  // Advance from barrier to call frame teardown.
  while (FoldPt != MBB->getFirstTerminator() &&
         FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
    if (RefsInMBB.count(FoldPt))
      return 0;

    ++FoldPt;
  }

  if (FoldPt == MBB->getFirstTerminator())
    return 0;
  else
    ++FoldPt;

  // Now find the restore point.
  while (FoldPt != MBB->getFirstTerminator() && !RefsInMBB.count(FoldPt)) {
    if (FoldPt->getOpcode() == TRI->getCallFrameSetupOpcode()) {
      while (FoldPt != MBB->getFirstTerminator() &&
             FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
        if (RefsInMBB.count(FoldPt))
          return 0;

        ++FoldPt;
      }

      if (FoldPt == MBB->getFirstTerminator())
        return 0;
    }

    ++FoldPt;
  }

  if (FoldPt == MBB->getFirstTerminator())
    return 0;

  int OpIdx = FoldPt->findRegisterUseOperandIdx(vreg, true);
  if (OpIdx == -1)
    return 0;

  SmallVector<unsigned, 1> Ops;
  Ops.push_back(OpIdx);

  if (!TII->canFoldMemoryOperand(FoldPt, Ops))
    return 0;

  MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);

  if (FMI) {
    LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
    FoldPt->eraseFromParent();
    ++NumRestoreFolds;
  }

  return FMI;
}
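
// SplitRegLiveInterval below ties these helpers together. Schematically:
// (1) find the value number live across the barrier, (2) pick a restore
// point after the barrier, (3) prefer rematerialization when the def is
// rematerializable, (4) otherwise spill after the def (reusing a previous
// spill slot or folding the store into an instruction when possible),
// (5) reload after the barrier (again folding when possible), and
// (6) rebuild the live interval and renumber the restored value into a
// new vreg.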

/// SplitRegLiveInterval - Split (spill and restore) the given live interval
/// so it would not cross the barrier that's being processed. Shrink wrap
/// (minimize) the live interval to the last uses.
bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
  DEBUG(dbgs() << "Pre-alloc splitting " << LI->reg << " for " << *Barrier
               << "  result: ");

  CurrLI = LI;

  // Find the live range where the current interval crosses the barrier.
  LiveInterval::iterator LR =
    CurrLI->FindLiveRangeContaining(BarrierIdx.getUseIndex());
  VNInfo *ValNo = LR->valno;

  assert(!ValNo->isUnused() && "Val# is defined by a dead def?");

  MachineInstr *DefMI = LIs->getInstructionFromIndex(ValNo->def);

  // If this would create a new join point, do not split.
  if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent())) {
    DEBUG(dbgs() << "FAILED (would create a new join point).\n");
    return false;
  }

  // Find all references in the barrier mbb.
  SmallPtrSet<MachineInstr*, 4> RefsInMBB;
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
         E = MRI->reg_end(); I != E; ++I) {
    MachineInstr *RefMI = &*I;
    if (RefMI->getParent() == BarrierMBB)
      RefsInMBB.insert(RefMI);
  }

  // Find a point to restore the value after the barrier.
  MachineBasicBlock::iterator RestorePt =
    findRestorePoint(BarrierMBB, Barrier, LR->end, RefsInMBB);
  if (RestorePt == BarrierMBB->end()) {
    DEBUG(dbgs() << "FAILED (could not find a suitable restore point).\n");
    return false;
  }

  if (DefMI && LIs->isReMaterializable(*LI, ValNo, DefMI))
    if (Rematerialize(LI->reg, ValNo, DefMI, RestorePt, RefsInMBB)) {
      DEBUG(dbgs() << "success (remat).\n");
      return true;
    }

  // Add a spill either before the barrier or after the definition.
  MachineBasicBlock *DefMBB = DefMI ? DefMI->getParent() : NULL;
  const TargetRegisterClass *RC = MRI->getRegClass(CurrLI->reg);
  SlotIndex SpillIndex;
  MachineInstr *SpillMI = NULL;
  int SS = -1;
  if (!DefMI) {
    // If we don't know where the def is we must split just before the barrier.
    if ((SpillMI = FoldSpill(LI->reg, RC, 0, Barrier,
                             BarrierMBB, SS, RefsInMBB))) {
      SpillIndex = LIs->getInstructionIndex(SpillMI);
    } else {
      MachineBasicBlock::iterator SpillPt =
        findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB);
      if (SpillPt == BarrierMBB->begin()) {
        DEBUG(dbgs() << "FAILED (could not find a suitable spill point).\n");
        return false; // No gap to insert spill.
      }
      // Add spill.
      SS = CreateSpillStackSlot(CurrLI->reg, RC);
      TII->storeRegToStackSlot(*BarrierMBB, SpillPt, CurrLI->reg, true, SS, RC,
                               TRI);
      SpillMI = prior(SpillPt);
      SpillIndex = LIs->InsertMachineInstrInMaps(SpillMI);
    }
  } else if (!IsAvailableInStack(DefMBB, CurrLI->reg, ValNo->def,
                                 LIs->getZeroIndex(), SpillIndex, SS)) {
    // If it's already split, just restore the value. There is no need to spill
    // the def again.
    if (!DefMI) {
      DEBUG(dbgs() << "FAILED (def is dead).\n");
      return false; // Def is dead. Do nothing.
    }

    if ((SpillMI = FoldSpill(LI->reg, RC, DefMI, Barrier,
                             BarrierMBB, SS, RefsInMBB))) {
      SpillIndex = LIs->getInstructionIndex(SpillMI);
    } else {
      // Check if it's possible to insert a spill after the def MI.
      MachineBasicBlock::iterator SpillPt;
      if (DefMBB == BarrierMBB) {
        // Add spill after the def and the last use before the barrier.
        SpillPt = findSpillPoint(BarrierMBB, Barrier, DefMI,
                                 RefsInMBB);
        if (SpillPt == DefMBB->begin()) {
          DEBUG(dbgs() << "FAILED (could not find a suitable spill point).\n");
          return false; // No gap to insert spill.
        }
      } else {
        SpillPt = llvm::next(MachineBasicBlock::iterator(DefMI));
        if (SpillPt == DefMBB->end()) {
          DEBUG(dbgs() << "FAILED (could not find a suitable spill point).\n");
          return false; // No gap to insert spill.
        }
      }
      // Add spill.
      SS = CreateSpillStackSlot(CurrLI->reg, RC);
      TII->storeRegToStackSlot(*DefMBB, SpillPt, CurrLI->reg, false, SS, RC,
                               TRI);
      SpillMI = prior(SpillPt);
      SpillIndex = LIs->InsertMachineInstrInMaps(SpillMI);
    }
  }

  // Remember def instruction index to spill index mapping.
  if (DefMI && SpillMI)
    Def2SpillMap[ValNo->def] = SpillIndex;

  // Add restore.
  bool FoldedRestore = false;
  SlotIndex RestoreIndex;
  if (MachineInstr* LMI = FoldRestore(CurrLI->reg, RC, Barrier,
                                      BarrierMBB, SS, RefsInMBB)) {
    RestorePt = LMI;
    RestoreIndex = LIs->getInstructionIndex(RestorePt);
    FoldedRestore = true;
  } else {
    TII->loadRegFromStackSlot(*BarrierMBB, RestorePt, CurrLI->reg, SS, RC, TRI);
    MachineInstr *LoadMI = prior(RestorePt);
    RestoreIndex = LIs->InsertMachineInstrInMaps(LoadMI);
  }

  // Update spill stack slot live interval.
  UpdateSpillSlotInterval(ValNo, SpillIndex.getUseIndex().getNextSlot(),
                          RestoreIndex.getDefIndex());

  ReconstructLiveInterval(CurrLI);

  if (!FoldedRestore) {
    SlotIndex RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
    RestoreIdx = RestoreIdx.getDefIndex();
    RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RestoreIdx));
  }

  ++NumSplits;
  DEBUG(dbgs() << "success.\n");
  return true;
}

/// SplitRegLiveIntervals - Split all register live intervals that cross the
/// barrier that's being processed.
bool
PreAllocSplitting::SplitRegLiveIntervals(const TargetRegisterClass **RCs,
                                         SmallPtrSet<LiveInterval*, 8>& Split) {
  // First find all the virtual registers whose live intervals are intercepted
  // by the current barrier.
  SmallVector<LiveInterval*, 8> Intervals;
  for (const TargetRegisterClass **RC = RCs; *RC; ++RC) {
    // FIXME: If it's not safe to move any instruction that defines the barrier
    // register class, then it means there are some special dependencies which
    // codegen is not modelling. Ignore these barriers for now.
    if (!TII->isSafeToMoveRegClassDefs(*RC))
      continue;
    const std::vector<unsigned> &VRs = MRI->getRegClassVirtRegs(*RC);
    for (unsigned i = 0, e = VRs.size(); i != e; ++i) {
      unsigned Reg = VRs[i];
      if (!LIs->hasInterval(Reg))
        continue;
      LiveInterval *LI = &LIs->getInterval(Reg);
      if (LI->liveAt(BarrierIdx) && !Barrier->readsRegister(Reg))
        // Virtual register live interval is intercepted by the barrier. We
        // should split and shrink wrap its interval if possible.
        Intervals.push_back(LI);
    }
  }

  // Process the affected live intervals.
  bool Change = false;
  while (!Intervals.empty()) {
    if (PreSplitLimit != -1 && (int)NumSplits == PreSplitLimit)
      break;
    LiveInterval *LI = Intervals.back();
    Intervals.pop_back();
    bool result = SplitRegLiveInterval(LI);
    if (result) Split.insert(LI);
    Change |= result;
  }

  return Change;
}

unsigned PreAllocSplitting::getNumberOfNonSpills(
                                  SmallPtrSet<MachineInstr*, 4>& MIs,
                                  unsigned Reg, int FrameIndex,
                                  bool& FeedsTwoAddr) {
  unsigned NonSpills = 0;
  for (SmallPtrSet<MachineInstr*, 4>::iterator UI = MIs.begin(), UE = MIs.end();
       UI != UE; ++UI) {
    int StoreFrameIndex;
    unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
    if (StoreVReg != Reg || StoreFrameIndex != FrameIndex)
      ++NonSpills;

    int DefIdx = (*UI)->findRegisterDefOperandIdx(Reg);
    if (DefIdx != -1 && (*UI)->isRegTiedToUseOperand(DefIdx))
      FeedsTwoAddr = true;
  }

  return NonSpills;
}

/// removeDeadSpills - After doing splitting, filter through all intervals we've
/// split, and see if any of the spills are unnecessary.  If so, remove them.
bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
  bool changed = false;

  // Walk over all of the live intervals that were touched by the splitter,
  // and see if we can do any DCE and/or folding.
  for (SmallPtrSet<LiveInterval*, 8>::iterator LI = split.begin(),
       LE = split.end(); LI != LE; ++LI) {
    DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> > VNUseCount;

    // First, collect all the uses of the vreg, and sort them by their
    // reaching definition (VNInfo).
    for (MachineRegisterInfo::use_iterator UI = MRI->use_begin((*LI)->reg),
         UE = MRI->use_end(); UI != UE; ++UI) {
      SlotIndex index = LIs->getInstructionIndex(&*UI);
      index = index.getUseIndex();

      const LiveRange* LR = (*LI)->getLiveRangeContaining(index);
      VNUseCount[LR->valno].insert(&*UI);
    }

    // Now, take the definitions (VNInfo's) one at a time and try to DCE
    // and/or fold them away.
    for (LiveInterval::vni_iterator VI = (*LI)->vni_begin(),
         VE = (*LI)->vni_end(); VI != VE; ++VI) {

      if (DeadSplitLimit != -1 && (int)NumDeadSpills == DeadSplitLimit)
        return changed;

      VNInfo* CurrVN = *VI;

      // We don't currently try to handle definitions with PHI kills, because
      // it would involve processing more than one VNInfo at once.
      if (CurrVN->hasPHIKill()) continue;

      // We also don't try to handle the results of PHI joins, since there's
      // no defining instruction to analyze.
      MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
      if (!DefMI || CurrVN->isUnused()) continue;

      // We're only interested in eliminating cruft introduced by the splitter,
      // which is of the form load-use or load-use-store.  First, check that
      // the definition is a load, and remember what stack slot we loaded it
      // from.
      int FrameIndex;
      if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue;

      // If the definition has no uses at all, just DCE it.
      if (VNUseCount[CurrVN].size() == 0) {
        LIs->RemoveMachineInstrFromMaps(DefMI);
        (*LI)->removeValNo(CurrVN);
        DefMI->eraseFromParent();
        VNUseCount.erase(CurrVN);
        ++NumDeadSpills;
        changed = true;
        continue;
      }

      // Second, get the number of non-store uses of the definition, as well as
      // a flag indicating whether it feeds into a later two-address definition.
      bool FeedsTwoAddr = false;
      unsigned NonSpillCount = getNumberOfNonSpills(VNUseCount[CurrVN],
                                                    (*LI)->reg, FrameIndex,
                                                    FeedsTwoAddr);

      // If there's one non-store use and it doesn't feed a two-addr, then
      // this is a load-use-store case that we can try to fold.
      if (NonSpillCount == 1 && !FeedsTwoAddr) {
        // Start by finding the non-store use MachineInstr.
        SmallPtrSet<MachineInstr*, 4>::iterator UI = VNUseCount[CurrVN].begin();
        int StoreFrameIndex;
        unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
        while (UI != VNUseCount[CurrVN].end() &&
               (StoreVReg == (*LI)->reg && StoreFrameIndex == FrameIndex)) {
          ++UI;
          if (UI != VNUseCount[CurrVN].end())
            StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
        }
        if (UI == VNUseCount[CurrVN].end()) continue;

        MachineInstr* use = *UI;

        // Attempt to fold it away!
        int OpIdx = use->findRegisterUseOperandIdx((*LI)->reg, false);
        if (OpIdx == -1) continue;
        SmallVector<unsigned, 1> Ops;
        Ops.push_back(OpIdx);
        if (!TII->canFoldMemoryOperand(use, Ops)) continue;

        MachineInstr* NewMI = TII->foldMemoryOperand(use, Ops, FrameIndex);

        if (!NewMI) continue;

        // Update relevant analyses.
        LIs->RemoveMachineInstrFromMaps(DefMI);
        LIs->ReplaceMachineInstrInMaps(use, NewMI);
        (*LI)->removeValNo(CurrVN);

        DefMI->eraseFromParent();
        use->eraseFromParent();
        VNUseCount[CurrVN].erase(use);

        // Remove deleted instructions.  Note that we need to remove them from
        // the VNInfo->use map as well, just to be safe.
        for (SmallPtrSet<MachineInstr*, 4>::iterator II =
             VNUseCount[CurrVN].begin(), IE = VNUseCount[CurrVN].end();
             II != IE; ++II) {
          for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
               VNI = VNUseCount.begin(), VNE = VNUseCount.end(); VNI != VNE;
               ++VNI)
            if (VNI->first != CurrVN)
              VNI->second.erase(*II);
          LIs->RemoveMachineInstrFromMaps(*II);
          (*II)->eraseFromParent();
        }

        VNUseCount.erase(CurrVN);

        for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
             VI = VNUseCount.begin(), VE = VNUseCount.end(); VI != VE; ++VI)
          if (VI->second.erase(use))
            VI->second.insert(NewMI);

        ++NumDeadSpills;
        changed = true;
        continue;
      }

      // If there's more than one non-store instruction, we can't profitably
      // fold it, so bail.
      if (NonSpillCount) continue;

      // Otherwise, this is a load-store case, so DCE them.
      for (SmallPtrSet<MachineInstr*, 4>::iterator UI =
           VNUseCount[CurrVN].begin(), UE = VNUseCount[CurrVN].end();
           UI != UE; ++UI) {
        LIs->RemoveMachineInstrFromMaps(*UI);
        (*UI)->eraseFromParent();
      }

      VNUseCount.erase(CurrVN);

      LIs->RemoveMachineInstrFromMaps(DefMI);
      (*LI)->removeValNo(CurrVN);
      DefMI->eraseFromParent();
      ++NumDeadSpills;
      changed = true;
    }
  }

  return changed;
}
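
// Illustrative shape of the load-use-store cruft removeDeadSpills folds away
// (registers and the slot are made up):
//   %r = load [ss#1]          ; def introduced by the splitter
//   ... = op %r, ...          ; the single non-store use
//   store %r -> [ss#1]        ; spill back to the same slot
// When the target can fold the memory operand, the use becomes one
// load-folded instruction and both the load and the store are deleted.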

bool PreAllocSplitting::createsNewJoin(LiveRange* LR,
                                       MachineBasicBlock* DefMBB,
                                       MachineBasicBlock* BarrierMBB) {
  if (DefMBB == BarrierMBB)
    return false;

  if (LR->valno->hasPHIKill())
    return false;

  SlotIndex MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
  if (LR->end < MBBEnd)
    return false;

  MachineLoopInfo& MLI = getAnalysis<MachineLoopInfo>();
  if (MLI.getLoopFor(DefMBB) != MLI.getLoopFor(BarrierMBB))
    return true;

  MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>();
  SmallPtrSet<MachineBasicBlock*, 4> Visited;
  typedef std::pair<MachineBasicBlock*,
                    MachineBasicBlock::succ_iterator> ItPair;
  SmallVector<ItPair, 4> Stack;
  Stack.push_back(std::make_pair(BarrierMBB, BarrierMBB->succ_begin()));

  while (!Stack.empty()) {
    ItPair P = Stack.back();
    Stack.pop_back();

    MachineBasicBlock* PredMBB = P.first;
    MachineBasicBlock::succ_iterator S = P.second;

    if (S == PredMBB->succ_end())
      continue;
    else if (Visited.count(*S)) {
      Stack.push_back(std::make_pair(PredMBB, ++S));
      continue;
    } else
      Stack.push_back(std::make_pair(PredMBB, S+1));

    MachineBasicBlock* MBB = *S;
    Visited.insert(MBB);

    if (MBB == BarrierMBB)
      return true;

    MachineDomTreeNode* DefMDTN = MDT.getNode(DefMBB);
    MachineDomTreeNode* BarrierMDTN = MDT.getNode(BarrierMBB);
    MachineDomTreeNode* MDTN = MDT.getNode(MBB)->getIDom();
    while (MDTN) {
      if (MDTN == DefMDTN)
        return true;
      else if (MDTN == BarrierMDTN)
        break;
      MDTN = MDTN->getIDom();
    }

    MBBEnd = LIs->getMBBEndIdx(MBB);
    if (LR->end > MBBEnd)
      Stack.push_back(std::make_pair(MBB, MBB->succ_begin()));
  }

  return false;
}
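
// Rationale (as implied by the checks above): the restore in BarrierMBB adds
// a second definition of the value. If the original value can still reach a
// block below the barrier along some path that bypasses the restore, the two
// definitions would have to be joined by a phi that did not exist before the
// split, so such splits are rejected up front.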

bool PreAllocSplitting::runOnMachineFunction(MachineFunction &MF) {
  CurrMF = &MF;
  TM     = &MF.getTarget();
  TRI    = TM->getRegisterInfo();
  TII    = TM->getInstrInfo();
  MFI    = MF.getFrameInfo();
  MRI    = &MF.getRegInfo();
  SIs    = &getAnalysis<SlotIndexes>();
  LIs    = &getAnalysis<LiveIntervals>();
  LSs    = &getAnalysis<LiveStacks>();
  VRM    = &getAnalysis<VirtRegMap>();

  bool MadeChange = false;

  // Make sure blocks are numbered in order.
  MF.RenumberBlocks();

  MachineBasicBlock *Entry = MF.begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;

  SmallPtrSet<LiveInterval*, 8> Split;

  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    BarrierMBB = *DFI;
    for (MachineBasicBlock::iterator I = BarrierMBB->begin(),
           E = BarrierMBB->end(); I != E; ++I) {
      Barrier = &*I;
      const TargetRegisterClass **BarrierRCs =
        Barrier->getDesc().getRegClassBarriers();
      if (!BarrierRCs)
        continue;
      BarrierIdx = LIs->getInstructionIndex(Barrier);
      MadeChange |= SplitRegLiveIntervals(BarrierRCs, Split);
    }
  }

  MadeChange |= removeDeadSpills(Split);

  return MadeChange;
}