//===-- PreAllocSplitting.cpp - Pre-allocation Interval Splitting Pass. --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the machine instruction level pre-register allocation
// live interval splitting pass. It finds live interval barriers, i.e.
// instructions which will kill all physical registers in certain register
// classes, and splits all live intervals which cross the barrier.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "pre-alloc-split"
#include "VirtRegMap.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

static cl::opt<int> PreSplitLimit("pre-split-limit", cl::init(-1), cl::Hidden);
static cl::opt<int> DeadSplitLimit("dead-split-limit", cl::init(-1),
                                   cl::Hidden);
static cl::opt<int> RestoreFoldLimit("restore-fold-limit", cl::init(-1),
                                     cl::Hidden);

STATISTIC(NumSplits,       "Number of intervals split");
STATISTIC(NumRemats,       "Number of intervals split by rematerialization");
STATISTIC(NumFolds,        "Number of intervals split with spill folding");
STATISTIC(NumRestoreFolds, "Number of intervals split with restore folding");
STATISTIC(NumRenumbers,    "Number of intervals renumbered into new registers");
STATISTIC(NumDeadSpills,   "Number of dead spills removed");
namespace {
  class PreAllocSplitting : public MachineFunctionPass {
    MachineFunction       *CurrMF;
    const TargetMachine   *TM;
    const TargetInstrInfo *TII;
    const TargetRegisterInfo* TRI;
    MachineFrameInfo      *MFI;
    MachineRegisterInfo   *MRI;
    SlotIndexes           *SIs;
    LiveIntervals         *LIs;
    LiveStacks            *LSs;
    VirtRegMap            *VRM;

    // Barrier - Current barrier being processed.
    MachineInstr          *Barrier;

    // BarrierMBB - Basic block where the barrier resides.
    MachineBasicBlock     *BarrierMBB;

    // BarrierIdx - Current barrier index.
    SlotIndex              BarrierIdx;

    // CurrLI - Current live interval being split.
    LiveInterval          *CurrLI;

    // CurrSLI - Current stack slot live interval.
    LiveInterval          *CurrSLI;

    // CurrSValNo - Current val# for the stack slot live interval.
    VNInfo                *CurrSValNo;

    // IntervalSSMap - A map from live interval to spill slots.
    DenseMap<unsigned, int> IntervalSSMap;

    // Def2SpillMap - A map from a def instruction index to spill index.
    DenseMap<SlotIndex, SlotIndex> Def2SpillMap;

  public:
    static char ID;
    PreAllocSplitting() : MachineFunctionPass(ID) {
      initializePreAllocSplittingPass(*PassRegistry::getPassRegistry());
    }
    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<SlotIndexes>();
      AU.addPreserved<SlotIndexes>();
      AU.addRequired<LiveIntervals>();
      AU.addPreserved<LiveIntervals>();
      AU.addRequired<LiveStacks>();
      AU.addPreserved<LiveStacks>();
      AU.addPreserved<RegisterCoalescer>();
      AU.addPreserved<CalculateSpillWeights>();
      if (StrongPHIElim)
        AU.addPreservedID(StrongPHIEliminationID);
      else
        AU.addPreservedID(PHIEliminationID);
      AU.addRequired<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addRequired<VirtRegMap>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addPreserved<MachineLoopInfo>();
      AU.addPreserved<VirtRegMap>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
    virtual void releaseMemory() {
      IntervalSSMap.clear();
      Def2SpillMap.clear();
    }

    virtual const char *getPassName() const {
      return "Pre-Register Allocation Live Interval Splitting";
    }

    /// print - Implement the dump method.
    virtual void print(raw_ostream &O, const Module* M = 0) const {
      LIs->print(O, M);
    }
  private:

    MachineBasicBlock::iterator
      findSpillPoint(MachineBasicBlock*, MachineInstr*, MachineInstr*,
                     SmallPtrSet<MachineInstr*, 4>&);

    MachineBasicBlock::iterator
      findRestorePoint(MachineBasicBlock*, MachineInstr*, SlotIndex,
                       SmallPtrSet<MachineInstr*, 4>&);

    int CreateSpillStackSlot(unsigned, const TargetRegisterClass*);

    bool IsAvailableInStack(MachineBasicBlock*, unsigned,
                            SlotIndex, SlotIndex,
                            SlotIndex&, int&) const;

    void UpdateSpillSlotInterval(VNInfo*, SlotIndex, SlotIndex);

    bool SplitRegLiveInterval(LiveInterval*);

    bool SplitRegLiveIntervals(const TargetRegisterClass**,
                               SmallPtrSet<LiveInterval*, 8>&);

    bool createsNewJoin(LiveRange* LR, MachineBasicBlock* DefMBB,
                        MachineBasicBlock* BarrierMBB);
    bool Rematerialize(unsigned vreg, VNInfo* ValNo,
                       MachineInstr* DefMI,
                       MachineBasicBlock::iterator RestorePt,
                       SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
    MachineInstr* FoldSpill(unsigned vreg, const TargetRegisterClass* RC,
                            MachineInstr* DefMI,
                            MachineInstr* Barrier,
                            MachineBasicBlock* MBB,
                            int& SS,
                            SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
    MachineInstr* FoldRestore(unsigned vreg,
                              const TargetRegisterClass* RC,
                              MachineInstr* Barrier,
                              MachineBasicBlock* MBB,
                              int SS,
                              SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
    void RenumberValno(VNInfo* VN);
    void ReconstructLiveInterval(LiveInterval* LI);
    bool removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split);
    unsigned getNumberOfNonSpills(SmallPtrSet<MachineInstr*, 4>& MIs,
                                  unsigned Reg, int FrameIndex, bool& TwoAddr);
    VNInfo* PerformPHIConstruction(MachineBasicBlock::iterator Use,
                                   MachineBasicBlock* MBB, LiveInterval* LI,
                                   SmallPtrSet<MachineInstr*, 4>& Visited,
             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                                   DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                                   DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                                   DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                                   bool IsTopLevel, bool IsIntraBlock);
    VNInfo* PerformPHIConstructionFallBack(MachineBasicBlock::iterator Use,
                                   MachineBasicBlock* MBB, LiveInterval* LI,
                                   SmallPtrSet<MachineInstr*, 4>& Visited,
             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                                   DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                                   DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                                   DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                                   bool IsTopLevel, bool IsIntraBlock);
  };
} // end anonymous namespace

char PreAllocSplitting::ID = 0;

INITIALIZE_PASS_BEGIN(PreAllocSplitting, "pre-alloc-splitting",
                "Pre-Register Allocation Live Interval Splitting",
                false, false)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(LiveStacks)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_END(PreAllocSplitting, "pre-alloc-splitting",
                "Pre-Register Allocation Live Interval Splitting",
                false, false)

char &llvm::PreAllocSplittingID = PreAllocSplitting::ID;
/// findSpillPoint - Find a gap as far away from the given MI as possible
/// that's suitable for spilling the current live interval. The index must
/// be before any defs and uses of the live interval register in the mbb.
/// Return begin() if none is found.
MachineBasicBlock::iterator
PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
                                  MachineInstr *DefMI,
                                  SmallPtrSet<MachineInstr*, 4> &RefsInMBB) {
  MachineBasicBlock::iterator Pt = MBB->begin();

  MachineBasicBlock::iterator MII = MI;
  MachineBasicBlock::iterator EndPt = DefMI
    ? MachineBasicBlock::iterator(DefMI) : MBB->begin();

  while (MII != EndPt && !RefsInMBB.count(MII) &&
         MII->getOpcode() != TRI->getCallFrameSetupOpcode())
    --MII;
  if (MII == EndPt || RefsInMBB.count(MII)) return Pt;

  while (MII != EndPt && !RefsInMBB.count(MII)) {
    // We can't insert the spill between the barrier (a call), and its
    // corresponding call frame setup.
    if (MII->getOpcode() == TRI->getCallFrameDestroyOpcode()) {
      while (MII->getOpcode() != TRI->getCallFrameSetupOpcode()) {
        --MII;
        if (MII == EndPt)
          return Pt;
      }
      continue;
    } else {
      Pt = MII;
    }

    if (RefsInMBB.count(MII))
      return Pt;

    --MII;
  }

  return Pt;
}
/// findRestorePoint - Find a gap in the instruction index map that's suitable
/// for restoring the current live interval value. The index must be before any
/// uses of the live interval register in the mbb. Return end() if none is
/// found.
MachineBasicBlock::iterator
PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
                                    SlotIndex LastIdx,
                                    SmallPtrSet<MachineInstr*, 4> &RefsInMBB) {
  // FIXME: Allow spill to be inserted to the beginning of the mbb. Update mbb
  // begin index accordingly.
  MachineBasicBlock::iterator Pt = MBB->end();
  MachineBasicBlock::iterator EndPt = MBB->getFirstTerminator();

  // We start at the call, so walk forward until we find the call frame teardown
  // since we can't insert restores before that. Bail if we encounter a use
  // during this time.
  MachineBasicBlock::iterator MII = MI;
  if (MII == EndPt) return Pt;

  while (MII != EndPt && !RefsInMBB.count(MII) &&
         MII->getOpcode() != TRI->getCallFrameDestroyOpcode())
    ++MII;
  if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
  ++MII;

  // FIXME: Limit the number of instructions to examine to reduce
  // compile time.
  while (MII != EndPt) {
    SlotIndex Index = LIs->getInstructionIndex(MII);
    if (Index > LastIdx)
      break;

    // We can't insert a restore between the barrier (a call) and its
    // corresponding call frame teardown.
    if (MII->getOpcode() == TRI->getCallFrameSetupOpcode()) {
      do {
        if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
        ++MII;
      } while (MII->getOpcode() != TRI->getCallFrameDestroyOpcode());
    } else {
      Pt = MII;
    }

    if (RefsInMBB.count(MII))
      return Pt;

    ++MII;
  }

  return Pt;
}
/// CreateSpillStackSlot - Create a stack slot for the live interval being
/// split. If the live interval was previously split, just reuse the same
/// slot.
int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
                                            const TargetRegisterClass *RC) {
  int SS;
  DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
  if (I != IntervalSSMap.end()) {
    SS = I->second;
  } else {
    SS = MFI->CreateSpillStackObject(RC->getSize(), RC->getAlignment());
    IntervalSSMap[Reg] = SS;
  }

  // Create live interval for stack slot.
  CurrSLI = &LSs->getOrCreateInterval(SS, RC);
  if (CurrSLI->hasAtLeastOneValue())
    CurrSValNo = CurrSLI->getValNumInfo(0);
  else
    CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0,
                                       LSs->getVNInfoAllocator());
  return SS;
}
/// IsAvailableInStack - Return true if register is available in a split stack
/// slot at the specified index.
bool
PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
                                      unsigned Reg, SlotIndex DefIndex,
                                      SlotIndex RestoreIndex,
                                      SlotIndex &SpillIndex,
                                      int &SS) const {
  if (!DefMBB)
    return false;

  DenseMap<unsigned, int>::const_iterator I = IntervalSSMap.find(Reg);
  if (I == IntervalSSMap.end())
    return false;
  DenseMap<SlotIndex, SlotIndex>::const_iterator
    II = Def2SpillMap.find(DefIndex);
  if (II == Def2SpillMap.end())
    return false;

  // If last spill of def is in the same mbb as barrier mbb (where restore will
  // be), make sure it's not below the intended restore index.
  // FIXME: Undo the previous spill?
  assert(LIs->getMBBFromIndex(II->second) == DefMBB);
  if (DefMBB == BarrierMBB && II->second >= RestoreIndex)
    return false;

  SS = I->second;
  SpillIndex = II->second;
  return true;
}
/// UpdateSpillSlotInterval - Given the specified val# of the register live
/// interval being split, and the spill and restore indices, update the live
/// interval of the spill stack slot.
void
PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, SlotIndex SpillIndex,
                                           SlotIndex RestoreIndex) {
  assert(LIs->getMBBFromIndex(RestoreIndex) == BarrierMBB &&
         "Expect restore in the barrier mbb");

  MachineBasicBlock *MBB = LIs->getMBBFromIndex(SpillIndex);
  if (MBB == BarrierMBB) {
    // Intra-block spill + restore. We are done.
    LiveRange SLR(SpillIndex, RestoreIndex, CurrSValNo);
    CurrSLI->addRange(SLR);
    return;
  }

  SmallPtrSet<MachineBasicBlock*, 4> Processed;
  SlotIndex EndIdx = LIs->getMBBEndIdx(MBB);
  LiveRange SLR(SpillIndex, EndIdx, CurrSValNo);
  CurrSLI->addRange(SLR);
  Processed.insert(MBB);

  // Start from the spill mbb, figure out the extent of the spill slot's
  // live interval.
  SmallVector<MachineBasicBlock*, 4> WorkList;
  const LiveRange *LR = CurrLI->getLiveRangeContaining(SpillIndex);
  if (LR->end > EndIdx)
    // If the live range extends beyond the end of the mbb, add successors to
    // the work list.
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      WorkList.push_back(*SI);

  while (!WorkList.empty()) {
    MachineBasicBlock *MBB = WorkList.back();
    WorkList.pop_back();
    if (Processed.count(MBB))
      continue;
    SlotIndex Idx = LIs->getMBBStartIdx(MBB);
    LR = CurrLI->getLiveRangeContaining(Idx);
    if (LR && LR->valno == ValNo) {
      EndIdx = LIs->getMBBEndIdx(MBB);
      if (Idx <= RestoreIndex && RestoreIndex < EndIdx) {
        // Spill slot live interval stops at the restore.
        LiveRange SLR(Idx, RestoreIndex, CurrSValNo);
        CurrSLI->addRange(SLR);
      } else if (LR->end > EndIdx) {
        // Live range extends beyond end of mbb, process successors.
        LiveRange SLR(Idx, EndIdx.getNextIndex(), CurrSValNo);
        CurrSLI->addRange(SLR);
        for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
               SE = MBB->succ_end(); SI != SE; ++SI)
          WorkList.push_back(*SI);
      } else {
        LiveRange SLR(Idx, LR->end, CurrSValNo);
        CurrSLI->addRange(SLR);
      }
      Processed.insert(MBB);
    }
  }
}
/// PerformPHIConstruction - From properly set up use and def lists, use a PHI
/// construction algorithm to compute the ranges and valnos for an interval.
VNInfo*
PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
                       MachineBasicBlock* MBB, LiveInterval* LI,
                       SmallPtrSet<MachineInstr*, 4>& Visited,
             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                       DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                       DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                       DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                       bool IsTopLevel, bool IsIntraBlock) {
  // Return memoized result if it's available.
  if (IsTopLevel && Visited.count(UseI) && NewVNs.count(UseI))
    return NewVNs[UseI];
  else if (!IsTopLevel && IsIntraBlock && NewVNs.count(UseI))
    return NewVNs[UseI];
  else if (!IsIntraBlock && LiveOut.count(MBB))
    return LiveOut[MBB];

  // Check if our block contains any uses or defs.
  bool ContainsDefs = Defs.count(MBB);
  bool ContainsUses = Uses.count(MBB);

  VNInfo* RetVNI = 0;

  // Enumerate the cases of use/def containing blocks.
  if (!ContainsDefs && !ContainsUses) {
    return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs, Uses,
                                          NewVNs, LiveOut, Phis,
                                          IsTopLevel, IsIntraBlock);
  } else if (ContainsDefs && !ContainsUses) {
    SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];

    // Search for the def in this block.  If we don't find it before the
    // instruction we care about, go to the fallback case.  Note that that
    // should never happen: this cannot be intrablock, so use should
    // always be an end() iterator.
    assert(UseI == MBB->end() && "No use marked in intrablock");

    MachineBasicBlock::iterator Walker = UseI;
    --Walker;
    while (Walker != MBB->begin()) {
      if (BlockDefs.count(Walker))
        break;
      --Walker;
    }

    // Once we've found it, extend its VNInfo to our instruction.
    SlotIndex DefIndex = LIs->getInstructionIndex(Walker);
    DefIndex = DefIndex.getDefIndex();
    SlotIndex EndIndex = LIs->getMBBEndIdx(MBB);

    RetVNI = NewVNs[Walker];
    LI->addRange(LiveRange(DefIndex, EndIndex, RetVNI));
  } else if (!ContainsDefs && ContainsUses) {
    SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];

    // Search for the use in this block that precedes the instruction we care
    // about, going to the fallback case if we don't find it.
    MachineBasicBlock::iterator Walker = UseI;
    bool found = false;
    while (Walker != MBB->begin()) {
      --Walker;
      if (BlockUses.count(Walker)) {
        found = true;
        break;
      }
    }

    if (!found)
      return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
                                            Uses, NewVNs, LiveOut, Phis,
                                            IsTopLevel, IsIntraBlock);

    SlotIndex UseIndex = LIs->getInstructionIndex(Walker);
    UseIndex = UseIndex.getUseIndex();
    SlotIndex EndIndex;
    if (IsIntraBlock)
      EndIndex = LIs->getInstructionIndex(UseI).getDefIndex();
    else
      EndIndex = LIs->getMBBEndIdx(MBB);

    // Now, recursively phi construct the VNInfo for the use we found,
    // and then extend it to include the instruction we care about.
    RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
                                    NewVNs, LiveOut, Phis, false, true);

    LI->addRange(LiveRange(UseIndex, EndIndex, RetVNI));

    // FIXME: Need to set kills properly for inter-block stuff.
  } else if (ContainsDefs && ContainsUses) {
    SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
    SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];

    // This case is basically a merging of the two preceding cases, with the
    // special note that checking for defs must take precedence over checking
    // for uses, because of two-address instructions.
    MachineBasicBlock::iterator Walker = UseI;
    bool foundDef = false;
    bool foundUse = false;
    while (Walker != MBB->begin()) {
      --Walker;
      if (BlockDefs.count(Walker)) {
        foundDef = true;
        break;
      } else if (BlockUses.count(Walker)) {
        foundUse = true;
        break;
      }
    }

    if (!foundDef && !foundUse)
      return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
                                            Uses, NewVNs, LiveOut, Phis,
                                            IsTopLevel, IsIntraBlock);

    SlotIndex StartIndex = LIs->getInstructionIndex(Walker);
    StartIndex = foundDef ? StartIndex.getDefIndex() : StartIndex.getUseIndex();
    SlotIndex EndIndex;
    if (IsIntraBlock)
      EndIndex = LIs->getInstructionIndex(UseI).getDefIndex();
    else
      EndIndex = LIs->getMBBEndIdx(MBB);

    if (foundDef)
      RetVNI = NewVNs[Walker];
    else
      RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
                                      NewVNs, LiveOut, Phis, false, true);

    LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));
  }

  // Memoize results so we don't have to recompute them.
  if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
  else {
    if (!NewVNs.count(UseI))
      NewVNs[UseI] = RetVNI;
    Visited.insert(UseI);
  }

  return RetVNI;
}
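
// Note: the fallback path below is taken when the block containing the query
// point has no local def or use of the register that reaches it; it
// synthesizes a phi-join value number for the block from the live-out values
// of its predecessors.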
/// PerformPHIConstructionFallBack - PerformPHIConstruction fall back path.
VNInfo*
PreAllocSplitting::PerformPHIConstructionFallBack(
                       MachineBasicBlock::iterator UseI,
                       MachineBasicBlock* MBB, LiveInterval* LI,
                       SmallPtrSet<MachineInstr*, 4>& Visited,
             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                       DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                       DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                       DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                       bool IsTopLevel, bool IsIntraBlock) {
  // NOTE: Because this is the fallback case from other cases, we do NOT
  // assume that we are not intrablock here.
  if (Phis.count(MBB)) return Phis[MBB];

  SlotIndex StartIndex = LIs->getMBBStartIdx(MBB);
  VNInfo *RetVNI = Phis[MBB] =
    LI->getNextValue(SlotIndex(), /*FIXME*/ 0,
                     LIs->getVNInfoAllocator());

  if (!IsIntraBlock) LiveOut[MBB] = RetVNI;

  // If there are no uses or defs between our starting point and the
  // beginning of the block, then recursively perform phi construction
  // on our predecessors.
  DenseMap<MachineBasicBlock*, VNInfo*> IncomingVNs;
  for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
    VNInfo* Incoming = PerformPHIConstruction((*PI)->end(), *PI, LI,
                                              Visited, Defs, Uses, NewVNs,
                                              LiveOut, Phis, false, false);
    if (Incoming != 0)
      IncomingVNs[*PI] = Incoming;
  }

  if (MBB->pred_size() == 1 && !RetVNI->hasPHIKill()) {
    VNInfo* OldVN = RetVNI;
    VNInfo* NewVN = IncomingVNs.begin()->second;
    VNInfo* MergedVN = LI->MergeValueNumberInto(OldVN, NewVN);
    if (MergedVN == OldVN) std::swap(OldVN, NewVN);

    for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator LOI = LiveOut.begin(),
           LOE = LiveOut.end(); LOI != LOE; ++LOI)
      if (LOI->second == OldVN)
        LOI->second = MergedVN;
    for (DenseMap<MachineInstr*, VNInfo*>::iterator NVI = NewVNs.begin(),
           NVE = NewVNs.end(); NVI != NVE; ++NVI)
      if (NVI->second == OldVN)
        NVI->second = MergedVN;
    for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator PI = Phis.begin(),
           PE = Phis.end(); PI != PE; ++PI)
      if (PI->second == OldVN)
        PI->second = MergedVN;
    RetVNI = MergedVN;
  } else {
    // Otherwise, merge the incoming VNInfos with a phi join.  Create a new
    // VNInfo to represent the joined value.
    for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
           IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
      I->second->setHasPHIKill(true);
    }
  }

  SlotIndex EndIndex;
  if (IsIntraBlock)
    EndIndex = LIs->getInstructionIndex(UseI).getDefIndex();
  else
    EndIndex = LIs->getMBBEndIdx(MBB);
  LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));

  // Memoize results so we don't have to recompute them.
  if (!IsIntraBlock)
    LiveOut[MBB] = RetVNI;
  else {
    if (!NewVNs.count(UseI))
      NewVNs[UseI] = RetVNI;
    Visited.insert(UseI);
  }

  return RetVNI;
}
/// ReconstructLiveInterval - Recompute a live interval from scratch.
void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
  VNInfo::Allocator& Alloc = LIs->getVNInfoAllocator();

  // Clear the old ranges and valnos.
  LI->clear();

  // Cache the uses and defs of the register.
  typedef DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> > RegMap;
  RegMap Defs;
  RegMap Uses;

  // Keep track of the new VNs we're creating.
  DenseMap<MachineInstr*, VNInfo*> NewVNs;
  SmallPtrSet<VNInfo*, 2> PhiVNs;

  // Cache defs, and create a new VNInfo for each def.
  for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
         DE = MRI->def_end(); DI != DE; ++DI) {
    Defs[(*DI).getParent()].insert(&*DI);

    SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
    DefIdx = DefIdx.getDefIndex();

    assert(!DI->isPHI() && "PHI instr in code during pre-alloc splitting.");
    VNInfo* NewVN = LI->getNextValue(DefIdx, 0, Alloc);

    // If the def is a move, set the copy field.
    if (DI->isCopyLike() && DI->getOperand(0).getReg() == LI->reg)
      NewVN->setCopy(&*DI);

    NewVNs[&*DI] = NewVN;
  }

  // Cache uses as a separate pass from actually processing them.
  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
         UE = MRI->use_end(); UI != UE; ++UI)
    Uses[(*UI).getParent()].insert(&*UI);

  // Now, actually process every use and use a phi construction algorithm
  // to walk from it to its reaching definitions, building VNInfos along
  // the way.
  DenseMap<MachineBasicBlock*, VNInfo*> LiveOut;
  DenseMap<MachineBasicBlock*, VNInfo*> Phis;
  SmallPtrSet<MachineInstr*, 4> Visited;
  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
         UE = MRI->use_end(); UI != UE; ++UI) {
    PerformPHIConstruction(&*UI, UI->getParent(), LI, Visited, Defs,
                           Uses, NewVNs, LiveOut, Phis, true, true);
  }

  // Add ranges for dead defs.
  for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
         DE = MRI->def_end(); DI != DE; ++DI) {
    SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
    DefIdx = DefIdx.getDefIndex();

    if (LI->liveAt(DefIdx)) continue;

    VNInfo* DeadVN = NewVNs[&*DI];
    LI->addRange(LiveRange(DefIdx, DefIdx.getNextSlot(), DeadVN));
  }
}
/// RenumberValno - Split the given valno out into a new vreg, allowing it to
/// be allocated to a different register.  This function creates a new vreg,
/// copies the valno and its live ranges over to the new vreg's interval,
/// removes them from the old interval, and rewrites all uses and defs of
/// the original reg to the new vreg within those ranges.
void PreAllocSplitting::RenumberValno(VNInfo* VN) {
  SmallVector<VNInfo*, 4> Stack;
  SmallVector<VNInfo*, 4> VNsToCopy;
  Stack.push_back(VN);

  // Walk through and copy the valno we care about, and any other valnos
  // that are two-address redefinitions of the one we care about.  These
  // will need to be rewritten as well.  We also check for safety of the
  // renumbering here, by making sure that none of the valnos involved has
  // PHI kills.
  while (!Stack.empty()) {
    VNInfo* OldVN = Stack.back();
    Stack.pop_back();

    // Bail out if we ever encounter a valno that has a PHI kill.  We can't
    // renumber these.
    if (OldVN->hasPHIKill()) return;

    VNsToCopy.push_back(OldVN);

    // Locate two-address redefinitions.
    for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(CurrLI->reg),
           DE = MRI->def_end(); DI != DE; ++DI) {
      if (!DI->isRegTiedToUseOperand(DI.getOperandNo())) continue;
      SlotIndex DefIdx = LIs->getInstructionIndex(&*DI).getDefIndex();
      VNInfo* NextVN = CurrLI->findDefinedVNInfoForRegInt(DefIdx);
      if (std::find(VNsToCopy.begin(), VNsToCopy.end(), NextVN) !=
          VNsToCopy.end())
        continue;
      Stack.push_back(NextVN);
    }
  }

  // Create the new vreg.
  unsigned NewVReg = MRI->createVirtualRegister(MRI->getRegClass(CurrLI->reg));

  // Create the new live interval.
  LiveInterval& NewLI = LIs->getOrCreateInterval(NewVReg);

  for (SmallVector<VNInfo*, 4>::iterator OI = VNsToCopy.begin(), OE =
         VNsToCopy.end(); OI != OE; ++OI) {
    VNInfo* OldVN = *OI;

    // Copy the valno over.
    VNInfo* NewVN = NewLI.createValueCopy(OldVN, LIs->getVNInfoAllocator());
    NewLI.MergeValueInAsValue(*CurrLI, OldVN, NewVN);

    // Remove the valno from the old interval.
    CurrLI->removeValNo(OldVN);
  }

  // Rewrite defs and uses.  This is done in two stages to avoid invalidating
  // the reg_iterator.
  SmallVector<std::pair<MachineInstr*, unsigned>, 8> OpsToChange;

  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
         E = MRI->reg_end(); I != E; ++I) {
    MachineOperand& MO = I.getOperand();
    SlotIndex InstrIdx = LIs->getInstructionIndex(&*I);

    if ((MO.isUse() && NewLI.liveAt(InstrIdx.getUseIndex())) ||
        (MO.isDef() && NewLI.liveAt(InstrIdx.getDefIndex())))
      OpsToChange.push_back(std::make_pair(&*I, I.getOperandNo()));
  }

  for (SmallVector<std::pair<MachineInstr*, unsigned>, 8>::iterator I =
         OpsToChange.begin(), E = OpsToChange.end(); I != E; ++I) {
    MachineInstr* Inst = I->first;
    unsigned OpIdx = I->second;
    MachineOperand& MO = Inst->getOperand(OpIdx);
    MO.setReg(NewVReg);
  }

  // Grow the VirtRegMap, since we've created a new vreg.
  VRM->grow();

  // The renumbered vreg shares a stack slot with the old register.
  if (IntervalSSMap.count(CurrLI->reg))
    IntervalSSMap[NewVReg] = IntervalSSMap[CurrLI->reg];

  ++NumRenumbers;
}
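
/// Rematerialize - Instead of spilling and restoring across the barrier, try
/// to recompute ValNo's defining instruction just before the restore point.
/// On success the rematerialized value is renumbered into a fresh vreg via
/// ReconstructLiveInterval/RenumberValno.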
bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
                                      MachineInstr* DefMI,
                                      MachineBasicBlock::iterator RestorePt,
                                    SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
  MachineBasicBlock& MBB = *RestorePt->getParent();

  MachineBasicBlock::iterator KillPt = BarrierMBB->end();
  if (!DefMI || DefMI->getParent() == BarrierMBB)
    KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB);
  else
    KillPt = llvm::next(MachineBasicBlock::iterator(DefMI));

  if (KillPt == DefMI->getParent()->end())
    return false;

  TII->reMaterialize(MBB, RestorePt, VReg, 0, DefMI, *TRI);
  SlotIndex RematIdx = LIs->InsertMachineInstrInMaps(prior(RestorePt));

  ReconstructLiveInterval(CurrLI);
  RematIdx = RematIdx.getDefIndex();
  RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RematIdx));

  ++NumSplits;
  ++NumRemats;
  return true;
}
MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
                                           const TargetRegisterClass* RC,
                                           MachineInstr* DefMI,
                                           MachineInstr* Barrier,
                                           MachineBasicBlock* MBB,
                                           int& SS,
                                    SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
  // Go top down if RefsInMBB is empty.
  if (RefsInMBB.empty())
    return 0;

  MachineBasicBlock::iterator FoldPt = Barrier;
  while (&*FoldPt != DefMI && FoldPt != MBB->begin() &&
         !RefsInMBB.count(FoldPt))
    --FoldPt;

  int OpIdx = FoldPt->findRegisterDefOperandIdx(vreg);
  if (OpIdx == -1)
    return 0;

  SmallVector<unsigned, 1> Ops;
  Ops.push_back(OpIdx);

  if (!TII->canFoldMemoryOperand(FoldPt, Ops))
    return 0;

  DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(vreg);
  if (I != IntervalSSMap.end()) {
    SS = I->second;
  } else {
    SS = MFI->CreateSpillStackObject(RC->getSize(), RC->getAlignment());
  }

  MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);

  if (FMI) {
    LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
    FoldPt->eraseFromParent();
    ++NumFolds;

    IntervalSSMap[vreg] = SS;
    CurrSLI = &LSs->getOrCreateInterval(SS, RC);
    if (CurrSLI->hasAtLeastOneValue())
      CurrSValNo = CurrSLI->getValNumInfo(0);
    else
      CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0,
                                         LSs->getVNInfoAllocator());
  }

  return FMI;
}
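
/// FoldRestore - Walk forward from the barrier past the call frame teardown
/// looking for a use of vreg that can be folded into a load from the spill
/// slot SS, avoiding an explicit restore instruction. Returns the folded
/// instruction, or null if folding is not possible or the restore-fold limit
/// has been reached.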
MachineInstr* PreAllocSplitting::FoldRestore(unsigned vreg,
                                             const TargetRegisterClass* RC,
                                             MachineInstr* Barrier,
                                             MachineBasicBlock* MBB,
                                             int SS,
                                     SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
  if ((int)RestoreFoldLimit != -1 && RestoreFoldLimit == (int)NumRestoreFolds)
    return 0;

  // Go top down if RefsInMBB is empty.
  if (RefsInMBB.empty())
    return 0;

  // Can't fold a restore between a call stack setup and teardown.
  MachineBasicBlock::iterator FoldPt = Barrier;

  // Advance from barrier to call frame teardown.
  while (FoldPt != MBB->getFirstTerminator() &&
         FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
    if (RefsInMBB.count(FoldPt))
      return 0;

    ++FoldPt;
  }

  if (FoldPt == MBB->getFirstTerminator())
    return 0;
  else
    ++FoldPt;

  // Now find the restore point.
  while (FoldPt != MBB->getFirstTerminator() && !RefsInMBB.count(FoldPt)) {
    if (FoldPt->getOpcode() == TRI->getCallFrameSetupOpcode()) {
      while (FoldPt != MBB->getFirstTerminator() &&
             FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
        if (RefsInMBB.count(FoldPt))
          return 0;

        ++FoldPt;
      }

      if (FoldPt == MBB->getFirstTerminator())
        return 0;
    }

    ++FoldPt;
  }

  if (FoldPt == MBB->getFirstTerminator())
    return 0;

  int OpIdx = FoldPt->findRegisterUseOperandIdx(vreg, true);
  if (OpIdx == -1)
    return 0;

  SmallVector<unsigned, 1> Ops;
  Ops.push_back(OpIdx);

  if (!TII->canFoldMemoryOperand(FoldPt, Ops))
    return 0;

  MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);

  if (FMI) {
    LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
    FoldPt->eraseFromParent();
    ++NumRestoreFolds;
  }

  return FMI;
}
/// SplitRegLiveInterval - Split (spill and restore) the given live interval
/// so it would not cross the barrier that's being processed. Shrink wrap
/// (minimize) the live interval to the last uses.
bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
  DEBUG(dbgs() << "Pre-alloc splitting " << LI->reg << " for " << *Barrier
               << "  result: ");

  CurrLI = LI;

  // Find the live range where the current interval crosses the barrier.
  LiveInterval::iterator LR =
    CurrLI->FindLiveRangeContaining(BarrierIdx.getUseIndex());
  VNInfo *ValNo = LR->valno;

  assert(!ValNo->isUnused() && "Val# is defined by a dead def?");

  MachineInstr *DefMI = LIs->getInstructionFromIndex(ValNo->def);

  // If this would create a new join point, do not split.
  if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent())) {
    DEBUG(dbgs() << "FAILED (would create a new join point).\n");
    return false;
  }

  // Find all references in the barrier mbb.
  SmallPtrSet<MachineInstr*, 4> RefsInMBB;
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
         E = MRI->reg_end(); I != E; ++I) {
    MachineInstr *RefMI = &*I;
    if (RefMI->getParent() == BarrierMBB)
      RefsInMBB.insert(RefMI);
  }

  // Find a point to restore the value after the barrier.
  MachineBasicBlock::iterator RestorePt =
    findRestorePoint(BarrierMBB, Barrier, LR->end, RefsInMBB);
  if (RestorePt == BarrierMBB->end()) {
    DEBUG(dbgs() << "FAILED (could not find a suitable restore point).\n");
    return false;
  }

  if (DefMI && LIs->isReMaterializable(*LI, ValNo, DefMI))
    if (Rematerialize(LI->reg, ValNo, DefMI, RestorePt, RefsInMBB)) {
      DEBUG(dbgs() << "success (remat).\n");
      return true;
    }

  // Add a spill either before the barrier or after the definition.
  MachineBasicBlock *DefMBB = DefMI ? DefMI->getParent() : NULL;
  const TargetRegisterClass *RC = MRI->getRegClass(CurrLI->reg);
  SlotIndex SpillIndex;
  MachineInstr *SpillMI = NULL;
  int SS = -1;
  if (!DefMI) {
    // If we don't know where the def is we must split just before the barrier.
    if ((SpillMI = FoldSpill(LI->reg, RC, 0, Barrier,
                             BarrierMBB, SS, RefsInMBB))) {
      SpillIndex = LIs->getInstructionIndex(SpillMI);
    } else {
      MachineBasicBlock::iterator SpillPt =
        findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB);
      if (SpillPt == BarrierMBB->begin()) {
        DEBUG(dbgs() << "FAILED (could not find a suitable spill point).\n");
        return false; // No gap to insert spill.
      }
      // Add spill.

      SS = CreateSpillStackSlot(CurrLI->reg, RC);
      TII->storeRegToStackSlot(*BarrierMBB, SpillPt, CurrLI->reg, true, SS, RC,
                               TRI);
      SpillMI = prior(SpillPt);
      SpillIndex = LIs->InsertMachineInstrInMaps(SpillMI);
    }
  } else if (!IsAvailableInStack(DefMBB, CurrLI->reg, ValNo->def,
                                 LIs->getZeroIndex(), SpillIndex, SS)) {
    // If it's already split, just restore the value. There is no need to spill
    // the def again.
    if (!DefMI) {
      DEBUG(dbgs() << "FAILED (def is dead).\n");
      return false; // Def is dead. Do nothing.
    }

    if ((SpillMI = FoldSpill(LI->reg, RC, DefMI, Barrier,
                             BarrierMBB, SS, RefsInMBB))) {
      SpillIndex = LIs->getInstructionIndex(SpillMI);
    } else {
      // Check if it's possible to insert a spill after the def MI.
      MachineBasicBlock::iterator SpillPt;
      if (DefMBB == BarrierMBB) {
        // Add spill after the def and the last use before the barrier.
        SpillPt = findSpillPoint(BarrierMBB, Barrier, DefMI,
                                 RefsInMBB);
        if (SpillPt == DefMBB->begin()) {
          DEBUG(dbgs() << "FAILED (could not find a suitable spill point).\n");
          return false; // No gap to insert spill.
        }
      } else {
        SpillPt = llvm::next(MachineBasicBlock::iterator(DefMI));
        if (SpillPt == DefMBB->end()) {
          DEBUG(dbgs() << "FAILED (could not find a suitable spill point).\n");
          return false; // No gap to insert spill.
        }
      }
      // Add spill.
      SS = CreateSpillStackSlot(CurrLI->reg, RC);
      TII->storeRegToStackSlot(*DefMBB, SpillPt, CurrLI->reg, false, SS, RC,
                               TRI);
      SpillMI = prior(SpillPt);
      SpillIndex = LIs->InsertMachineInstrInMaps(SpillMI);
    }
  }

  // Remember def instruction index to spill index mapping.
  if (DefMI && SpillMI)
    Def2SpillMap[ValNo->def] = SpillIndex;

  // Add restore.
  bool FoldedRestore = false;
  SlotIndex RestoreIndex;
  if (MachineInstr* LMI = FoldRestore(CurrLI->reg, RC, Barrier,
                                      BarrierMBB, SS, RefsInMBB)) {
    RestorePt = LMI;
    RestoreIndex = LIs->getInstructionIndex(RestorePt);
    FoldedRestore = true;
  } else {
    TII->loadRegFromStackSlot(*BarrierMBB, RestorePt, CurrLI->reg, SS, RC, TRI);
    MachineInstr *LoadMI = prior(RestorePt);
    RestoreIndex = LIs->InsertMachineInstrInMaps(LoadMI);
  }

  // Update spill stack slot live interval.
  UpdateSpillSlotInterval(ValNo, SpillIndex.getUseIndex().getNextSlot(),
                          RestoreIndex.getDefIndex());

  ReconstructLiveInterval(CurrLI);

  if (!FoldedRestore) {
    SlotIndex RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
    RestoreIdx = RestoreIdx.getDefIndex();
    RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RestoreIdx));
  }

  ++NumSplits;
  DEBUG(dbgs() << "success.\n");
  return true;
}
/// SplitRegLiveIntervals - Split all register live intervals that cross the
/// barrier that's being processed.
bool
PreAllocSplitting::SplitRegLiveIntervals(const TargetRegisterClass **RCs,
                                         SmallPtrSet<LiveInterval*, 8>& Split) {
  // First find all the virtual registers whose live intervals are intercepted
  // by the current barrier.
  SmallVector<LiveInterval*, 8> Intervals;
  for (const TargetRegisterClass **RC = RCs; *RC; ++RC) {
    // FIXME: If it's not safe to move any instruction that defines the barrier
    // register class, then it means there are some special dependencies which
    // codegen is not modelling. Ignore these barriers for now.
    if (!TII->isSafeToMoveRegClassDefs(*RC))
      continue;
    const std::vector<unsigned> &VRs = MRI->getRegClassVirtRegs(*RC);
    for (unsigned i = 0, e = VRs.size(); i != e; ++i) {
      unsigned Reg = VRs[i];
      if (!LIs->hasInterval(Reg))
        continue;
      LiveInterval *LI = &LIs->getInterval(Reg);
      if (LI->liveAt(BarrierIdx) && !Barrier->readsRegister(Reg))
        // Virtual register live interval is intercepted by the barrier. We
        // should split and shrink wrap its interval if possible.
        Intervals.push_back(LI);
    }
  }

  // Process the affected live intervals.
  bool Change = false;
  while (!Intervals.empty()) {
    if (PreSplitLimit != -1 && (int)NumSplits == PreSplitLimit)
      break;
    LiveInterval *LI = Intervals.back();
    Intervals.pop_back();
    bool result = SplitRegLiveInterval(LI);
    if (result) Split.insert(LI);
    Change |= result;
  }

  return Change;
}
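
/// getNumberOfNonSpills - Count how many of the given uses are not stores of
/// Reg back into FrameIndex, and report (via FeedsTwoAddr) whether any of
/// them redefines Reg through a tied two-address operand.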
unsigned PreAllocSplitting::getNumberOfNonSpills(
                                  SmallPtrSet<MachineInstr*, 4>& MIs,
                                  unsigned Reg, int FrameIndex,
                                  bool& FeedsTwoAddr) {
  unsigned NonSpills = 0;
  for (SmallPtrSet<MachineInstr*, 4>::iterator UI = MIs.begin(), UE = MIs.end();
       UI != UE; ++UI) {
    int StoreFrameIndex;
    unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
    if (StoreVReg != Reg || StoreFrameIndex != FrameIndex)
      ++NonSpills;

    int DefIdx = (*UI)->findRegisterDefOperandIdx(Reg);
    if (DefIdx != -1 && (*UI)->isRegTiedToUseOperand(DefIdx))
      FeedsTwoAddr = true;
  }

  return NonSpills;
}
/// removeDeadSpills - After doing splitting, filter through all intervals we've
/// split, and see if any of the spills are unnecessary.  If so, remove them.
bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
  bool changed = false;

  // Walk over all of the live intervals that were touched by the splitter,
  // and see if we can do any DCE and/or folding.
  for (SmallPtrSet<LiveInterval*, 8>::iterator LI = split.begin(),
         LE = split.end(); LI != LE; ++LI) {
    DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> > VNUseCount;

    // First, collect all the uses of the vreg, and sort them by their
    // reaching definition (VNInfo).
    for (MachineRegisterInfo::use_iterator UI = MRI->use_begin((*LI)->reg),
           UE = MRI->use_end(); UI != UE; ++UI) {
      SlotIndex index = LIs->getInstructionIndex(&*UI);
      index = index.getUseIndex();

      const LiveRange* LR = (*LI)->getLiveRangeContaining(index);
      VNUseCount[LR->valno].insert(&*UI);
    }

    // Now, take the definitions (VNInfo's) one at a time and try to DCE
    // and/or fold them away.
    for (LiveInterval::vni_iterator VI = (*LI)->vni_begin(),
           VE = (*LI)->vni_end(); VI != VE; ++VI) {

      if (DeadSplitLimit != -1 && (int)NumDeadSpills == DeadSplitLimit)
        return changed;

      VNInfo* CurrVN = *VI;

      // We don't currently try to handle definitions with PHI kills, because
      // it would involve processing more than one VNInfo at once.
      if (CurrVN->hasPHIKill()) continue;

      // We also don't try to handle the results of PHI joins, since there's
      // no defining instruction to analyze.
      MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
      if (!DefMI || CurrVN->isUnused()) continue;

      // We're only interested in eliminating cruft introduced by the splitter,
      // which is of the form load-use or load-use-store.  First, check that
      // the definition is a load, and remember what stack slot we loaded it
      // from.
      int FrameIndex;
      if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue;

      // If the definition has no uses at all, just DCE it.
      if (VNUseCount[CurrVN].size() == 0) {
        LIs->RemoveMachineInstrFromMaps(DefMI);
        (*LI)->removeValNo(CurrVN);
        DefMI->eraseFromParent();
        VNUseCount.erase(CurrVN);
        ++NumDeadSpills;
        changed = true;
        continue;
      }

      // Second, get the number of non-store uses of the definition, as well as
      // a flag indicating whether it feeds into a later two-address definition.
      bool FeedsTwoAddr = false;
      unsigned NonSpillCount = getNumberOfNonSpills(VNUseCount[CurrVN],
                                                    (*LI)->reg, FrameIndex,
                                                    FeedsTwoAddr);

      // If there's one non-store use and it doesn't feed a two-addr, then
      // this is a load-use-store case that we can try to fold.
      if (NonSpillCount == 1 && !FeedsTwoAddr) {
        // Start by finding the non-store use MachineInstr.
        SmallPtrSet<MachineInstr*, 4>::iterator UI = VNUseCount[CurrVN].begin();
        int StoreFrameIndex;
        unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
        while (UI != VNUseCount[CurrVN].end() &&
               (StoreVReg == (*LI)->reg && StoreFrameIndex == FrameIndex)) {
          ++UI;
          if (UI != VNUseCount[CurrVN].end())
            StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
        }
        if (UI == VNUseCount[CurrVN].end()) continue;

        MachineInstr* use = *UI;

        // Attempt to fold it away!
        int OpIdx = use->findRegisterUseOperandIdx((*LI)->reg, false);
        if (OpIdx == -1) continue;
        SmallVector<unsigned, 1> Ops;
        Ops.push_back(OpIdx);
        if (!TII->canFoldMemoryOperand(use, Ops)) continue;

        MachineInstr* NewMI = TII->foldMemoryOperand(use, Ops, FrameIndex);

        if (!NewMI) continue;

        // Update relevant analyses.
        LIs->RemoveMachineInstrFromMaps(DefMI);
        LIs->ReplaceMachineInstrInMaps(use, NewMI);
        (*LI)->removeValNo(CurrVN);

        DefMI->eraseFromParent();
        use->eraseFromParent();
        VNUseCount[CurrVN].erase(use);

        // Remove deleted instructions.  Note that we need to remove them from
        // the VNInfo->use map as well, just to be safe.
        for (SmallPtrSet<MachineInstr*, 4>::iterator II =
               VNUseCount[CurrVN].begin(), IE = VNUseCount[CurrVN].end();
             II != IE; ++II) {
          for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
                 VNI = VNUseCount.begin(), VNE = VNUseCount.end(); VNI != VNE;
               ++VNI)
            if (VNI->first != CurrVN)
              VNI->second.erase(*II);
          LIs->RemoveMachineInstrFromMaps(*II);
          (*II)->eraseFromParent();
        }

        VNUseCount.erase(CurrVN);

        for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
               VI = VNUseCount.begin(), VE = VNUseCount.end(); VI != VE; ++VI)
          if (VI->second.erase(use))
            VI->second.insert(NewMI);

        ++NumDeadSpills;
        changed = true;
        continue;
      }

      // If there's more than one non-store instruction, we can't profitably
      // fold it, so bail.
      if (NonSpillCount) continue;

      // Otherwise, this is a load-store case, so DCE them.
      for (SmallPtrSet<MachineInstr*, 4>::iterator UI =
             VNUseCount[CurrVN].begin(), UE = VNUseCount[CurrVN].end();
           UI != UE; ++UI) {
        LIs->RemoveMachineInstrFromMaps(*UI);
        (*UI)->eraseFromParent();
      }

      VNUseCount.erase(CurrVN);

      LIs->RemoveMachineInstrFromMaps(DefMI);
      (*LI)->removeValNo(CurrVN);
      DefMI->eraseFromParent();
      ++NumDeadSpills;
      changed = true;
    }
  }

  return changed;
}
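
/// createsNewJoin - Conservatively determine whether splitting the value
/// defined in DefMBB at the barrier in BarrierMBB would create a new join
/// point for the live range, by walking the successors of BarrierMBB that the
/// range still covers and checking their dominators.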
bool PreAllocSplitting::createsNewJoin(LiveRange* LR,
                                       MachineBasicBlock* DefMBB,
                                       MachineBasicBlock* BarrierMBB) {
  if (DefMBB == BarrierMBB)
    return false;

  if (LR->valno->hasPHIKill())
    return false;

  SlotIndex MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
  if (LR->end < MBBEnd)
    return false;

  MachineLoopInfo& MLI = getAnalysis<MachineLoopInfo>();
  if (MLI.getLoopFor(DefMBB) != MLI.getLoopFor(BarrierMBB))
    return true;

  MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>();
  SmallPtrSet<MachineBasicBlock*, 4> Visited;
  typedef std::pair<MachineBasicBlock*,
                    MachineBasicBlock::succ_iterator> ItPair;
  SmallVector<ItPair, 4> Stack;
  Stack.push_back(std::make_pair(BarrierMBB, BarrierMBB->succ_begin()));

  while (!Stack.empty()) {
    ItPair P = Stack.back();
    Stack.pop_back();

    MachineBasicBlock* PredMBB = P.first;
    MachineBasicBlock::succ_iterator S = P.second;

    if (S == PredMBB->succ_end())
      continue;
    else if (Visited.count(*S)) {
      Stack.push_back(std::make_pair(PredMBB, ++S));
      continue;
    } else
      Stack.push_back(std::make_pair(PredMBB, S+1));

    MachineBasicBlock* MBB = *S;
    Visited.insert(MBB);

    if (MBB == BarrierMBB)
      return true;

    MachineDomTreeNode* DefMDTN = MDT.getNode(DefMBB);
    MachineDomTreeNode* BarrierMDTN = MDT.getNode(BarrierMBB);
    MachineDomTreeNode* MDTN = MDT.getNode(MBB)->getIDom();
    while (MDTN) {
      if (MDTN == DefMDTN)
        return true;
      else if (MDTN == BarrierMDTN)
        break;
      MDTN = MDTN->getIDom();
    }

    MBBEnd = LIs->getMBBEndIdx(MBB);
    if (LR->end > MBBEnd)
      Stack.push_back(std::make_pair(MBB, MBB->succ_begin()));
  }

  return false;
}
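
/// runOnMachineFunction - Drive the pass: walk the blocks in depth-first
/// order, and for every instruction whose descriptor lists register class
/// barriers, try to split the virtual-register live intervals that are live
/// across it, then clean up any spills that turned out to be dead.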
bool PreAllocSplitting::runOnMachineFunction(MachineFunction &MF) {
  CurrMF = &MF;
  TM     = &MF.getTarget();
  TRI    = TM->getRegisterInfo();
  TII    = TM->getInstrInfo();
  MFI    = MF.getFrameInfo();
  MRI    = &MF.getRegInfo();
  SIs    = &getAnalysis<SlotIndexes>();
  LIs    = &getAnalysis<LiveIntervals>();
  LSs    = &getAnalysis<LiveStacks>();
  VRM    = &getAnalysis<VirtRegMap>();

  bool MadeChange = false;

  // Make sure blocks are numbered in order.
  MF.RenumberBlocks();

  MachineBasicBlock *Entry = MF.begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;

  SmallPtrSet<LiveInterval*, 8> Split;

  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    BarrierMBB = *DFI;
    for (MachineBasicBlock::iterator I = BarrierMBB->begin(),
           E = BarrierMBB->end(); I != E; ++I) {
      Barrier = &*I;
      const TargetRegisterClass **BarrierRCs =
        Barrier->getDesc().getRegClassBarriers();
      if (!BarrierRCs)
        continue;
      BarrierIdx = LIs->getInstructionIndex(Barrier);
      MadeChange |= SplitRegLiveIntervals(BarrierRCs, Split);
    }
  }

  MadeChange |= removeDeadSpills(Split);

  return MadeChange;
}