1 //===-- PreAllocSplitting.cpp - Pre-allocation Interval Splitting Pass. ---===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the machine instruction level pre-register allocation
11 // live interval splitting pass. It finds live interval barriers, i.e.
12 // instructions which will kill all physical registers in certain register
13 // classes, and split all live intervals which cross the barrier.
15 //===----------------------------------------------------------------------===//
17 #define DEBUG_TYPE "pre-alloc-split"
18 #include "VirtRegMap.h"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "llvm/CodeGen/LiveStackAnalysis.h"
21 #include "llvm/CodeGen/MachineDominators.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunctionPass.h"
24 #include "llvm/CodeGen/MachineLoopInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/Passes.h"
27 #include "llvm/CodeGen/RegisterCoalescer.h"
28 #include "llvm/Target/TargetInstrInfo.h"
29 #include "llvm/Target/TargetMachine.h"
30 #include "llvm/Target/TargetOptions.h"
31 #include "llvm/Target/TargetRegisterInfo.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/ADT/DenseMap.h"
36 #include "llvm/ADT/DepthFirstIterator.h"
37 #include "llvm/ADT/SmallPtrSet.h"
38 #include "llvm/ADT/Statistic.h"
39 using namespace llvm;
41 static cl::opt<int> PreSplitLimit("pre-split-limit", cl::init(-1), cl::Hidden);
42 static cl::opt<int> DeadSplitLimit("dead-split-limit", cl::init(-1), cl::Hidden);
43 static cl::opt<int> RestoreFoldLimit("restore-fold-limit", cl::init(-1), cl::Hidden);
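// These hidden options cap how many splits, dead-spill removals, and restore
// folds the pass performs; -1 (the default) means no limit. They appear to be
// bisection aids for debugging miscompiles introduced by the pass, e.g.
// -pre-split-limit=10 stops splitting after the tenth interval.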
45 STATISTIC(NumSplits, "Number of intervals split");
46 STATISTIC(NumRemats, "Number of intervals split by rematerialization");
47 STATISTIC(NumFolds, "Number of intervals split with spill folding");
48 STATISTIC(NumRestoreFolds, "Number of intervals split with restore folding");
49 STATISTIC(NumRenumbers, "Number of intervals renumbered into new registers");
50 STATISTIC(NumDeadSpills, "Number of dead spills removed");
52 namespace {
53 class VISIBILITY_HIDDEN PreAllocSplitting : public MachineFunctionPass {
54 MachineFunction *CurrMF;
55 const TargetMachine *TM;
56 const TargetInstrInfo *TII;
57 const TargetRegisterInfo* TRI;
58 MachineFrameInfo *MFI;
59 MachineRegisterInfo *MRI;
60 LiveIntervals *LIs;
61 LiveStacks *LSs;
62 VirtRegMap *VRM;
64 // Barrier - Current barrier being processed.
65 MachineInstr *Barrier;
67 // BarrierMBB - Basic block where the barrier resides in.
68 MachineBasicBlock *BarrierMBB;
70 // BarrierIdx - Current barrier index.
71 unsigned BarrierIdx;
73 // CurrLI - Current live interval being split.
74 LiveInterval *CurrLI;
76 // CurrSLI - Current stack slot live interval.
77 LiveInterval *CurrSLI;
79 // CurrSValNo - Current val# for the stack slot live interval.
80 VNInfo *CurrSValNo;
82 // IntervalSSMap - A map from live interval to spill slots.
83 DenseMap<unsigned, int> IntervalSSMap;
85 // Def2SpillMap - A map from a def instruction index to spill index.
86 DenseMap<unsigned, unsigned> Def2SpillMap;
88 public:
89 static char ID;
90 PreAllocSplitting() : MachineFunctionPass(&ID) {}
92 virtual bool runOnMachineFunction(MachineFunction &MF);
94 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
95 AU.setPreservesCFG();
96 AU.addRequired<LiveIntervals>();
97 AU.addPreserved<LiveIntervals>();
98 AU.addRequired<LiveStacks>();
99 AU.addPreserved<LiveStacks>();
100 AU.addPreserved<RegisterCoalescer>();
101 if (StrongPHIElim)
102 AU.addPreservedID(StrongPHIEliminationID);
103 else
104 AU.addPreservedID(PHIEliminationID);
105 AU.addRequired<MachineDominatorTree>();
106 AU.addRequired<MachineLoopInfo>();
107 AU.addRequired<VirtRegMap>();
108 AU.addPreserved<MachineDominatorTree>();
109 AU.addPreserved<MachineLoopInfo>();
110 AU.addPreserved<VirtRegMap>();
111 MachineFunctionPass::getAnalysisUsage(AU);
114 virtual void releaseMemory() {
115 IntervalSSMap.clear();
116 Def2SpillMap.clear();
119 virtual const char *getPassName() const {
120 return "Pre-Register Allocaton Live Interval Splitting";
123 /// print - Implement the dump method.
124 virtual void print(raw_ostream &O, const Module* M = 0) const {
125 LIs->print(O, M);
129 private:
130 MachineBasicBlock::iterator
131 findNextEmptySlot(MachineBasicBlock*, MachineInstr*,
132 unsigned&);
134 MachineBasicBlock::iterator
135 findSpillPoint(MachineBasicBlock*, MachineInstr*, MachineInstr*,
136 SmallPtrSet<MachineInstr*, 4>&, unsigned&);
138 MachineBasicBlock::iterator
139 findRestorePoint(MachineBasicBlock*, MachineInstr*, unsigned,
140 SmallPtrSet<MachineInstr*, 4>&, unsigned&);
142 int CreateSpillStackSlot(unsigned, const TargetRegisterClass *);
144 bool IsAvailableInStack(MachineBasicBlock*, unsigned, unsigned, unsigned,
145 unsigned&, int&) const;
147 void UpdateSpillSlotInterval(VNInfo*, unsigned, unsigned);
149 bool SplitRegLiveInterval(LiveInterval*);
151 bool SplitRegLiveIntervals(const TargetRegisterClass **,
152 SmallPtrSet<LiveInterval*, 8>&);
154 bool createsNewJoin(LiveRange* LR, MachineBasicBlock* DefMBB,
155 MachineBasicBlock* BarrierMBB);
156 bool Rematerialize(unsigned vreg, VNInfo* ValNo,
157 MachineInstr* DefMI,
158 MachineBasicBlock::iterator RestorePt,
159 unsigned RestoreIdx,
160 SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
161 MachineInstr* FoldSpill(unsigned vreg, const TargetRegisterClass* RC,
162 MachineInstr* DefMI,
163 MachineInstr* Barrier,
164 MachineBasicBlock* MBB,
165 int& SS,
166 SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
167 MachineInstr* FoldRestore(unsigned vreg,
168 const TargetRegisterClass* RC,
169 MachineInstr* Barrier,
170 MachineBasicBlock* MBB,
171 int SS,
172 SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
173 void RenumberValno(VNInfo* VN);
174 void ReconstructLiveInterval(LiveInterval* LI);
175 bool removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split);
176 unsigned getNumberOfNonSpills(SmallPtrSet<MachineInstr*, 4>& MIs,
177 unsigned Reg, int FrameIndex, bool& TwoAddr);
178 VNInfo* PerformPHIConstruction(MachineBasicBlock::iterator Use,
179 MachineBasicBlock* MBB, LiveInterval* LI,
180 SmallPtrSet<MachineInstr*, 4>& Visited,
181 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
182 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
183 DenseMap<MachineInstr*, VNInfo*>& NewVNs,
184 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
185 DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
186 bool IsTopLevel, bool IsIntraBlock);
187 VNInfo* PerformPHIConstructionFallBack(MachineBasicBlock::iterator Use,
188 MachineBasicBlock* MBB, LiveInterval* LI,
189 SmallPtrSet<MachineInstr*, 4>& Visited,
190 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
191 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
192 DenseMap<MachineInstr*, VNInfo*>& NewVNs,
193 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
194 DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
195 bool IsTopLevel, bool IsIntraBlock);
197 } // end anonymous namespace
199 char PreAllocSplitting::ID = 0;
201 static RegisterPass<PreAllocSplitting>
202 X("pre-alloc-splitting", "Pre-Register Allocation Live Interval Splitting");
204 const PassInfo *const llvm::PreAllocSplittingID = &X;
207 /// findNextEmptySlot - Find a gap after the given machine instruction in the
208 /// instruction index map. If there isn't one, return end().
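/// A "gap" is an unused slot in LiveIntervals' instruction index map directly
/// before the instruction that follows MI. SpotIndex is set to that free
/// index, which lets a newly inserted instruction (such as a spill) be given
/// an index without renumbering the rest of the function.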
209 MachineBasicBlock::iterator
210 PreAllocSplitting::findNextEmptySlot(MachineBasicBlock *MBB, MachineInstr *MI,
211 unsigned &SpotIndex) {
212 MachineBasicBlock::iterator MII = MI;
213 if (++MII != MBB->end()) {
214 unsigned Index = LIs->findGapBeforeInstr(LIs->getInstructionIndex(MII));
215 if (Index) {
216 SpotIndex = Index;
217 return MII;
220 return MBB->end();
223 /// findSpillPoint - Find a gap, as far away from the given MI as possible,
224 /// suitable for spilling the current live interval. The index must be before any
225 /// defs and uses of the live interval register in the mbb. Return begin() if
226 /// none is found.
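/// The walk starts at the barrier MI (expected to be a call), first steps back
/// to the barrier's own call-frame setup, then continues backwards toward
/// DefMI (or the block start if DefMI is null). Every index gap seen along the
/// way updates the candidate point, call-frame destroy/setup pairs belonging
/// to other calls are skipped whole, and the walk stops at the first
/// instruction in RefsInMBB. The result is therefore the usable gap farthest
/// from the barrier, i.e. as close to the def as the references in the block
/// allow.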
227 MachineBasicBlock::iterator
228 PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
229 MachineInstr *DefMI,
230 SmallPtrSet<MachineInstr*, 4> &RefsInMBB,
231 unsigned &SpillIndex) {
232 MachineBasicBlock::iterator Pt = MBB->begin();
234 MachineBasicBlock::iterator MII = MI;
235 MachineBasicBlock::iterator EndPt = DefMI
236 ? MachineBasicBlock::iterator(DefMI) : MBB->begin();
238 while (MII != EndPt && !RefsInMBB.count(MII) &&
239 MII->getOpcode() != TRI->getCallFrameSetupOpcode())
240 --MII;
241 if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
243 while (MII != EndPt && !RefsInMBB.count(MII)) {
244 unsigned Index = LIs->getInstructionIndex(MII);
246 // We can't insert the spill between the barrier (a call) and its
247 // corresponding call frame setup.
248 if (MII->getOpcode() == TRI->getCallFrameDestroyOpcode()) {
249 while (MII->getOpcode() != TRI->getCallFrameSetupOpcode()) {
250 --MII;
251 if (MII == EndPt) {
252 return Pt;
255 continue;
256 } else if (LIs->hasGapBeforeInstr(Index)) {
257 Pt = MII;
258 SpillIndex = LIs->findGapBeforeInstr(Index, true);
261 if (RefsInMBB.count(MII))
262 return Pt;
265 --MII;
268 return Pt;
271 /// findRestorePoint - Find a gap in the instruction index map that's suitable
272 /// for restoring the current live interval value. The index must be before any
273 /// uses of the live interval register in the mbb. Return end() if none is
274 /// found.
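/// This is the forward counterpart of findSpillPoint: starting at the barrier
/// (a call), walk past its call-frame teardown, then scan forward for index
/// gaps until hitting LastIdx (the end of the value's live range), the block's
/// first terminator, or an instruction from RefsInMBB. Call-frame regions of
/// other calls are stepped over whole. The most recently seen gap is kept, so
/// the restore typically lands in the gap just before the first use after the
/// barrier when such a gap exists.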
275 MachineBasicBlock::iterator
276 PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
277 unsigned LastIdx,
278 SmallPtrSet<MachineInstr*, 4> &RefsInMBB,
279 unsigned &RestoreIndex) {
280 // FIXME: Allow spill to be inserted at the beginning of the mbb. Update mbb
281 // begin index accordingly.
282 MachineBasicBlock::iterator Pt = MBB->end();
283 MachineBasicBlock::iterator EndPt = MBB->getFirstTerminator();
285 // We start at the call, so walk forward until we find the call frame teardown
286 // since we can't insert restores before that. Bail if we encounter a use
287 // during this time.
288 MachineBasicBlock::iterator MII = MI;
289 if (MII == EndPt) return Pt;
291 while (MII != EndPt && !RefsInMBB.count(MII) &&
292 MII->getOpcode() != TRI->getCallFrameDestroyOpcode())
293 ++MII;
294 if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
295 ++MII;
297 // FIXME: Limit the number of instructions to examine to reduce
298 // compile time?
299 while (MII != EndPt) {
300 unsigned Index = LIs->getInstructionIndex(MII);
301 if (Index > LastIdx)
302 break;
303 unsigned Gap = LIs->findGapBeforeInstr(Index);
305 // We can't insert a restore between the barrier (a call) and its
306 // corresponding call frame teardown.
307 if (MII->getOpcode() == TRI->getCallFrameSetupOpcode()) {
308 do {
309 if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
310 ++MII;
311 } while (MII->getOpcode() != TRI->getCallFrameDestroyOpcode());
312 } else if (Gap) {
313 Pt = MII;
314 RestoreIndex = Gap;
317 if (RefsInMBB.count(MII))
318 return Pt;
320 ++MII;
323 return Pt;
326 /// CreateSpillStackSlot - Create a stack slot for the live interval being
327 /// split. If the live interval was previously split, just reuse the same
328 /// slot.
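/// The slot is memoized in IntervalSSMap keyed by the virtual register, so
/// every split of the same interval spills to one slot. A stack-slot live
/// interval (CurrSLI) with a single value number (CurrSValNo) is created or
/// looked up at the same time; UpdateSpillSlotInterval later extends it to
/// cover the spill-to-restore region.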
329 int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
330 const TargetRegisterClass *RC) {
331 int SS;
332 DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
333 if (I != IntervalSSMap.end()) {
334 SS = I->second;
335 } else {
336 SS = MFI->CreateStackObject(RC->getSize(), RC->getAlignment());
337 IntervalSSMap[Reg] = SS;
340 // Create live interval for stack slot.
341 CurrSLI = &LSs->getOrCreateInterval(SS, RC);
342 if (CurrSLI->hasAtLeastOneValue())
343 CurrSValNo = CurrSLI->getValNumInfo(0);
344 else
345 CurrSValNo = CurrSLI->getNextValue(0, 0, false, LSs->getVNInfoAllocator());
346 return SS;
349 /// IsAvailableInStack - Return true if register is available in a split stack
350 /// slot at the specified index.
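/// "Available" means an earlier split of this interval already stored the
/// value defined at DefIndex to a stack slot (recorded in IntervalSSMap and
/// Def2SpillMap) and, when the def is in the barrier block, that spill does
/// not sit at or after the intended restore point. On success, SpillIndex and
/// SS identify the existing spill so no new store needs to be emitted.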
351 bool
352 PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
353 unsigned Reg, unsigned DefIndex,
354 unsigned RestoreIndex, unsigned &SpillIndex,
355 int& SS) const {
356 if (!DefMBB)
357 return false;
359 DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
360 if (I == IntervalSSMap.end())
361 return false;
362 DenseMap<unsigned, unsigned>::iterator II = Def2SpillMap.find(DefIndex);
363 if (II == Def2SpillMap.end())
364 return false;
366 // If last spill of def is in the same mbb as barrier mbb (where restore will
367 // be), make sure it's not at or after the intended restore index.
368 // FIXME: Undo the previous spill?
369 assert(LIs->getMBBFromIndex(II->second) == DefMBB);
370 if (DefMBB == BarrierMBB && II->second >= RestoreIndex)
371 return false;
373 SS = I->second;
374 SpillIndex = II->second;
375 return true;
378 /// UpdateSpillSlotInterval - Given the specified val# of the register live
379 /// interval being split, and the spill and restore indices, update the live
380 /// interval of the spill stack slot.
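/// If the spill and the restore are in the same block, a single range from
/// SpillIndex to RestoreIndex is added. Otherwise the stack value is live out
/// of the spill block, so a worklist flood over successor blocks extends the
/// stack-slot interval along every path on which the register value (ValNo)
/// is live, stopping each path at the restore in BarrierMBB or where the
/// register value dies.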
381 void
382 PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, unsigned SpillIndex,
383 unsigned RestoreIndex) {
384 assert(LIs->getMBBFromIndex(RestoreIndex) == BarrierMBB &&
385 "Expect restore in the barrier mbb");
387 MachineBasicBlock *MBB = LIs->getMBBFromIndex(SpillIndex);
388 if (MBB == BarrierMBB) {
389 // Intra-block spill + restore. We are done.
390 LiveRange SLR(SpillIndex, RestoreIndex, CurrSValNo);
391 CurrSLI->addRange(SLR);
392 return;
395 SmallPtrSet<MachineBasicBlock*, 4> Processed;
396 unsigned EndIdx = LIs->getMBBEndIdx(MBB);
397 LiveRange SLR(SpillIndex, EndIdx+1, CurrSValNo);
398 CurrSLI->addRange(SLR);
399 Processed.insert(MBB);
401 // Start from the spill mbb, figure out the extent of the spill slot's
402 // live interval.
403 SmallVector<MachineBasicBlock*, 4> WorkList;
404 const LiveRange *LR = CurrLI->getLiveRangeContaining(SpillIndex);
405 if (LR->end > EndIdx)
406 // If the live range extends beyond the end of the mbb, add successors to the work list.
407 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
408 SE = MBB->succ_end(); SI != SE; ++SI)
409 WorkList.push_back(*SI);
411 while (!WorkList.empty()) {
412 MachineBasicBlock *MBB = WorkList.back();
413 WorkList.pop_back();
414 if (Processed.count(MBB))
415 continue;
416 unsigned Idx = LIs->getMBBStartIdx(MBB);
417 LR = CurrLI->getLiveRangeContaining(Idx);
418 if (LR && LR->valno == ValNo) {
419 EndIdx = LIs->getMBBEndIdx(MBB);
420 if (Idx <= RestoreIndex && RestoreIndex < EndIdx) {
421 // Spill slot live interval stops at the restore.
422 LiveRange SLR(Idx, RestoreIndex, CurrSValNo);
423 CurrSLI->addRange(SLR);
424 } else if (LR->end > EndIdx) {
425 // Live range extends beyond end of mbb, process successors.
426 LiveRange SLR(Idx, EndIdx+1, CurrSValNo);
427 CurrSLI->addRange(SLR);
428 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
429 SE = MBB->succ_end(); SI != SE; ++SI)
430 WorkList.push_back(*SI);
431 } else {
432 LiveRange SLR(Idx, LR->end, CurrSValNo);
433 CurrSLI->addRange(SLR);
435 Processed.insert(MBB);
440 /// PerformPHIConstruction - From properly set up use and def lists, use a PHI
441 /// construction algorithm to compute the ranges and valnos for an interval.
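/// This is essentially SSA value numbering run backwards from a use: given the
/// per-block Defs and Uses sets and the NewVNs map (one fresh VNInfo per def),
/// walk up the current block looking for the nearest def or use, and recurse
/// through PerformPHIConstructionFallBack into predecessor blocks when nothing
/// is found locally. Results are memoized three ways: NewVNs for intra-block
/// points, LiveOut for values live out of a block, and Phis for blocks whose
/// value is a phi join. IsTopLevel marks the original query; IsIntraBlock says
/// whether UseI is a real instruction in MBB or the block's end() sentinel.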
442 VNInfo*
443 PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
444 MachineBasicBlock* MBB, LiveInterval* LI,
445 SmallPtrSet<MachineInstr*, 4>& Visited,
446 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
447 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
448 DenseMap<MachineInstr*, VNInfo*>& NewVNs,
449 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
450 DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
451 bool IsTopLevel, bool IsIntraBlock) {
452 // Return memoized result if it's available.
453 if (IsTopLevel && Visited.count(UseI) && NewVNs.count(UseI))
454 return NewVNs[UseI];
455 else if (!IsTopLevel && IsIntraBlock && NewVNs.count(UseI))
456 return NewVNs[UseI];
457 else if (!IsIntraBlock && LiveOut.count(MBB))
458 return LiveOut[MBB];
460 // Check if our block contains any uses or defs.
461 bool ContainsDefs = Defs.count(MBB);
462 bool ContainsUses = Uses.count(MBB);
464 VNInfo* RetVNI = 0;
466 // Enumerate the cases of use/def containing blocks.
467 if (!ContainsDefs && !ContainsUses) {
468 return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs, Uses,
469 NewVNs, LiveOut, Phis,
470 IsTopLevel, IsIntraBlock);
471 } else if (ContainsDefs && !ContainsUses) {
472 SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
474 // Search for the def in this block. If we don't find it before the
475 // instruction we care about, go to the fallback case. Note that that
476 // should never happen: this cannot be intrablock, so use should
477 // always be an end() iterator.
478 assert(UseI == MBB->end() && "No use marked in intrablock");
480 MachineBasicBlock::iterator Walker = UseI;
481 --Walker;
482 while (Walker != MBB->begin()) {
483 if (BlockDefs.count(Walker))
484 break;
485 --Walker;
488 // Once we've found it, extend its VNInfo to our instruction.
489 unsigned DefIndex = LIs->getInstructionIndex(Walker);
490 DefIndex = LiveIntervals::getDefIndex(DefIndex);
491 unsigned EndIndex = LIs->getMBBEndIdx(MBB);
493 RetVNI = NewVNs[Walker];
494 LI->addRange(LiveRange(DefIndex, EndIndex+1, RetVNI));
495 } else if (!ContainsDefs && ContainsUses) {
496 SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];
498 // Search for the use in this block that precedes the instruction we care
499 // about, going to the fallback case if we don't find it.
500 if (UseI == MBB->begin())
501 return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
502 Uses, NewVNs, LiveOut, Phis,
503 IsTopLevel, IsIntraBlock);
505 MachineBasicBlock::iterator Walker = UseI;
506 --Walker;
507 bool found = false;
508 while (Walker != MBB->begin()) {
509 if (BlockUses.count(Walker)) {
510 found = true;
511 break;
513 --Walker;
516 // Must check begin() too.
517 if (!found) {
518 if (BlockUses.count(Walker))
519 found = true;
520 else
521 return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
522 Uses, NewVNs, LiveOut, Phis,
523 IsTopLevel, IsIntraBlock);
526 unsigned UseIndex = LIs->getInstructionIndex(Walker);
527 UseIndex = LiveIntervals::getUseIndex(UseIndex);
528 unsigned EndIndex = 0;
529 if (IsIntraBlock) {
530 EndIndex = LIs->getInstructionIndex(UseI);
531 EndIndex = LiveIntervals::getUseIndex(EndIndex);
532 } else
533 EndIndex = LIs->getMBBEndIdx(MBB);
535 // Now, recursively phi construct the VNInfo for the use we found,
536 // and then extend it to include the instruction we care about
537 RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
538 NewVNs, LiveOut, Phis, false, true);
540 LI->addRange(LiveRange(UseIndex, EndIndex+1, RetVNI));
542 // FIXME: Need to set kills properly for inter-block stuff.
543 if (LI->isKill(RetVNI, UseIndex)) LI->removeKill(RetVNI, UseIndex);
544 if (IsIntraBlock)
545 LI->addKill(RetVNI, EndIndex, false);
546 } else if (ContainsDefs && ContainsUses) {
547 SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
548 SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];
550 // This case is basically a merging of the two preceding cases, with the
551 // special note that checking for defs must take precedence over checking
552 // for uses, because of two-address instructions.
554 if (UseI == MBB->begin())
555 return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs, Uses,
556 NewVNs, LiveOut, Phis,
557 IsTopLevel, IsIntraBlock);
559 MachineBasicBlock::iterator Walker = UseI;
560 --Walker;
561 bool foundDef = false;
562 bool foundUse = false;
563 while (Walker != MBB->begin()) {
564 if (BlockDefs.count(Walker)) {
565 foundDef = true;
566 break;
567 } else if (BlockUses.count(Walker)) {
568 foundUse = true;
569 break;
571 --Walker;
574 // Must check begin() too.
575 if (!foundDef && !foundUse) {
576 if (BlockDefs.count(Walker))
577 foundDef = true;
578 else if (BlockUses.count(Walker))
579 foundUse = true;
580 else
581 return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
582 Uses, NewVNs, LiveOut, Phis,
583 IsTopLevel, IsIntraBlock);
586 unsigned StartIndex = LIs->getInstructionIndex(Walker);
587 StartIndex = foundDef ? LiveIntervals::getDefIndex(StartIndex) :
588 LiveIntervals::getUseIndex(StartIndex);
589 unsigned EndIndex = 0;
590 if (IsIntraBlock) {
591 EndIndex = LIs->getInstructionIndex(UseI);
592 EndIndex = LiveIntervals::getUseIndex(EndIndex);
593 } else
594 EndIndex = LIs->getMBBEndIdx(MBB);
596 if (foundDef)
597 RetVNI = NewVNs[Walker];
598 else
599 RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
600 NewVNs, LiveOut, Phis, false, true);
602 LI->addRange(LiveRange(StartIndex, EndIndex+1, RetVNI));
604 if (foundUse && LI->isKill(RetVNI, StartIndex))
605 LI->removeKill(RetVNI, StartIndex);
606 if (IsIntraBlock) {
607 LI->addKill(RetVNI, EndIndex, false);
611 // Memoize results so we don't have to recompute them.
612 if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
613 else {
614 if (!NewVNs.count(UseI))
615 NewVNs[UseI] = RetVNI;
616 Visited.insert(UseI);
619 return RetVNI;
622 /// PerformPHIConstructionFallBack - PerformPHIConstruction fall back path.
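/// Reached when the current block has no def or use of the register before the
/// query point. A new value number is created for the block up front (and
/// recorded in Phis so cycles terminate), then PHI construction recurses into
/// every predecessor. With a single predecessor (and no PHI kill on the new
/// value) the two values are merged back into one; with several, each incoming
/// value is marked as PHI-killed at the end of its block and the new value
/// stands for the join.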
624 VNInfo*
625 PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator UseI,
626 MachineBasicBlock* MBB, LiveInterval* LI,
627 SmallPtrSet<MachineInstr*, 4>& Visited,
628 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
629 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
630 DenseMap<MachineInstr*, VNInfo*>& NewVNs,
631 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
632 DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
633 bool IsTopLevel, bool IsIntraBlock) {
634 // NOTE: Because this is the fallback case from other cases, we do NOT
635 // assume that we are not intrablock here.
636 if (Phis.count(MBB)) return Phis[MBB];
638 unsigned StartIndex = LIs->getMBBStartIdx(MBB);
639 VNInfo *RetVNI = Phis[MBB] =
640 LI->getNextValue(0, /*FIXME*/ 0, false, LIs->getVNInfoAllocator());
642 if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
644 // If there are no uses or defs between our starting point and the
645 // beginning of the block, then recursively perform phi construction
646 // on our predecessors.
647 DenseMap<MachineBasicBlock*, VNInfo*> IncomingVNs;
648 for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
649 PE = MBB->pred_end(); PI != PE; ++PI) {
650 VNInfo* Incoming = PerformPHIConstruction((*PI)->end(), *PI, LI,
651 Visited, Defs, Uses, NewVNs,
652 LiveOut, Phis, false, false);
653 if (Incoming != 0)
654 IncomingVNs[*PI] = Incoming;
657 if (MBB->pred_size() == 1 && !RetVNI->hasPHIKill()) {
658 VNInfo* OldVN = RetVNI;
659 VNInfo* NewVN = IncomingVNs.begin()->second;
660 VNInfo* MergedVN = LI->MergeValueNumberInto(OldVN, NewVN);
661 if (MergedVN == OldVN) std::swap(OldVN, NewVN);
663 for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator LOI = LiveOut.begin(),
664 LOE = LiveOut.end(); LOI != LOE; ++LOI)
665 if (LOI->second == OldVN)
666 LOI->second = MergedVN;
667 for (DenseMap<MachineInstr*, VNInfo*>::iterator NVI = NewVNs.begin(),
668 NVE = NewVNs.end(); NVI != NVE; ++NVI)
669 if (NVI->second == OldVN)
670 NVI->second = MergedVN;
671 for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator PI = Phis.begin(),
672 PE = Phis.end(); PI != PE; ++PI)
673 if (PI->second == OldVN)
674 PI->second = MergedVN;
675 RetVNI = MergedVN;
676 } else {
677 // Otherwise this is a phi join: RetVNI, created above for this block,
678 // represents the joined value; mark each incoming value as PHI-killed.
679 for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
680 IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
681 I->second->setHasPHIKill(true);
682 unsigned KillIndex = LIs->getMBBEndIdx(I->first);
683 if (!LiveInterval::isKill(I->second, KillIndex))
684 LI->addKill(I->second, KillIndex, false);
688 unsigned EndIndex = 0;
689 if (IsIntraBlock) {
690 EndIndex = LIs->getInstructionIndex(UseI);
691 EndIndex = LiveIntervals::getUseIndex(EndIndex);
692 } else
693 EndIndex = LIs->getMBBEndIdx(MBB);
694 LI->addRange(LiveRange(StartIndex, EndIndex+1, RetVNI));
695 if (IsIntraBlock)
696 LI->addKill(RetVNI, EndIndex, false);
698 // Memoize results so we don't have to recompute them.
699 if (!IsIntraBlock)
700 LiveOut[MBB] = RetVNI;
701 else {
702 if (!NewVNs.count(UseI))
703 NewVNs[UseI] = RetVNI;
704 Visited.insert(UseI);
707 return RetVNI;
710 /// ReconstructLiveInterval - Recompute a live interval from scratch.
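/// The interval is rebuilt in four steps: (1) clear the existing ranges and
/// value numbers, (2) walk all defs of the register, caching them per block
/// and creating a fresh VNInfo for each, (3) walk all uses and run
/// PerformPHIConstruction from each one to recreate the ranges and reaching
/// values, and (4) add one-slot ranges for defs that ended up dead.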
711 void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
712 BumpPtrAllocator& Alloc = LIs->getVNInfoAllocator();
714 // Clear the old ranges and valnos.
715 LI->clear();
717 // Cache the uses and defs of the register
718 typedef DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> > RegMap;
719 RegMap Defs, Uses;
721 // Keep track of the new VNs we're creating.
722 DenseMap<MachineInstr*, VNInfo*> NewVNs;
723 SmallPtrSet<VNInfo*, 2> PhiVNs;
725 // Cache defs, and create a new VNInfo for each def.
726 for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
727 DE = MRI->def_end(); DI != DE; ++DI) {
728 Defs[(*DI).getParent()].insert(&*DI);
730 unsigned DefIdx = LIs->getInstructionIndex(&*DI);
731 DefIdx = LiveIntervals::getDefIndex(DefIdx);
733 assert(DI->getOpcode() != TargetInstrInfo::PHI &&
734 "Following NewVN isPHIDef flag incorrect. Fix me!");
735 VNInfo* NewVN = LI->getNextValue(DefIdx, 0, true, Alloc);
737 // If the def is a move, set the copy field.
738 unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
739 if (TII->isMoveInstr(*DI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
740 if (DstReg == LI->reg)
741 NewVN->setCopy(&*DI);
743 NewVNs[&*DI] = NewVN;
746 // Cache uses as a separate pass from actually processing them.
747 for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
748 UE = MRI->use_end(); UI != UE; ++UI)
749 Uses[(*UI).getParent()].insert(&*UI);
751 // Now, actually process every use and use a phi construction algorithm
752 // to walk from it to its reaching definitions, building VNInfos along
753 // the way.
754 DenseMap<MachineBasicBlock*, VNInfo*> LiveOut;
755 DenseMap<MachineBasicBlock*, VNInfo*> Phis;
756 SmallPtrSet<MachineInstr*, 4> Visited;
757 for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
758 UE = MRI->use_end(); UI != UE; ++UI) {
759 PerformPHIConstruction(&*UI, UI->getParent(), LI, Visited, Defs,
760 Uses, NewVNs, LiveOut, Phis, true, true);
763 // Add ranges for dead defs
764 for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
765 DE = MRI->def_end(); DI != DE; ++DI) {
766 unsigned DefIdx = LIs->getInstructionIndex(&*DI);
767 DefIdx = LiveIntervals::getDefIndex(DefIdx);
769 if (LI->liveAt(DefIdx)) continue;
771 VNInfo* DeadVN = NewVNs[&*DI];
772 LI->addRange(LiveRange(DefIdx, DefIdx+1, DeadVN));
773 LI->addKill(DeadVN, DefIdx, false);
777 /// RenumberValno - Split the given valno out into a new vreg, allowing it to
778 /// be allocated to a different register. This function creates a new vreg,
779 /// copies the valno and its live ranges over to the new vreg's interval,
780 /// removes them from the old interval, and rewrites all uses and defs of
781 /// the original reg to the new vreg within those ranges.
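/// A worklist seeded with VN collects the value and any values that are
/// two-address redefinitions of it (found through its kill list), since those
/// must move to the new vreg together. The whole renumbering is abandoned if
/// any value in the chain has a PHI kill, because a phi-joined value cannot be
/// split off into a separate register this way.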
782 void PreAllocSplitting::RenumberValno(VNInfo* VN) {
783 SmallVector<VNInfo*, 4> Stack;
784 SmallVector<VNInfo*, 4> VNsToCopy;
785 Stack.push_back(VN);
787 // Walk through and copy the valno we care about, and any other valnos
788 // that are two-address redefinitions of the one we care about. These
789 // will need to be rewritten as well. We also check for safety of the
790 // renumbering here, by making sure that none of the valnos involved have
791 // phi kills.
792 while (!Stack.empty()) {
793 VNInfo* OldVN = Stack.back();
794 Stack.pop_back();
796 // Bail out if we ever encounter a valno that has a PHI kill. We can't
797 // renumber these.
798 if (OldVN->hasPHIKill()) return;
800 VNsToCopy.push_back(OldVN);
802 // Locate two-address redefinitions
803 for (VNInfo::KillSet::iterator KI = OldVN->kills.begin(),
804 KE = OldVN->kills.end(); KI != KE; ++KI) {
805 assert(!KI->isPHIKill && "VN previously reported having no PHI kills.");
806 MachineInstr* MI = LIs->getInstructionFromIndex(KI->killIdx);
807 unsigned DefIdx = MI->findRegisterDefOperandIdx(CurrLI->reg);
808 if (DefIdx == ~0U) continue;
809 if (MI->isRegTiedToUseOperand(DefIdx)) {
810 VNInfo* NextVN =
811 CurrLI->findDefinedVNInfo(LiveIntervals::getDefIndex(KI->killIdx));
812 if (NextVN == OldVN) continue;
813 Stack.push_back(NextVN);
818 // Create the new vreg
819 unsigned NewVReg = MRI->createVirtualRegister(MRI->getRegClass(CurrLI->reg));
821 // Create the new live interval
822 LiveInterval& NewLI = LIs->getOrCreateInterval(NewVReg);
824 for (SmallVector<VNInfo*, 4>::iterator OI = VNsToCopy.begin(), OE =
825 VNsToCopy.end(); OI != OE; ++OI) {
826 VNInfo* OldVN = *OI;
828 // Copy the valno over
829 VNInfo* NewVN = NewLI.createValueCopy(OldVN, LIs->getVNInfoAllocator());
830 NewLI.MergeValueInAsValue(*CurrLI, OldVN, NewVN);
832 // Remove the valno from the old interval
833 CurrLI->removeValNo(OldVN);
836 // Rewrite defs and uses. This is done in two stages to avoid invalidating
837 // the reg_iterator.
838 SmallVector<std::pair<MachineInstr*, unsigned>, 8> OpsToChange;
840 for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
841 E = MRI->reg_end(); I != E; ++I) {
842 MachineOperand& MO = I.getOperand();
843 unsigned InstrIdx = LIs->getInstructionIndex(&*I);
845 if ((MO.isUse() && NewLI.liveAt(LiveIntervals::getUseIndex(InstrIdx))) ||
846 (MO.isDef() && NewLI.liveAt(LiveIntervals::getDefIndex(InstrIdx))))
847 OpsToChange.push_back(std::make_pair(&*I, I.getOperandNo()));
850 for (SmallVector<std::pair<MachineInstr*, unsigned>, 8>::iterator I =
851 OpsToChange.begin(), E = OpsToChange.end(); I != E; ++I) {
852 MachineInstr* Inst = I->first;
853 unsigned OpIdx = I->second;
854 MachineOperand& MO = Inst->getOperand(OpIdx);
855 MO.setReg(NewVReg);
858 // Grow the VirtRegMap, since we've created a new vreg.
859 VRM->grow();
861 // The renumbered vreg shares a stack slot with the old register.
862 if (IntervalSSMap.count(CurrLI->reg))
863 IntervalSSMap[NewVReg] = IntervalSSMap[CurrLI->reg];
865 NumRenumbers++;
868 bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
869 MachineInstr* DefMI,
870 MachineBasicBlock::iterator RestorePt,
871 unsigned RestoreIdx,
872 SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
873 MachineBasicBlock& MBB = *RestorePt->getParent();
875 MachineBasicBlock::iterator KillPt = BarrierMBB->end();
876 unsigned KillIdx = 0;
877 if (!ValNo->isDefAccurate() || DefMI->getParent() == BarrierMBB)
878 KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, KillIdx);
879 else
880 KillPt = findNextEmptySlot(DefMI->getParent(), DefMI, KillIdx);
882 if (KillPt == DefMI->getParent()->end())
883 return false;
885 TII->reMaterialize(MBB, RestorePt, VReg, 0, DefMI);
886 LIs->InsertMachineInstrInMaps(prior(RestorePt), RestoreIdx);
888 ReconstructLiveInterval(CurrLI);
889 unsigned RematIdx = LIs->getInstructionIndex(prior(RestorePt));
890 RematIdx = LiveIntervals::getDefIndex(RematIdx);
891 RenumberValno(CurrLI->findDefinedVNInfo(RematIdx));
893 ++NumSplits;
894 ++NumRemats;
895 return true;
898 MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
899 const TargetRegisterClass* RC,
900 MachineInstr* DefMI,
901 MachineInstr* Barrier,
902 MachineBasicBlock* MBB,
903 int& SS,
904 SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
905 MachineBasicBlock::iterator Pt = MBB->begin();
907 // Give up on folding if there are no references in this MBB to anchor it.
908 if (RefsInMBB.empty())
909 return 0;
911 MachineBasicBlock::iterator FoldPt = Barrier;
912 while (&*FoldPt != DefMI && FoldPt != MBB->begin() &&
913 !RefsInMBB.count(FoldPt))
914 --FoldPt;
916 int OpIdx = FoldPt->findRegisterDefOperandIdx(vreg, false);
917 if (OpIdx == -1)
918 return 0;
920 SmallVector<unsigned, 1> Ops;
921 Ops.push_back(OpIdx);
923 if (!TII->canFoldMemoryOperand(FoldPt, Ops))
924 return 0;
926 DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(vreg);
927 if (I != IntervalSSMap.end()) {
928 SS = I->second;
929 } else {
930 SS = MFI->CreateStackObject(RC->getSize(), RC->getAlignment());
933 MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
934 FoldPt, Ops, SS);
936 if (FMI) {
937 LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
938 FMI = MBB->insert(MBB->erase(FoldPt), FMI);
939 ++NumFolds;
941 IntervalSSMap[vreg] = SS;
942 CurrSLI = &LSs->getOrCreateInterval(SS, RC);
943 if (CurrSLI->hasAtLeastOneValue())
944 CurrSValNo = CurrSLI->getValNumInfo(0);
945 else
946 CurrSValNo = CurrSLI->getNextValue(0, 0, false, LSs->getVNInfoAllocator());
949 return FMI;
952 MachineInstr* PreAllocSplitting::FoldRestore(unsigned vreg,
953 const TargetRegisterClass* RC,
954 MachineInstr* Barrier,
955 MachineBasicBlock* MBB,
956 int SS,
957 SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
958 if ((int)RestoreFoldLimit != -1 && RestoreFoldLimit == (int)NumRestoreFolds)
959 return 0;
961 // Give up on folding if there are no references in this MBB to anchor it.
962 if (RefsInMBB.empty())
963 return 0;
965 // Can't fold a restore between a call stack setup and teardown.
966 MachineBasicBlock::iterator FoldPt = Barrier;
968 // Advance from barrier to call frame teardown.
969 while (FoldPt != MBB->getFirstTerminator() &&
970 FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
971 if (RefsInMBB.count(FoldPt))
972 return 0;
974 ++FoldPt;
977 if (FoldPt == MBB->getFirstTerminator())
978 return 0;
979 else
980 ++FoldPt;
982 // Now find the restore point.
983 while (FoldPt != MBB->getFirstTerminator() && !RefsInMBB.count(FoldPt)) {
984 if (FoldPt->getOpcode() == TRI->getCallFrameSetupOpcode()) {
985 while (FoldPt != MBB->getFirstTerminator() &&
986 FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
987 if (RefsInMBB.count(FoldPt))
988 return 0;
990 ++FoldPt;
993 if (FoldPt == MBB->getFirstTerminator())
994 return 0;
997 ++FoldPt;
1000 if (FoldPt == MBB->getFirstTerminator())
1001 return 0;
1003 int OpIdx = FoldPt->findRegisterUseOperandIdx(vreg, true);
1004 if (OpIdx == -1)
1005 return 0;
1007 SmallVector<unsigned, 1> Ops;
1008 Ops.push_back(OpIdx);
1010 if (!TII->canFoldMemoryOperand(FoldPt, Ops))
1011 return 0;
1013 MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
1014 FoldPt, Ops, SS);
1016 if (FMI) {
1017 LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
1018 FMI = MBB->insert(MBB->erase(FoldPt), FMI);
1019 ++NumRestoreFolds;
1022 return FMI;
1025 /// SplitRegLiveInterval - Split (spill and restore) the given live interval
1026 /// so it would not cross the barrier that's being processed. Shrink wrap
1027 /// (minimize) the live interval to the last uses.
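/// The overall recipe: locate the value live across the barrier and its
/// defining instruction, find a restore point after the barrier, and try to
/// rematerialize the def there. Failing that, emit (or fold) a spill either
/// right after the def or just before the barrier, reusing a previous spill of
/// the same value when one is already available in a stack slot; then emit
/// (or fold) a restore at the restore point, extend the stack-slot interval
/// across the spill-to-restore region, rebuild the register's live interval
/// from scratch, and renumber the restored value into a new vreg (when the
/// restore was not folded). Schematically, for a barrier B that clobbers the
/// class of %v:
///     %v = def              %v = def
///     ...                   store %v -> [ss]   ; spill
///     B (barrier)     ==>   B (barrier)
///     use %v                %v2 = load [ss]    ; restore, renumbered
///                           use %v2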
1028 bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
1029 CurrLI = LI;
1031 // Find the live range where the current interval crosses the barrier.
1032 LiveInterval::iterator LR =
1033 CurrLI->FindLiveRangeContaining(LIs->getUseIndex(BarrierIdx));
1034 VNInfo *ValNo = LR->valno;
1036 assert(!ValNo->isUnused() && "Val# is defined by a dead def?");
1038 MachineInstr *DefMI = ValNo->isDefAccurate()
1039 ? LIs->getInstructionFromIndex(ValNo->def) : NULL;
1041 // If this would create a new join point, do not split.
1042 if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent()))
1043 return false;
1045 // Find all references in the barrier mbb.
1046 SmallPtrSet<MachineInstr*, 4> RefsInMBB;
1047 for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
1048 E = MRI->reg_end(); I != E; ++I) {
1049 MachineInstr *RefMI = &*I;
1050 if (RefMI->getParent() == BarrierMBB)
1051 RefsInMBB.insert(RefMI);
1054 // Find a point to restore the value after the barrier.
1055 unsigned RestoreIndex = 0;
1056 MachineBasicBlock::iterator RestorePt =
1057 findRestorePoint(BarrierMBB, Barrier, LR->end, RefsInMBB, RestoreIndex);
1058 if (RestorePt == BarrierMBB->end())
1059 return false;
1061 if (DefMI && LIs->isReMaterializable(*LI, ValNo, DefMI))
1062 if (Rematerialize(LI->reg, ValNo, DefMI, RestorePt,
1063 RestoreIndex, RefsInMBB))
1064 return true;
1066 // Add a spill either before the barrier or after the definition.
1067 MachineBasicBlock *DefMBB = DefMI ? DefMI->getParent() : NULL;
1068 const TargetRegisterClass *RC = MRI->getRegClass(CurrLI->reg);
1069 unsigned SpillIndex = 0;
1070 MachineInstr *SpillMI = NULL;
1071 int SS = -1;
1072 if (!ValNo->isDefAccurate()) {
1073 // If we don't know where the def is we must split just before the barrier.
1074 if ((SpillMI = FoldSpill(LI->reg, RC, 0, Barrier,
1075 BarrierMBB, SS, RefsInMBB))) {
1076 SpillIndex = LIs->getInstructionIndex(SpillMI);
1077 } else {
1078 MachineBasicBlock::iterator SpillPt =
1079 findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, SpillIndex);
1080 if (SpillPt == BarrierMBB->begin())
1081 return false; // No gap to insert spill.
1082 // Add spill.
1084 SS = CreateSpillStackSlot(CurrLI->reg, RC);
1085 TII->storeRegToStackSlot(*BarrierMBB, SpillPt, CurrLI->reg, true, SS, RC);
1086 SpillMI = prior(SpillPt);
1087 LIs->InsertMachineInstrInMaps(SpillMI, SpillIndex);
1089 } else if (!IsAvailableInStack(DefMBB, CurrLI->reg, ValNo->def,
1090 RestoreIndex, SpillIndex, SS)) {
1091 // If it's already split, just restore the value. There is no need to spill
1092 // the def again.
1093 if (!DefMI)
1094 return false; // Def is dead. Do nothing.
1096 if ((SpillMI = FoldSpill(LI->reg, RC, DefMI, Barrier,
1097 BarrierMBB, SS, RefsInMBB))) {
1098 SpillIndex = LIs->getInstructionIndex(SpillMI);
1099 } else {
1100 // Check if it's possible to insert a spill after the def MI.
1101 MachineBasicBlock::iterator SpillPt;
1102 if (DefMBB == BarrierMBB) {
1103 // Add spill after the def and the last use before the barrier.
1104 SpillPt = findSpillPoint(BarrierMBB, Barrier, DefMI,
1105 RefsInMBB, SpillIndex);
1106 if (SpillPt == DefMBB->begin())
1107 return false; // No gap to insert spill.
1108 } else {
1109 SpillPt = findNextEmptySlot(DefMBB, DefMI, SpillIndex);
1110 if (SpillPt == DefMBB->end())
1111 return false; // No gap to insert spill.
1113 // Add spill. The store instruction kills the register if def is before
1114 // the barrier in the barrier block.
1115 SS = CreateSpillStackSlot(CurrLI->reg, RC);
1116 TII->storeRegToStackSlot(*DefMBB, SpillPt, CurrLI->reg,
1117 DefMBB == BarrierMBB, SS, RC);
1118 SpillMI = prior(SpillPt);
1119 LIs->InsertMachineInstrInMaps(SpillMI, SpillIndex);
1123 // Remember def instruction index to spill index mapping.
1124 if (DefMI && SpillMI)
1125 Def2SpillMap[ValNo->def] = SpillIndex;
1127 // Add restore.
1128 bool FoldedRestore = false;
1129 if (MachineInstr* LMI = FoldRestore(CurrLI->reg, RC, Barrier,
1130 BarrierMBB, SS, RefsInMBB)) {
1131 RestorePt = LMI;
1132 RestoreIndex = LIs->getInstructionIndex(RestorePt);
1133 FoldedRestore = true;
1134 } else {
1135 TII->loadRegFromStackSlot(*BarrierMBB, RestorePt, CurrLI->reg, SS, RC);
1136 MachineInstr *LoadMI = prior(RestorePt);
1137 LIs->InsertMachineInstrInMaps(LoadMI, RestoreIndex);
1140 // Update spill stack slot live interval.
1141 UpdateSpillSlotInterval(ValNo, LIs->getUseIndex(SpillIndex)+1,
1142 LIs->getDefIndex(RestoreIndex));
1144 ReconstructLiveInterval(CurrLI);
1146 if (!FoldedRestore) {
1147 unsigned RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
1148 RestoreIdx = LiveIntervals::getDefIndex(RestoreIdx);
1149 RenumberValno(CurrLI->findDefinedVNInfo(RestoreIdx));
1152 ++NumSplits;
1153 return true;
1156 /// SplitRegLiveIntervals - Split all register live intervals that cross the
1157 /// barrier that's being processed.
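/// RCs is the null-terminated list of register classes the barrier clobbers.
/// For each class whose defs are safe to move, every virtual register of that
/// class that is live across the barrier (and not read by it) is collected,
/// and SplitRegLiveInterval is invoked on each, subject to -pre-split-limit.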
1158 bool
1159 PreAllocSplitting::SplitRegLiveIntervals(const TargetRegisterClass **RCs,
1160 SmallPtrSet<LiveInterval*, 8>& Split) {
1161 // First find all the virtual registers whose live intervals are intercepted
1162 // by the current barrier.
1163 SmallVector<LiveInterval*, 8> Intervals;
1164 for (const TargetRegisterClass **RC = RCs; *RC; ++RC) {
1165 // FIXME: If it's not safe to move any instruction that defines the barrier
1166 // register class, then it means there are some special dependencies which
1167 // codegen is not modelling. Ignore these barriers for now.
1168 if (!TII->isSafeToMoveRegClassDefs(*RC))
1169 continue;
1170 std::vector<unsigned> &VRs = MRI->getRegClassVirtRegs(*RC);
1171 for (unsigned i = 0, e = VRs.size(); i != e; ++i) {
1172 unsigned Reg = VRs[i];
1173 if (!LIs->hasInterval(Reg))
1174 continue;
1175 LiveInterval *LI = &LIs->getInterval(Reg);
1176 if (LI->liveAt(BarrierIdx) && !Barrier->readsRegister(Reg))
1177 // Virtual register live interval is intercepted by the barrier. We
1178 // should split and shrink wrap its interval if possible.
1179 Intervals.push_back(LI);
1183 // Process the affected live intervals.
1184 bool Change = false;
1185 while (!Intervals.empty()) {
1186 if (PreSplitLimit != -1 && (int)NumSplits == PreSplitLimit)
1187 break;
1190 LiveInterval *LI = Intervals.back();
1191 Intervals.pop_back();
1192 bool result = SplitRegLiveInterval(LI);
1193 if (result) Split.insert(LI);
1194 Change |= result;
1197 return Change;
1200 unsigned PreAllocSplitting::getNumberOfNonSpills(
1201 SmallPtrSet<MachineInstr*, 4>& MIs,
1202 unsigned Reg, int FrameIndex,
1203 bool& FeedsTwoAddr) {
1204 unsigned NonSpills = 0;
1205 for (SmallPtrSet<MachineInstr*, 4>::iterator UI = MIs.begin(), UE = MIs.end();
1206 UI != UE; ++UI) {
1207 int StoreFrameIndex;
1208 unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
1209 if (StoreVReg != Reg || StoreFrameIndex != FrameIndex)
1210 NonSpills++;
1212 int DefIdx = (*UI)->findRegisterDefOperandIdx(Reg);
1213 if (DefIdx != -1 && (*UI)->isRegTiedToUseOperand(DefIdx))
1214 FeedsTwoAddr = true;
1217 return NonSpills;
1220 /// removeDeadSpills - After doing splitting, filter through all intervals we've
1221 /// split, and see if any of the spills are unnecessary. If so, remove them.
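/// For every split interval, uses are bucketed by their reaching value
/// (VNInfo). A value defined by a load from a stack slot is then either
/// deleted outright when it has no uses, folded into its single non-store use
/// (the load becomes a memory operand on that instruction and the redundant
/// stores back to the slot are deleted), or, when all of its uses are just
/// stores back to the same slot, removed together with those stores.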
1222 bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
1223 bool changed = false;
1225 // Walk over all of the live intervals that were touched by the splitter,
1226 // and see if we can do any DCE and/or folding.
1227 for (SmallPtrSet<LiveInterval*, 8>::iterator LI = split.begin(),
1228 LE = split.end(); LI != LE; ++LI) {
1229 DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> > VNUseCount;
1231 // First, collect all the uses of the vreg, and sort them by their
1232 // reaching definition (VNInfo).
1233 for (MachineRegisterInfo::use_iterator UI = MRI->use_begin((*LI)->reg),
1234 UE = MRI->use_end(); UI != UE; ++UI) {
1235 unsigned index = LIs->getInstructionIndex(&*UI);
1236 index = LiveIntervals::getUseIndex(index);
1238 const LiveRange* LR = (*LI)->getLiveRangeContaining(index);
1239 VNUseCount[LR->valno].insert(&*UI);
1242 // Now, take the definitions (VNInfo's) one at a time and try to DCE
1243 // and/or fold them away.
1244 for (LiveInterval::vni_iterator VI = (*LI)->vni_begin(),
1245 VE = (*LI)->vni_end(); VI != VE; ++VI) {
1247 if (DeadSplitLimit != -1 && (int)NumDeadSpills == DeadSplitLimit)
1248 return changed;
1250 VNInfo* CurrVN = *VI;
1252 // We don't currently try to handle definitions with PHI kills, because
1253 // it would involve processing more than one VNInfo at once.
1254 if (CurrVN->hasPHIKill()) continue;
1256 // We also don't try to handle the results of PHI joins, since there's
1257 // no defining instruction to analyze.
1258 if (!CurrVN->isDefAccurate() || CurrVN->isUnused()) continue;
1260 // We're only interested in eliminating cruft introduced by the splitter,
1261 // which is of the form load-use or load-use-store. First, check that the
1262 // definition is a load, and remember what stack slot we loaded it from.
1263 MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
1264 int FrameIndex;
1265 if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue;
1267 // If the definition has no uses at all, just DCE it.
1268 if (VNUseCount[CurrVN].size() == 0) {
1269 LIs->RemoveMachineInstrFromMaps(DefMI);
1270 (*LI)->removeValNo(CurrVN);
1271 DefMI->eraseFromParent();
1272 VNUseCount.erase(CurrVN);
1273 NumDeadSpills++;
1274 changed = true;
1275 continue;
1278 // Second, get the number of non-store uses of the definition, as well as
1279 // a flag indicating whether it feeds into a later two-address definition.
1280 bool FeedsTwoAddr = false;
1281 unsigned NonSpillCount = getNumberOfNonSpills(VNUseCount[CurrVN],
1282 (*LI)->reg, FrameIndex,
1283 FeedsTwoAddr);
1285 // If there's one non-store use and it doesn't feed a two-addr, then
1286 // this is a load-use-store case that we can try to fold.
1287 if (NonSpillCount == 1 && !FeedsTwoAddr) {
1288 // Start by finding the non-store use MachineInstr.
1289 SmallPtrSet<MachineInstr*, 4>::iterator UI = VNUseCount[CurrVN].begin();
1290 int StoreFrameIndex;
1291 unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
1292 while (UI != VNUseCount[CurrVN].end() &&
1293 (StoreVReg == (*LI)->reg && StoreFrameIndex == FrameIndex)) {
1294 ++UI;
1295 if (UI != VNUseCount[CurrVN].end())
1296 StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
1298 if (UI == VNUseCount[CurrVN].end()) continue;
1300 MachineInstr* use = *UI;
1302 // Attempt to fold it away!
1303 int OpIdx = use->findRegisterUseOperandIdx((*LI)->reg, false);
1304 if (OpIdx == -1) continue;
1305 SmallVector<unsigned, 1> Ops;
1306 Ops.push_back(OpIdx);
1307 if (!TII->canFoldMemoryOperand(use, Ops)) continue;
1309 MachineInstr* NewMI =
1310 TII->foldMemoryOperand(*use->getParent()->getParent(),
1311 use, Ops, FrameIndex);
1313 if (!NewMI) continue;
1315 // Update relevant analyses.
1316 LIs->RemoveMachineInstrFromMaps(DefMI);
1317 LIs->ReplaceMachineInstrInMaps(use, NewMI);
1318 (*LI)->removeValNo(CurrVN);
1320 DefMI->eraseFromParent();
1321 MachineBasicBlock* MBB = use->getParent();
1322 NewMI = MBB->insert(MBB->erase(use), NewMI);
1323 VNUseCount[CurrVN].erase(use);
1325 // Remove deleted instructions. Note that we need to remove them from
1326 // the VNInfo->use map as well, just to be safe.
1327 for (SmallPtrSet<MachineInstr*, 4>::iterator II =
1328 VNUseCount[CurrVN].begin(), IE = VNUseCount[CurrVN].end();
1329 II != IE; ++II) {
1330 for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
1331 VNI = VNUseCount.begin(), VNE = VNUseCount.end(); VNI != VNE;
1332 ++VNI)
1333 if (VNI->first != CurrVN)
1334 VNI->second.erase(*II);
1335 LIs->RemoveMachineInstrFromMaps(*II);
1336 (*II)->eraseFromParent();
1339 VNUseCount.erase(CurrVN);
1341 for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
1342 VI = VNUseCount.begin(), VE = VNUseCount.end(); VI != VE; ++VI)
1343 if (VI->second.erase(use))
1344 VI->second.insert(NewMI);
1346 NumDeadSpills++;
1347 changed = true;
1348 continue;
1351 // If there's more than one non-store instruction, we can't profitably
1352 // fold it, so bail.
1353 if (NonSpillCount) continue;
1355 // Otherwise, this is a load-store case, so DCE them.
1356 for (SmallPtrSet<MachineInstr*, 4>::iterator UI =
1357 VNUseCount[CurrVN].begin(), UE = VNUseCount[CurrVN].end();
1358 UI != UE; ++UI) {
1359 LIs->RemoveMachineInstrFromMaps(*UI);
1360 (*UI)->eraseFromParent();
1363 VNUseCount.erase(CurrVN);
1365 LIs->RemoveMachineInstrFromMaps(DefMI);
1366 (*LI)->removeValNo(CurrVN);
1367 DefMI->eraseFromParent();
1368 NumDeadSpills++;
1369 changed = true;
1373 return changed;
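/// createsNewJoin - Return true if splitting the value defined in DefMBB at
/// the barrier in BarrierMBB would introduce a new join point for the live
/// interval, roughly: a block reachable from the barrier that is also reached
/// from the def without passing through the barrier, or a def and barrier in
/// different loops. Such splits are rejected by SplitRegLiveInterval. The
/// check is a DFS over the successors of BarrierMBB that consults the
/// dominator tree, bailing out early for same-block defs, phi-killed values,
/// and values that die inside BarrierMBB.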
1376 bool PreAllocSplitting::createsNewJoin(LiveRange* LR,
1377 MachineBasicBlock* DefMBB,
1378 MachineBasicBlock* BarrierMBB) {
1379 if (DefMBB == BarrierMBB)
1380 return false;
1382 if (LR->valno->hasPHIKill())
1383 return false;
1385 unsigned MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
1386 if (LR->end < MBBEnd)
1387 return false;
1389 MachineLoopInfo& MLI = getAnalysis<MachineLoopInfo>();
1390 if (MLI.getLoopFor(DefMBB) != MLI.getLoopFor(BarrierMBB))
1391 return true;
1393 MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>();
1394 SmallPtrSet<MachineBasicBlock*, 4> Visited;
1395 typedef std::pair<MachineBasicBlock*,
1396 MachineBasicBlock::succ_iterator> ItPair;
1397 SmallVector<ItPair, 4> Stack;
1398 Stack.push_back(std::make_pair(BarrierMBB, BarrierMBB->succ_begin()));
1400 while (!Stack.empty()) {
1401 ItPair P = Stack.back();
1402 Stack.pop_back();
1404 MachineBasicBlock* PredMBB = P.first;
1405 MachineBasicBlock::succ_iterator S = P.second;
1407 if (S == PredMBB->succ_end())
1408 continue;
1409 else if (Visited.count(*S)) {
1410 Stack.push_back(std::make_pair(PredMBB, ++S));
1411 continue;
1412 } else
1413 Stack.push_back(std::make_pair(PredMBB, S+1));
1415 MachineBasicBlock* MBB = *S;
1416 Visited.insert(MBB);
1418 if (MBB == BarrierMBB)
1419 return true;
1421 MachineDomTreeNode* DefMDTN = MDT.getNode(DefMBB);
1422 MachineDomTreeNode* BarrierMDTN = MDT.getNode(BarrierMBB);
1423 MachineDomTreeNode* MDTN = MDT.getNode(MBB)->getIDom();
1424 while (MDTN) {
1425 if (MDTN == DefMDTN)
1426 return true;
1427 else if (MDTN == BarrierMDTN)
1428 break;
1429 MDTN = MDTN->getIDom();
1432 MBBEnd = LIs->getMBBEndIdx(MBB);
1433 if (LR->end > MBBEnd)
1434 Stack.push_back(std::make_pair(MBB, MBB->succ_begin()));
1437 return false;
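/// runOnMachineFunction - The pass driver: walk the function's blocks
/// depth-first, and for every instruction whose descriptor reports
/// register-class barriers, split all virtual-register intervals of those
/// classes that are live across it, then clean up any spills the splitting
/// left dead.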
1441 bool PreAllocSplitting::runOnMachineFunction(MachineFunction &MF) {
1442 CurrMF = &MF;
1443 TM = &MF.getTarget();
1444 TRI = TM->getRegisterInfo();
1445 TII = TM->getInstrInfo();
1446 MFI = MF.getFrameInfo();
1447 MRI = &MF.getRegInfo();
1448 LIs = &getAnalysis<LiveIntervals>();
1449 LSs = &getAnalysis<LiveStacks>();
1450 VRM = &getAnalysis<VirtRegMap>();
1452 bool MadeChange = false;
1454 // Make sure blocks are numbered in order.
1455 MF.RenumberBlocks();
1457 MachineBasicBlock *Entry = MF.begin();
1458 SmallPtrSet<MachineBasicBlock*,16> Visited;
1460 SmallPtrSet<LiveInterval*, 8> Split;
1462 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
1463 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
1464 DFI != E; ++DFI) {
1465 BarrierMBB = *DFI;
1466 for (MachineBasicBlock::iterator I = BarrierMBB->begin(),
1467 E = BarrierMBB->end(); I != E; ++I) {
1468 Barrier = &*I;
1469 const TargetRegisterClass **BarrierRCs =
1470 Barrier->getDesc().getRegClassBarriers();
1471 if (!BarrierRCs)
1472 continue;
1473 BarrierIdx = LIs->getInstructionIndex(Barrier);
1474 MadeChange |= SplitRegLiveIntervals(BarrierRCs, Split);
1478 MadeChange |= removeDeadSpills(Split);
1480 return MadeChange;