//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);
static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
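
// Illustrative usage (not part of this file): these hidden cl::opt flags are
// read from the command line of any tool that links in this pass, typically
// llc. The flag names come from the declarations above; the invocation itself
// is only a sketch:
//
//   llc -disable-rematerialization -split-intervals-at-bb=false \
//       -split-limit=4 input.bc -o input.s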
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
                                   const TargetInstrInfo *tii_) {
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
      Reg == SrcReg)
    return true;

  if ((MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
       MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
      MI->getOperand(2).getReg() == Reg)
    return true;

  if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG &&
      MI->getOperand(1).getReg() == Reg)
    return true;

  return false;
}
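
// A reminder of the operand layouts consulted above (descriptive note, not
// new logic): for INSERT_SUBREG / SUBREG_TO_REG the inserted value is
// operand 2, e.g.
//   %reg1026<def> = INSERT_SUBREG %reg1026, %reg1025, 4
// while for EXTRACT_SUBREG the source super-register is operand 1, e.g.
//   %reg1027<def> = EXTRACT_SUBREG %reg1026<kill>, 3
// When Reg occupies those positions the instruction merely forwards an
// undefined value, so it can safely become an IMPLICIT_DEF itself.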
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse() || MO.isUndef())
          continue;
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, tii_)) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure other uses of the same register are also marked undef.
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
              MOJ.setIsUndef();
          }
          ImpDefRegs.erase(Reg);
        }
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers since they cannot be liveout.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_def which we want to keep are those that are live
      // out of their blocks.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before terminators; this is where we will point
        // PHI kills.
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex += InstrSlots::NUM;
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before terminators; this is where we will point
      // PHI kills.
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
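
  // A short sketch of the slot arithmetic used by the remapping loops below
  // (assuming the InstrSlots layout declared in LiveIntervalAnalysis.h:
  // LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4):
  //
  //   unsigned index  = idx / InstrSlots::NUM;  // instruction slot group
  //   unsigned offset = idx % InstrSlots::NUM;  // position within the group
  //
  // An instruction at base index 8 owns slots 8 (LOAD), 9 (USE), 10 (DEF)
  // and 11 (STORE). An offset of LOAD on a range boundary therefore marks a
  // basic block boundary rather than a real instruction, which is why that
  // case is special-cased throughout the remapping below.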
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned killIdx = vni->kills[i].killIdx;

          unsigned index = (killIdx - 1) / InstrSlots::NUM;
          unsigned offset = killIdx % InstrSlots::NUM;

          if (offset == InstrSlots::LOAD) {
            assert(0 && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else {
            if (vni->kills[i].isPHIKill) {
              std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
              --I;
              vni->kills[i].killIdx = terminatorGaps[I->second];
            } else {
              assert(OldI2MI[index] != 0 &&
                     "Kill refers to instruction not present in index maps.");
              vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
            }

            /*
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
            */
          }
        }
      }
    }
}
void LiveIntervals::scaleNumbering(int factor) {
  // Need to
  //  * scale MBB begin and end points
  //  * scale all ranges.
  //  * Update VNI structures.
  //  * Scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, unsigned>::iterator
       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second] = const_cast<MachineInstr *>(MI->first);
  }
}
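
// Why rescaling exists (an explanatory aside, not an original comment from
// this file): a pass that inserts instructions after the initial numbering
// can multiply every index by a constant factor to open up unused slot
// groups between existing instructions without renumbering from scratch.
// E.g. scaleNumbering(2) maps base indices 0, 4, 8, ... to 0, 8, 16, ...,
// leaving one free group between each pair of neighbors.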
/// runOnMachineFunction - Compute live intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getNameStr() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check use as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);

    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx, false);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx, false);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex, false);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
        interval.addKill(VNI, terminatorGaps[killMBB], true);
        VNI->setHasPHIKill(true);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End, false);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, terminatorGaps[mbb], true);
      ValNo->setHasPHIKill(true);
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
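
// Worked example for the intra-block case above (slot numbers assume
// InstrSlots::NUM == 4 with USE = 1 and DEF = 2): a vreg defined by the
// instruction at base index 8 and killed by a use at base index 16 gets the
// single range [getDefIndex(8), getUseIndex(16)+1) == [10, 18) with one
// value number -- exactly the LiveRange(defIndex, killIdx, ValNo) built above.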
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DOUT << " dead";
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is that the implicit use of the
  // physical register has been deleted by the two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}
/// computeIntervals - Compute the live intervals for virtual registers. For
/// some ordering of the machine instructions [1,N], a live interval is an
/// interval [i, j) where 1 <= i <= j < N and in which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
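
// Usage sketch (hypothetical caller): to collect the blocks overlapped by a
// live range [Start, End) one would write
//   SmallVector<MachineBasicBlock*, 4> MBBs;
//   if (lis->findLiveInMBBs(LR.start, LR.end, MBBs)) { /* ... */ }
// Both helpers rely on Idx2MBBMap being kept sorted by start index (see the
// std::sort calls with Idx2MBBCompare above), which makes the
// std::lower_bound scans valid.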
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand& MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def of that register.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      if (MO.isUse()) {
        // Only allow one other register use, as that's all the
        // remat mechanisms support currently.
        if (Reg != li.reg) {
          if (ImpUse == 0)
            ImpUse = Reg;
          else if (Reg != ImpUse)
            return false;
        }
        // For the use, there should be only one associated def.
        if (I != E && (next(I) != E || IsLiveIn))
          return false;
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}
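
// What typically passes these checks (illustrative and target-dependent):
// constant materializations such as an x86 MOV32ri, loads from immutable
// (fixed) stack slots like spilled function arguments, and constant-pool
// loads. Anything with unmodeled side effects, volatile accesses, or
// possibly non-constant memory is rejected above.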
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2 = false;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(0, 0, false, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(0, 0, false, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHIKill)
      continue;

    unsigned KillIdx = VNI->kills[j].killIdx;
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
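
// Usage note (descriptive): rewriteInstructionsForSpills below fills a
// std::vector<RewriteInfo> with one entry per def/use of the register in
// the range, sorts it with RewriteInfoCompare so entries appear in
// instruction-index order, then walks the vector coalescing consecutive
// entries that refer to the same MachineInstr.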
1731 void LiveIntervals::
1732 rewriteInstructionsForSpills(const LiveInterval
&li
, bool TrySplit
,
1733 LiveInterval::Ranges::const_iterator
&I
,
1734 MachineInstr
*ReMatOrigDefMI
, MachineInstr
*ReMatDefMI
,
1735 unsigned Slot
, int LdSlot
,
1736 bool isLoad
, bool isLoadSS
, bool DefIsReMat
, bool CanDelete
,
1738 const TargetRegisterClass
* rc
,
1739 SmallVector
<int, 4> &ReMatIds
,
1740 const MachineLoopInfo
*loopInfo
,
1741 BitVector
&SpillMBBs
,
1742 DenseMap
<unsigned, std::vector
<SRInfo
> > &SpillIdxes
,
1743 BitVector
&RestoreMBBs
,
1744 DenseMap
<unsigned, std::vector
<SRInfo
> > &RestoreIdxes
,
1745 DenseMap
<unsigned,unsigned> &MBBVRegsMap
,
1746 std::vector
<LiveInterval
*> &NewLIs
) {
1747 bool AllCanFold
= true;
1748 unsigned NewVReg
= 0;
1749 unsigned start
= getBaseIndex(I
->start
);
1750 unsigned end
= getBaseIndex(I
->end
-1) + InstrSlots::NUM
;
  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
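
  // The sort above matters: reg_iterator walks the use-list in an
  // essentially arbitrary order, while the rewrite loop below assumes
  // entries for the same instruction are adjacent and indices ascend.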
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }
    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;
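
    // NewVReg acts as an in/out parameter of rewriteInstructionForSpills
    // here: zero on entry means "allocate a fresh vreg", and on return it
    // names the interval covering this instruction (this reading is
    // inferred from the call sites in this file).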
    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }
    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }
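
    // In effect SpillIdxes[MBBId] stays sorted by instruction index, and
    // back() is always the latest spill candidate for the current vreg in
    // this block, which is what the pop_back/update logic above relies on.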
    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);
      }
    }
    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
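
// getSpillWeight grows with loop depth, making defs and uses in deeper
// loops costlier to spill; addIntervalsForSpills later normalizes these
// accumulated weights by the interval's approximate length.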
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}
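
// Note that erased restores are only marked dead (index = -1) rather than
// removed from the vector, keeping the remaining indices stable; the
// restore-insertion loop in addIntervalsForSpills skips such entries.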
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
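
// The intervals created above stay empty on purpose: a use of an
// implicit_def still needs a register assignment, but no reload or spill
// code, and an empty interval is the cheapest thing to allocate.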
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
      nI.print(DOUT, tri_);
      DOUT << '\n';
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
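
// This fast path trades code quality for compile time: every instruction
// touching the register gets its own tiny interval with a reload before
// and/or a spill after it, with a single folding attempt per instruction
// and no interval splitting.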
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
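
  // ReMatOrigDefs will hold the original (still in-place) defining
  // instructions and ReMatDefs private clones of them; the clones are kept
  // because the originals may be modified or deleted before the spiller
  // re-materializes the value.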
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;

  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    else
      // This case only occurs when the prealloc splitter has already assigned
      // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
  }
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
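
  // AddedKill records intervals whose last use was already marked as a
  // kill by the spill placement above; the finalization loop below skips
  // them so no second kill point is added.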
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            // spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
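
// Dividing each new interval's weight by InstrSlots::NUM times its
// approximate instruction count (above) roughly converts the accumulated
// use/def weight into a per-instruction density, favoring short, hot
// intervals over long, sparse ones.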
/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                          unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register, assert,
    // since we can't handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      unsigned StartIdx = getLoadIndex(Index);
      unsigned EndIdx = getStoreIndex(Index)+1;
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
  return Cut;
}
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    getInstructionIndex(startInst) + InstrSlots::DEF,
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(
    VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}
raw_ostream&
IntervalPrefixPrinter::operator()(raw_ostream &out,
                                  const MachineInstr &instr) const {
  return out << liinfo.getInstructionIndex(&instr);
}