//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
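
// Illustrative note (hand-written; not output of this pass): a live interval
// maps a register to the instruction-index ranges over which it carries a
// value, with one value number per definition.  Printed roughly as
//   %reg1024 = [10,34:0)  0@10
// i.e. %reg1024 is live from index 10 to index 34, with value #0 defined at
// index 10.  The exact indices come from the numbering computed below.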
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));
static cl::opt<int> CoalescingLimit("early-coalescing-limit",
                                    cl::init(-1), cl::Hidden);
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");
STATISTIC(numCoalescing, "Number of early coalescing performed");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
         E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  terminatorGaps.clear();
  phiJoinCopies.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
                                   const TargetInstrInfo *tii_) {
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
      Reg == SrcReg)
    return true;

  if ((MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
       MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
      MI->getOperand(2).getReg() == Reg)
    return true;
  if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG &&
      MI->getOperand(1).getReg() == Reg)
    return true;
  return false;
}
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
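///
/// For illustration only (hand-written example): given
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = MOV %reg1024
/// the copy reads an undefined value, so it can itself be turned into
///   %reg1025<def> = IMPLICIT_DEF
/// and any remaining uses of %reg1024 are marked isUndef.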
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse() || MO.isUndef())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, tii_)) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure other uses of the same register are also marked undef.
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
              MOJ.setIsUndef();
          }
          ImpDefRegs.erase(Reg);
        }
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers since they cannot be liveout.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_def which we want to keep are those that are live
      // out of its block.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  terminatorGaps.clear();
  phiJoinCopies.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(),
                    std::make_pair(MachineInstrIndex(), MachineInstrIndex()));

  MachineInstrIndex MIIndex;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    MachineInstrIndex StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex = getNextIndex(MIIndex);
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before terminators; this is where we will point
        // the terminator gap for this block.
        MachineInstrIndex tGap(true, MIIndex);
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex = getNextIndex(MIIndex);
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex = getNextIndex(MIIndex);

      // Insert max(1, numdefs) empty slots after every instruction.
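      // Illustration (made-up numbers): with one index per slot, an
      // instruction numbered at base index 8 exposes a use slot and a def
      // slot around it, and an instruction with two defs gets an extra empty
      // index so that each def can be given a distinct slot of its own.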
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      while (Slots--) {
        MIIndex = getNextIndex(MIIndex);
        i2miMap_.push_back(0);
      }
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before terminators; this is where we will point
      // the terminator gap for this block.
      MachineInstrIndex tGap(true, MIIndex);
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex = getNextIndex(MIIndex);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, getPrevSlot(MIIndex));
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
             LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start.getVecIndex();
        MachineInstrIndex::Slot offset = LI->start.getSlot();
        if (LI->start.isLoad()) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = MachineInstrIndex(
            MachineInstrIndex(mi2iMap_[OldI2MI[index]]),
            (MachineInstrIndex::Slot)offset);
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (getPrevSlot(LI->end)).getVecIndex();
        offset = LI->end.getSlot();
        if (LI->end.isLoad()) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);

          LI->end = getNextSlot(getMBBEndIdx(I->second));
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end =
              MachineInstrIndex(mi2iMap_[OldI2MI[index]],
                  (idx == index ? offset : MachineInstrIndex::LOAD));
          else
            LI->end =
              MachineInstrIndex(MachineInstrIndex::NUM * i2miMap_.size());
        }
      }
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
             VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def.getVecIndex();
          MachineInstrIndex::Slot offset = vni->def.getSlot();
          if (vni->def.isLoad()) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
          MachineInstrIndex::Slot offset = vni->kills[i].getSlot();

          if (vni->kills[i].isLoad()) {
            assert("Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(),
                                 vni->kills[i]);
              vni->kills[i] = getMBBEndIdx(I->second);*/
          } else if (vni->kills[i].isPHIIndex()) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            vni->kills[i] = terminatorGaps[I->second];
          } else {
            assert(OldI2MI[index] != 0 &&
                   "Kill refers to instruction not present in index maps.");
            vni->kills[i] = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
          }

          /*
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                            (idx == index ? offset : 0);
          else
            vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          */
        }
      }
    }
}
void LiveIntervals::scaleNumbering(int factor) {
  // Need to:
  //  * scale MBB begin and end points
  //  * scale all ranges.
  //  * Update VNI structures.
  //  * Scale instruction numberings

  // Scale the MBB indices.
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<MachineInstrIndex, MachineInstrIndex> &mbbIndices =
      MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = mbbIndices.first.scale(factor);
    mbbIndices.second = mbbIndices.second.scale(factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, MachineInstrIndex>::iterator
         TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = TGI->second.scale(factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  MachineInstrIndex highestSlot;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    MachineInstrIndex newSlot = MI->second.scale(factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  unsigned highestVIndex = highestSlot.getVecIndex();
  i2miMap_.resize(highestVIndex + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI)
    i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
}
/// runOnMachineFunction - Number the machine instructions and compute live
/// intervals for the virtual and physical registers of the function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();
  performEarlyCoalescing();

  numIntervals += getNumIntervals();

  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (MachineInstrIndex index = getBaseIndex(I->start),
           end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
         index = getNextIndex(index)) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index = getNextIndex(index);
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check use as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (MachineInstrIndex index = getBaseIndex(I->start),
           end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
         index = getNextIndex(index)) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index = getNextIndex(index);
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             MachineInstrIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    MachineInstrIndex defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      MachineInstrIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
      else
        killIdx = getNextSlot(defIndex);

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getNextSlot(getMBBEndIdx(*I)),  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      MachineInstrIndex killIdx =
        getNextSlot(getUseIndex(getInstructionIndex(Kill)));
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
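    //
    // Hand-written illustration (registers and indices invented): after
    // two-address elimination a block may contain
    //   %reg1025<def> = MOV %reg1024
    //   %reg1025<def> = ADD %reg1025<kill>, %reg1026
    // so the single interval for %reg1025 really carries two values: one
    // from the copy and one from the redefinition by the ADD.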
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      MachineInstrIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      MachineInstrIndex RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(
          LiveRange(RedefIndex, getNextSlot(RedefIndex), OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
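      //
      // Rough illustration (hand-written): after PHI elimination the join
      // register is fed by a copy at the end of each predecessor, e.g.
      //   BB1:  %reg1029<def> = MOV %reg1027
      //   BB2:  %reg1029<def> = MOV %reg1028
      // so the second definition of %reg1029 seen here gets its own value
      // number, and the range in the PHI block is renumbered below.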
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        phiJoinCopies.push_back(Killer);
        MachineInstrIndex Start = getMBBStartIdx(Killer->getParent());
        MachineInstrIndex End =
          getNextSlot(getUseIndex(getInstructionIndex(Killer)));
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
        VNI->addKill(terminatorGaps[killMBB]);
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(MachineInstrIndex(mbb->getNumber()),
                                           0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      MachineInstrIndex defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      MachineInstrIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(terminatorGaps[mbb]);
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              MachineInstrIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
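  //
  // For illustration (indices invented): a def of EAX at def slot 18 that is
  // killed by a use at use slot 25 gets the block-local range [18,26), i.e.
  // [defSlot(def), useSlot(kill)+1); if EAX is never read again it is dead at
  // the def and gets the one-slot range [18,19) instead.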
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  MachineInstrIndex baseIndex = MIIdx;
  MachineInstrIndex start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  MachineInstrIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = getNextSlot(start);
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = getNextIndex(baseIndex);
  while (++mi != MBB->end()) {
    while (baseIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex = getNextIndex(baseIndex);
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getNextSlot(getUseIndex(baseIndex));
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = getNextSlot(start);
        }
        goto exit;
      }
    }

    baseIndex = getNextIndex(baseIndex);
  }

  // The only case we should have a dead physreg here without a killing
  // instruction (where we know it's dead) is if it is live-in to the function
  // and never used. Another possible case is that the implicit use of the
  // physical register has been deleted by the two-address pass.
  end = getNextSlot(start);

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      MachineInstrIndex MIIdx,
                                      MachineOperand& MO, unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         MachineInstrIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineInstrIndex baseIndex = MIIdx;
  MachineInstrIndex start = baseIndex;
  while (baseIndex.getVecIndex() < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex = getNextIndex(baseIndex);
  MachineInstrIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getNextSlot(getUseIndex(baseIndex));
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = getNextSlot(getDefIndex(start));
      SeenDefUse = true;
      break;
    }

    baseIndex = getNextIndex(baseIndex);
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex = getNextIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = getNextSlot(getDefIndex(MIIdx));
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MachineInstrIndex(MBB->getNumber()),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
bool
LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
                                      SmallVector<MachineInstr*,16> &IdentCopies,
                                      SmallVector<MachineInstr*,16> &OtherCopies,
                                      bool &HaveConflict) {
  HaveConflict = false;

  unsigned NumIdent = 0;
  unsigned NumSources = 0;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(SrcInt.reg),
         re = mri_->reg_end(); ri != re; ++ri) {
    MachineOperand &O = ri.getOperand();
    if (!O.isDef())
      continue;

    ++NumSources;
    MachineInstr *MI = &*ri;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      continue;
    if (SrcReg != DstInt.reg) {
      OtherCopies.push_back(MI);
      HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
    } else {
      IdentCopies.push_back(MI);
      ++NumIdent;
    }
  }

  return NumSources >= 5 && (((float)NumIdent) / NumSources) > 0.20F;
}
void LiveIntervals::performEarlyCoalescing() {
  if (!EarlyCoalescing)
    return;

  /// Perform early coalescing: eliminate copies which feed into phi joins
  /// and whose sources are defined by the phi joins.
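  //
  // Sketch of the targeted pattern (hand-written, not pass output):
  //   BB0:  %reg1026<def> = MOV %reg1024     ; copy feeding the phi join
  //   ...
  //   BB2:  %reg1024<def> = MOV %reg1026     ; source defined by the join
  // When enough of the feeding copies are such identity copies, they can be
  // deleted and the source interval merged into the destination below.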
  for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
    MachineInstr *Join = phiJoinCopies[i];
    if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
      break;

    unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
    bool isMove = tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
    assert(isMove && "PHI join instruction must be a move!");

    LiveInterval &DstInt = getInterval(PHIDst);
    LiveInterval &SrcInt = getInterval(PHISrc);
    SmallVector<MachineInstr*, 16> IdentCopies;
    SmallVector<MachineInstr*, 16> OtherCopies;
    bool HaveConflict;
    if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies,
                                HaveConflict))
      continue;

    DEBUG(errs() << "PHI Join: " << *Join);
    assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
    VNInfo *VNI = DstInt.getValNumInfo(0);
    VNInfo *NewVNI = HaveConflict
      ? 0 : SrcInt.getNextValue(VNI->def, 0, false, VNInfoAllocator);
    // Now let's eliminate all the would-be identity copies.
    for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = IdentCopies[i];
      DEBUG(errs() << "Coalescing: " << *PHICopy);

      MachineBasicBlock *PHIMBB = PHICopy->getParent();
      MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
      MachineInstrIndex DefIndex = getDefIndex(MIIndex);
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      MachineInstrIndex StartIndex = HaveConflict
        ? SLR->start : getMBBStartIdx(PHIMBB);
      MachineInstrIndex EndIndex = SLR->end;

      // Delete val# defined by the now identity copy and add the range from
      // beginning of the mbb to the end of the range.
      SrcInt.removeValNo(SLR->valno);
      DEBUG(errs() << "  added range [" << StartIndex << ','
                   << EndIndex << "] to reg" << DstInt.reg << '\n');
      DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
      // FIXME: Update uses of src to dst in this range?
      DEBUG(errs() << "  added range [" << StartIndex << ','
                   << SLR->start << "] to reg" << SrcInt.reg << '\n');
      SrcInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
      if (PHICopy->killsRegister(PHIDst))
        EndIndex = DefIndex;
      DstInt.removeRange(StartIndex, EndIndex);

      RemoveMachineInstrFromMaps(PHICopy);
      PHICopy->eraseFromParent();
    }

    // First unset the kill.
    for (unsigned i = 0, e = Join->getNumOperands(); i != e; ++i) {
      MachineOperand &O = Join->getOperand(i);
      if (!O.isReg() || O.getReg() != PHISrc)
        continue;
      O.setIsKill(false);
    }
    MachineInstrIndex MIIndex = getInstructionIndex(Join);
    MachineInstrIndex UseIndex = getUseIndex(MIIndex);
    MachineInstrIndex DefIndex = getDefIndex(MIIndex);
    LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
    LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
    SrcInt.addRange(LiveRange(DLR->start, DLR->end, SLR->valno));
    if (!HaveConflict)
      SrcInt.MergeValueInAsValue(DstInt, VNI, NewVNI);

    // Change all references of the phi destination to the phi source.
    for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(PHIDst),
           re = mri_->reg_end(); ri != re; ) {
      MachineOperand &O = ri.getOperand();
      ++ri;
      O.setReg(PHISrc);
    }
    removeInterval(DstInt.reg);

    RemoveMachineInstrFromMaps(Join);
    Join->eraseFromParent();

    ++numCoalescing;
  }
}
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
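///
/// Worked example (indices invented): if instructions are numbered 4, 8 and
/// 12, a virtual register defined at 4 and last read at 12 could be described
/// by the interval [4,13) (one slot past the killing use), while a value that
/// is defined at 8 and never read is dead there and gets a one-slot interval
/// such as [8,9).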
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    MachineInstrIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex = getNextIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      while (Slots--)
        MIIndex = getNextIndex(MIIndex);

      // Skip over empty indices.
      while (MIIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex = getNextIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
bool LiveIntervals::findLiveInMBBs(
                              MachineInstrIndex Start, MachineInstrIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(
                              MachineInstrIndex Start, MachineInstrIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       MachineInstrIndex UseIdx) const {
  MachineInstrIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
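///
/// Illustrative example (hand-written): a definition such as
///   %reg1030<def> = MOV32ri 42
/// or a load from an immutable stack slot (e.g. a fixed argument slot) can
/// simply be re-executed at its use points instead of being spilled and
/// reloaded; a load from memory that may be modified cannot.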
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // tests instead.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def of that register.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      // Only allow one other register use, as that's all the
      // remat mechanisms support currently.
      if (Reg != li.reg) {
        if (ImpUse == 0)
          ImpUse = Reg;
        else if (Reg != ImpUse)
          return false;
      }
      // For the use, there should be only one associated def.
      if (I != E && (next(I) != E || IsLiveIn))
        return false;
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      MachineInstrIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
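///
/// Illustrative sketch (x86-flavoured, hand-written): instead of reloading
/// before the use,
///   %reg1024 = MOV32rm <fi#2>            ; reload from the spill slot
///   %reg1026 = ADD32rr %reg1025, %reg1024
/// the memory reference can often be folded into the user,
///   %reg1026 = ADD32rm %reg1025, <fi#2>
/// so no separate reload instruction (or index slot for it) is needed.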
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         MachineInstrIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx.getVecIndex()] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
1673 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1674 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1675 bool LiveIntervals::
1676 rewriteInstructionForSpills(const LiveInterval
&li
, const VNInfo
*VNI
,
1677 bool TrySplit
, MachineInstrIndex index
, MachineInstrIndex end
,
1679 MachineInstr
*ReMatOrigDefMI
, MachineInstr
*ReMatDefMI
,
1680 unsigned Slot
, int LdSlot
,
1681 bool isLoad
, bool isLoadSS
, bool DefIsReMat
, bool CanDelete
,
1683 const TargetRegisterClass
* rc
,
1684 SmallVector
<int, 4> &ReMatIds
,
1685 const MachineLoopInfo
*loopInfo
,
1686 unsigned &NewVReg
, unsigned ImpUse
, bool &HasDef
, bool &HasUse
,
1687 DenseMap
<unsigned,unsigned> &MBBVRegsMap
,
1688 std::vector
<LiveInterval
*> &NewLIs
) {
1689 bool CanFold
= false;
1691 for (unsigned i
= 0; i
!= MI
->getNumOperands(); ++i
) {
1692 MachineOperand
& mop
= MI
->getOperand(i
);
1695 unsigned Reg
= mop
.getReg();
1696 unsigned RegI
= Reg
;
1697 if (Reg
== 0 || TargetRegisterInfo::isPhysicalRegister(Reg
))
1702 bool TryFold
= !DefIsReMat
;
1703 bool FoldSS
= true; // Default behavior unless it's a remat.
1704 int FoldSlot
= Slot
;
1706 // If this is the rematerializable definition MI itself and
1707 // all of its uses are rematerialized, simply delete it.
1708 if (MI
== ReMatOrigDefMI
&& CanDelete
) {
1709 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
1711 RemoveMachineInstrFromMaps(MI
);
1712 vrm
.RemoveMachineInstrFromMaps(MI
);
1713 MI
->eraseFromParent();
1717 // If def for this use can't be rematerialized, then try folding.
1718 // If def is rematerializable and it's a load, also try folding.
1719 TryFold
= !ReMatDefMI
|| (ReMatDefMI
&& (MI
== ReMatOrigDefMI
|| isLoad
));
1721 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1727 // Scan all of the operands of this instruction rewriting operands
1728 // to use NewVReg instead of li.reg as appropriate. We do this for
1731 // 1. If the instr reads the same spilled vreg multiple times, we
1732 // want to reuse the NewVReg.
1733 // 2. If the instr is a two-addr instruction, we are required to
1734 // keep the src/dst regs pinned.
1736 // Keep track of whether we replace a use and/or def so that we can
1737 // create the spill interval with the appropriate range.
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
                     nI.getNextValue(MachineInstrIndex(), 0, false,
                                     VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        MachineInstrIndex End = getNextSlot(getUseIndex(index));
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(MachineInstrIndex(), 0, false,
                                   VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         MachineInstrIndex Idx) const {
  MachineInstrIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHIIndex())
      continue;

    MachineInstrIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    MachineInstrIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(MachineInstrIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  MachineInstrIndex start = getBaseIndex(I->start);
  MachineInstrIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
  // First collect all the defs / uses in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    MachineInstrIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
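  // Sorting by instruction index means the loop below visits each basic
  // block's defs and uses in program order, so the per-MBB SpillIdxes /
  // RestoreIdxes bookkeeping only ever has to inspect its most recent
  // entry (back()).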
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    MachineInstrIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // If this instruction defines the register but does not use it, it's
        // better to start a new interval to avoid artificially extending
        // the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }
    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }
    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
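    // getSpillWeight grows with the loop nesting depth of MBB, so defs and
    // uses inside deeply nested loops make the resulting spill interval look
    // correspondingly more expensive to spill again.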
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, MachineInstrIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, MachineInstrIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = MachineInstrIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
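      // The new vreg is flagged via setIsImplicitlyDefined below so that
      // later rewriting stages can treat the value as undefined rather than
      // emitting a real reload for these uses.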
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      errs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      MachineInstrIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(MachineInstrIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(MachineInstrIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          errs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          errs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(errs(), tri_);
      errs() << '\n';
    });
  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further, and it's
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    MachineInstrIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != MachineInstrIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, so not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }
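  // Note that every new interval created below for pieces of this vreg is
  // assigned this same slot, so spills and reloads of the different pieces
  // all hit one stack location.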
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        MachineInstrIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      MachineInstrIndex index = restores[i].index;
      if (index == MachineInstrIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and update the
              // register interval's spill weight to HUGE_VALF to prevent it
              // from being spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
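      // Normalize the accumulated spill weight by the approximate number of
      // instructions the new interval spans, so the weight reflects how dense
      // its defs and uses are rather than just how many there are.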
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        MachineInstrIndex LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
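/// For example (X86 is used purely as an illustration here), asking for the
/// representative of AX would typically return EAX or RAX, whichever is the
/// largest super-register that has a live interval recorded.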
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    MachineInstrIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));
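  // For example, if PhysReg is AH and the representative SpillReg ends up
  // being EAX, then aliases like AL and AX are sub-registers of EAX and are
  // covered; an alias that SpillReg does not cover would trip the assert.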
  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    MachineInstrIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      MachineInstrIndex StartIdx = getLoadIndex(Index);
      MachineInstrIndex EndIdx = getNextSlot(getStoreIndex(Index));
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index),
                           getNextSlot(getStoreIndex(Index)));
      }
    }
  }
  return Cut;
}
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(terminatorGaps[startInst->getParent()]);
  LiveRange LR(
    MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
    getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
  Interval.addRange(LR);

  return LR;
}