//===-- llvm/CodeGen/Spiller.cpp - Spiller -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "spiller"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumOmitted , "Number of reloads omitted");
STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
STATISTIC(NumCopified, "Number of available reloads turned into copies");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumSUnfold , "Number of stores unfolded");
STATISTIC(NumModRefUnfold, "Number of modref unfolded");
enum SpillerName { simple, local };

static cl::opt<SpillerName>
SpillerOpt("spiller",
           cl::desc("Spiller to use: (default: local)"),
           cl::Prefix,
           cl::values(clEnumVal(simple, "simple spiller"),
                      clEnumVal(local,  "local spiller"),
                      clEnumValEnd),
           cl::init(local));
// ****************************** //
// Simple Spiller Implementation //
// ****************************** //

Spiller::~Spiller() {}
bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                                         LiveIntervals* LIs) {
  DOUT << "********** REWRITE MACHINE CODE **********\n";
  DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();

  // LoadedRegs - Keep track of which vregs are loaded, so that we only load
  // each vreg once (in the case where a spilled vreg is used by multiple
  // operands). This is always smaller than the number of operands to the
  // current machine instr, so it should be small.
  std::vector<unsigned> LoadedRegs;
  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    DOUT << MBBI->getBasicBlock()->getName() << ":\n";
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
         MII != E; ++MII) {
      MachineInstr &MI = *MII;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (MO.isReg() && MO.getReg()) {
          if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
            unsigned VirtReg = MO.getReg();
            unsigned SubIdx = MO.getSubReg();
            unsigned PhysReg = VRM.getPhys(VirtReg);
            unsigned RReg = SubIdx ? TRI.getSubReg(PhysReg, SubIdx) : PhysReg;
            if (!VRM.isAssignedReg(VirtReg)) {
              int StackSlot = VRM.getStackSlot(VirtReg);
              const TargetRegisterClass* RC =
                MF.getRegInfo().getRegClass(VirtReg);

              if (MO.isUse() &&
                  std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                    == LoadedRegs.end()) {
                TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
                MachineInstr *LoadMI = prior(MII);
                VRM.addSpillSlotUse(StackSlot, LoadMI);
                LoadedRegs.push_back(VirtReg);
                ++NumLoads;
                DOUT << '\t' << *LoadMI;
              }

              if (MO.isDef()) {
                TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
                                        StackSlot, RC);
                MachineInstr *StoreMI = next(MII);
                VRM.addSpillSlotUse(StackSlot, StoreMI);
                ++NumStores;
              }
            }
            MF.getRegInfo().setPhysRegUsed(RReg);
            MI.getOperand(i).setReg(RReg);
            MI.getOperand(i).setSubReg(0);
          } else {
            MF.getRegInfo().setPhysRegUsed(MO.getReg());
          }
        }
      }
    }
  }
  return true;
}
// ****************** //
// Utility Functions //
// ****************** //
/// InvalidateKill - An MI that defines the specified register is being
/// deleted; invalidate the register kill information.
static void InvalidateKill(unsigned Reg, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    KillOps[Reg] = NULL;
    RegKills.reset(Reg);
  }
}
/// findSinglePredSuccessor - Return via reference a vector of machine basic
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
                                    SmallVectorImpl<MachineBasicBlock*> &Succs) {
  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->pred_size() == 1)
      Succs.push_back(SuccMBB);
  }
}
/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      RegKills.reset(Reg);
      KillOps[Reg] = NULL;
    }
  }
}
/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also checks if
/// the def MI has other definition operands that are not dead. Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (MO.isReg() && MO.isDef()) {
      if (MO.getReg() == Reg)
        DefOp = &MO;
      else if (!MO.isDead())
        HasLiveDef = true;
    }
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // The def is dead; mark it as such.
    DefOp->setIsDead();
    return true;
  }
  return false;
}
/// UpdateKills - Track and update kill info. If an MI reads a register that is
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        const TargetRegisterInfo* TRI) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      if (!MI.isRegTiedToDefOperand(i))
        // Unless it's a two-address operand, this is the new kill.
        MO.setIsKill();
    }
    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
    // It also defines (or partially defines) aliases.
    for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
      RegKills.reset(*AS);
      KillOps[*AS] = NULL;
    }
  }
}
/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg));
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    unsigned SubIdx = MO.getSubReg();
    unsigned Phys = VRM.getPhys(VirtReg);
    unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
    MO.setReg(RReg);
    MO.setSubReg(0);
  }
  ++NumReMats;
}
/// findSuperReg - Find the SubReg's super-register of given register class
/// where its SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}
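
// For illustration only (hypothetical x86 query): with RC = GR32, SubReg = AX
// and SubIdx naming the 16-bit sub-register index, findSuperReg would scan
// GR32 and return EAX, since EAX's 16-bit sub-register is AX.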
// ******************************** //
// Available Spills Implementation //
// ******************************** //
/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    ++I;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
  }
}
/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}
/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
    else
      DOUT << "SS#" << SlotOrReMat << "\n";
  }
}
/// ClobberPhysReg - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}
/// AddAvailableRegsToLiveIn - Availability information is being kept coming
/// into the specified MBB. Add available physical registers as potential
/// live-in's. If they are reused in the MBB, they will be added to the
/// live-in set; this keeps the register scavenger and the post-allocation
/// scheduler consistent.
void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
                                               BitVector &RegKills,
                                               std::vector<MachineOperand*> &KillOps) {
  std::set<unsigned> NotAvailable;
  for (std::multimap<unsigned, int>::iterator
         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
       I != E; ++I) {
    unsigned Reg = I->first;
    const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
    // FIXME: A temporary workaround. We can't reuse available value if it's
    // not safe to move the def of the virtual register's class. e.g.
    // X86::RFP* register classes. Do not add it as a live-in.
    if (!TII->isSafeToMoveRegClassDefs(RC))
      // This is no longer available.
      NotAvailable.insert(Reg);
    else {
      MBB.addLiveIn(Reg);
      InvalidateKill(Reg, RegKills, KillOps);
    }

    // Skip over the same register.
    std::multimap<unsigned, int>::iterator NI = next(I);
    while (NI != E && NI->first == Reg) {
      ++NI;
      ++I;
    }
  }

  for (std::set<unsigned>::iterator I = NotAvailable.begin(),
         E = NotAvailable.end(); I != E; ++I) {
    ClobberPhysReg(*I);
    for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
         *SubRegs; ++SubRegs)
      ClobberPhysReg(*SubRegs);
  }
}
/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes. This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}
// ************************** //
// Reuse Info Implementation //
// ************************** //
/// GetRegForReload - We are about to emit a reload into PhysReg. If there
/// is some other operand that is using the specified register, either pick
/// a new register to use, or evict the previous reload and use this reg.
unsigned ReuseInfo::GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                                    AvailableSpills &Spills,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                    SmallSet<unsigned, 8> &Rejected,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    VirtRegMap &VRM) {
  const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
                                 .getInstrInfo();

  if (Reuses.empty()) return PhysReg;  // This is most often empty.
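
  // For example (illustrative register names): if this reload wants R0, but an
  // earlier operand already reused R0 and was assigned R1 as its actual reload
  // register, we simply take R1 for this reload instead of conflicting with
  // the earlier reuse, unless R1 has already been rejected for this operand.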
  for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
    ReusedOp &Op = Reuses[ro];
    // If we find some other reuse that was supposed to use this register
    // exactly for its reload, we can change this reload to use ITS reload
    // register. That is, unless its reload register has already been
    // considered and subsequently rejected because it has also been reused
    // by another operand.
    if (Op.PhysRegReused == PhysReg &&
        Rejected.count(Op.AssignedPhysReg) == 0) {
      // Yup, use the reload register that we didn't use before.
      unsigned NewReg = Op.AssignedPhysReg;
      Rejected.insert(PhysReg);
      return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected,
                             RegKills, KillOps, VRM);
    } else {
      // Otherwise, we might also have a problem if a previously reused
      // value aliases the new register. If so, codegen the previous reload
      // and use this one.
      unsigned PRRU = Op.PhysRegReused;
      const TargetRegisterInfo *TRI = Spills.getRegInfo();
      if (TRI->areAliases(PRRU, PhysReg)) {
        // Okay, we found out that an alias of a reused register
        // was used. This isn't good because it means we have
        // to undo a previous reuse.
        MachineBasicBlock *MBB = MI->getParent();
        const TargetRegisterClass *AliasRC =
          MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

        // Copy Op out of the vector and remove it, we're going to insert an
        // explicit load for it.
        ReusedOp NewOp = Op;
        Reuses.erase(Reuses.begin()+ro);

        // Ok, we're going to try to reload the assigned physreg into the
        // slot that we were supposed to in the first place. However, that
        // register could hold a reuse. Check to see if it conflicts or
        // would prefer us to use a different register.
        unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg,
                                              MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

        MachineBasicBlock::iterator MII = MI;
        if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
          ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI, VRM);
        } else {
          TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
                                    NewOp.StackSlotOrReMat, AliasRC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
          // Any stores to this stack slot are not dead anymore.
          MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
          ++NumLoads;
        }
        Spills.ClobberPhysReg(NewPhysReg);
        Spills.ClobberPhysReg(NewOp.PhysRegReused);

        unsigned SubIdx = MI->getOperand(NewOp.Operand).getSubReg();
        unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
        MI->getOperand(NewOp.Operand).setReg(RReg);
        MI->getOperand(NewOp.Operand).setSubReg(0);

        Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
        --MII;
        UpdateKills(*MII, RegKills, KillOps, TRI);
        DOUT << '\t' << *MII;

        DOUT << "Reuse undone!\n";
        --NumReused;

        // Finally, PhysReg is now available, go ahead and use it.
        return PhysReg;
      }
    }
  }
  return PhysReg;
}
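
// Note on termination: each recursive GetRegForReload call first inserts the
// conflicting register into Rejected, so the candidate set shrinks at every
// level and the recursion always bottoms out.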
// ***************************** //
// Local Spiller Implementation //
// ***************************** //
bool LocalSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                                        LiveIntervals* LIs) {
  RegInfo = &MF.getRegInfo();
  TRI = MF.getTarget().getRegisterInfo();
  TII = MF.getTarget().getInstrInfo();
  AllocatableRegs = TRI->getAllocatableSet(MF);
  DOUT << "\n**** Local spiller rewriting function '"
       << MF.getFunction()->getName() << "':\n";
  DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
       << " ****\n";

  // Spills - Keep track of which spilled values are available in physregs
  // so that we can choose to reuse the physregs instead of emitting
  // reloads. This is usually refreshed per basic block.
  AvailableSpills Spills(TRI, TII);

  // Keep track of kill information.
  BitVector RegKills(TRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(TRI->getNumRegs(), NULL);
  // SinglePredSuccs - Successor blocks which have a single predecessor.
  SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
  SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;
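
  // Note: spill-slot availability in Spills can safely flow from a block only
  // into a successor that has no other predecessors, which is why such
  // successors are visited next (below) while Spills is still populated.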
  // Traverse the basic blocks depth first.
  MachineBasicBlock *Entry = MF.begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*,
         SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    if (!EarlyVisited.count(MBB))
      RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);

    // If this MBB is the only predecessor of a successor, keep the
    // availability information and visit it next.
    do {
      // Keep visiting single predecessor successors as long as possible.
      SinglePredSuccs.clear();
      findSinglePredSuccessor(MBB, SinglePredSuccs);
      if (SinglePredSuccs.empty())
        MBB = 0;
      else {
        // FIXME: More than one successor, each of which has MBB as its
        // only predecessor.
        MBB = SinglePredSuccs[0];
        if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
          Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
          RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);
        }
      }
    } while (MBB);

    // Clear the availability info.
    Spills.clear();
  }
  DOUT << "**** Post Machine Instrs ****\n";

  // Mark unused spill slots.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  int SS = VRM.getLowSpillSlot();
  if (SS != VirtRegMap::NO_STACK_SLOT)
    for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
      if (!VRM.isSpillSlotUsed(SS)) {
        MFI->RemoveStackObject(SS);
        ++NumDSS;
      }

  return true;
}
/// FoldsStackSlotModRef - Return true if the specified MI folds the specified
/// stack slot mod/ref. It also checks if it's possible to unfold the
/// instruction by having it define a specified physical register instead.
static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
                                 const TargetInstrInfo *TII,
                                 const TargetRegisterInfo *TRI,
                                 VirtRegMap &VRM) {
  if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
    return true;

  bool Found = false;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
    unsigned VirtReg = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    if (MR & VirtRegMap::isModRef)
      if (VRM.getStackSlot(VirtReg) == SS) {
        Found = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
        break;
      }
  }
  if (!Found)
    return false;

  // Does the instruction use a register that overlaps the scratch register?
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      if (!VRM.hasPhys(Reg))
        continue;
      Reg = VRM.getPhys(Reg);
    }
    if (TRI->regsOverlap(PhysReg, Reg))
      return true;
  }
  return false;
}
/// FindFreeRegister - Find a free register of a given register class by looking
/// at (at most) the last two machine instructions.
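///
/// For illustration (hypothetical x86 sequence): after
///     xorq %r12<kill>, %r13
/// %r12 is killed, not redefined afterwards, and not otherwise used, so it
/// can serve as the requested scratch register for a GR64 query.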
static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
                                 MachineBasicBlock &MBB,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI,
                                 BitVector &AllocatableRegs) {
  BitVector Defs(TRI->getNumRegs());
  BitVector Uses(TRI->getNumRegs());
  SmallVector<unsigned, 4> LocalUses;
  SmallVector<unsigned, 4> Kills;

  // Take a look at 2 instructions at most.
  for (unsigned Count = 0; Count < 2; ++Count) {
    if (MII == MBB.begin())
      break;
    MachineInstr *PrevMI = prior(MII);
    for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = PrevMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;
      unsigned Reg = MO.getReg();
      if (MO.isDef()) {
        Defs.set(Reg);
        for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
          Defs.set(*AS);
      } else {
        LocalUses.push_back(Reg);
        if (MO.isKill() && AllocatableRegs[Reg])
          Kills.push_back(Reg);
      }
    }

    for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
      unsigned Kill = Kills[i];
      if (!Defs[Kill] && !Uses[Kill] &&
          TRI->getPhysicalRegisterRegClass(Kill) == RC)
        return Kill;
    }
    for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
      unsigned Reg = LocalUses[i];
      Uses.set(Reg);
      for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
        Uses.set(*AS);
    }

    MII = PrevMI;
  }

  return 0;
}
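
/// AssignPhysToVirtReg - Rewrite each operand of MI that refers to VirtReg
/// so that it uses PhysReg instead.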
void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == VirtReg)
      MO.setReg(PhysReg);
  }
}
/// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
/// a scratch register is available.
///     xorq  %r12<kill>, %r13
///     addq  %rax, -184(%rbp)
///     addq  %r13, -184(%rbp)
/// ==>
///     xorq  %r12<kill>, %r13
///     movq  -184(%rbp), %r12
///     addq  %rax, %r12
///     addq  %r13, %r12
///     movq  %r12, -184(%rbp)
bool LocalSpiller::OptimizeByUnfold2(unsigned VirtReg, int SS,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MII,
                                     std::vector<MachineInstr*> &MaybeDeadStores,
                                     AvailableSpills &Spills,
                                     BitVector &RegKills,
                                     std::vector<MachineOperand*> &KillOps,
                                     VirtRegMap &VRM) {
  MachineBasicBlock::iterator NextMII = next(MII);
  if (NextMII == MBB.end())
    return false;

  if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
    return false;

  // Now let's see if the last couple of instructions happen to have freed up
  // a register.
  const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
  unsigned PhysReg = FindFreeRegister(MII, MBB, RC, TRI, AllocatableRegs);
  if (!PhysReg)
    return false;

  MachineFunction &MF = *MBB.getParent();
  TRI = MF.getTarget().getRegisterInfo();
  MachineInstr &MI = *MII;
  if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, VRM))
    return false;

  // If the next instruction also folds the same SS modref and can be unfolded,
  // then it's worthwhile to issue a load from SS into the free register and
  // then unfold these instructions.
  if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM))
    return false;

  // Load from SS to the spare physical register.
  TII->loadRegFromStackSlot(MBB, MII, PhysReg, SS, RC);
  // This invalidates Phys.
  Spills.ClobberPhysReg(PhysReg);
  // Remember it's available.
  Spills.addAvailable(SS, PhysReg);
  MaybeDeadStores[SS] = NULL;

  // Unfold current MI.
  SmallVector<MachineInstr*, 4> NewMIs;
  if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
    assert(0 && "Unable to unfold the load / store folding instruction!");
  assert(NewMIs.size() == 1);
  AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg);
  VRM.transferRestorePts(&MI, NewMIs[0]);
  MII = MBB.insert(MII, NewMIs[0]);
  InvalidateKills(MI, RegKills, KillOps);
  VRM.RemoveMachineInstrFromMaps(&MI);
  MBB.erase(&MI);
  ++NumModRefUnfold;

  // Unfold next instructions that fold the same SS.
  do {
    MachineInstr &NextMI = *NextMII;
    NextMII = next(NextMII);
    NewMIs.clear();
    if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
      assert(0 && "Unable to unfold the load / store folding instruction!");
    assert(NewMIs.size() == 1);
    AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg);
    VRM.transferRestorePts(&NextMI, NewMIs[0]);
    MBB.insert(NextMII, NewMIs[0]);
    InvalidateKills(NextMI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(&NextMI);
    MBB.erase(&NextMI);
    ++NumModRefUnfold;
  } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM));

  // Store the value back into SS.
  TII->storeRegToStackSlot(MBB, NextMII, PhysReg, true, SS, RC);
  MachineInstr *StoreMI = prior(NextMII);
  VRM.addSpillSlotUse(SS, StoreMI);
  VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);

  return true;
}
/// OptimizeByUnfold - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     movl  %eax, -32(%ebp)
///     movl  -36(%ebp), %eax
///     orl   %eax, -32(%ebp)
/// ==>
///     orl   -36(%ebp), %eax
///     mov   %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalSpiller::OptimizeByUnfold(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MII,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                    AvailableSpills &Spills,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    VirtRegMap &VRM) {
  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
    // Only transform a MI that folds a single register.
    if (UnfoldedOpc)
      return false;
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    // MI2VirtMap can be updated, which invalidates the iterator.
    // Increment the iterator first.
    ++I;
    if (VRM.isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM.getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg || !DeadStore->readsRegister(PhysReg))
        continue;
      UnfoldPR = PhysReg;
      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                    false, true);
    }
  }

  if (!UnfoldedOpc) {
    if (!UnfoldVR)
      return false;

    // Look for other unfolding opportunities.
    return OptimizeByUnfold2(UnfoldVR, FoldedSS, MBB, MII,
                             MaybeDeadStores, Spills, RegKills, KillOps, VRM);
  }
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
      continue;
    if (VRM.isAssignedReg(VirtReg)) {
      unsigned PhysReg = VRM.getPhys(VirtReg);
      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
    } else if (VRM.isReMaterialized(VirtReg))
      continue;
    int SS = VRM.getStackSlot(VirtReg);
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
    if (PhysReg) {
      if (TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
      continue;
    }
    if (VRM.hasPhys(VirtReg)) {
      PhysReg = VRM.getPhys(VirtReg);
      if (!TRI->regsOverlap(PhysReg, UnfoldPR))
        continue;
    }

    // Ok, we'll need to reload the value into a register which makes
    // it impossible to perform the store unfolding optimization later.
    // Let's see if it is possible to fold the load if the store is
    // unfolded. This allows us to perform the store unfolding
    // optimization.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
      assert(NewMIs.size() == 1);
      MachineInstr *NewMI = NewMIs.back();
      NewMIs.clear();
      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
      assert(Idx != -1);
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(Idx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
      if (FoldedMI) {
        VRM.addSpillSlotUse(SS, FoldedMI);
        if (!VRM.hasPhys(UnfoldVR))
          VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
        VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = MBB.insert(MII, FoldedMI);
        InvalidateKills(MI, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(&MI);
        MBB.erase(&MI);
        MF.DeleteMachineInstr(NewMI);
        return true;
      }
      MF.DeleteMachineInstr(NewMI);
    }
  }
  return false;
}
/// CommuteToFoldReload -
/// Look for
/// r1 = load fi#1
/// r1 = op r1, r2<kill>
/// store r1, fi#1
///
/// If op is commutable and r2 is killed, then we can xform these to
/// r2 = op r2, fi#1
/// store r2, fi#1
bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MII,
                                       unsigned VirtReg, unsigned SrcReg, int SS,
                                       AvailableSpills &Spills,
                                       BitVector &RegKills,
                                       std::vector<MachineOperand*> &KillOps,
                                       const TargetRegisterInfo *TRI,
                                       VirtRegMap &VRM) {
  if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
    return false;

  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  MachineBasicBlock::iterator DefMII = prior(MII);
  MachineInstr *DefMI = DefMII;
  const TargetInstrDesc &TID = DefMI->getDesc();
  unsigned NewDstIdx;
  if (DefMII != MBB.begin() &&
      TID.isCommutable() &&
      TII->CommuteChangesDestination(DefMI, NewDstIdx)) {
    MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
    unsigned NewReg = NewDstMO.getReg();
    if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
      return false;
    MachineInstr *ReloadMI = prior(DefMII);
    int FrameIdx;
    unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
    if (DestReg != SrcReg || FrameIdx != SS)
      return false;
    int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
    if (UseIdx == -1)
      return false;
    unsigned DefIdx;
    if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
      return false;
    assert(DefMI->getOperand(DefIdx).isReg() &&
           DefMI->getOperand(DefIdx).getReg() == SrcReg);

    // Now commute def instruction.
    MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
    if (!CommutedMI)
      return false;
    SmallVector<unsigned, 1> Ops;
    Ops.push_back(NewDstIdx);
    MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
    // Not needed since foldMemoryOperand returns new MI.
    MF.DeleteMachineInstr(CommutedMI);
    if (!FoldedMI)
      return false;

    VRM.addSpillSlotUse(SS, FoldedMI);
    VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
    // Insert new def MI and spill MI.
    const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
    TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
    MII = prior(MII);
    MachineInstr *StoreMI = MII;
    VRM.addSpillSlotUse(SS, StoreMI);
    VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
    MII = MBB.insert(MII, FoldedMI);  // Update MII to backtrack.

    // Delete all 3 old instructions.
    InvalidateKills(*ReloadMI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(ReloadMI);
    MBB.erase(ReloadMI);
    InvalidateKills(*DefMI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(DefMI);
    MBB.erase(DefMI);
    InvalidateKills(MI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(&MI);
    MBB.erase(&MI);

    // If NewReg was previously holding value of some SS, it's now clobbered.
    // This has to be done now because it's a physical register. When this
    // instruction is re-visited, it's ignored.
    Spills.ClobberPhysReg(NewReg);

    ++NumCommutes;
    return true;
  }

  return false;
}
/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
/// the last store to the same slot is now dead. If so, remove the last store.
void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MII,
                                       int Idx, unsigned PhysReg, int StackSlot,
                                       const TargetRegisterClass *RC,
                                       bool isAvailable, MachineInstr *&LastStore,
                                       AvailableSpills &Spills,
                                       SmallSet<MachineInstr*, 4> &ReMatDefs,
                                       BitVector &RegKills,
                                       std::vector<MachineOperand*> &KillOps,
                                       VirtRegMap &VRM) {
  TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
  MachineInstr *StoreMI = next(MII);
  VRM.addSpillSlotUse(StackSlot, StoreMI);
  DOUT << "Store:\t" << *StoreMI;

  // If there is a dead store to this stack slot, nuke it now.
  if (LastStore) {
    DOUT << "Removed dead store:\t" << *LastStore;
    ++NumDSE;
    SmallVector<unsigned, 2> KillRegs;
    InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
    MachineBasicBlock::iterator PrevMII = LastStore;
    bool CheckDef = PrevMII != MBB.begin();
    if (CheckDef)
      --PrevMII;
    VRM.RemoveMachineInstrFromMaps(LastStore);
    MBB.erase(LastStore);
    if (CheckDef) {
      // Look at defs of killed registers on the store. Mark the defs
      // as dead since the store has been deleted and they aren't
      // being reused.
      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
        bool HasOtherDef = false;
        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
          MachineInstr *DeadDef = PrevMII;
          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
            // FIXME: This assumes a remat def does not have side
            // effects.
            VRM.RemoveMachineInstrFromMaps(DeadDef);
            MBB.erase(DeadDef);
            ++NumDRM;
          }
        }
      }
    }
  }

  LastStore = next(MII);

  // If the stack slot value was previously available in some other
  // register, change it now. Otherwise, make the register available,
  // in PhysReg.
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, PhysReg, isAvailable);
  ++NumStores;
}
/// TransferDeadness - An identity copy definition is dead and it's being
/// removed. Find the last def or use and mark it as dead / kill.
void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                                    unsigned Reg, BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps) {
  int LastUDDist = -1;
  MachineInstr *LastUDMI = NULL;
  for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
         RE = RegInfo->reg_end(); RI != RE; ++RI) {
    MachineInstr *UDMI = &*RI;
    if (UDMI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
    if (DI == DistanceMap.end() || DI->second > CurDist)
      continue;
    if ((int)DI->second < LastUDDist)
      continue;
    LastUDDist = DI->second;
    LastUDMI = UDMI;
  }

  if (LastUDMI) {
    MachineOperand *LastUD = NULL;
    for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = LastUDMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (!LastUD || (LastUD->isUse() && MO.isDef()))
        LastUD = &MO;
      if (LastUDMI->isRegTiedToDefOperand(i))
        return;
    }
    if (LastUD->isDef())
      LastUD->setIsDead();
    else {
      LastUD->setIsKill();
      RegKills.set(Reg);
      KillOps[Reg] = LastUD;
    }
  }
}
/// RewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them. If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
                              LiveIntervals *LIs,
                              AvailableSpills &Spills, BitVector &RegKills,
                              std::vector<MachineOperand*> &KillOps) {
  DOUT << "\n**** Local spiller rewriting MBB '"
       << MBB.getBasicBlock()->getName() << "':\n";

  MachineFunction &MF = *MBB.getParent();
  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store. If the stack slot value is never read
  // (because the value was used from some available register, for example), and
  // subsequently stored to, the original store is dead. This map keeps track
  // of inserted stores that are not used. If we see a subsequent store to the
  // same stack slot, the original store is deleted.
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);
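
  // For illustration (hypothetical x86 sequence):
  //     movl %eax, -8(%ebp)   ; store inserted when a vreg was spilled
  //     ...                   ; -8(%ebp) is never read in between
  //     movl %ecx, -8(%ebp)   ; a second store to the same slot
  // The first store is dead and can be erased once the second one is seen.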
  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // Clear kill info.
  SmallSet<unsigned, 2> KilledMIRegs;
  RegKills.reset();
  KillOps.clear();
  KillOps.resize(TRI->getNumRegs(), NULL);

  DistanceMap.clear();
  for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
       MII != E; ) {
    MachineBasicBlock::iterator NextMII = next(MII);

    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    bool Erased = false;
    bool BackTracked = false;
    if (OptimizeByUnfold(MBB, MII,
                         MaybeDeadStores, Spills, RegKills, KillOps, VRM))
      NextMII = next(MII);

    MachineInstr &MI = *MII;
    if (VRM.hasEmergencySpills(&MI)) {
      // Spill physical register(s) in the rare case the allocator has run out
      // of registers to allocate.
<int, 4> UsedSS
;
1160 std::vector
<unsigned> &EmSpills
= VRM
.getEmergencySpills(&MI
);
1161 for (unsigned i
= 0, e
= EmSpills
.size(); i
!= e
; ++i
) {
1162 unsigned PhysReg
= EmSpills
[i
];
1163 const TargetRegisterClass
*RC
=
1164 TRI
->getPhysicalRegisterRegClass(PhysReg
);
1165 assert(RC
&& "Unable to determine register class!");
1166 int SS
= VRM
.getEmergencySpillSlot(RC
);
1167 if (UsedSS
.count(SS
))
1168 assert(0 && "Need to spill more than one physical registers!");
1170 TII
->storeRegToStackSlot(MBB
, MII
, PhysReg
, true, SS
, RC
);
1171 MachineInstr
*StoreMI
= prior(MII
);
1172 VRM
.addSpillSlotUse(SS
, StoreMI
);
1173 TII
->loadRegFromStackSlot(MBB
, next(MII
), PhysReg
, SS
, RC
);
1174 MachineInstr
*LoadMI
= next(MII
);
1175 VRM
.addSpillSlotUse(SS
, LoadMI
);
1178 NextMII
= next(MII
);
    // Insert restores here if asked to.
    if (VRM.isRestorePt(&MI)) {
      std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
      for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
        unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
        if (!VRM.getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);

        // Check if the value being restored is available. If so, it must be
        // from a predecessor BB that fell through into this BB. We do not
        // expect:
        // BB1:
        // r1 = load fi#1
        // ...
        //    = r1<kill>
        // ... # r1 not clobbered
        // ...
        //    = load fi#1
        bool DoReMat = VRM.isReMaterialized(VirtReg);
        int SSorRMId = DoReMat
          ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
        if (InReg == Phys) {
          // If the value is already available in the expected register, save
          // a reload / remat.
          if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << SSorRMId;
          DOUT << " from physreg "
               << TRI->getName(InReg) << " for vreg"
               << VirtReg <<" instead of reloading into physreg "
               << TRI->getName(Phys) << "\n";
          ++NumOmitted;
          continue;
        } else if (InReg && InReg != Phys) {
          if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << SSorRMId;
          DOUT << " from physreg "
               << TRI->getName(InReg) << " for vreg"
               << VirtReg <<" by copying it into physreg "
               << TRI->getName(Phys) << "\n";

          // If the reloaded / remat value is available in another register,
          // copy it to the desired register.
          TII->copyRegToReg(MBB, &MI, Phys, InReg, RC, RC);

          // This invalidates Phys.
          Spills.ClobberPhysReg(Phys);
          // Remember it's available.
          Spills.addAvailable(SSorRMId, Phys);

          // Mark it killed.
          MachineInstr *CopyMI = prior(MII);
          MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
          KillOpnd->setIsKill();
          UpdateKills(*CopyMI, RegKills, KillOps, TRI);

          DOUT << '\t' << *CopyMI;
          ++NumCopified;
          continue;
        }

        if (VRM.isReMaterialized(VirtReg)) {
          ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM);
        } else {
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          TII->loadRegFromStackSlot(MBB, &MI, Phys, SSorRMId, RC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(SSorRMId, LoadMI);
          ++NumLoads;
        }

        // This invalidates Phys.
        Spills.ClobberPhysReg(Phys);
        // Remember it's available.
        Spills.addAvailable(SSorRMId, Phys);

        UpdateKills(*prior(MII), RegKills, KillOps, TRI);
        DOUT << '\t' << *prior(MII);
      }
      NextMII = next(MII);
    }
    // Insert spills here if asked to.
    if (VRM.isSpillPt(&MI)) {
      std::vector<std::pair<unsigned,bool> > &SpillRegs =
        VRM.getSpillPtSpills(&MI);
      for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
        unsigned VirtReg = SpillRegs[i].first;
        bool isKill = SpillRegs[i].second;
        if (!VRM.getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
        unsigned Phys = VRM.getPhys(VirtReg);
        int StackSlot = VRM.getStackSlot(VirtReg);
        TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
        MachineInstr *StoreMI = next(MII);
        VRM.addSpillSlotUse(StackSlot, StoreMI);
        DOUT << "Store:\t" << *StoreMI;
        VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      }
      NextMII = next(MII);
    }
    /// ReusedOperands - Keep track of operand reuse in case we need to undo
    /// reuse.
    ReuseInfo ReusedOperands(MI, TRI);
    SmallVector<unsigned, 4> VirtUseOps;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;   // Ignore non-register operands.

      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
        // Ignore physregs for spilling, but remember that it is used by this
        // function.
        RegInfo->setPhysRegUsed(VirtReg);
        continue;
      }

      // We want to process implicit virtual register uses first.
      if (MO.isImplicit())
        // If the virtual register is implicitly defined, emit an implicit_def
        // before so the scavenger knows it's "defined".
        VirtUseOps.insert(VirtUseOps.begin(), i);
      else
        VirtUseOps.push_back(i);
    }

    // Process all of the spilled uses and all non spilled reg references.
    SmallVector<int, 2> PotentialDeadStoreSlots;
    KilledMIRegs.clear();
    for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
      unsigned i = VirtUseOps[j];
      MachineOperand &MO = MI.getOperand(i);
      unsigned VirtReg = MO.getReg();
      assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
             "Not a virtual register?");

      unsigned SubIdx = MO.getSubReg();
      if (VRM.isAssignedReg(VirtReg)) {
        // This virtual register was assigned a physreg!
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);
        if (MO.isDef())
          ReusedOperands.markClobbered(Phys);
        unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);
        if (VRM.isImplicitlyDefined(VirtReg))
          BuildMI(MBB, &MI, MI.getDebugLoc(),
                  TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
        continue;
      }

      // This virtual register is now known to be a spilled value.
      if (!MO.isUse())
        continue;  // Handle defs in the loop below (handle use&def here though)
      bool AvoidReload = false;
      if (LIs->hasInterval(VirtReg)) {
        LiveInterval &LI = LIs->getInterval(VirtReg);
        if (!LI.liveAt(LIs->getUseIndex(LI.beginNumber())))
          // Must be defined by an implicit def. It should not be spilled. Note,
          // this is for correctness reasons. e.g.
          // 8   %reg1024<def> = IMPLICIT_DEF
          // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
          // The live range [12, 14) is not part of the r1024 live interval since
          // it's defined by an implicit def. It will not conflict with the live
          // interval of r1025. Now suppose both registers are spilled; you can
          // easily see a situation where both registers are reloaded before
          // the INSERT_SUBREG and both target registers that would overlap.
          AvoidReload = true;
      }
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      int SSorRMId = DoReMat
        ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
      int ReuseSlot = SSorRMId;

      // Check to see if this stack slot is available.
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
      // If this is a sub-register use, make sure the reuse register is in the
      // right register class. For example, for x86 not all of the 32-bit
      // registers have accessible sub-registers.
      // Similarly so for EXTRACT_SUBREG. Consider this:
      // EDI = op
      // MOV32_mr fi#1, EDI
      // ...
      //       = EXTRACT_SUBREG fi#1
      // fi#1 is available in EDI, but it cannot be reused because it's not in
      // the right register file.
      if (PhysReg && !AvoidReload &&
          (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        if (!RC->contains(PhysReg))
          PhysReg = 0;
      }
&& !AvoidReload
) {
1388 // This spilled operand might be part of a two-address operand. If this
1389 // is the case, then changing it will necessarily require changing the
1390 // def part of the instruction as well. However, in some cases, we
1391 // aren't allowed to modify the reused register. If none of these cases
1393 bool CanReuse
= true;
1394 bool isTied
= MI
.isRegTiedToDefOperand(i
);
1396 // Okay, we have a two address operand. We can reuse this physreg as
1397 // long as we are allowed to clobber the value and there isn't an
1398 // earlier def that has already clobbered the physreg.
1399 CanReuse
= !ReusedOperands
.isClobbered(PhysReg
) &&
1400 Spills
.canClobberPhysReg(PhysReg
);
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg "
               << TRI->getName(PhysReg) << " for vreg"
               << VirtReg <<" instead of reloading into physreg "
               << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);

          // The only technical detail we have is that we don't know that
          // PhysReg won't be clobbered by a reloaded stack slot that occurs
          // later in the instruction. In particular, consider 'op V1, V2'.
          // If V1 is available in physreg R0, we would choose to reuse it
          // here, instead of reloading it into the register the allocator
          // indicated (say R1). However, V2 might have to be reloaded
          // later, and it might indicate that it needs to live in R0. When
          // this occurs, we need to have information available that
          // indicates it is safe to use R1 for the reload instead of R0.
          //
          // To further complicate matters, we might conflict with an alias,
          // or R0 and R1 might not be compatible with each other. In this
          // case, we actually insert a reload for V1 in R1, ensuring that
          // we can get at R0 or its alias.
          ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                  VRM.getPhys(VirtReg), VirtReg);
          if (isTied)
            // Only mark it clobbered if this is a use&def operand.
            ReusedOperands.markClobbered(PhysReg);
          ++NumReused;

          if (MI.getOperand(i).isKill() &&
              ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {

            // The store of this spilled value is potentially dead, but we
            // won't know for certain until we've confirmed that the re-use
            // above is valid, which means waiting until the other operands
            // are processed. For now we just track the spill slot, we'll
            // remove it after the other operands are processed if valid.

            PotentialDeadStoreSlots.push_back(ReuseSlot);
          }

          // Mark it isKill if there are no other uses of the same virtual
          // register and it's not a two-address operand. IsKill will be
          // unset if reg is reused.
          if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
            MI.getOperand(i).setIsKill();
            KilledMIRegs.insert(VirtReg);
          }
          continue;
        }
        // Otherwise we have a situation where we have a two-address instruction
        // whose mod/ref operand needs to be reloaded. This reload is already
        // available in some register "PhysReg", but if we used PhysReg as the
        // operand to our 2-addr instruction, the instruction would modify
        // PhysReg. This isn't cool if something later uses PhysReg and expects
        // to get its initial value.
        //
        // To avoid this problem, and to avoid doing a load right after a store,
        // we emit a copy from PhysReg into the designated register for this
        // operand.
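        //
        // For illustration (hypothetical registers): if the reload for this
        // operand is available in R0 but the two-address def would overwrite
        // R0, we emit "R1 = copy R0" and rewrite the operand to use R1, the
        // designated register, leaving R0's value intact.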
        unsigned DesignatedReg = VRM.getPhys(VirtReg);
        assert(DesignatedReg && "Must map virtreg to physreg!");
        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available. If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
                            Spills, MaybeDeadStores, RegKills, KillOps, VRM);
        // If the mapped designated register is actually the physreg we have
        // incoming, we don't need to insert a dead copy.
        if (DesignatedReg == PhysReg) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg " << TRI->getName(PhysReg)
               << " for vreg" << VirtReg
               << " instead of reloading into same physreg.\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);
          ReusedOperands.markClobbered(RReg);
          ++NumReused;
          continue;
        }

        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        RegInfo->setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);
        TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);

        MachineInstr *CopyMI = prior(MII);
        UpdateKills(*CopyMI, RegKills, KillOps, TRI);

        // This invalidates DesignatedReg.
        Spills.ClobberPhysReg(DesignatedReg);

        Spills.addAvailable(ReuseSlot, DesignatedReg);
        unsigned RReg =
          SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);
        DOUT << '\t' << *prior(MII);
        ++NumReused;
        continue;
      }
      // Otherwise, reload it and remember that we have it.
      PhysReg = VRM.getPhys(VirtReg);
      assert(PhysReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available. If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                          Spills, MaybeDeadStores, RegKills, KillOps, VRM);

      RegInfo->setPhysRegUsed(PhysReg);
      ReusedOperands.markClobbered(PhysReg);
      if (AvoidReload)
        ++NumAvoided;
      else {
        if (DoReMat)
          ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
        else {
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(SSorRMId, LoadMI);
          ++NumLoads;
        }
        // This invalidates PhysReg.
        Spills.ClobberPhysReg(PhysReg);

        // Any stores to this stack slot are not dead anymore.
        if (!DoReMat)
          MaybeDeadStores[SSorRMId] = NULL;
        Spills.addAvailable(SSorRMId, PhysReg);
        // Assumes this is the last use. IsKill will be unset if reg is reused
        // unless it's a two-address operand.
        if (!MI.isRegTiedToDefOperand(i) &&
            KilledMIRegs.count(VirtReg) == 0) {
          MI.getOperand(i).setIsKill();
          KilledMIRegs.insert(VirtReg);
        }

        UpdateKills(*prior(MII), RegKills, KillOps, TRI);
        DOUT << '\t' << *prior(MII);
      }
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);
    }
    // Ok - now we can remove stores that have been confirmed dead.
    for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
      // This was the last use and the spilled value is still available
      // for reuse. That means the spill was unnecessary!
      int PDSSlot = PotentialDeadStoreSlots[j];
      MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
      if (DeadStore) {
        DOUT << "Removed dead store:\t" << *DeadStore;
        InvalidateKills(*DeadStore, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(DeadStore);
        MBB.erase(DeadStore);
        MaybeDeadStores[PDSSlot] = NULL;
        ++NumDSE;
      }
    }
    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.
    SmallSet<int, 2> FoldedSS;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DOUT << "Folded vreg: " << VirtReg << "  MR: " << MR;

      // MI2VirtMap can be updated, which invalidates the iterator.
      // Increment the iterator first.
      ++I;
      int SS = VRM.getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";
      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DOUT << "Promoted Load To Copy: " << MI;
            if (DestReg != InReg) {
              const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
              TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
              MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
              unsigned SubIdx = DefMO->getSubReg();
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = &MI;
              --NextMII;  // backtrack to the copy.
              // Propagate the sub-register index over.
              if (SubIdx) {
                DefMO = NextMII->findRegisterDefOperand(DestReg);
                DefMO->setSubReg(SubIdx);
              }

              // Mark it killed.
              MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
              KillOpnd->setIsKill();

              BackTracked = true;
            } else {
              DOUT << "Removing now-noop copy: " << MI;
              // Unset last kill since it's being reused.
              InvalidateKill(InReg, RegKills, KillOps);
              Spills.disallowClobberPhysReg(InReg);
            }

            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        }
        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
        SmallVector<MachineInstr*, 4> NewMIs;
        if (PhysReg &&
            TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
          MBB.insert(MII, NewMIs[0]);
          InvalidateKills(MI, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          Erased = true;
          --NextMII;  // backtrack to the unfolded instruction.
          BackTracked = true;
          goto ProcessNextInst;
        }
      }
      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      bool isDead = !(MR & VirtRegMap::isRef);
      MachineInstr *NewStore = NULL;
      if (MR & VirtRegMap::isModRef) {
        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
        SmallVector<MachineInstr*, 4> NewMIs;
        // We can reuse this physreg as long as we are allowed to clobber
        // the value and there isn't an earlier def that has already clobbered
        // the physreg.
        if (PhysReg &&
            !ReusedOperands.isClobbered(PhysReg) &&
            Spills.canClobberPhysReg(PhysReg) &&
            !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
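          // The last check rejects the case where MI itself is nothing but a
          // store of PhysReg to this slot; unfolding such an instruction
          // would only recreate the same store.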
          MachineOperand *KillOpnd =
            DeadStore->findRegisterUseOperand(PhysReg, true);
          // Note, if the store is storing a sub-register, it's possible the
          // super-register is needed below.
          if (KillOpnd && !KillOpnd->getSubReg() &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            NewStore = NewMIs[1];
            MBB.insert(MII, NewStore);
            VRM.addSpillSlotUse(SS, NewStore);
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            isDead = true;
            ++NumSUnfold;
          }
        }
      }

      if (isDead) {  // Previous store is dead.
        // If we get here, the store is dead, nuke it now.
        DOUT << "Removed dead store:\t" << *DeadStore;
        InvalidateKills(*DeadStore, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(DeadStore);
        MBB.erase(DeadStore);
        ++NumDSE;
      }

      MaybeDeadStores[SS] = NULL;
      if (NewStore) {
        // Treat this store as a spill merged into a copy. That makes the
        // stack slot value available.
        VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
        goto ProcessNextInst;
      }
      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");
            if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
                                    Spills, RegKills, KillOps, TRI, VRM)) {
              NextMII = next(MII);
              BackTracked = true;
              goto ProcessNextInst;
            }

            // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now. Otherwise, make the register
            // available in PhysReg.
            Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
          }
        }
      }
    }
    // Process all of the spilled defs.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isReg() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        unsigned Src, Dst, SrcSR, DstSR;
        if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
          ++NumDCE;
          DOUT << "Removing now-noop copy: " << MI;
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            // Source register or an implicit super/sub-register use is killed.
            assert(KillRegs[0] == Dst ||
                   TRI->isSubRegister(KillRegs[0], Dst) ||
                   TRI->isSuperRegister(KillRegs[0], Dst));
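            // Deleting this copy removes the last use of Src, so the kill
            // or dead marker must migrate to the previous reference of Src.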
            // Last def is now dead.
            TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
          }
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }
        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register. If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }
      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM.getStackSlot(VirtReg);
      const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
      unsigned TiedOp;
      if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM.getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);
        }
      }

      assert(PhysReg && "VR not assigned a physical register?");
      RegInfo->setPhysRegUsed(PhysReg);
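      // Rewrite the def to the concrete register: if the operand carried a
      // sub-register index, resolve it now (e.g., illustratively, the low
      // 8-bit sub-register of a 32-bit register) and clear the index, since
      // RReg already names the exact register.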
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);
      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                            LastStore, Spills, ReMatDefs, RegKills, KillOps,
                            VRM);
        NextMII = next(MII);

        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        {
          unsigned Src, Dst, SrcSR, DstSR;
          if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
            ++NumDCE;
            DOUT << "Removing now-noop copy: " << MI;
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            UpdateKills(*LastStore, RegKills, KillOps, TRI);
            goto ProcessNextInst;
          }
        }
      }
    }
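    // Kill/dead bookkeeping is refreshed incrementally: when MI was erased
    // or we backtracked, the affected instructions were already invalidated
    // or will be rescanned, so the sweep below is skipped in those cases.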
  ProcessNextInst:
    DistanceMap.insert(std::make_pair(&MI, Dist++));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
        UpdateKills(*II, RegKills, KillOps, TRI);
    }
    MII = NextMII;
  }
}
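
/// createSpiller - Create an instance of the spiller selected by the
/// command line option (the local spiller by default).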
llvm::Spiller* llvm::createSpiller() {
  switch (SpillerOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalSpiller();
  case simple:
    return new SimpleSpiller();
  }
}