//===-- llvm/CodeGen/Rewriter.cpp - Rewriter -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "virtregrewriter"
#include "VirtRegRewriter.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumOmitted , "Number of reloads omitted");
STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
STATISTIC(NumCopified, "Number of available reloads turned into copies");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumSUnfold , "Number of stores unfolded");
STATISTIC(NumModRefUnfold, "Number of modref unfolded");
enum RewriterName { simple, local };

static cl::opt<RewriterName>
RewriterOpt("rewriter",
            cl::desc("Rewriter to use: (default: local)"),
            cl::values(clEnumVal(simple, "simple rewriter"),
                       clEnumVal(local,  "local rewriter"),
                       clEnumValEnd),
            cl::init(local));
VirtRegRewriter::~VirtRegRewriter() {}
// ****************************** //
// Simple Spiller Implementation  //
// ****************************** //
struct VISIBILITY_HIDDEN SimpleRewriter : public VirtRegRewriter {

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    DOUT << "********** REWRITE MACHINE CODE **********\n";
    DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
    const TargetMachine &TM = MF.getTarget();
    const TargetInstrInfo &TII = *TM.getInstrInfo();
    const TargetRegisterInfo &TRI = *TM.getRegisterInfo();

    // LoadedRegs - Keep track of which vregs are loaded, so that we only load
    // each vreg once (in the case where a spilled vreg is used by multiple
    // operands).  This is always smaller than the number of operands to the
    // current machine instr, so it should be small.
    std::vector<unsigned> LoadedRegs;

    for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
         MBBI != E; ++MBBI) {
      DOUT << MBBI->getBasicBlock()->getName() << ":\n";
      MachineBasicBlock &MBB = *MBBI;
      for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
           MII != E; ++MII) {
        MachineInstr &MI = *MII;
        for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
          MachineOperand &MO = MI.getOperand(i);
          if (MO.isReg() && MO.getReg()) {
            if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
              unsigned VirtReg = MO.getReg();
              unsigned SubIdx = MO.getSubReg();
              unsigned PhysReg = VRM.getPhys(VirtReg);
              unsigned RReg = SubIdx ? TRI.getSubReg(PhysReg, SubIdx) : PhysReg;
              if (!VRM.isAssignedReg(VirtReg)) {
                int StackSlot = VRM.getStackSlot(VirtReg);
                const TargetRegisterClass* RC =
                  MF.getRegInfo().getRegClass(VirtReg);

                if (MO.isUse() &&
                    std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                      == LoadedRegs.end()) {
                  TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
                  MachineInstr *LoadMI = prior(MII);
                  VRM.addSpillSlotUse(StackSlot, LoadMI);
                  LoadedRegs.push_back(VirtReg);
                  ++NumLoads;
                  DOUT << '\t' << *LoadMI;
                }

                if (MO.isDef()) {
                  TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
                                          StackSlot, RC);
                  MachineInstr *StoreMI = next(MII);
                  VRM.addSpillSlotUse(StackSlot, StoreMI);
                  ++NumStores;
                }
              }
              MF.getRegInfo().setPhysRegUsed(RReg);
              MI.getOperand(i).setReg(RReg);
              MI.getOperand(i).setSubReg(0);
            } else {
              MF.getRegInfo().setPhysRegUsed(MO.getReg());
            }
          }
        }

        DOUT << '\t' << MI;
        LoadedRegs.clear();
      }
    }
    return true;
  }

};
// ************************************************************************ //

/// AvailableSpills - As the local rewriter is scanning and rewriting an MBB
/// from top down, keep track of which spill slots or remat are available in
/// each register.
///
/// Note that not all physregs are created equal here.  In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using
/// that we cannot CHANGE, but we can read if we like.  We keep track of this
/// on a per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries.  The predicate 'canClobberPhysReg()' checks
/// this bit, and addAvailable sets it when CanClobber is true.
class VISIBILITY_HIDDEN AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to
  // being loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg.  This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  /// clear - Reset the state.
  void clear() {
    SpillSlotsOrReMatsAvailable.clear();
    PhysRegsAvailable.clear();
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }
  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }
  /// addAvailable - Mark that the specified stack slot / remat is available
  /// in the specified physreg.  If CanClobber is true, the physreg can be
  /// modified at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) |
                                               (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
    else
      DOUT << "Remembering SS#" << SlotOrReMat;
    DOUT << " in physreg " << TRI->getName(Reg) << "\n";
  }
  /// canClobberPhysRegForSS - Return true if the spiller is allowed to change
  /// the value of the specified stackslot register if it desires.  The
  /// specified stack slot must be available in a physreg for this query to
  /// make sense.
  bool canClobberPhysRegForSS(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }
  /// canClobberPhysReg - Return true if the spiller is allowed to clobber the
  /// physical register where values for some stack slot(s) might be
  /// available.
  bool canClobberPhysReg(unsigned PhysReg) const {
    std::multimap<unsigned, int>::const_iterator I =
      PhysRegsAvailable.lower_bound(PhysReg);
    while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
      int SlotOrReMat = I->second;
      ++I;
      if (!canClobberPhysRegForSS(SlotOrReMat))
        return false;
    }
    return true;
  }
  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register. The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value.  We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes.  This removes information about which register the
  /// previous value for this slot lives in (as the previous value is dead
  /// now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);

  /// AddAvailableRegsToLiveIn - Availability information is being kept coming
  /// into the specified MBB. Add available physical registers as potential
  /// live-in's. If they are reused in the MBB, they will be added to the
  /// live-in set to keep the register scavenger and post-allocation
  /// scheduler happy.
  void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
                                std::vector<MachineOperand*> &KillOps);
};
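// A minimal usage sketch (variable names are illustrative; the same call
// sequence appears in the local rewriter below). After emitting a reload:
//   TII->loadRegFromStackSlot(MBB, MII, PhysReg, Slot, RC);
//   Spills.ClobberPhysReg(PhysReg);     // old contents of PhysReg are gone
//   Spills.addAvailable(Slot, PhysReg); // later uses of Slot may reuse it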
// ************************************************************************ //

// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to rollback upon processing a new operand.  See comments
// below.
struct ReusedOp {
  // The MachineInstr operand that reused an available value.
  unsigned Operand;

  // StackSlotOrReMat - The spill slot or remat id of the value being reused.
  unsigned StackSlotOrReMat;

  // PhysRegReused - The physical register the value was available in.
  unsigned PhysRegReused;

  // AssignedPhysReg - The physreg that was assigned for use by the reload.
  unsigned AssignedPhysReg;

  // VirtReg - The virtual register itself.
  unsigned VirtReg;

  ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
           unsigned vreg)
    : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
      AssignedPhysReg(apr), VirtReg(vreg) {}
};
/// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
/// is reused instead of reloaded.
class VISIBILITY_HIDDEN ReuseInfo {
  MachineInstr &MI;
  std::vector<ReusedOp> Reuses;
  BitVector PhysRegsClobbered;
public:
  ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
    PhysRegsClobbered.resize(tri->getNumRegs());
  }

  bool hasReuses() const {
    return !Reuses.empty();
  }

  /// addReuse - If we choose to reuse a virtual register that is already
  /// available instead of reloading it, remember that we did so.
  void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                unsigned PhysRegReused, unsigned AssignedPhysReg,
                unsigned VirtReg) {
    // If the reload is to the assigned register anyway, no undo will be
    // required.
    if (PhysRegReused == AssignedPhysReg) return;

    // Otherwise, remember this.
    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                              AssignedPhysReg, VirtReg));
  }

  void markClobbered(unsigned PhysReg) {
    PhysRegsClobbered.set(PhysReg);
  }

  bool isClobbered(unsigned PhysReg) const {
    return PhysRegsClobbered.test(PhysReg);
  }

  /// GetRegForReload - We are about to emit a reload into PhysReg.  If there
  /// is some other operand that is using the specified register, either pick
  /// a new register to use, or evict the previous reload and use this reg.
  unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           SmallSet<unsigned, 8> &Rejected,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);

  /// GetRegForReload - Helper for the above GetRegForReload(). Add a
  /// 'Rejected' set to remember which registers have been considered and
  /// rejected for the reload. This avoids infinite looping in cases like
  /// this:
  /// t1 := op t2, t3
  /// t2 <- assigned r0 for use by the reload but ended up reuse r1
  /// t3 <- assigned r1 for use by the reload but ended up reuse r0
  /// t1 <- desires r1
  ///       sees r1 is taken by t2, tries t2's reload register r0
  ///       sees r0 is taken by t3, tries t3's reload register r1
  ///       sees r1 is taken by t2, tries t2's reload register r0 ...
  unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {
    SmallSet<unsigned, 8> Rejected;
    return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected,
                           RegKills, KillOps, VRM);
  }
};
// ****************** //
// Utility Functions  //
// ****************** //

/// findSinglePredSuccessor - Return via reference a vector of machine basic
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
                                   SmallVectorImpl<MachineBasicBlock *> &Succs) {
  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->pred_size() == 1)
      Succs.push_back(SuccMBB);
  }
}
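// For example (illustrative): in a diamond CFG A -> {B, C}, {B, C} -> D,
// findSinglePredSuccessor(A, Succs) yields {B, C}, since each has A as its
// only predecessor; D is skipped because it has two predecessors.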
/// InvalidateKill - Invalidate register kill information for a specific
/// register. This also unsets the kills marker on the last kill operand.
static void InvalidateKill(unsigned Reg,
                           const TargetRegisterInfo* TRI,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    KillOps[Reg] = NULL;
    RegKills.reset(Reg);
    for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
      if (RegKills[*SR]) {
        KillOps[*SR]->setIsKill(false);
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }
    }
  }
}
/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI,
                            const TargetRegisterInfo* TRI,
                            BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      RegKills.reset(Reg);
      KillOps[Reg] = NULL;
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        RegKills.reset(*SR);
        KillOps[*SR] = NULL;
      }
    }
  }
}
/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also checks if
/// the def MI has other definition operands that are not dead. Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (MO.isReg() && MO.isDef()) {
      if (MO.getReg() == Reg)
        DefOp = &MO;
      else if (!MO.isDead())
        HasLiveDef = true;
    }
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}
/// UpdateKills - Track and update kill info. If a MI reads a register that is
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      if (!MI.isRegTiedToDefOperand(i))
        // Unless it's a two-address operand, this is the new kill.
        MO.setIsKill();
    }
    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        RegKills.set(*SR);
        KillOps[*SR] = &MO;
      }
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
    // It also defines (or partially defines) aliases.
    for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
  }
}
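// Illustrative example (hypothetical instructions): suppose %EAX was marked
// <kill> at its last use and a later instruction reads %EAX again because of
// reuse, with no intervening def. UpdateKills clears the stale kill marker
// and, unless the operand is tied to a def, moves it to the new last use.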
/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
///
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg));
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    assert(MO.isUse());
    unsigned SubIdx = MO.getSubReg();
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys);
    unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
    MO.setReg(RReg);
    MO.setSubReg(0);
  }
  ++NumReMats;
}
/// findSuperReg - Find the SubReg's super-register of given register class
/// where its SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}
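// Illustrative example (x86; the concrete enum names are assumed): calling
// findSuperReg over a 32-bit register class with SubReg == AX and the 16-bit
// sub-register index returns EAX, because TRI->getSubReg(EAX, SubIdx) == AX.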
// ******************************** //
// Available Spills Implementation  //
// ******************************** //

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    ++I;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
  }
}
/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}
/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
    else
      DOUT << "SS#" << SlotOrReMat << "\n";
  }
}
/// ClobberPhysReg - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}
/// AddAvailableRegsToLiveIn - Availability information is being kept coming
/// into the specified MBB. Add available physical registers as potential
/// live-in's. If they are reused in the MBB, they will be added to the
/// live-in set to keep the register scavenger and post-allocation scheduler
/// happy.
void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
                                               BitVector &RegKills,
                                               std::vector<MachineOperand*> &KillOps) {
  std::set<unsigned> NotAvailable;
  for (std::multimap<unsigned, int>::iterator
         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
       I != E; ++I) {
    unsigned Reg = I->first;
    const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
    // FIXME: A temporary workaround. We can't reuse an available value if it's
    // not safe to move the def of the virtual register's class. e.g.
    // X86::RFP* register classes. Do not add it as a live-in.
    if (!TII->isSafeToMoveRegClassDefs(RC))
      // This is no longer available.
      NotAvailable.insert(Reg);
    else {
      MBB.addLiveIn(Reg);
      InvalidateKill(Reg, TRI, RegKills, KillOps);
    }

    // Skip over the same register.
    std::multimap<unsigned, int>::iterator NI = next(I);
    while (NI != E && NI->first == Reg) {
      ++I;
      ++NI;
    }
  }

  for (std::set<unsigned>::iterator I = NotAvailable.begin(),
         E = NotAvailable.end(); I != E; ++I) {
    ClobberPhysReg(*I);
    for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
         *SubRegs; ++SubRegs)
      ClobberPhysReg(*SubRegs);
  }
}
/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes.  This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}
// ************************** //
// Reuse Info Implementation  //
// ************************** //

/// GetRegForReload - We are about to emit a reload into PhysReg.  If there
/// is some other operand that is using the specified register, either pick
/// a new register to use, or evict the previous reload and use this reg.
unsigned ReuseInfo::GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                                    AvailableSpills &Spills,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                    SmallSet<unsigned, 8> &Rejected,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    VirtRegMap &VRM) {
  const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
                               .getInstrInfo();

  if (Reuses.empty()) return PhysReg;  // This is most often empty.

  for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
    ReusedOp &Op = Reuses[ro];
    // If we find some other reuse that was supposed to use this register
    // exactly for its reload, we can change this reload to use ITS reload
    // register. That is, unless its reload register has already been
    // considered and subsequently rejected because it has also been reused
    // by another operand.
    if (Op.PhysRegReused == PhysReg &&
        Rejected.count(Op.AssignedPhysReg) == 0) {
      // Yup, use the reload register that we didn't use before.
      unsigned NewReg = Op.AssignedPhysReg;
      Rejected.insert(PhysReg);
      return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected,
                             RegKills, KillOps, VRM);
    } else {
      // Otherwise, we might also have a problem if a previously reused
      // value aliases the new register. If so, codegen the previous reload
      // and use this one.
      unsigned PRRU = Op.PhysRegReused;
      const TargetRegisterInfo *TRI = Spills.getRegInfo();
      if (TRI->areAliases(PRRU, PhysReg)) {
        // Okay, we found out that an alias of a reused register
        // was used.  This isn't good because it means we have
        // to undo a previous reuse.
        MachineBasicBlock *MBB = MI->getParent();
        const TargetRegisterClass *AliasRC =
          MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

        // Copy Op out of the vector and remove it, we're going to insert an
        // explicit load for it.
        ReusedOp NewOp = Op;
        Reuses.erase(Reuses.begin()+ro);

        // Ok, we're going to try to reload the assigned physreg into the
        // slot that we were supposed to in the first place.  However, that
        // register could hold a reuse.  Check to see if it conflicts or
        // would prefer us to use a different register.
        unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg,
                                              MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

        MachineBasicBlock::iterator MII = MI;
        if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
          ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI, VRM);
        } else {
          TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
                                    NewOp.StackSlotOrReMat, AliasRC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
          // Any stores to this stack slot are not dead anymore.
          MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
          ++NumLoads;
        }
        Spills.ClobberPhysReg(NewPhysReg);
        Spills.ClobberPhysReg(NewOp.PhysRegReused);

        unsigned SubIdx = MI->getOperand(NewOp.Operand).getSubReg();
        unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
        MI->getOperand(NewOp.Operand).setReg(RReg);
        MI->getOperand(NewOp.Operand).setSubReg(0);

        Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
        --MII;
        UpdateKills(*MII, TRI, RegKills, KillOps);
        DOUT << '\t' << *MII;

        DOUT << "Reuse undone!\n";
        --NumReused;

        // Finally, PhysReg is now available, go ahead and use it.
        return PhysReg;
      }
    }
  }
  return PhysReg;
}

// ************************************************************************ //

/// FoldsStackSlotModRef - Return true if the specified MI folds the specified
/// stack slot mod/ref. It also checks if it's possible to unfold the
/// instruction by having it define a specified physical register instead.
static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
                                 const TargetInstrInfo *TII,
                                 const TargetRegisterInfo *TRI,
                                 VirtRegMap &VRM) {
  if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
    return false;

  bool Found = false;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
    unsigned VirtReg = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    if (MR & VirtRegMap::isModRef)
      if (VRM.getStackSlot(VirtReg) == SS) {
        Found = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
        break;
      }
  }
  if (!Found)
    return false;

  // Does the instruction use a register that overlaps the scratch register?
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      if (!VRM.hasPhys(Reg))
        continue;
      Reg = VRM.getPhys(Reg);
    }
    if (TRI->regsOverlap(PhysReg, Reg))
      return false;
  }
  return true;
}
/// FindFreeRegister - Find a free register of a given register class by looking
/// at (at most) the last two machine instructions.
static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
                                 MachineBasicBlock &MBB,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI,
                                 BitVector &AllocatableRegs) {
  BitVector Defs(TRI->getNumRegs());
  BitVector Uses(TRI->getNumRegs());
  SmallVector<unsigned, 4> LocalUses;
  SmallVector<unsigned, 4> Kills;

  // Take a look at 2 instructions at most.
  for (unsigned Count = 0; Count < 2; ++Count) {
    if (MII == MBB.begin())
      break;
    MachineInstr *PrevMI = prior(MII);
    for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = PrevMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;
      unsigned Reg = MO.getReg();
      if (MO.isDef()) {
        Defs.set(Reg);
        for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
          Defs.set(*AS);
      } else {
        LocalUses.push_back(Reg);
        if (MO.isKill() && AllocatableRegs[Reg])
          Kills.push_back(Reg);
      }
    }

    for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
      unsigned Kill = Kills[i];
      if (!Defs[Kill] && !Uses[Kill] &&
          TRI->getPhysicalRegisterRegClass(Kill) == RC)
        return Kill;
    }
    for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
      unsigned Reg = LocalUses[i];
      Uses.set(Reg);
      for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
        Uses.set(*AS);
    }

    MII = PrevMI;
  }

  return 0;
}
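// Illustrative example (hypothetical): if the previous instruction contains
// the operand "%R12<kill>" and neither of the two scanned instructions
// otherwise defines or uses %R12, FindFreeRegister returns %R12, provided it
// is allocatable and belongs to the requested register class.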
static
void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == VirtReg)
      MO.setReg(PhysReg);
  }
}
struct RefSorter {
  bool operator()(const std::pair<MachineInstr*, int> &A,
                  const std::pair<MachineInstr*, int> &B) {
    return A.second < B.second;
  }
};
// ***************************** //
// Local Spiller Implementation  //
// ***************************** //

class VISIBILITY_HIDDEN LocalRewriter : public VirtRegRewriter {
  MachineRegisterInfo *RegInfo;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  BitVector AllocatableRegs;
  DenseMap<MachineInstr*, unsigned> DistanceMap;
public:

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    RegInfo = &MF.getRegInfo();
    TRI = MF.getTarget().getRegisterInfo();
    TII = MF.getTarget().getInstrInfo();
    AllocatableRegs = TRI->getAllocatableSet(MF);
    DOUT << "\n**** Local spiller rewriting function '"
         << MF.getFunction()->getName() << "':\n";
    DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
            " ****\n";
    DEBUG(MF.dump());

    // Spills - Keep track of which spilled values are available in physregs
    // so that we can choose to reuse the physregs instead of emitting
    // reloads. This is usually refreshed per basic block.
    AvailableSpills Spills(TRI, TII);

    // Keep track of kill information.
    BitVector RegKills(TRI->getNumRegs());
    std::vector<MachineOperand*> KillOps;
    KillOps.resize(TRI->getNumRegs(), NULL);

    // SingleEntrySuccs - Successor blocks which have a single predecessor.
    SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
    SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;

    // Traverse the basic blocks depth first.
    MachineBasicBlock *Entry = MF.begin();
    SmallPtrSet<MachineBasicBlock*,16> Visited;
    for (df_ext_iterator<MachineBasicBlock*,
           SmallPtrSet<MachineBasicBlock*,16> >
           DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
         DFI != E; ++DFI) {
      MachineBasicBlock *MBB = *DFI;
      if (!EarlyVisited.count(MBB))
        RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);

      // If this MBB is the only predecessor of a successor, keep the
      // availability information and visit it next.
      do {
        // Keep visiting single predecessor successor as long as possible.
        SinglePredSuccs.clear();
        findSinglePredSuccessor(MBB, SinglePredSuccs);
        if (SinglePredSuccs.empty())
          MBB = 0;
        else {
          // FIXME: There may be more than one such successor, each of which
          // has MBB as its only predecessor.
          MBB = SinglePredSuccs[0];
          if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
            Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
            RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);
          }
        }
      } while (MBB);

      // Clear the availability info.
      Spills.clear();
    }

    DOUT << "**** Post Machine Instrs ****\n";
    DEBUG(MF.dump());

    // Mark unused spill slots.
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int SS = VRM.getLowSpillSlot();
    if (SS != VirtRegMap::NO_STACK_SLOT)
      for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
        if (!VRM.isSpillSlotUsed(SS)) {
          MFI->RemoveStackObject(SS);
          ++NumDSS;
        }

    return true;
  }
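  // Note on the traversal above (illustrative, hypothetical CFG): if BB1's
  // successor BB2 has BB1 as its single predecessor, Spills is not cleared
  // between them, so a value reloaded into a physreg in BB1 can satisfy a
  // reuse in BB2; AddAvailableRegsToLiveIn records such registers as BB2
  // live-ins for the scavenger and post-allocation scheduler.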
  /// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
  /// a scratch register is available.
  ///     xorq  %r12<kill>, %r13
  ///     addq  %rax, -184(%rbp)
  ///     addq  %r13, -184(%rbp)
  /// ==>
  ///     xorq  %r12<kill>, %r13
  ///     movq  -184(%rbp), %r12
  ///     addq  %rax, %r12
  ///     addq  %r13, %r12
  ///     movq  %r12, -184(%rbp)
  bool OptimizeByUnfold2(unsigned VirtReg, int SS,
                         MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator &MII,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         AvailableSpills &Spills,
                         BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps,
                         VirtRegMap &VRM) {
    MachineBasicBlock::iterator NextMII = next(MII);
    if (NextMII == MBB.end())
      return false;

    if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
      return false;

    // Now let's see if the last couple of instructions happens to have freed up
    // a register.
    const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
    unsigned PhysReg = FindFreeRegister(MII, MBB, RC, TRI, AllocatableRegs);
    if (!PhysReg)
      return false;

    MachineFunction &MF = *MBB.getParent();
    TRI = MF.getTarget().getRegisterInfo();
    MachineInstr &MI = *MII;
    if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, VRM))
      return false;

    // If the next instruction also folds the same SS modref and can be unfolded,
    // then it's worthwhile to issue a load from SS into the free register and
    // then unfold these instructions.
    if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM))
      return false;

    // Load from SS to the spare physical register.
    TII->loadRegFromStackSlot(MBB, MII, PhysReg, SS, RC);
    // This invalidates Phys.
    Spills.ClobberPhysReg(PhysReg);
    // Remember it's available.
    Spills.addAvailable(SS, PhysReg);
    MaybeDeadStores[SS] = NULL;

    // Unfold current MI.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
      assert(0 && "Unable to unfold the load / store folding instruction!");
    assert(NewMIs.size() == 1);
    AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg);
    VRM.transferRestorePts(&MI, NewMIs[0]);
    MII = MBB.insert(MII, NewMIs[0]);
    InvalidateKills(MI, TRI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(&MI);
    MBB.erase(&MI);
    ++NumModRefUnfold;

    // Unfold next instructions that fold the same SS.
    do {
      MachineInstr &NextMI = *NextMII;
      NextMII = next(NextMII);
      NewMIs.clear();
      if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
        assert(0 && "Unable to unfold the load / store folding instruction!");
      assert(NewMIs.size() == 1);
      AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg);
      VRM.transferRestorePts(&NextMI, NewMIs[0]);
      MBB.insert(NextMII, NewMIs[0]);
      InvalidateKills(NextMI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(&NextMI);
      MBB.erase(&NextMI);
      ++NumModRefUnfold;
    } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM));

    // Store the value back into SS.
    TII->storeRegToStackSlot(MBB, NextMII, PhysReg, true, SS, RC);
    MachineInstr *StoreMI = prior(NextMII);
    VRM.addSpillSlotUse(SS, StoreMI);
    VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);

    return true;
  }
  /// OptimizeByUnfold - Turn a store folding instruction into a load folding
  /// instruction. e.g.
  ///     movl %eax, -32(%ebp)
  ///     movl -36(%ebp), %eax
  ///     orl  %eax, -32(%ebp)
  /// ==>
  ///     orl  -36(%ebp), %eax
  ///     mov  %eax, -32(%ebp)
  /// This enables unfolding optimization for a subsequent instruction which will
  /// also eliminate the newly introduced store instruction.
  bool OptimizeByUnfold(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MII,
                        std::vector<MachineInstr*> &MaybeDeadStores,
                        AvailableSpills &Spills,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        VirtRegMap &VRM) {
    MachineFunction &MF = *MBB.getParent();
    MachineInstr &MI = *MII;
    unsigned UnfoldedOpc = 0;
    unsigned UnfoldPR = 0;
    unsigned UnfoldVR = 0;
    int FoldedSS = VirtRegMap::NO_STACK_SLOT;
    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      // Only transform a MI that folds a single register.
      if (UnfoldedOpc)
        return false;
      UnfoldVR = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      // MI2VirtMap can be updated, which invalidates the iterator.
      // Increment the iterator first.
      ++I;
      if (VRM.isAssignedReg(UnfoldVR))
        continue;
      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      FoldedSS = VRM.getStackSlot(UnfoldVR);
      MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
      if (DeadStore && (MR & VirtRegMap::isModRef)) {
        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
        if (!PhysReg || !DeadStore->readsRegister(PhysReg))
          continue;
        UnfoldPR = PhysReg;
        UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                      false, true);
      }
    }

    if (!UnfoldedOpc) {
      if (!UnfoldVR)
        return false;

      // Look for other unfolding opportunities.
      return OptimizeByUnfold2(UnfoldVR, FoldedSS, MBB, MII,
                               MaybeDeadStores, Spills, RegKills, KillOps, VRM);
    }

    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
        continue;
      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
        continue;
      if (VRM.isAssignedReg(VirtReg)) {
        unsigned PhysReg = VRM.getPhys(VirtReg);
        if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
          return false;
      } else if (VRM.isReMaterialized(VirtReg))
        continue;
      int SS = VRM.getStackSlot(VirtReg);
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
      if (PhysReg) {
        if (TRI->regsOverlap(PhysReg, UnfoldPR))
          return false;
        continue;
      }
      if (VRM.hasPhys(VirtReg)) {
        PhysReg = VRM.getPhys(VirtReg);
        if (!TRI->regsOverlap(PhysReg, UnfoldPR))
          continue;
      }

      // Ok, we'll need to reload the value into a register which makes
      // it impossible to perform the store unfolding optimization later.
      // Let's see if it is possible to fold the load if the store is
      // unfolded. This allows us to perform the store unfolding
      // optimization.
      SmallVector<MachineInstr*, 4> NewMIs;
      if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
        assert(NewMIs.size() == 1);
        MachineInstr *NewMI = NewMIs.back();
        NewMIs.clear();
        int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
        assert(Idx != -1);
        SmallVector<unsigned, 1> Ops;
        Ops.push_back(Idx);
        MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
        if (FoldedMI) {
          VRM.addSpillSlotUse(SS, FoldedMI);
          if (!VRM.hasPhys(UnfoldVR))
            VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
          VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
          MII = MBB.insert(MII, FoldedMI);
          InvalidateKills(MI, TRI, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          MF.DeleteMachineInstr(NewMI);
          return true;
        }
        MF.DeleteMachineInstr(NewMI);
      }
    }

    return false;
  }
  /// CommuteToFoldReload -
  /// Look for
  /// r1 = load fi#1
  /// r1 = op r1, r2<kill>
  /// store r1, fi#1
  ///
  /// If op is commutable and r2 is killed, then we can xform these to
  /// r2 = op r2, fi#1
  /// store r2, fi#1
  bool CommuteToFoldReload(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           unsigned VirtReg, unsigned SrcReg, int SS,
                           AvailableSpills &Spills,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           const TargetRegisterInfo *TRI,
                           VirtRegMap &VRM) {
    if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
      return false;

    MachineFunction &MF = *MBB.getParent();
    MachineInstr &MI = *MII;
    MachineBasicBlock::iterator DefMII = prior(MII);
    MachineInstr *DefMI = DefMII;
    const TargetInstrDesc &TID = DefMI->getDesc();
    unsigned NewDstIdx;
    if (DefMII != MBB.begin() &&
        TID.isCommutable() &&
        TII->CommuteChangesDestination(DefMI, NewDstIdx)) {
      MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
      unsigned NewReg = NewDstMO.getReg();
      if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
        return false;
      MachineInstr *ReloadMI = prior(DefMII);
      int FrameIdx;
      unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
      if (DestReg != SrcReg || FrameIdx != SS)
        return false;
      int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
      if (UseIdx == -1)
        return false;
      unsigned DefIdx;
      if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
        return false;
      assert(DefMI->getOperand(DefIdx).isReg() &&
             DefMI->getOperand(DefIdx).getReg() == SrcReg);

      // Now commute def instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
      if (!CommutedMI)
        return false;
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(NewDstIdx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
      // Not needed since foldMemoryOperand returns new MI.
      MF.DeleteMachineInstr(CommutedMI);
      if (!FoldedMI)
        return false;

      VRM.addSpillSlotUse(SS, FoldedMI);
      VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
      // Insert new def MI and spill MI.
      const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
      TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
      MII = prior(MII);
      MachineInstr *StoreMI = MII;
      VRM.addSpillSlotUse(SS, StoreMI);
      VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      MII = MBB.insert(MII, FoldedMI);  // Update MII to backtrack.

      // Delete all 3 old instructions.
      InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(ReloadMI);
      MBB.erase(ReloadMI);
      InvalidateKills(*DefMI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(DefMI);
      MBB.erase(DefMI);
      InvalidateKills(MI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(&MI);
      MBB.erase(&MI);

      // If NewReg was previously holding value of some SS, it's now clobbered.
      // This has to be done now because it's a physical register. When this
      // instruction is re-visited, it's ignored.
      Spills.ClobberPhysReg(NewReg);

      ++NumCommutes;
      return true;
    }

    return false;
  }
  /// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
  /// the last store to the same slot is now dead. If so, remove the last store.
  void SpillRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           int Idx, unsigned PhysReg, int StackSlot,
                           const TargetRegisterClass *RC,
                           bool isAvailable, MachineInstr *&LastStore,
                           AvailableSpills &Spills,
                           SmallSet<MachineInstr*, 4> &ReMatDefs,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {
    TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
    MachineInstr *StoreMI = next(MII);
    VRM.addSpillSlotUse(StackSlot, StoreMI);
    DOUT << "Store:\t" << *StoreMI;

    // If there is a dead store to this stack slot, nuke it now.
    if (LastStore) {
      DOUT << "Removed dead store:\t" << *LastStore;
      ++NumDSE;
      SmallVector<unsigned, 2> KillRegs;
      InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
      MachineBasicBlock::iterator PrevMII = LastStore;
      bool CheckDef = PrevMII != MBB.begin();
      if (CheckDef)
        --PrevMII;
      VRM.RemoveMachineInstrFromMaps(LastStore);
      MBB.erase(LastStore);
      if (CheckDef) {
        // Look at defs of killed registers on the store. Mark the defs
        // as dead since the store has been deleted and they aren't
        // being reused.
        for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
          bool HasOtherDef = false;
          if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
            MachineInstr *DeadDef = PrevMII;
            if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
              // FIXME: This assumes a remat def does not have side
              // effects.
              VRM.RemoveMachineInstrFromMaps(DeadDef);
              MBB.erase(DeadDef);
              ++NumDRM;
            }
          }
        }
      }
    }

    LastStore = next(MII);

    // If the stack slot value was previously available in some other
    // register, change it now.  Otherwise, make the register available,
    // in PhysReg.
    Spills.ModifyStackSlotOrReMat(StackSlot);
    Spills.ClobberPhysReg(PhysReg);
    Spills.addAvailable(StackSlot, PhysReg, isAvailable);
    ++NumStores;
  }
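  // Illustrative sketch of the dead-store case above (hypothetical sequence,
  // not target syntax):
  //   store %EAX -> fi#1     ; LastStore
  //   ...                    ; fi#1 never read, %EAX not redefined
  //   store %EAX -> fi#1     ; this new spill makes LastStore dead
  // LastStore is erased (NumDSE), and a remat def feeding only the dead
  // store may itself be deleted via InvalidateRegDef (NumDRM).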
  /// TransferDeadness - An identity copy definition is dead and it's being
  /// removed. Find the last def or use and mark it as dead / kill.
  void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                        unsigned Reg, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        VirtRegMap &VRM) {
    SmallPtrSet<MachineInstr*, 4> Seens;
    SmallVector<std::pair<MachineInstr*, int>,8> Refs;
    for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
           RE = RegInfo->reg_end(); RI != RE; ++RI) {
      MachineInstr *UDMI = &*RI;
      if (UDMI->getParent() != MBB)
        continue;
      DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
      if (DI == DistanceMap.end() || DI->second > CurDist)
        continue;
      if (Seens.insert(UDMI))
        Refs.push_back(std::make_pair(UDMI, DI->second));
    }

    if (Refs.empty())
      return;
    std::sort(Refs.begin(), Refs.end(), RefSorter());

    while (!Refs.empty()) {
      MachineInstr *LastUDMI = Refs.back().first;
      Refs.pop_back();

      MachineOperand *LastUD = NULL;
      for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = LastUDMI->getOperand(i);
        if (!MO.isReg() || MO.getReg() != Reg)
          continue;
        if (!LastUD || (LastUD->isUse() && MO.isDef()))
          LastUD = &MO;
        if (LastUDMI->isRegTiedToDefOperand(i))
          break;
      }
      if (LastUD->isDef()) {
        // If the instruction has no side effect, delete it and propagate
        // backward further. Otherwise, mark it dead and we are done.
        const TargetInstrDesc &TID = LastUDMI->getDesc();
        if (TID.mayStore() || TID.isCall() || TID.isTerminator() ||
            TID.hasUnmodeledSideEffects()) {
          LastUD->setIsDead();
          break;
        }
        VRM.RemoveMachineInstrFromMaps(LastUDMI);
        MBB->erase(LastUDMI);
      } else {
        LastUD->setIsKill();
        RegKills.set(Reg);
        KillOps[Reg] = LastUD;
        break;
      }
    }
  }
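  // Illustrative example (hypothetical): when a now-dead identity copy such
  // as "%EAX = mov %EAX" is removed, TransferDeadness walks back through
  // DistanceMap to the closest prior def or use of the register and marks it
  // dead (or kill), deleting side-effect-free defs along the way.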
1399 /// rewriteMBB - Keep track of which spills are available even after the
1400 /// register allocator is done with them. If possible, avid reloading vregs.
1401 void RewriteMBB(MachineBasicBlock
&MBB
, VirtRegMap
&VRM
,
1403 AvailableSpills
&Spills
, BitVector
&RegKills
,
1404 std::vector
<MachineOperand
*> &KillOps
) {
1406 DOUT
<< "\n**** Local spiller rewriting MBB '"
1407 << MBB
.getBasicBlock()->getName() << "':\n";
1409 MachineFunction
&MF
= *MBB
.getParent();
1411 // MaybeDeadStores - When we need to write a value back into a stack slot,
1412 // keep track of the inserted store. If the stack slot value is never read
1413 // (because the value was used from some available register, for example), and
1414 // subsequently stored to, the original store is dead. This map keeps track
1415 // of inserted stores that are not used. If we see a subsequent store to the
1416 // same stack slot, the original store is deleted.
1417 std::vector
<MachineInstr
*> MaybeDeadStores
;
1418 MaybeDeadStores
.resize(MF
.getFrameInfo()->getObjectIndexEnd(), NULL
);
1420 // ReMatDefs - These are rematerializable def MIs which are not deleted.
1421 SmallSet
<MachineInstr
*, 4> ReMatDefs
;
1424 SmallSet
<unsigned, 2> KilledMIRegs
;
1427 KillOps
.resize(TRI
->getNumRegs(), NULL
);
1430 DistanceMap
.clear();
1431 for (MachineBasicBlock::iterator MII
= MBB
.begin(), E
= MBB
.end();
1433 MachineBasicBlock::iterator NextMII
= next(MII
);
1435 VirtRegMap::MI2VirtMapTy::const_iterator I
, End
;
1436 bool Erased
= false;
1437 bool BackTracked
= false;
1438 if (OptimizeByUnfold(MBB
, MII
,
1439 MaybeDeadStores
, Spills
, RegKills
, KillOps
, VRM
))
1440 NextMII
= next(MII
);
1442 MachineInstr
&MI
= *MII
;
1444 if (VRM
.hasEmergencySpills(&MI
)) {
1445 // Spill physical register(s) in the rare case the allocator has run out
1446 // of registers to allocate.
1447 SmallSet
<int, 4> UsedSS
;
1448 std::vector
<unsigned> &EmSpills
= VRM
.getEmergencySpills(&MI
);
1449 for (unsigned i
= 0, e
= EmSpills
.size(); i
!= e
; ++i
) {
1450 unsigned PhysReg
= EmSpills
[i
];
1451 const TargetRegisterClass
*RC
=
1452 TRI
->getPhysicalRegisterRegClass(PhysReg
);
1453 assert(RC
&& "Unable to determine register class!");
1454 int SS
= VRM
.getEmergencySpillSlot(RC
);
1455 if (UsedSS
.count(SS
))
1456 assert(0 && "Need to spill more than one physical registers!");
1458 TII
->storeRegToStackSlot(MBB
, MII
, PhysReg
, true, SS
, RC
);
1459 MachineInstr
*StoreMI
= prior(MII
);
1460 VRM
.addSpillSlotUse(SS
, StoreMI
);
1461 TII
->loadRegFromStackSlot(MBB
, next(MII
), PhysReg
, SS
, RC
);
1462 MachineInstr
*LoadMI
= next(MII
);
1463 VRM
.addSpillSlotUse(SS
, LoadMI
);
1466 NextMII
= next(MII
);
1469 // Insert restores here if asked to.
1470 if (VRM
.isRestorePt(&MI
)) {
1471 std::vector
<unsigned> &RestoreRegs
= VRM
.getRestorePtRestores(&MI
);
1472 for (unsigned i
= 0, e
= RestoreRegs
.size(); i
!= e
; ++i
) {
1473 unsigned VirtReg
= RestoreRegs
[e
-i
-1]; // Reverse order.
1474 if (!VRM
.getPreSplitReg(VirtReg
))
1475 continue; // Split interval spilled again.
1476 unsigned Phys
= VRM
.getPhys(VirtReg
);
1477 RegInfo
->setPhysRegUsed(Phys
);
1479 // Check if the value being restored if available. If so, it must be
1480 // from a predecessor BB that fallthrough into this BB. We do not
1486 // ... # r1 not clobbered
1489 bool DoReMat
= VRM
.isReMaterialized(VirtReg
);
1490 int SSorRMId
= DoReMat
1491 ? VRM
.getReMatId(VirtReg
) : VRM
.getStackSlot(VirtReg
);
1492 const TargetRegisterClass
* RC
= RegInfo
->getRegClass(VirtReg
);
1493 unsigned InReg
= Spills
.getSpillSlotOrReMatPhysReg(SSorRMId
);
1494 if (InReg
== Phys
) {
1495 // If the value is already available in the expected register, save
1496 // a reload / remat.
1498 DOUT
<< "Reusing RM#" << SSorRMId
-VirtRegMap::MAX_STACK_SLOT
-1;
1500 DOUT
<< "Reusing SS#" << SSorRMId
;
1501 DOUT
<< " from physreg "
1502 << TRI
->getName(InReg
) << " for vreg"
1503 << VirtReg
<<" instead of reloading into physreg "
1504 << TRI
->getName(Phys
) << "\n";
1507 } else if (InReg
&& InReg
!= Phys
) {
1509 DOUT
<< "Reusing RM#" << SSorRMId
-VirtRegMap::MAX_STACK_SLOT
-1;
1511 DOUT
<< "Reusing SS#" << SSorRMId
;
1512 DOUT
<< " from physreg "
1513 << TRI
->getName(InReg
) << " for vreg"
1514 << VirtReg
<<" by copying it into physreg "
1515 << TRI
->getName(Phys
) << "\n";
1517 // If the reloaded / remat value is available in another register,
1518 // copy it to the desired register.
1519 TII
->copyRegToReg(MBB
, &MI
, Phys
, InReg
, RC
, RC
);
1521 // This invalidates Phys.
1522 Spills
.ClobberPhysReg(Phys
);
1523 // Remember it's available.
1524 Spills
.addAvailable(SSorRMId
, Phys
);
1527 MachineInstr
*CopyMI
= prior(MII
);
1528 MachineOperand
*KillOpnd
= CopyMI
->findRegisterUseOperand(InReg
);
1529 KillOpnd
->setIsKill();
1530 UpdateKills(*CopyMI
, TRI
, RegKills
, KillOps
);
1532 DOUT
<< '\t' << *CopyMI
;
1537 if (VRM
.isReMaterialized(VirtReg
)) {
1538 ReMaterialize(MBB
, MII
, Phys
, VirtReg
, TII
, TRI
, VRM
);
1540 const TargetRegisterClass
* RC
= RegInfo
->getRegClass(VirtReg
);
1541 TII
->loadRegFromStackSlot(MBB
, &MI
, Phys
, SSorRMId
, RC
);
1542 MachineInstr
*LoadMI
= prior(MII
);
1543 VRM
.addSpillSlotUse(SSorRMId
, LoadMI
);
1547 // This invalidates Phys.
1548 Spills
.ClobberPhysReg(Phys
);
1549 // Remember it's available.
1550 Spills
.addAvailable(SSorRMId
, Phys
);
1552 UpdateKills(*prior(MII
), TRI
, RegKills
, KillOps
);
1553 DOUT
<< '\t' << *prior(MII
);
1557 // Insert spills here if asked to.
1558 if (VRM
.isSpillPt(&MI
)) {
1559 std::vector
<std::pair
<unsigned,bool> > &SpillRegs
=
1560 VRM
.getSpillPtSpills(&MI
);
1561 for (unsigned i
= 0, e
= SpillRegs
.size(); i
!= e
; ++i
) {
1562 unsigned VirtReg
= SpillRegs
[i
].first
;
1563 bool isKill
= SpillRegs
[i
].second
;
1564 if (!VRM
.getPreSplitReg(VirtReg
))
1565 continue; // Split interval spilled again.
1566 const TargetRegisterClass
*RC
= RegInfo
->getRegClass(VirtReg
);
1567 unsigned Phys
= VRM
.getPhys(VirtReg
);
1568 int StackSlot
= VRM
.getStackSlot(VirtReg
);
1569 TII
->storeRegToStackSlot(MBB
, next(MII
), Phys
, isKill
, StackSlot
, RC
);
1570 MachineInstr
*StoreMI
= next(MII
);
1571 VRM
.addSpillSlotUse(StackSlot
, StoreMI
);
1572 DOUT
<< "Store:\t" << *StoreMI
;
1573 VRM
.virtFolded(VirtReg
, StoreMI
, VirtRegMap::isMod
);
1575 NextMII
= next(MII
);
1578 /// ReusedOperands - Keep track of operand reuse in case we need to undo
1580 ReuseInfo
ReusedOperands(MI
, TRI
);
1581 SmallVector
<unsigned, 4> VirtUseOps
;
1582 for (unsigned i
= 0, e
= MI
.getNumOperands(); i
!= e
; ++i
) {
1583 MachineOperand
&MO
= MI
.getOperand(i
);
1584 if (!MO
.isReg() || MO
.getReg() == 0)
1585 continue; // Ignore non-register operands.
1587 unsigned VirtReg
= MO
.getReg();
1588 if (TargetRegisterInfo::isPhysicalRegister(VirtReg
)) {
1589 // Ignore physregs for spilling, but remember that it is used by this
1591 RegInfo
->setPhysRegUsed(VirtReg
);
1595 // We want to process implicit virtual register uses first.
1596 if (MO
.isImplicit())
1597 // If the virtual register is implicitly defined, emit a implicit_def
1598 // before so scavenger knows it's "defined".
1599 VirtUseOps
.insert(VirtUseOps
.begin(), i
);
1601 VirtUseOps
.push_back(i
);
1604 // Process all of the spilled uses and all non spilled reg references.
1605 SmallVector
<int, 2> PotentialDeadStoreSlots
;
1606 KilledMIRegs
.clear();
1607 for (unsigned j
= 0, e
= VirtUseOps
.size(); j
!= e
; ++j
) {
1608 unsigned i
= VirtUseOps
[j
];
1609 MachineOperand
&MO
= MI
.getOperand(i
);
1610 unsigned VirtReg
= MO
.getReg();
1611 assert(TargetRegisterInfo::isVirtualRegister(VirtReg
) &&
1612 "Not a virtual register?");
1614 unsigned SubIdx
= MO
.getSubReg();
1615 if (VRM
.isAssignedReg(VirtReg
)) {
1616 // This virtual register was assigned a physreg!
1617 unsigned Phys
= VRM
.getPhys(VirtReg
);
1618 RegInfo
->setPhysRegUsed(Phys
);
1620 ReusedOperands
.markClobbered(Phys
);
1621 unsigned RReg
= SubIdx
? TRI
->getSubReg(Phys
, SubIdx
) : Phys
;
1622 MI
.getOperand(i
).setReg(RReg
);
1623 MI
.getOperand(i
).setSubReg(0);
1624 if (VRM
.isImplicitlyDefined(VirtReg
))
1625 BuildMI(MBB
, &MI
, MI
.getDebugLoc(),
1626 TII
->get(TargetInstrInfo::IMPLICIT_DEF
), RReg
);
1630 // This virtual register is now known to be a spilled value.
1632 continue; // Handle defs in the loop below (handle use&def here though)
1634 bool AvoidReload
= false;
1635 if (LIs
->hasInterval(VirtReg
)) {
1636 LiveInterval
&LI
= LIs
->getInterval(VirtReg
);
1637 if (!LI
.liveAt(LIs
->getUseIndex(LI
.beginNumber())))
1638 // Must be defined by an implicit def. It should not be spilled. Note,
1639 // this is for correctness reason. e.g.
1640 // 8 %reg1024<def> = IMPLICIT_DEF
1641 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1642 // The live range [12, 14) are not part of the r1024 live interval since
1643 // it's defined by an implicit def. It will not conflicts with live
1644 // interval of r1025. Now suppose both registers are spilled, you can
1645 // easily see a situation where both registers are reloaded before
1646 // the INSERT_SUBREG and both target registers that would overlap.
1650 bool DoReMat
= VRM
.isReMaterialized(VirtReg
);
1651 int SSorRMId
= DoReMat
1652 ? VRM
.getReMatId(VirtReg
) : VRM
.getStackSlot(VirtReg
);
1653 int ReuseSlot
= SSorRMId
;
1655 // Check to see if this stack slot is available.
1656 unsigned PhysReg
= Spills
.getSpillSlotOrReMatPhysReg(SSorRMId
);
1658 // If this is a sub-register use, make sure the reuse register is in the
1659 // right register class. For example, for x86 not all of the 32-bit
1660 // registers have accessible sub-registers.
1661 // Similarly so for EXTRACT_SUBREG. Consider this:
1663 // MOV32_mr fi#1, EDI
1665 // = EXTRACT_SUBREG fi#1
1666 // fi#1 is available in EDI, but it cannot be reused because it's not in
1667 // the right register file.
1668 if (PhysReg
&& !AvoidReload
&&
1669 (SubIdx
|| MI
.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG
)) {
1670 const TargetRegisterClass
* RC
= RegInfo
->getRegClass(VirtReg
);
1671 if (!RC
->contains(PhysReg
))
      if (PhysReg && !AvoidReload) {
        // This spilled operand might be part of a two-address operand. If this
        // is the case, then changing it will necessarily require changing the
        // def part of the instruction as well. However, in some cases, we
        // aren't allowed to modify the reused register. If none of these cases
        // apply, reuse it.
        bool CanReuse = true;
        bool isTied = MI.isRegTiedToDefOperand(i);
        if (isTied) {
          // Okay, we have a two address operand. We can reuse this physreg as
          // long as we are allowed to clobber the value and there isn't an
          // earlier def that has already clobbered the physreg.
          CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
            Spills.canClobberPhysReg(PhysReg);
        }
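
        // E.g. for a hypothetical two-address instruction
        //   %EAX = ADD32ri %EAX<tied>, 17
        // reusing EAX as the incoming value also overwrites it, so the reuse
        // is only legal if nothing else still expects the old value in EAX.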
        if (CanReuse) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg "
               << TRI->getName(PhysReg) << " for vreg"
               << VirtReg << " instead of reloading into physreg "
               << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);
          // The only technical detail we have is that we don't know that
          // PhysReg won't be clobbered by a reloaded stack slot that occurs
          // later in the instruction. In particular, consider 'op V1, V2'.
          // If V1 is available in physreg R0, we would choose to reuse it
          // here, instead of reloading it into the register the allocator
          // indicated (say R1). However, V2 might have to be reloaded
          // later, and it might indicate that it needs to live in R0. When
          // this occurs, we need to have information available that
          // indicates it is safe to use R1 for the reload instead of R0.
          //
          // To further complicate matters, we might conflict with an alias,
          // or R0 and R1 might not be compatible with each other. In this
          // case, we actually insert a reload for V1 in R1, ensuring that
          // we can get at R0 or its alias.
          ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                  VRM.getPhys(VirtReg), VirtReg);
          if (isTied)
            // Only mark it clobbered if this is a use&def operand.
            ReusedOperands.markClobbered(PhysReg);
          ++NumReused;
          if (MI.getOperand(i).isKill() &&
              ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
            // The store of this spilled value is potentially dead, but we
            // won't know for certain until we've confirmed that the re-use
            // above is valid, which means waiting until the other operands
            // are processed. For now we just track the spill slot; we'll
            // remove it after the other operands are processed, if valid.
            PotentialDeadStoreSlots.push_back(ReuseSlot);
          }
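
          // E.g. (hypothetical): if this kill is the last read of SS#4 and
          // the reuse holds up, the earlier "MOV32mr <fi#4>, %EAX" that
          // filled the slot is a dead store and is erased in the cleanup
          // loop after operand processing.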
          // Mark it isKill if there are no other uses of the same virtual
          // register and it's not a two-address operand. IsKill will be
          // unset if the reg is reused.
          if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
            MI.getOperand(i).setIsKill();
            KilledMIRegs.insert(VirtReg);
          }
          continue;
        }  // CanReuse
        // Otherwise we have a situation where we have a two-address instruction
        // whose mod/ref operand needs to be reloaded. This reload is already
        // available in some register "PhysReg", but if we used PhysReg as the
        // operand to our 2-addr instruction, the instruction would modify
        // PhysReg. This isn't cool if something later uses PhysReg and expects
        // to get its initial value.
        //
        // To avoid this problem, and to avoid doing a load right after a store,
        // we emit a copy from PhysReg into the designated register for this
        // operand.
        unsigned DesignatedReg = VRM.getPhys(VirtReg);
        assert(DesignatedReg && "Must map virtreg to physreg!");

        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available. If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);
        // If the mapped designated register is actually the physreg we have
        // incoming, we don't need to insert a dead copy.
        if (DesignatedReg == PhysReg) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg " << TRI->getName(PhysReg)
               << " for vreg" << VirtReg
               << " instead of reloading into same physreg.\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);
          ReusedOperands.markClobbered(RReg);
          ++NumReused;
          continue;
        }
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        RegInfo->setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);
        TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);

        MachineInstr *CopyMI = prior(MII);
        UpdateKills(*CopyMI, TRI, RegKills, KillOps);

        // This invalidates DesignatedReg.
        Spills.ClobberPhysReg(DesignatedReg);

        Spills.addAvailable(ReuseSlot, DesignatedReg);
        unsigned RReg =
          SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);
        DOUT << '\t' << *prior(MII);
        ++NumReused;
        continue;
      } // if (PhysReg)
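
      // The copy case above produces a sequence like (hypothetical x86,
      // with SS#3 available in EAX and ECX as the designated register):
      //   %ECX = MOV32rr %EAX          ; copy instead of MOV32rm <fi#3>
      //   %ECX = ADD32ri %ECX<tied>, 1 ; the 2-addr def clobbers ECX only
      // so EAX still holds the slot value for later readers.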
      // Otherwise, reload it and remember that we have it.
      PhysReg = VRM.getPhys(VirtReg);
      assert(PhysReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available. If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);
      RegInfo->setPhysRegUsed(PhysReg);
      ReusedOperands.markClobbered(PhysReg);
      if (AvoidReload)
        ++NumAvoided;
      else {
        if (DoReMat) {
          ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
        } else {
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(SSorRMId, LoadMI);
          ++NumLoads;
        }
        // This invalidates PhysReg.
        Spills.ClobberPhysReg(PhysReg);

        // Any stores to this stack slot are not dead anymore.
        if (!DoReMat)
          MaybeDeadStores[SSorRMId] = NULL;
        Spills.addAvailable(SSorRMId, PhysReg);
        // Assumes this is the last use. IsKill will be unset if reg is reused
        // unless it's a two-address operand.
        if (!MI.isRegTiedToDefOperand(i) &&
            KilledMIRegs.count(VirtReg) == 0) {
          MI.getOperand(i).setIsKill();
          KilledMIRegs.insert(VirtReg);
        }

        UpdateKills(*prior(MII), TRI, RegKills, KillOps);
        DOUT << '\t' << *prior(MII);
      }
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);
    }
    // Ok - now we can remove stores that have been confirmed dead.
    for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
      // This was the last use and the spilled value is still available
      // for reuse. That means the spill was unnecessary!
      int PDSSlot = PotentialDeadStoreSlots[j];
      MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
      if (DeadStore) {
        DOUT << "Removed dead store:\t" << *DeadStore;
        InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(DeadStore);
        MBB.erase(DeadStore);
        MaybeDeadStores[PDSSlot] = NULL;
        ++NumDSE;
      }
    }
    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.
    SmallSet<int, 2> FoldedSS;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DOUT << "Folded vreg: " << VirtReg << "  MR: " << MR;

      // MI2VirtMap can be updated, which invalidates the iterator.
      // Increment the iterator first.
      ++I;
      int SS = VRM.getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";
      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DOUT << "Promoted Load To Copy: " << MI;
            if (DestReg != InReg) {
              const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
              TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
              MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
              unsigned SubIdx = DefMO->getSubReg();
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = &MI;
              --NextMII;  // backtrack to the copy.
              // Propagate the sub-register index over.
              if (SubIdx) {
                DefMO = NextMII->findRegisterDefOperand(DestReg);
                DefMO->setSubReg(SubIdx);
              }

              // Mark it killed.
              MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
              KillOpnd->setIsKill();

              BackTracked = true;
            } else {
              DOUT << "Removing now-noop copy: " << MI;
              // Unset last kill since it's being reused.
              InvalidateKill(InReg, TRI, RegKills, KillOps);
              Spills.disallowClobberPhysReg(InReg);
            }

            InvalidateKills(MI, TRI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            InvalidateKills(MI, TRI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }
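
      // E.g. (hypothetical x86 fold): with SS#1 available in %ECX,
      //   %EAX = ADD32rm %EAX<tied>, <fi#1>
      // unfolds to the register form
      //   %EAX = ADD32rr %EAX<tied>, %ECX
      // replacing the memory access with the already-loaded value.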
      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          // We can reuse this physreg as long as we are allowed to clobber
          // the value and there isn't an earlier def that has already
          // clobbered the physreg.
          if (PhysReg &&
              !ReusedOperands.isClobbered(PhysReg) &&
              Spills.canClobberPhysReg(PhysReg) &&
              !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
            MachineOperand *KillOpnd =
              DeadStore->findRegisterUseOperand(PhysReg, true);
            // Note, if the store is storing a sub-register, it's possible the
            // super-register is needed below.
            if (KillOpnd && !KillOpnd->getSubReg() &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)){
              MBB.insert(MII, NewMIs[0]);
              NewStore = NewMIs[1];
              MBB.insert(MII, NewStore);
              VRM.addSpillSlotUse(SS, NewStore);
              InvalidateKills(MI, TRI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              --NextMII;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              isDead = true;
              ++NumSUnfold;
            }
          }
        }
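
        // E.g. (hypothetical): unfolding a mod/ref instruction yields
        //   NewMIs[0]: the computation rewritten to use PhysReg, and
        //   NewMIs[1]: "MOV32mr <fi#SS>, PhysReg", the replacement store,
        // which allows the older store to SS to be proven dead below.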
        if (isDead) {  // Previous store is dead.
          // If we get here, the store is dead, nuke it now.
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy. That makes the
          // stack slot value available.
          VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }
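
      // E.g. (hypothetical): the NewStore "MOV32mr <fi#SS>, %ECX" emitted
      // above acts like a spill merged into a copy, so the slot is once
      // again known to be available in %ECX for subsequent reloads.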
      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");

            if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
                                    Spills, RegKills, KillOps, TRI, VRM)) {
              NextMII = next(MII);
              BackTracked = true;
              goto ProcessNextInst;
            }

            // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now. Otherwise, make the register
            // available in PhysReg.
            Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
          }
        }
      }
    }
    // Process all of the spilled defs.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isReg() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        unsigned Src, Dst, SrcSR, DstSR;
        if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
          ++NumDCE;
          DOUT << "Removing now-noop copy: " << MI;
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            // Source register or an implicit super/sub-register use is killed.
            assert(KillRegs[0] == Dst ||
                   TRI->isSubRegister(KillRegs[0], Dst) ||
                   TRI->isSuperRegister(KillRegs[0], Dst));
            // Last def is now dead.
            TransferDeadness(&MBB, Dist, Src, RegKills, KillOps, VRM);
          }
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }
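
        // E.g. rewriting may have turned a copy into
        //   %EAX = MOV32rr %EAX
        // (a no-op; opcode illustrative only), which is deleted here instead
        // of being treated as a clobber of its own destination.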
        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register. If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }
      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM.getStackSlot(VirtReg);
      const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
      unsigned TiedOp;
      if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM.getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);
        }
      }

      assert(PhysReg && "VR not assigned a physical register?");
      RegInfo->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);
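
      // E.g. (hypothetical x86): if a 16-bit sub-register def is tied to a
      // use living in %AX, findSuperReg locates the super-register %EAX with
      // getSubReg(%EAX, SubIdx) == %AX, so the spill below stores from the
      // physreg the full result actually occupies.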
      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                          LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
        NextMII = next(MII);

        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        {
          unsigned Src, Dst, SrcSR, DstSR;
          if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
            ++NumDCE;
            DOUT << "Removing now-noop copy: " << MI;
            InvalidateKills(MI, TRI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            UpdateKills(*LastStore, TRI, RegKills, KillOps);
            goto ProcessNextInst;
          }
        }
      }
    }
  ProcessNextInst:
    DistanceMap.insert(std::make_pair(&MI, Dist++));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
        UpdateKills(*II, TRI, RegKills, KillOps);
    }
    MII = NextMII;
  }
}
};

llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
  switch (RewriterOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalRewriter();
  case simple:
    return new SimpleRewriter();
  }
}