//===-- llvm/CodeGen/Rewriter.cpp - Rewriter -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "virtregrewriter"
#include "VirtRegRewriter.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumOmitted , "Number of reloads omitted");
STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
STATISTIC(NumCopified, "Number of available reloads turned into copies");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumSUnfold , "Number of stores unfolded");
STATISTIC(NumModRefUnfold, "Number of modref unfolded");
enum RewriterName { local, trivial };

static cl::opt<RewriterName>
RewriterOpt("rewriter",
            cl::desc("Rewriter to use: (default: local)"),
            cl::init(local),
            cl::values(clEnumVal(local,   "local rewriter"),
                       clEnumVal(trivial, "trivial rewriter"),
                       clEnumValEnd));

static cl::opt<bool>
ScheduleSpills("schedule-spills",
               cl::desc("Schedule spill code"),
               cl::init(false));
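
// A minimal usage sketch (assuming a driver that parses these cl::opt flags,
// e.g. llc): "llc -rewriter=trivial -schedule-spills foo.bc".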
VirtRegRewriter::~VirtRegRewriter() {}
/// This class is intended for use with the new spilling framework only. It
/// rewrites vreg def/uses to use the assigned preg, but does not insert any
/// spill code.
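/// (Illustrative: if VRM maps %reg1024 to EAX, a use of %reg1024 is simply
/// rewritten to use EAX in place; no loads or stores are introduced.)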
struct VISIBILITY_HIDDEN TrivialRewriter : public VirtRegRewriter {

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    DEBUG(errs() << "********** REWRITE MACHINE CODE **********\n");
    DEBUG(errs() << "********** Function: "
          << MF.getFunction()->getName() << '\n');
    DEBUG(errs() << "**** Machine Instrs"
          << "(NOTE! Does not include spills and reloads!) ****\n");
    DEBUG(MF.dump());

    MachineRegisterInfo *mri = &MF.getRegInfo();

    bool changed = false;

    for (LiveIntervals::iterator liItr = LIs->begin(), liEnd = LIs->end();
         liItr != liEnd; ++liItr) {

      if (TargetRegisterInfo::isVirtualRegister(liItr->first)) {
        if (VRM.hasPhys(liItr->first)) {
          unsigned preg = VRM.getPhys(liItr->first);
          mri->replaceRegWith(liItr->first, preg);
          mri->setPhysRegUsed(preg);
          changed = true;
        }
      }
      else {
        if (!liItr->second->empty()) {
          mri->setPhysRegUsed(liItr->first);
        }
      }
    }

    DEBUG(errs() << "**** Post Machine Instrs ****\n");
    DEBUG(MF.dump());

    return changed;
  }

};
// ************************************************************************ //
/// AvailableSpills - As the local rewriter is scanning and rewriting an MBB
/// from top down, keep track of which spill slots or remat are available in
/// each register.
///
/// Note that not all physregs are created equal here. In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using
/// that we cannot CHANGE, but we can read if we like. We keep track of this
/// on a per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries. The predicate 'canClobberPhysReg()' checks
/// this bit, and addAvailable sets it.
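///
/// For example (hypothetical values): addAvailable(3, R5, true) records
/// SpillSlotsOrReMatsAvailable[3] = (R5 << 1) | 1, getSpillSlotOrReMatPhysReg(3)
/// recovers R5 via ">> 1", and canClobberPhysRegForSS(3) tests the low bit.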
class VISIBILITY_HIDDEN AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to
  // being loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg. This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }
  /// clear - Reset the state.
  void clear() {
    SpillSlotsOrReMatsAvailable.clear();
    PhysRegsAvailable.clear();
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }
  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }
  /// addAvailable - Mark that the specified stack slot / remat is available
  /// in the specified physreg. If CanClobber is true, the physreg can be
  /// modified at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) |
                                               (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(errs() << "Remembering RM#"
                   << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1);
    else
      DEBUG(errs() << "Remembering SS#" << SlotOrReMat);
    DEBUG(errs() << " in physreg " << TRI->getName(Reg) << "\n");
  }
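
  // (Remat ids are encoded above VirtRegMap::MAX_STACK_SLOT, so e.g. a
  //  SlotOrReMat of MAX_STACK_SLOT+1 prints as "RM#0"; plain stack slots
  //  print with their index unchanged.)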
  /// canClobberPhysRegForSS - Return true if the spiller is allowed to change
  /// the value of the specified stackslot register if it desires. The
  /// specified stack slot must be available in a physreg for this query to
  /// make sense.
  bool canClobberPhysRegForSS(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }
  /// canClobberPhysReg - Return true if the spiller is allowed to clobber the
  /// physical register where values for some stack slot(s) might be
  /// available.
  bool canClobberPhysReg(unsigned PhysReg) const {
    std::multimap<unsigned, int>::const_iterator I =
      PhysRegsAvailable.lower_bound(PhysReg);
    while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
      int SlotOrReMat = I->second;
      ++I;
      if (!canClobberPhysRegForSS(SlotOrReMat))
        return false;
    }
    return true;
  }
  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register. The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value. We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes. This removes information about which register the
  /// previous value for this slot lives in (as the previous value is dead
  /// now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);

  /// AddAvailableRegsToLiveIn - Availability information is being kept coming
  /// into the specified MBB. Add available physical registers as potential
  /// live-in's. If they are reused in the MBB, they will be added to the
  /// live-in set to keep the register scavenger and post-allocation scheduler
  /// consistent.
  void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
                                std::vector<MachineOperand*> &KillOps);
};
// ************************************************************************ //

// Given a location where a reload of a spilled register or a remat of
// a constant is to be inserted, attempt to find a safe location to
// insert the load at an earlier point in the basic-block, to hide
// latency of the load and to avoid address-generation interlock issues.
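//
// For example (illustrative), a reload emitted immediately before its use:
//   ...instructions unrelated to <ss#0> and %r1...
//   %r1 = load <ss#0>
//   use %r1
// can be hoisted above the unrelated instructions so the load's latency
// overlaps with other work.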
static MachineBasicBlock::iterator
ComputeReloadLoc(MachineBasicBlock::iterator const InsertLoc,
                 MachineBasicBlock::iterator const Begin,
                 unsigned PhysReg,
                 const TargetRegisterInfo *TRI,
                 bool DoReMat,
                 int SSorRMId,
                 const TargetInstrInfo *TII,
                 const MachineFunction &MF)
{
  if (!ScheduleSpills)
    return InsertLoc;

  // Spill backscheduling is of primary interest to addresses, so
  // don't do anything if the register isn't in the register class
  // used for pointers.

  const TargetLowering *TL = MF.getTarget().getTargetLowering();

  if (!TL->isTypeLegal(TL->getPointerTy()))
    // Believe it or not, this is true on PIC16.
    return InsertLoc;

  const TargetRegisterClass *ptrRegClass =
    TL->getRegClassFor(TL->getPointerTy());
  if (!ptrRegClass->contains(PhysReg))
    return InsertLoc;

  // Scan upwards through the preceding instructions. If an instruction doesn't
  // reference the stack slot or the register we're loading, we can
  // backschedule the reload up past it.
  MachineBasicBlock::iterator NewInsertLoc = InsertLoc;
  while (NewInsertLoc != Begin) {
    MachineBasicBlock::iterator Prev = prior(NewInsertLoc);
    for (unsigned i = 0; i < Prev->getNumOperands(); ++i) {
      MachineOperand &Op = Prev->getOperand(i);
      if (!DoReMat && Op.isFI() && Op.getIndex() == SSorRMId)
        goto stop;
    }
    if (Prev->findRegisterUseOperandIdx(PhysReg) != -1 ||
        Prev->findRegisterDefOperand(PhysReg))
      goto stop;
    for (const unsigned *Alias = TRI->getAliasSet(PhysReg); *Alias; ++Alias)
      if (Prev->findRegisterUseOperandIdx(*Alias) != -1 ||
          Prev->findRegisterDefOperand(*Alias))
        goto stop;
    NewInsertLoc = Prev;
  }
stop:;

  // If we made it to the beginning of the block, turn around and move back
  // down just past any existing reloads. They're likely to be reloads/remats
  // for instructions earlier than what our current reload/remat is for, so
  // they should be scheduled earlier.
  if (NewInsertLoc == Begin) {
    int FrameIdx;
    while (InsertLoc != NewInsertLoc &&
           (TII->isLoadFromStackSlot(NewInsertLoc, FrameIdx) ||
            TII->isTriviallyReMaterializable(NewInsertLoc)))
      ++NewInsertLoc;
  }

  return NewInsertLoc;
}
// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to rollback upon processing a new operand. See comments
// below.
struct ReusedOp {
  // The MachineInstr operand that reused an available value.
  unsigned Operand;

  // StackSlotOrReMat - The spill slot or remat id of the value being reused.
  unsigned StackSlotOrReMat;

  // PhysRegReused - The physical register the value was available in.
  unsigned PhysRegReused;

  // AssignedPhysReg - The physreg that was assigned for use by the reload.
  unsigned AssignedPhysReg;

  // VirtReg - The virtual register itself.
  unsigned VirtReg;

  ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
           unsigned vreg)
    : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
      AssignedPhysReg(apr), VirtReg(vreg) {}
};
/// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
/// is reused instead of reloaded.
class VISIBILITY_HIDDEN ReuseInfo {
  MachineInstr &MI;
  std::vector<ReusedOp> Reuses;
  BitVector PhysRegsClobbered;
public:
  ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
    PhysRegsClobbered.resize(tri->getNumRegs());
  }

  bool hasReuses() const {
    return !Reuses.empty();
  }
  /// addReuse - If we choose to reuse a virtual register that is already
  /// available instead of reloading it, remember that we did so.
  void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                unsigned PhysRegReused, unsigned AssignedPhysReg,
                unsigned VirtReg) {
    // If the reload is to the assigned register anyway, no undo will be
    // required.
    if (PhysRegReused == AssignedPhysReg) return;

    // Otherwise, remember this.
    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                              AssignedPhysReg, VirtReg));
  }

  void markClobbered(unsigned PhysReg) {
    PhysRegsClobbered.set(PhysReg);
  }

  bool isClobbered(unsigned PhysReg) const {
    return PhysRegsClobbered.test(PhysReg);
  }
  /// GetRegForReload - We are about to emit a reload into PhysReg. If there
  /// is some other operand that is using the specified register, either pick
  /// a new register to use, or evict the previous reload and use this reg.
  unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg,
                           MachineFunction &MF, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           SmallSet<unsigned, 8> &Rejected,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);

  /// GetRegForReload - Helper for the above GetRegForReload(). Add a
  /// 'Rejected' set to remember which registers have been considered and
  /// rejected for the reload. This avoids infinite looping in case like
  /// this:
  /// t1 := op t2, t3
  /// t2 <- assigned r0 for use by the reload but ended up reuse r1
  /// t3 <- assigned r1 for use by the reload but ended up reuse r0
  /// t1 <- desires r1
  ///       sees r1 is taken by t2, tries t2's reload register r0
  ///       sees r0 is taken by t3, tries t3's reload register r1
  ///       sees r1 is taken by t2, tries t2's reload register r0 ...
  unsigned GetRegForReload(unsigned VirtReg, unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {
    SmallSet<unsigned, 8> Rejected;
    MachineFunction &MF = *MI->getParent()->getParent();
    const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
    return GetRegForReload(RC, PhysReg, MF, MI, Spills, MaybeDeadStores,
                           Rejected, RegKills, KillOps, VRM);
  }
};
// ****************** //
// Utility Functions //
// ****************** //
/// findSinglePredSuccessor - Return via reference a vector of machine basic
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
                                    SmallVectorImpl<MachineBasicBlock*> &Succs) {
  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->pred_size() == 1)
      Succs.push_back(SuccMBB);
  }
}
/// InvalidateKill - Invalidate register kill information for a specific
/// register. This also unsets the kills marker on the last kill operand.
static void InvalidateKill(unsigned Reg,
                           const TargetRegisterInfo* TRI,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    // KillOps[Reg] might be a def of a super-register.
    unsigned KReg = KillOps[Reg]->getReg();
    KillOps[KReg] = NULL;
    RegKills.reset(KReg);
    for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
      if (RegKills[*SR] && KillOps[*SR]) {
        KillOps[*SR]->setIsKill(false);
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }
    }
  }
}
/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI,
                            const TargetRegisterInfo* TRI,
                            BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      RegKills.reset(Reg);
      KillOps[Reg] = NULL;
      // KillOps[Reg] might be a def of a super-register.
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }
    }
  }
}
/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also checks if
/// the def MI has other definition operands that are not dead. Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    if (MO.getReg() == Reg)
      DefOp = &MO;
    else if (!MO.isDead())
      HasLiveDef = true;
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}
/// UpdateKills - Track and update kill info. If a MI reads a register that is
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      // KillOps[Reg] might be a def of a super-register.
      unsigned KReg = KillOps[Reg]->getReg();
      KillOps[KReg] = NULL;
      RegKills.reset(KReg);

      // Must be a def of a super-register. Its other sub-registers are no
      // longer killed as well.
      for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }
    }

    if (MO.isKill()) {
      if (!MI.isRegTiedToDefOperand(i)) {
        // Unless it's a two-address operand, this is the new kill.
        RegKills.set(Reg);
        KillOps[Reg] = &MO;
      }
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        RegKills.set(*SR);
        KillOps[*SR] = &MO;
      }
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
    // It also defines (or partially defines) aliases.
    for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
  }
}
/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
///
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg);
#ifndef NDEBUG
  const TargetInstrDesc &TID = ReMatDefMI->getDesc();
  assert(TID.getNumDefs() == 1 &&
         "Don't know how to remat instructions that define > 1 values!");
#endif
  TII->reMaterialize(MBB, MII, DestReg,
                     ReMatDefMI->getOperand(0).getSubReg(), ReMatDefMI);
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    unsigned SubIdx = MO.getSubReg();
    unsigned Phys = VRM.getPhys(VirtReg);
    unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
    MO.setReg(RReg);
    MO.setSubReg(0);
  }
  ++NumReMats;
}
/// findSuperReg - Find the SubReg's super-register of given register class
/// where its SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}
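
// (Illustrative, x86-style: with SubReg = AX, RC = a 32-bit register class,
//  and SubIdx naming the 16-bit sub-register slot, the loop would return EAX,
//  since TRI->getSubReg(EAX, SubIdx) == AX.)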
// ******************************** //
// Available Spills Implementation //
// ******************************** //
/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    ++I;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DEBUG(errs() << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n");
  }
}
/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}
/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DEBUG(errs() << "PhysReg " << TRI->getName(PhysReg)
          << " clobbered, invalidating ");
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(errs() << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n");
    else
      DEBUG(errs() << "SS#" << SlotOrReMat << "\n");
  }
}
/// ClobberPhysReg - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}
/// AddAvailableRegsToLiveIn - Availability information is being kept coming
/// into the specified MBB. Add available physical registers as potential
/// live-in's. If they are reused in the MBB, they will be added to the
/// live-in set to keep the register scavenger and post-allocation scheduler
/// consistent.
void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
                                               BitVector &RegKills,
                                               std::vector<MachineOperand*> &KillOps) {
  std::set<unsigned> NotAvailable;
  for (std::multimap<unsigned, int>::iterator
         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
       I != E; ++I) {
    unsigned Reg = I->first;
    const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
    // FIXME: A temporary workaround. We can't reuse available value if it's
    // not safe to move the def of the virtual register's class. e.g.
    // X86::RFP* register classes. Do not add it as a live-in.
    if (!TII->isSafeToMoveRegClassDefs(RC))
      // This is no longer available.
      NotAvailable.insert(Reg);
    else {
      MBB.addLiveIn(Reg);
      InvalidateKill(Reg, TRI, RegKills, KillOps);
    }

    // Skip over the same register.
    std::multimap<unsigned, int>::iterator NI = next(I);
    while (NI != E && NI->first == Reg) {
      ++I;
      ++NI;
    }
  }

  for (std::set<unsigned>::iterator I = NotAvailable.begin(),
         E = NotAvailable.end(); I != E; ++I) {
    ClobberPhysReg(*I);
    for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
         *SubRegs; ++SubRegs)
      ClobberPhysReg(*SubRegs);
  }
}
/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes. This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}
// ************************** //
// Reuse Info Implementation //
// ************************** //
/// GetRegForReload - We are about to emit a reload into PhysReg. If there
/// is some other operand that is using the specified register, either pick
/// a new register to use, or evict the previous reload and use this reg.
unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
                                    unsigned PhysReg,
                                    MachineFunction &MF,
                                    MachineInstr *MI, AvailableSpills &Spills,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                    SmallSet<unsigned, 8> &Rejected,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    VirtRegMap &VRM) {
  const TargetInstrInfo* TII = MF.getTarget().getInstrInfo();
  const TargetRegisterInfo *TRI = Spills.getRegInfo();

  if (Reuses.empty()) return PhysReg;  // This is most often empty.

  for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
    ReusedOp &Op = Reuses[ro];
    // If we find some other reuse that was supposed to use this register
    // exactly for its reload, we can change this reload to use ITS reload
    // register. That is, unless its reload register has already been
    // considered and subsequently rejected because it has also been reused
    // by another operand.
    if (Op.PhysRegReused == PhysReg &&
        Rejected.count(Op.AssignedPhysReg) == 0 &&
        RC->contains(Op.AssignedPhysReg)) {
      // Yup, use the reload register that we didn't use before.
      unsigned NewReg = Op.AssignedPhysReg;
      Rejected.insert(PhysReg);
      return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores, Rejected,
                             RegKills, KillOps, VRM);
    }

    // Otherwise, we might also have a problem if a previously reused
    // value aliases the new register. If so, codegen the previous reload
    // and use this one.
    unsigned PRRU = Op.PhysRegReused;
    if (TRI->regsOverlap(PRRU, PhysReg)) {
      // Okay, we found out that an alias of a reused register
      // was used. This isn't good because it means we have
      // to undo a previous reuse.
      MachineBasicBlock *MBB = MI->getParent();
      const TargetRegisterClass *AliasRC =
        MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

      // Copy Op out of the vector and remove it, we're going to insert an
      // explicit load for it.
      ReusedOp NewOp = Op;
      Reuses.erase(Reuses.begin()+ro);

      // MI may be using only a sub-register of PhysRegUsed.
      unsigned RealPhysRegUsed = MI->getOperand(NewOp.Operand).getReg();
      unsigned SubIdx = 0;
      assert(TargetRegisterInfo::isPhysicalRegister(RealPhysRegUsed) &&
             "A reuse cannot be a virtual register");
      if (PRRU != RealPhysRegUsed) {
        // What was the sub-register index?
        unsigned SubReg;
        for (SubIdx = 1; (SubReg = TRI->getSubReg(PRRU, SubIdx)); SubIdx++)
          if (SubReg == RealPhysRegUsed)
            break;
        assert(SubReg == RealPhysRegUsed &&
               "Operand physreg is not a sub-register of PhysRegUsed");
      }

      // Ok, we're going to try to reload the assigned physreg into the
      // slot that we were supposed to in the first place. However, that
      // register could hold a reuse. Check to see if it conflicts or
      // would prefer us to use a different register.
      unsigned NewPhysReg = GetRegForReload(RC, NewOp.AssignedPhysReg,
                                            MF, MI, Spills, MaybeDeadStores,
                                            Rejected, RegKills, KillOps, VRM);

      bool DoReMat = NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT;
      int SSorRMId = DoReMat
        ? VRM.getReMatId(NewOp.VirtReg) : NewOp.StackSlotOrReMat;

      // Back-schedule reloads and remats.
      MachineBasicBlock::iterator InsertLoc =
        ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI,
                         DoReMat, SSorRMId, TII, MF);

      if (DoReMat) {
        ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII,
                      TRI, VRM);
      } else {
        TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg,
                                  NewOp.StackSlotOrReMat, AliasRC);
        MachineInstr *LoadMI = prior(InsertLoc);
        VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
        // Any stores to this stack slot are not dead anymore.
        MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
        ++NumLoads;
      }
      Spills.ClobberPhysReg(NewPhysReg);
      Spills.ClobberPhysReg(NewOp.PhysRegReused);

      unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
      MI->getOperand(NewOp.Operand).setReg(RReg);
      MI->getOperand(NewOp.Operand).setSubReg(0);

      Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
      UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
      DEBUG(errs() << '\t' << *prior(InsertLoc));

      DEBUG(errs() << "Reuse undone!\n");
      --NumReused;

      // Finally, PhysReg is now available, go ahead and use it.
      return PhysReg;
    }
  }
  return PhysReg;
}
// ************************************************************************ //
/// FoldsStackSlotModRef - Return true if the specified MI folds the specified
/// stack slot mod/ref. It also checks if it's possible to unfold the
/// instruction by having it define a specified physical register instead.
static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
                                 const TargetInstrInfo *TII,
                                 const TargetRegisterInfo *TRI,
                                 VirtRegMap &VRM) {
  if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
    return false;

  bool Found = false;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
    unsigned VirtReg = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    if (MR & VirtRegMap::isModRef)
      if (VRM.getStackSlot(VirtReg) == SS) {
        Found = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
        break;
      }
  }
  if (!Found)
    return false;

  // Does the instruction use a register that overlaps the scratch register?
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      if (!VRM.hasPhys(Reg))
        continue;
      Reg = VRM.getPhys(Reg);
    }
    if (TRI->regsOverlap(PhysReg, Reg))
      return false;
  }
  return true;
}
/// FindFreeRegister - Find a free register of a given register class by looking
/// at (at most) the last two machine instructions.
static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
                                 MachineBasicBlock &MBB,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI,
                                 BitVector &AllocatableRegs) {
  BitVector Defs(TRI->getNumRegs());
  BitVector Uses(TRI->getNumRegs());
  SmallVector<unsigned, 4> LocalUses;
  SmallVector<unsigned, 4> Kills;

  // Take a look at 2 instructions at most.
  for (unsigned Count = 0; Count < 2; ++Count) {
    if (MII == MBB.begin())
      break;
    MachineInstr *PrevMI = prior(MII);
    for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = PrevMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;
      unsigned Reg = MO.getReg();
      if (MO.isDef()) {
        Defs.set(Reg);
        for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
          Defs.set(*AS);
      } else {
        LocalUses.push_back(Reg);
        if (MO.isKill() && AllocatableRegs[Reg])
          Kills.push_back(Reg);
      }
    }

    for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
      unsigned Kill = Kills[i];
      if (!Defs[Kill] && !Uses[Kill] &&
          TRI->getPhysicalRegisterRegClass(Kill) == RC)
        return Kill;
    }
    for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
      unsigned Reg = LocalUses[i];
      Uses.set(Reg);
      for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
        Uses.set(*AS);
    }

    MII = PrevMI;
  }

  return 0;
}
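
// (Illustrative: if one of the last two instructions kills %r2, and neither
//  of them defines or otherwise uses %r2, then %r2 is returned as a free
//  scratch register of class RC.)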
// AssignPhysToVirtReg - Rewrite all operands of MI that reference VirtReg to
// use PhysReg instead.
void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == VirtReg)
      MO.setReg(PhysReg);
  }
}

namespace {
  // RefSorter - Order (instruction, distance) pairs by ascending distance.
  struct RefSorter {
    bool operator()(const std::pair<MachineInstr*, int> &A,
                    const std::pair<MachineInstr*, int> &B) {
      return A.second < B.second;
    }
  };
}
// ***************************** //
// Local Spiller Implementation //
// ***************************** //
class VISIBILITY_HIDDEN LocalRewriter : public VirtRegRewriter {
  MachineRegisterInfo *RegInfo;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  BitVector AllocatableRegs;
  DenseMap<MachineInstr*, unsigned> DistanceMap;
public:

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    RegInfo = &MF.getRegInfo();
    TRI = MF.getTarget().getRegisterInfo();
    TII = MF.getTarget().getInstrInfo();
    AllocatableRegs = TRI->getAllocatableSet(MF);
    DEBUG(errs() << "\n**** Local spiller rewriting function '"
          << MF.getFunction()->getName() << "':\n");
    DEBUG(errs() << "**** Machine Instrs (NOTE! Does not include spills and"
                    " reloads!) ****\n");
    DEBUG(MF.dump());

    // Spills - Keep track of which spilled values are available in physregs
    // so that we can choose to reuse the physregs instead of emitting
    // reloads. This is usually refreshed per basic block.
    AvailableSpills Spills(TRI, TII);

    // Keep track of kill information.
    BitVector RegKills(TRI->getNumRegs());
    std::vector<MachineOperand*> KillOps;
    KillOps.resize(TRI->getNumRegs(), NULL);

    // SingleEntrySuccs - Successor blocks which have a single predecessor.
    SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
    SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;

    // Traverse the basic blocks depth first.
    MachineBasicBlock *Entry = MF.begin();
    SmallPtrSet<MachineBasicBlock*,16> Visited;
    for (df_ext_iterator<MachineBasicBlock*,
           SmallPtrSet<MachineBasicBlock*,16> >
           DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
         DFI != E; ++DFI) {
      MachineBasicBlock *MBB = *DFI;
      if (!EarlyVisited.count(MBB))
        RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);

      // If this MBB is the only predecessor of a successor, keep the
      // availability information and visit it next.
      do {
        // Keep visiting single predecessor successor as long as possible.
        SinglePredSuccs.clear();
        findSinglePredSuccessor(MBB, SinglePredSuccs);
        if (SinglePredSuccs.empty())
          MBB = 0;
        else {
          // FIXME: More than one successor, each of which has MBB as its
          // only predecessor.
          MBB = SinglePredSuccs[0];
          if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
            Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
            RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);
          }
        }
      } while (MBB);

      // Clear the availability info.
      Spills.clear();
    }

    DEBUG(errs() << "**** Post Machine Instrs ****\n");
    DEBUG(MF.dump());

    // Mark unused spill slots.
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int SS = VRM.getLowSpillSlot();
    if (SS != VirtRegMap::NO_STACK_SLOT)
      for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
        if (!VRM.isSpillSlotUsed(SS)) {
          MFI->RemoveStackObject(SS);
          ++NumDSS;
        }

    return true;
  }
  /// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
  /// a scratch register is available.
  ///     xorq  %r12<kill>, %r13
  ///     addq  %rax, -184(%rbp)
  ///     addq  %r13, -184(%rbp)
  /// ==>
  ///     xorq  %r12<kill>, %r13
  ///     movq  -184(%rbp), %r12
  ///     addq  %rax, %r12
  ///     addq  %r13, %r12
  ///     movq  %r12, -184(%rbp)
  bool OptimizeByUnfold2(unsigned VirtReg, int SS,
                         MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator &MII,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         AvailableSpills &Spills,
                         BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps,
                         VirtRegMap &VRM) {
    MachineBasicBlock::iterator NextMII = next(MII);
    if (NextMII == MBB.end())
      return false;

    if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
      return false;

    // Now let's see if the last couple of instructions happen to have freed up
    // a register.
    const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
    unsigned PhysReg = FindFreeRegister(MII, MBB, RC, TRI, AllocatableRegs);
    if (!PhysReg)
      return false;

    MachineFunction &MF = *MBB.getParent();
    TRI = MF.getTarget().getRegisterInfo();
    MachineInstr &MI = *MII;
    if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, VRM))
      return false;

    // If the next instruction also folds the same SS modref and can be unfolded,
    // then it's worthwhile to issue a load from SS into the free register and
    // then unfold these instructions.
    if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM))
      return false;

    // Back-schedule reloads and remats.
    MachineBasicBlock::iterator InsertLoc =
      ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, false, SS, TII, MF);

    // Load from SS to the spare physical register.
    TII->loadRegFromStackSlot(MBB, MII, PhysReg, SS, RC);
    // This invalidates Phys.
    Spills.ClobberPhysReg(PhysReg);
    // Remember it's available.
    Spills.addAvailable(SS, PhysReg);
    MaybeDeadStores[SS] = NULL;

    // Unfold current MI.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
      llvm_unreachable("Unable to unfold the load / store folding instruction!");
    assert(NewMIs.size() == 1);
    AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg);
    VRM.transferRestorePts(&MI, NewMIs[0]);
    MII = MBB.insert(MII, NewMIs[0]);
    InvalidateKills(MI, TRI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(&MI);
    MBB.erase(&MI);
    ++NumModRefUnfold;

    // Unfold next instructions that fold the same SS.
    do {
      MachineInstr &NextMI = *NextMII;
      NextMII = next(NextMII);
      NewMIs.clear();
      if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
        llvm_unreachable("Unable to unfold the load / store folding instruction!");
      assert(NewMIs.size() == 1);
      AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg);
      VRM.transferRestorePts(&NextMI, NewMIs[0]);
      MBB.insert(NextMII, NewMIs[0]);
      InvalidateKills(NextMI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(&NextMI);
      MBB.erase(&NextMI);
      ++NumModRefUnfold;
      if (NextMII == MBB.end())
        break;
    } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM));

    // Store the value back into SS.
    TII->storeRegToStackSlot(MBB, NextMII, PhysReg, true, SS, RC);
    MachineInstr *StoreMI = prior(NextMII);
    VRM.addSpillSlotUse(SS, StoreMI);
    VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);

    return true;
  }
  /// OptimizeByUnfold - Turn a store folding instruction into a load folding
  /// instruction. e.g.
  ///     movl %eax, -32(%ebp)
  ///     movl -36(%ebp), %eax
  ///     orl  %eax, -32(%ebp)
  /// ==>
  ///     orl  -36(%ebp), %eax
  ///     mov  %eax, -32(%ebp)
  /// This enables unfolding optimization for a subsequent instruction which will
  /// also eliminate the newly introduced store instruction.
  bool OptimizeByUnfold(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MII,
                        std::vector<MachineInstr*> &MaybeDeadStores,
                        AvailableSpills &Spills,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        VirtRegMap &VRM) {
    MachineFunction &MF = *MBB.getParent();
    MachineInstr &MI = *MII;
    unsigned UnfoldedOpc = 0;
    unsigned UnfoldPR = 0;
    unsigned UnfoldVR = 0;
    int FoldedSS = VirtRegMap::NO_STACK_SLOT;
    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      // Only transform a MI that folds a single register.
      if (UnfoldedOpc)
        return false;
      UnfoldVR = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      // MI2VirtMap can be updated, which would invalidate the iterator.
      // Increment the iterator first.
      ++I;
      if (VRM.isAssignedReg(UnfoldVR))
        continue;
      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      FoldedSS = VRM.getStackSlot(UnfoldVR);
      MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
      if (DeadStore && (MR & VirtRegMap::isModRef)) {
        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
        if (!PhysReg || !DeadStore->readsRegister(PhysReg))
          continue;
        UnfoldPR = PhysReg;
        UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                      false, true);
      }
    }

    if (!UnfoldedOpc) {
      if (!UnfoldVR)
        return false;

      // Look for other unfolding opportunities.
      return OptimizeByUnfold2(UnfoldVR, FoldedSS, MBB, MII,
                               MaybeDeadStores, Spills, RegKills, KillOps, VRM);
    }

    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
        continue;
      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
        continue;
      if (VRM.isAssignedReg(VirtReg)) {
        unsigned PhysReg = VRM.getPhys(VirtReg);
        if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
          return false;
      } else if (VRM.isReMaterialized(VirtReg))
        continue;
      int SS = VRM.getStackSlot(VirtReg);
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
      if (PhysReg) {
        if (TRI->regsOverlap(PhysReg, UnfoldPR))
          return false;
        continue;
      }
      if (VRM.hasPhys(VirtReg)) {
        PhysReg = VRM.getPhys(VirtReg);
        if (!TRI->regsOverlap(PhysReg, UnfoldPR))
          continue;
      }

      // Ok, we'll need to reload the value into a register which makes
      // it impossible to perform the store unfolding optimization later.
      // Let's see if it is possible to fold the load if the store is
      // unfolded. This allows us to perform the store unfolding
      // optimization.
      SmallVector<MachineInstr*, 4> NewMIs;
      if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
        assert(NewMIs.size() == 1);
        MachineInstr *NewMI = NewMIs.back();
        NewMIs.clear();
        int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
        assert(Idx != -1);
        SmallVector<unsigned, 1> Ops;
        Ops.push_back(Idx);
        MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
        if (FoldedMI) {
          VRM.addSpillSlotUse(SS, FoldedMI);
          if (!VRM.hasPhys(UnfoldVR))
            VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
          VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
          MII = MBB.insert(MII, FoldedMI);
          InvalidateKills(MI, TRI, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          MF.DeleteMachineInstr(NewMI);
          return true;
        }
        MF.DeleteMachineInstr(NewMI);
      }
    }

    return false;
  }
  /// CommuteChangesDestination - We are looking for r0 = op r1, r2 and
  /// where SrcReg is r1 and it is tied to r0. Return true if after
  /// commuting this instruction it will be r0 = op r2, r1.
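  /// (Illustrative: for "r0 = op r1, r2" with r1 tied to r0, commuting yields
  ///  "r0 = op r2, r1"; DstIdx returns the operand index whose register will
  ///  become the destination after commuting.)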
  static bool CommuteChangesDestination(MachineInstr *DefMI,
                                        const TargetInstrDesc &TID,
                                        unsigned SrcReg,
                                        const TargetInstrInfo *TII,
                                        unsigned &DstIdx) {
    if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
      return false;
    if (!DefMI->getOperand(1).isReg() ||
        DefMI->getOperand(1).getReg() != SrcReg)
      return false;
    unsigned DefIdx;
    if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
      return false;
    unsigned SrcIdx1, SrcIdx2;
    if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
      return false;
    if (SrcIdx1 == 1 && SrcIdx2 == 2) {
      DstIdx = 2;
      return true;
    }
    return false;
  }
  /// CommuteToFoldReload -
  /// Look for
  /// r1 = load fi#1
  /// r1 = op r1, r2<kill>
  /// store r1, fi#1
  ///
  /// If op is commutable and r2 is killed, then we can xform these to
  /// r2 = op r2, fi#1
  /// store r2, fi#1
  bool CommuteToFoldReload(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           unsigned VirtReg, unsigned SrcReg, int SS,
                           AvailableSpills &Spills,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           const TargetRegisterInfo *TRI,
                           VirtRegMap &VRM) {
    if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
      return false;

    MachineFunction &MF = *MBB.getParent();
    MachineInstr &MI = *MII;
    MachineBasicBlock::iterator DefMII = prior(MII);
    MachineInstr *DefMI = DefMII;
    const TargetInstrDesc &TID = DefMI->getDesc();
    unsigned NewDstIdx;
    if (DefMII != MBB.begin() &&
        TID.isCommutable() &&
        CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
      MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
      unsigned NewReg = NewDstMO.getReg();
      if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
        return false;
      MachineInstr *ReloadMI = prior(DefMII);
      int FrameIdx;
      unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
      if (DestReg != SrcReg || FrameIdx != SS)
        return false;
      int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
      if (UseIdx == -1)
        return false;
      unsigned DefIdx;
      if (!DefMI->isRegTiedToDefOperand(UseIdx, &DefIdx))
        return false;
      assert(DefMI->getOperand(DefIdx).isReg() &&
             DefMI->getOperand(DefIdx).getReg() == SrcReg);

      // Now commute def instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
      if (!CommutedMI)
        return false;
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(NewDstIdx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
      // Not needed since foldMemoryOperand returns new MI.
      MF.DeleteMachineInstr(CommutedMI);
      if (!FoldedMI)
        return false;

      VRM.addSpillSlotUse(SS, FoldedMI);
      VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
      // Insert new def MI and spill MI.
      const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
      TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
      MII = prior(MII);
      MachineInstr *StoreMI = MII;
      VRM.addSpillSlotUse(SS, StoreMI);
      VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      MII = MBB.insert(MII, FoldedMI);  // Update MII to backtrack.

      // Delete all 3 old instructions.
      InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(ReloadMI);
      MBB.erase(ReloadMI);
      InvalidateKills(*DefMI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(DefMI);
      MBB.erase(DefMI);
      InvalidateKills(MI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(&MI);
      MBB.erase(&MI);

      // If NewReg was previously holding value of some SS, it's now clobbered.
      // This has to be done now because it's a physical register. When this
      // instruction is re-visited, it's ignored.
      Spills.ClobberPhysReg(NewReg);

      ++NumCommutes;
      return true;
    }

    return false;
  }
  /// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
  /// the last store to the same slot is now dead. If so, remove the last store.
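  /// (Illustrative:
  ///    store %r1 -> <ss#0>    ; last store, slot never read in between
  ///    store %r2 -> <ss#0>    ; new spill to the same slot
  ///  makes the first store dead, so it is deleted.)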
  void SpillRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           int Idx, unsigned PhysReg, int StackSlot,
                           const TargetRegisterClass *RC,
                           bool isAvailable, MachineInstr *&LastStore,
                           AvailableSpills &Spills,
                           SmallSet<MachineInstr*, 4> &ReMatDefs,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {

    TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
    MachineInstr *StoreMI = next(MII);
    VRM.addSpillSlotUse(StackSlot, StoreMI);
    DEBUG(errs() << "Store:\t" << *StoreMI);

    // If there is a dead store to this stack slot, nuke it now.
    if (LastStore) {
      DEBUG(errs() << "Removed dead store:\t" << *LastStore);
      ++NumDSE;
      SmallVector<unsigned, 2> KillRegs;
      InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
      MachineBasicBlock::iterator PrevMII = LastStore;
      bool CheckDef = PrevMII != MBB.begin();
      if (CheckDef)
        --PrevMII;
      VRM.RemoveMachineInstrFromMaps(LastStore);
      MBB.erase(LastStore);
      if (CheckDef) {
        // Look at defs of killed registers on the store. Mark the defs
        // as dead since the store has been deleted and they aren't
        // being reused.
        for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
          bool HasOtherDef = false;
          if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
            MachineInstr *DeadDef = PrevMII;
            if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
              // FIXME: This assumes a remat def does not have side effects.
              VRM.RemoveMachineInstrFromMaps(DeadDef);
              MBB.erase(DeadDef);
              ++NumDRM;
            }
          }
        }
      }
    }

    LastStore = next(MII);

    // If the stack slot value was previously available in some other
    // register, change it now. Otherwise, make the register available,
    // in PhysReg.
    Spills.ModifyStackSlotOrReMat(StackSlot);
    Spills.ClobberPhysReg(PhysReg);
    Spills.addAvailable(StackSlot, PhysReg, isAvailable);
    ++NumStores;
  }
  /// TransferDeadness - An identity copy definition is dead and it's being
  /// removed. Find the last def or use and mark it as dead / kill.
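  /// (Illustrative: when the identity copy "%r1 = %r1" is deleted, the nearest
  ///  preceding def or use of %r1 in this block becomes the new dead-def /
  ///  kill point.)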
  void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                        unsigned Reg, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        VirtRegMap &VRM) {
    SmallPtrSet<MachineInstr*, 4> Seens;
    SmallVector<std::pair<MachineInstr*, int>,8> Refs;
    for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
           RE = RegInfo->reg_end(); RI != RE; ++RI) {
      MachineInstr *UDMI = &*RI;
      if (UDMI->getParent() != MBB)
        continue;
      DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
      if (DI == DistanceMap.end() || DI->second > CurDist)
        continue;
      if (Seens.insert(UDMI))
        Refs.push_back(std::make_pair(UDMI, DI->second));
    }

    if (Refs.empty())
      return;
    std::sort(Refs.begin(), Refs.end(), RefSorter());

    while (!Refs.empty()) {
      MachineInstr *LastUDMI = Refs.back().first;
      Refs.pop_back();

      MachineOperand *LastUD = NULL;
      for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = LastUDMI->getOperand(i);
        if (!MO.isReg() || MO.getReg() != Reg)
          continue;
        if (!LastUD || (LastUD->isUse() && MO.isDef()))
          LastUD = &MO;
        if (LastUDMI->isRegTiedToDefOperand(i))
          break;
      }
      if (LastUD->isDef()) {
        // If the instruction has no side effect, delete it and propagate
        // backward further. Otherwise, mark it dead and we are done.
        if (!TII->isDeadInstruction(LastUDMI)) {
          LastUD->setIsDead();
          break;
        }
        VRM.RemoveMachineInstrFromMaps(LastUDMI);
        MBB->erase(LastUDMI);
      } else {
        LastUD->setIsKill();
        RegKills.set(Reg);
        KillOps[Reg] = LastUD;
        break;
      }
    }
  }
  /// rewriteMBB - Keep track of which spills are available even after the
  /// register allocator is done with them. If possible, avoid reloading vregs.
  void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
                  LiveIntervals *LIs,
                  AvailableSpills &Spills, BitVector &RegKills,
                  std::vector<MachineOperand*> &KillOps) {

    DEBUG(errs() << "\n**** Local spiller rewriting MBB '"
          << MBB.getBasicBlock()->getName() << "':\n");

    MachineFunction &MF = *MBB.getParent();

    // MaybeDeadStores - When we need to write a value back into a stack slot,
    // keep track of the inserted store. If the stack slot value is never read
    // (because the value was used from some available register, for example), and
    // subsequently stored to, the original store is dead. This map keeps track
    // of inserted stores that are not used. If we see a subsequent store to the
    // same stack slot, the original store is deleted.
    std::vector<MachineInstr*> MaybeDeadStores;
    MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

    // ReMatDefs - These are rematerializable def MIs which are not deleted.
    SmallSet<MachineInstr*, 4> ReMatDefs;

    SmallSet<unsigned, 2> KilledMIRegs;

    // Clear kill info.
    RegKills.reset();
    KillOps.clear();
    KillOps.resize(TRI->getNumRegs(), NULL);

    unsigned Dist = 0;
    DistanceMap.clear();
    for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
         MII != E; ) {
      MachineBasicBlock::iterator NextMII = next(MII);

      VirtRegMap::MI2VirtMapTy::const_iterator I, End;
      bool Erased = false;
      bool BackTracked = false;
      if (OptimizeByUnfold(MBB, MII,
                           MaybeDeadStores, Spills, RegKills, KillOps, VRM))
        NextMII = next(MII);

      MachineInstr &MI = *MII;

      if (VRM.hasEmergencySpills(&MI)) {
        // Spill physical register(s) in the rare case the allocator has run out
        // of registers to allocate.
        SmallSet<int, 4> UsedSS;
        std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
        for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
          unsigned PhysReg = EmSpills[i];
          const TargetRegisterClass *RC =
            TRI->getPhysicalRegisterRegClass(PhysReg);
          assert(RC && "Unable to determine register class!");
          int SS = VRM.getEmergencySpillSlot(RC);
          if (UsedSS.count(SS))
            llvm_unreachable("Need to spill more than one physical registers!");
          UsedSS.insert(SS);
          TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
          MachineInstr *StoreMI = prior(MII);
          VRM.addSpillSlotUse(SS, StoreMI);

          // Back-schedule reloads and remats.
          MachineBasicBlock::iterator InsertLoc =
            ComputeReloadLoc(next(MII), MBB.begin(), PhysReg, TRI, false,
                             SS, TII, MF);

          TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SS, RC);

          MachineInstr *LoadMI = prior(InsertLoc);
          VRM.addSpillSlotUse(SS, LoadMI);
          ++NumPSpills;
          DistanceMap.insert(std::make_pair(LoadMI, Dist++));
        }
        NextMII = next(MII);
      }

      // Insert restores here if asked to.
      if (VRM.isRestorePt(&MI)) {
        std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
        for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
          unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
          if (!VRM.getPreSplitReg(VirtReg))
            continue; // Split interval spilled again.
          unsigned Phys = VRM.getPhys(VirtReg);
          RegInfo->setPhysRegUsed(Phys);

          // Check if the value being restored is available. If so, it must be
          // from a predecessor BB that fallthrough into this BB. We do not
          // expect:
          // BB1:
          // r1 = load fi#1
          // ...
          //    = r1<kill>
          // ... # r1 not clobbered
          // ...
          //    = load fi#1
          bool DoReMat = VRM.isReMaterialized(VirtReg);
          int SSorRMId = DoReMat
            ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
          if (InReg == Phys) {
            // If the value is already available in the expected register, save
            // a reload / remat.
            if (DoReMat)
              DEBUG(errs() << "Reusing RM#"
                           << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
            else
              DEBUG(errs() << "Reusing SS#" << SSorRMId);
            DEBUG(errs() << " from physreg "
                         << TRI->getName(InReg) << " for vreg"
                         << VirtReg <<" instead of reloading into physreg "
                         << TRI->getName(Phys) << '\n');
            ++NumOmitted;
            continue;
          } else if (InReg && InReg != Phys) {
            if (DoReMat)
              DEBUG(errs() << "Reusing RM#"
                           << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
            else
              DEBUG(errs() << "Reusing SS#" << SSorRMId);
            DEBUG(errs() << " from physreg "
                         << TRI->getName(InReg) << " for vreg"
                         << VirtReg <<" by copying it into physreg "
                         << TRI->getName(Phys) << '\n');

            // If the reloaded / remat value is available in another register,
            // copy it to the desired register.

            // Back-schedule reloads and remats.
            MachineBasicBlock::iterator InsertLoc =
              ComputeReloadLoc(MII, MBB.begin(), Phys, TRI, DoReMat,
                               SSorRMId, TII, MF);

            TII->copyRegToReg(MBB, InsertLoc, Phys, InReg, RC, RC);

            // This invalidates Phys.
            Spills.ClobberPhysReg(Phys);
            // Remember it's available.
            Spills.addAvailable(SSorRMId, Phys);

            MachineInstr *CopyMI = prior(InsertLoc);
            MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
            KillOpnd->setIsKill();
            UpdateKills(*CopyMI, TRI, RegKills, KillOps);

            DEBUG(errs() << '\t' << *CopyMI);
            ++NumCopified;
            continue;
          }

          // Back-schedule reloads and remats.
          MachineBasicBlock::iterator InsertLoc =
            ComputeReloadLoc(MII, MBB.begin(), Phys, TRI, DoReMat,
                             SSorRMId, TII, MF);

          if (VRM.isReMaterialized(VirtReg)) {
            ReMaterialize(MBB, InsertLoc, Phys, VirtReg, TII, TRI, VRM);
          } else {
            const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
            TII->loadRegFromStackSlot(MBB, InsertLoc, Phys, SSorRMId, RC);
            MachineInstr *LoadMI = prior(InsertLoc);
            VRM.addSpillSlotUse(SSorRMId, LoadMI);
            ++NumLoads;
            DistanceMap.insert(std::make_pair(LoadMI, Dist++));
          }

          // This invalidates Phys.
          Spills.ClobberPhysReg(Phys);
          // Remember it's available.
          Spills.addAvailable(SSorRMId, Phys);

          UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
          DEBUG(errs() << '\t' << *prior(MII));
        }
        NextMII = next(MII);
      }

      // Insert spills here if asked to.
      if (VRM.isSpillPt(&MI)) {
        std::vector<std::pair<unsigned,bool> > &SpillRegs =
          VRM.getSpillPtSpills(&MI);
        for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
          unsigned VirtReg = SpillRegs[i].first;
          bool isKill = SpillRegs[i].second;
          if (!VRM.getPreSplitReg(VirtReg))
            continue; // Split interval spilled again.
          const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
          unsigned Phys = VRM.getPhys(VirtReg);
          int StackSlot = VRM.getStackSlot(VirtReg);
          TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
          MachineInstr *StoreMI = next(MII);
          VRM.addSpillSlotUse(StackSlot, StoreMI);
          DEBUG(errs() << "Store:\t" << *StoreMI);
          VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
        }
        NextMII = next(MII);
      }
      /// ReusedOperands - Keep track of operand reuse in case we need to undo
      /// reuse.
      ReuseInfo ReusedOperands(MI, TRI);
      SmallVector<unsigned, 4> VirtUseOps;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (!MO.isReg() || MO.getReg() == 0)
          continue;   // Ignore non-register operands.

        unsigned VirtReg = MO.getReg();
        if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
          // Ignore physregs for spilling, but remember that it is used by this
          // function.
          RegInfo->setPhysRegUsed(VirtReg);
          continue;
        }

        // We want to process implicit virtual register uses first.
        if (MO.isImplicit())
          // If the virtual register is implicitly defined, emit an implicit_def
          // before so the scavenger knows it's "defined".
          // FIXME: This is a horrible hack done by the register allocator to
          // remat a definition with a virtual register operand.
          VirtUseOps.insert(VirtUseOps.begin(), i);
        else
          VirtUseOps.push_back(i);
      }
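      // Since implicit-use indices were inserted at the front of VirtUseOps,
      // the loop below processes them first, so their reloads are emitted
      // ahead of those for the explicit operands.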
      // Process all of the spilled uses and all non-spilled reg references.
      SmallVector<int, 2> PotentialDeadStoreSlots;
      KilledMIRegs.clear();
      for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
        unsigned i = VirtUseOps[j];
        MachineOperand &MO = MI.getOperand(i);
        unsigned VirtReg = MO.getReg();
        assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
               "Not a virtual register?");

        unsigned SubIdx = MO.getSubReg();
        if (VRM.isAssignedReg(VirtReg)) {
          // This virtual register was assigned a physreg!
          unsigned Phys = VRM.getPhys(VirtReg);
          RegInfo->setPhysRegUsed(Phys);
          if (MO.isDef())
            ReusedOperands.markClobbered(Phys);
          unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);
          if (VRM.isImplicitlyDefined(VirtReg))
            // FIXME: Is this needed?
            BuildMI(MBB, &MI, MI.getDebugLoc(),
                    TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
          continue;
        }

        // This virtual register is now known to be a spilled value.
        if (!MO.isUse())
          continue;  // Handle defs in the loop below (handle use&def here though)
        bool AvoidReload = MO.isUndef();
        // Check if it is defined by an implicit def. It should not be spilled.
        // Note, this is for correctness reasons. e.g.
        // 8   %reg1024<def> = IMPLICIT_DEF
        // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
        // The live range [12, 14) is not part of the r1024 live interval since
        // it's defined by an implicit def. It will not conflict with the live
        // interval of r1025. Now suppose both registers are spilled; you can
        // easily see a situation where both registers are reloaded before
        // the INSERT_SUBREG and both target registers would overlap.
        bool DoReMat = VRM.isReMaterialized(VirtReg);
        int SSorRMId = DoReMat
          ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
        int ReuseSlot = SSorRMId;

        // Check to see if this stack slot is available.
        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);

        // If this is a sub-register use, make sure the reuse register is in the
        // right register class. For example, for x86 not all of the 32-bit
        // registers have accessible sub-registers.
        // Similarly so for EXTRACT_SUBREG. Consider this:
        // EDI = op
        // MOV32_mr fi#1, EDI
        // ...
        //       = EXTRACT_SUBREG fi#1
        // fi#1 is available in EDI, but it cannot be reused because it's not in
        // the right register file.
        if (PhysReg && !AvoidReload &&
            (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          if (!RC->contains(PhysReg))
            PhysReg = 0;
        }
        if (PhysReg && !AvoidReload) {
          // This spilled operand might be part of a two-address operand. If this
          // is the case, then changing it will necessarily require changing the
          // def part of the instruction as well. However, in some cases, we
          // aren't allowed to modify the reused register. If none of these cases
          // apply, reuse it.
          bool CanReuse = true;
          bool isTied = MI.isRegTiedToDefOperand(i);
          if (isTied) {
            // Okay, we have a two address operand. We can reuse this physreg as
            // long as we are allowed to clobber the value and there isn't an
            // earlier def that has already clobbered the physreg.
            CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
              Spills.canClobberPhysReg(PhysReg);
          }
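          // e.g. given a tied use such as "%EAX = ADD32ri %EAX<tied>, 1"
          // whose operand was reloaded from fi#0, the physreg currently
          // caching fi#0 may only be reused if that cached value is allowed
          // to be clobbered, since the ADD overwrites it in place.
          // (Illustrative example; the opcode and register names here are
          // hypothetical.)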
          if (CanReuse) {
            // If this stack slot value is already available, reuse it!
            if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
              DEBUG(errs() << "Reusing RM#"
                           << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
            else
              DEBUG(errs() << "Reusing SS#" << ReuseSlot);
            DEBUG(errs() << " from physreg "
                         << TRI->getName(PhysReg) << " for vreg"
                         << VirtReg <<" instead of reloading into physreg "
                         << TRI->getName(VRM.getPhys(VirtReg)) << '\n');
            unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
            MI.getOperand(i).setReg(RReg);
            MI.getOperand(i).setSubReg(0);

            // The only technical detail we have is that we don't know that
            // PhysReg won't be clobbered by a reloaded stack slot that occurs
            // later in the instruction. In particular, consider 'op V1, V2'.
            // If V1 is available in physreg R0, we would choose to reuse it
            // here, instead of reloading it into the register the allocator
            // indicated (say R1). However, V2 might have to be reloaded
            // later, and it might indicate that it needs to live in R0. When
            // this occurs, we need to have information available that
            // indicates it is safe to use R1 for the reload instead of R0.
            //
            // To further complicate matters, we might conflict with an alias,
            // or R0 and R1 might not be compatible with each other. In this
            // case, we actually insert a reload for V1 in R1, ensuring that
            // we can get at R0 or its alias.
            ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                    VRM.getPhys(VirtReg), VirtReg);
            if (isTied)
              // Only mark it clobbered if this is a use&def operand.
              ReusedOperands.markClobbered(PhysReg);
            ++NumReused;

            if (MI.getOperand(i).isKill() &&
                ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
              // The store of this spilled value is potentially dead, but we
              // won't know for certain until we've confirmed that the re-use
              // above is valid, which means waiting until the other operands
              // are processed. For now we just track the spill slot; we'll
              // remove it after the other operands are processed if valid.
              PotentialDeadStoreSlots.push_back(ReuseSlot);
            }

            // Mark it isKill if there are no other uses of the same virtual
            // register and it's not a two-address operand. IsKill will be
            // unset if the reg is reused.
            if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
              MI.getOperand(i).setIsKill();
              KilledMIRegs.insert(VirtReg);
            }

            continue;
          }  // CanReuse
          // Otherwise we have a situation where we have a two-address
          // instruction whose mod/ref operand needs to be reloaded. This
          // reload is already available in some register "PhysReg", but if we
          // used PhysReg as the operand to our 2-addr instruction, the
          // instruction would modify PhysReg. This isn't cool if something
          // later uses PhysReg and expects to get its initial value.
          //
          // To avoid this problem, and to avoid doing a load right after a
          // store, we emit a copy from PhysReg into the designated register
          // for this operand.
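          // e.g. "%EBX = ADD32ri %EBX<tied>, 4" where the tied operand's
          // value is cached in %EAX but %EAX must keep its value: copy %EAX
          // into the designated register %EBX and let the ADD clobber %EBX.
          // (Illustrative example; the opcode and register names here are
          // hypothetical.)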
          unsigned DesignatedReg = VRM.getPhys(VirtReg);
          assert(DesignatedReg && "Must map virtreg to physreg!");

          // Note that, if we reused a register for a previous operand, the
          // register we want to reload into might not actually be
          // available. If this occurs, use the register indicated by the
          // reuser.
          if (ReusedOperands.hasReuses())
            DesignatedReg = ReusedOperands.GetRegForReload(VirtReg,
                                                           DesignatedReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);

          // If the mapped designated register is actually the physreg we have
          // incoming, we don't need to insert a dead copy.
          if (DesignatedReg == PhysReg) {
            // If this stack slot value is already available, reuse it!
            if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
              DEBUG(errs() << "Reusing RM#"
                           << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
            else
              DEBUG(errs() << "Reusing SS#" << ReuseSlot);
            DEBUG(errs() << " from physreg " << TRI->getName(PhysReg)
                         << " for vreg" << VirtReg
                         << " instead of reloading into same physreg.\n");
            unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
            MI.getOperand(i).setReg(RReg);
            MI.getOperand(i).setSubReg(0);
            ReusedOperands.markClobbered(RReg);
            ++NumReused;
            continue;
          }

          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          RegInfo->setPhysRegUsed(DesignatedReg);
          ReusedOperands.markClobbered(DesignatedReg);

          // Back-schedule reloads and remats.
          MachineBasicBlock::iterator InsertLoc =
            ComputeReloadLoc(&MI, MBB.begin(), PhysReg, TRI, DoReMat,
                             SSorRMId, TII, MF);

          TII->copyRegToReg(MBB, InsertLoc, DesignatedReg, PhysReg, RC, RC);

          MachineInstr *CopyMI = prior(InsertLoc);
          UpdateKills(*CopyMI, TRI, RegKills, KillOps);

          // This invalidates DesignatedReg.
          Spills.ClobberPhysReg(DesignatedReg);

          Spills.addAvailable(ReuseSlot, DesignatedReg);
          unsigned RReg =
            SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);
          DEBUG(errs() << '\t' << *prior(MII));
          ++NumReused;
          continue;
        } // if (PhysReg)
        // Otherwise, reload it and remember that we have it.
        PhysReg = VRM.getPhys(VirtReg);
        assert(PhysReg && "Must map virtreg to physreg!");

        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available. If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                             Spills, MaybeDeadStores, RegKills, KillOps, VRM);

        RegInfo->setPhysRegUsed(PhysReg);
        ReusedOperands.markClobbered(PhysReg);
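        // An <undef> use needs no actual reload; the operand is simply
        // rewritten below to the assigned physreg (counted as NumAvoided).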
        if (AvoidReload)
          ++NumAvoided;
        else {
          // Back-schedule reloads and remats.
          MachineBasicBlock::iterator InsertLoc =
            ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, DoReMat,
                             SSorRMId, TII, MF);

          if (DoReMat) {
            ReMaterialize(MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, VRM);
          } else {
            const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
            TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SSorRMId, RC);
            MachineInstr *LoadMI = prior(InsertLoc);
            VRM.addSpillSlotUse(SSorRMId, LoadMI);
            ++NumLoads;
            DistanceMap.insert(std::make_pair(LoadMI, Dist++));
          }
          // This invalidates PhysReg.
          Spills.ClobberPhysReg(PhysReg);

          // Any stores to this stack slot are not dead anymore.
          if (!DoReMat)
            MaybeDeadStores[SSorRMId] = NULL;
          Spills.addAvailable(SSorRMId, PhysReg);
          // Assumes this is the last use. IsKill will be unset if the reg is
          // reused unless it's a two-address operand.
          if (!MI.isRegTiedToDefOperand(i) &&
              KilledMIRegs.count(VirtReg) == 0) {
            MI.getOperand(i).setIsKill();
            KilledMIRegs.insert(VirtReg);
          }

          UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
          DEBUG(errs() << '\t' << *prior(InsertLoc));
        }
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);
      }
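      // Note: RegKills/KillOps record, per physreg, which operand last
      // killed it. Every transformation here that inserts, moves, or erases
      // instructions goes through UpdateKills/InvalidateKills so that these
      // records stay consistent for the dead-store heuristics below.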
      // Ok - now we can remove stores that have been confirmed dead.
      for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
        // This was the last use and the spilled value is still available
        // for reuse. That means the spill was unnecessary!
        int PDSSlot = PotentialDeadStoreSlots[j];
        MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
        if (DeadStore) {
          DEBUG(errs() << "Removed dead store:\t" << *DeadStore);
          InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          MaybeDeadStores[PDSSlot] = NULL;
          ++NumDSE;
        }
      }

      DEBUG(errs() << '\t' << MI);
      // If we have folded references to memory operands, make sure we clear all
      // physical registers that may contain the value of the spilled virtual
      // register.
      SmallSet<int, 2> FoldedSS;
      for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
        unsigned VirtReg = I->second.first;
        VirtRegMap::ModRef MR = I->second.second;
        DEBUG(errs() << "Folded vreg: " << VirtReg << "  MR: " << MR);

        // MI2VirtMap can be updated, which would invalidate the iterator, so
        // increment the iterator first.
        ++I;
        int SS = VRM.getStackSlot(VirtReg);
        if (SS == VirtRegMap::NO_STACK_SLOT)
          continue;
        FoldedSS.insert(SS);
        DEBUG(errs() << " - StackSlot: " << SS << "\n");

        // If this folded instruction is just a use, check to see if it's a
        // straight load from the virt reg slot.
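        // e.g. "%EAX = MOV32rm fi#2" when fi#2 is currently cached in %ECX
        // can become "%EAX = MOV32rr %ECX", or disappear entirely when the
        // destination already holds the value. (Illustrative example; the
        // opcode and register names here are hypothetical.)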
        if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
          int FrameIdx;
          unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
          if (DestReg && FrameIdx == SS) {
            // If this spill slot is available, turn it into a copy (or nothing)
            // instead of leaving it as a load!
            if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
              DEBUG(errs() << "Promoted Load To Copy: " << MI);
              if (DestReg != InReg) {
                const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
                TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
                MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
                unsigned SubIdx = DefMO->getSubReg();
                // Revisit the copy so we make sure to notice the effects of the
                // operation on the destreg (either needing to RA it if it's
                // virtual or needing to clobber any values if it's physical).
                NextMII = &MI;
                --NextMII;  // backtrack to the copy.
                // Propagate the sub-register index over.
                if (SubIdx) {
                  DefMO = NextMII->findRegisterDefOperand(DestReg);
                  DefMO->setSubReg(SubIdx);
                }

                // Mark the copy's source operand as killed.
                MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
                KillOpnd->setIsKill();

                BackTracked = true;
              } else {
                DEBUG(errs() << "Removing now-noop copy: " << MI);
                // Unset the last kill since it's being reused.
                InvalidateKill(InReg, TRI, RegKills, KillOps);
                Spills.disallowClobberPhysReg(InReg);
              }

              InvalidateKills(MI, TRI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              goto ProcessNextInst;
            }
          } else {
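            // Not a straight reload of the slot: try to unfold the memory
            // operand instead, so the value cached in PhysReg can feed the
            // computation directly rather than being re-read from memory.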
            unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
            SmallVector<MachineInstr*, 4> NewMIs;
            if (PhysReg &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
              MBB.insert(MII, NewMIs[0]);
              InvalidateKills(MI, TRI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              goto ProcessNextInst;
            }
          }
        }
        // If this reference is not a use, any previous store is now dead.
        // Otherwise, the store to this stack slot is not dead anymore.
        MachineInstr* DeadStore = MaybeDeadStores[SS];
        if (DeadStore) {
          bool isDead = !(MR & VirtRegMap::isRef);
          MachineInstr *NewStore = NULL;
          if (MR & VirtRegMap::isModRef) {
            unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
            SmallVector<MachineInstr*, 4> NewMIs;
            // We can reuse this physreg as long as we are allowed to clobber
            // the value and there isn't an earlier def that has already
            // clobbered the physreg.
            if (PhysReg &&
                !ReusedOperands.isClobbered(PhysReg) &&
                Spills.canClobberPhysReg(PhysReg) &&
                !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
              MachineOperand *KillOpnd =
                DeadStore->findRegisterUseOperand(PhysReg, true);
              // Note, if the store is storing a sub-register, it's possible the
              // super-register is needed below.
              if (KillOpnd && !KillOpnd->getSubReg() &&
                  TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,
                                           NewMIs)) {
                MBB.insert(MII, NewMIs[0]);
                NewStore = NewMIs[1];
                MBB.insert(MII, NewStore);
                VRM.addSpillSlotUse(SS, NewStore);
                InvalidateKills(MI, TRI, RegKills, KillOps);
                VRM.RemoveMachineInstrFromMaps(&MI);
                MBB.erase(&MI);
                Erased = true;
                --NextMII;
                --NextMII;  // backtrack to the unfolded instruction.
                BackTracked = true;
                isDead = true;
                ++NumSUnfold;
              }
            }
          }

          if (isDead) {  // Previous store is dead.
            // If we get here, the store is dead, nuke it now.
            DEBUG(errs() << "Removed dead store:\t" << *DeadStore);
            InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(DeadStore);
            MBB.erase(DeadStore);
            if (!NewStore)
              ++NumDSE;
          }

          MaybeDeadStores[SS] = NULL;
          if (NewStore) {
            // Treat this store as a spill merged into a copy. That makes the
            // stack slot value available.
            VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
            goto ProcessNextInst;
          }
        }
        // If the spill slot value is available, and this is a new definition of
        // the value, the value is not available anymore.
        if (MR & VirtRegMap::isMod) {
          // Notice that the value in this stack slot has been modified.
          Spills.ModifyStackSlotOrReMat(SS);

          // If this is *just* a mod of the value, check to see if this is just a
          // store to the spill slot (i.e. the spill got merged into the copy). If
          // so, realize that the vreg is available now, and add the store to the
          // MaybeDeadStore info.
          int StackSlot;
          if (!(MR & VirtRegMap::isRef)) {
            if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
              assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                     "Src hasn't been allocated yet?");
              if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
                                      Spills, RegKills, KillOps, TRI, VRM)) {
                NextMII = next(MII);
                BackTracked = true;
                goto ProcessNextInst;
              }

              // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
              // this as a potentially dead store in case there is a subsequent
              // store into the stack slot without a read from it.
              MaybeDeadStores[StackSlot] = &MI;

              // If the stack slot value was previously available in some other
              // register, change it now. Otherwise, make the register
              // available in PhysReg.
              Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
            }
          }
        }
      }
      // Process all of the spilled defs.
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (!(MO.isReg() && MO.getReg() && MO.isDef()))
          continue;

        unsigned VirtReg = MO.getReg();
        if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
          // Check to see if this is a noop copy. If so, eliminate the
          // instruction before considering the dest reg to be changed.
          // Also check if it's copying from an "undef"; if so, we can't
          // eliminate this or else the undef marker is lost and it will
          // confuse the scavenger. This is extremely rare.
          unsigned Src, Dst, SrcSR, DstSR;
          if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst &&
              !MI.findRegisterUseOperand(Src)->isUndef()) {
            ++NumDCE;
            DEBUG(errs() << "Removing now-noop copy: " << MI);
            SmallVector<unsigned, 2> KillRegs;
            InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
            if (MO.isDead() && !KillRegs.empty()) {
              // Source register or an implicit super/sub-register use is killed.
              assert(KillRegs[0] == Dst ||
                     TRI->isSubRegister(KillRegs[0], Dst) ||
                     TRI->isSuperRegister(KillRegs[0], Dst));
              // Last def is now dead.
              TransferDeadness(&MBB, Dist, Src, RegKills, KillOps, VRM);
            }
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            Spills.disallowClobberPhysReg(VirtReg);
            goto ProcessNextInst;
          }

          // If it's not a no-op copy, it clobbers the value in the destreg.
          Spills.ClobberPhysReg(VirtReg);
          ReusedOperands.markClobbered(VirtReg);

          // Check to see if this instruction is a load from a stack slot into
          // a register. If so, this provides the stack slot value in the reg.
          int FrameIdx;
          if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
            assert(DestReg == VirtReg && "Unknown load situation!");

            // If it is a folded reference, then it's not safe to clobber.
            bool Folded = FoldedSS.count(FrameIdx);
            // Otherwise, if it wasn't available, remember that it is now!
            Spills.addAvailable(FrameIdx, DestReg, !Folded);
            goto ProcessNextInst;
          }

          continue;
        }
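        // What remains are defs of spilled virtual registers: rewrite the
        // def to its assigned physreg, then spill the result back to the
        // stack slot (SpillRegToStackSlot below may merge the store with a
        // previous one or elide it entirely).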
        unsigned SubIdx = MO.getSubReg();
        bool DoReMat = VRM.isReMaterialized(VirtReg);
        if (DoReMat)
          ReMatDefs.insert(&MI);

        // The only vregs left are stack slot definitions.
        int StackSlot = VRM.getStackSlot(VirtReg);
        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);

        // If this def is part of a two-address operand, make sure to execute
        // the store from the correct physical register.
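        // e.g. if the def writes the low 8 bits of a tied operand assigned
        // %AL, the store must be issued from the enclosing super-register
        // (%EAX) located by findSuperReg below. (Illustrative example; the
        // register names here are hypothetical.)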
        unsigned PhysReg;
        unsigned TiedOp;
        if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
          PhysReg = MI.getOperand(TiedOp).getReg();
          if (SubIdx) {
            unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
            assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                   "Can't find corresponding super-register!");
            PhysReg = SuperReg;
          }
        } else {
          PhysReg = VRM.getPhys(VirtReg);
          if (ReusedOperands.isClobbered(PhysReg)) {
            // Another def has taken the assigned physreg. It must have been a
            // use&def which got it due to reuse. Undo the reuse!
            PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);
          }
        }

        assert(PhysReg && "VR not assigned a physical register?");
        RegInfo->setPhysRegUsed(PhysReg);
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        ReusedOperands.markClobbered(RReg);
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);

        if (!MO.isDead()) {
          MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
          SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                              LastStore, Spills, ReMatDefs, RegKills, KillOps,
                              VRM);
          NextMII = next(MII);

          // Check to see if this is a noop copy. If so, eliminate the
          // instruction before considering the dest reg to be changed.
          {
            unsigned Src, Dst, SrcSR, DstSR;
            if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
              ++NumDCE;
              DEBUG(errs() << "Removing now-noop copy: " << MI);
              InvalidateKills(MI, TRI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              UpdateKills(*LastStore, TRI, RegKills, KillOps);
              goto ProcessNextInst;
            }
          }
        }
      }
    ProcessNextInst:
      // Delete dead instructions without side effects.
      if (!Erased && !BackTracked && TII->isDeadInstruction(&MI)) {
        InvalidateKills(MI, TRI, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(&MI);
        MBB.erase(&MI);
        Erased = true;
      }
      if (!Erased)
        DistanceMap.insert(std::make_pair(&MI, Dist++));
      if (!Erased && !BackTracked) {
        for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
          UpdateKills(*II, TRI, RegKills, KillOps);
      }

      MII = NextMII;
    }
  }
};

} // end anonymous namespace
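// createVirtRegRewriter - Return the rewriter selected by the -rewriter
// command line option; the local rewriter is the default.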
llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
  switch (RewriterOpt) {
  default: llvm_unreachable("Unreachable!");
  case local:
    return new LocalRewriter();
  case trivial:
    return new TrivialRewriter();
  }
}