//===- RegAllocFast.cpp - A fast register allocator for debug code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This register allocator allocates registers to a basic block at a
/// time, attempting to keep values in registers and reusing registers as
/// appropriate.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/RegAllocFast.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocCommon.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <tuple>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "regalloc"
STATISTIC(NumStores, "Number of stores added");
STATISTIC(NumLoads, "Number of loads added");
STATISTIC(NumCoalesced, "Number of copies coalesced");
// FIXME: Remove this switch when all testcases are fixed!
static cl::opt<bool> IgnoreMissingDefs("rafast-ignore-missing-defs",
                                       cl::Hidden);

static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator",
                                     createFastRegisterAllocator);

namespace {
/// Assigns ascending indexes to the instructions in a machine basic block.
/// The index can be used to determine dominance between instructions in the
/// same MBB.
class InstrPosIndexes {
public:
  void unsetInitialized() { IsInitialized = false; }
  void init(const MachineBasicBlock &MBB) {
    CurMBB = &MBB;
    Instr2PosIndex.clear();
    uint64_t LastIndex = 0;
    for (const MachineInstr &MI : MBB) {
      LastIndex += InstrDist;
      Instr2PosIndex[&MI] = LastIndex;
    }
  }
  /// Set \p Index to the index of \p MI. If \p MI was newly inserted, try to
  /// assign it an index without affecting the indexes of existing
  /// instructions. Return true if all instruction indexes were reassigned.
  bool getIndex(const MachineInstr &MI, uint64_t &Index) {
    if (!IsInitialized) {
      init(*MI.getParent());
      IsInitialized = true;
      Index = Instr2PosIndex.at(&MI);
      return true;
    }

    assert(MI.getParent() == CurMBB && "MI is not in CurMBB");
    auto It = Instr2PosIndex.find(&MI);
    if (It != Instr2PosIndex.end()) {
      Index = It->second;
      return false;
    }
    // Distance is the number of consecutive unassigned instructions, including
    // MI. Start is the first of them; End is the instruction after the last of
    // them.
    // e.g.
    // |Instruction|  A   |  B   |  C   |  MI  |  D   |  E   |
    // |   Index   | 1024 |      |      |      |      | 2048 |
    //
    // In this case, B, C, MI, D are unassigned. Distance is 4, Start is B, End
    // is E.
    unsigned Distance = 1;
    MachineBasicBlock::const_iterator Start = MI.getIterator(),
                                      End = std::next(Start);
    while (Start != CurMBB->begin() &&
           !Instr2PosIndex.count(&*std::prev(Start))) {
      --Start;
      ++Distance;
    }
    while (End != CurMBB->end() && !Instr2PosIndex.count(&*(End))) {
      ++End;
      ++Distance;
    }
    // LastIndex is initialized to the last used index prior to MI, or zero.
    // In the previous example, LastIndex is 1024 and EndIndex is 2048.
    uint64_t LastIndex =
        Start == CurMBB->begin() ? 0 : Instr2PosIndex.at(&*std::prev(Start));
    uint64_t Step;
    if (End == CurMBB->end())
      Step = static_cast<uint64_t>(InstrDist);
    else {
      // No instruction uses index zero.
      uint64_t EndIndex = Instr2PosIndex.at(&*End);
      assert(EndIndex > LastIndex && "Index must be ascending order");
      unsigned NumAvailableIndexes = EndIndex - LastIndex - 1;
      // We want the index gap between two adjacent instructions to be as even
      // as possible. Given A total available indexes, D consecutive unassigned
      // instructions, and step S:
      // |<- S-1 -> MI <- S-1 -> MI <- A-S*D ->|
      // There are S-1 available indexes between an unassigned instruction and
      // its predecessor. There are A-S*D available indexes between the last
      // unassigned instruction and its successor.
      // Ideally, we want
      //    S-1 = A-S*D
      // then
      //    S = (A+1)/(D+1)
      // A valid S must be an integer greater than zero, so
      //    S <= (A+1)/(D+1)
      // =>
      //    A-S*D >= S-1
      // That means we can safely use (A+1)/(D+1) as the step.
      // In the previous example, Step is 204 and the indexes of B, C, MI, D
      // become 1228, 1432, 1636, 1840.
      Step = (NumAvailableIndexes + 1) / (Distance + 1);
    }
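
    // Illustrative check (editor's addition, not in the original source): in
    // the example above, A = 2048 - 1024 - 1 = 1023 available indexes and
    // D = 4 unassigned instructions give S = (1023 + 1) / (4 + 1) = 204, so
    // B, C, MI, D get indexes 1228, 1432, 1636, 1840, leaving
    // 2048 - 1840 - 1 = 207 >= S - 1 spare indexes before E.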
    // Reassign indexes for all instructions if the number of newly inserted
    // instructions exceeds the available slots or all instructions are new.
    if (LLVM_UNLIKELY(!Step || (!LastIndex && Step == InstrDist))) {
      init(*CurMBB);
      Index = Instr2PosIndex.at(&MI);
      return true;
    }

    for (auto I = Start; I != End; ++I) {
      LastIndex += Step;
      Instr2PosIndex[&*I] = LastIndex;
    }
    Index = Instr2PosIndex.at(&MI);
    return false;
  }
private:
  bool IsInitialized = false;
  enum { InstrDist = 1024 };
  const MachineBasicBlock *CurMBB = nullptr;
  DenseMap<const MachineInstr *, uint64_t> Instr2PosIndex;
};
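
// Illustrative note (editor's addition, not in the original source): with
// InstrDist == 1024, a freshly numbered block leaves 1023 unused indexes
// between adjacent instructions, so getIndex() can usually slot newly
// inserted instructions in without renumbering the whole block.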

class RegAllocFastImpl {
public:
  RegAllocFastImpl(const RegAllocFilterFunc F = nullptr,
                   bool ClearVirtRegs_ = true)
      : ShouldAllocateRegisterImpl(F), StackSlotForVirtReg(-1),
        ClearVirtRegs(ClearVirtRegs_) {}

private:
  MachineFrameInfo *MFI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetInstrInfo *TII = nullptr;
  RegisterClassInfo RegClassInfo;
  const RegAllocFilterFunc ShouldAllocateRegisterImpl;
  /// Basic block currently being allocated.
  MachineBasicBlock *MBB = nullptr;

  /// Maps virtual regs to the frame index where these values are spilled.
  IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
  /// Everything we know about a live virtual register.
  struct LiveReg {
    MachineInstr *LastUse = nullptr; ///< Last instr to use reg.
    Register VirtReg;                ///< Virtual register number.
    MCPhysReg PhysReg = 0;           ///< Currently held here.
    bool LiveOut = false;            ///< Register is possibly live out.
    bool Reloaded = false;           ///< Register was reloaded.
    bool Error = false;              ///< Could not allocate.

    explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {}

    unsigned getSparseSetIndex() const {
      return Register::virtReg2Index(VirtReg);
    }
  };
  using LiveRegMap = SparseSet<LiveReg, identity<unsigned>, uint16_t>;
  /// This map contains entries for each virtual register that is currently
  /// available in a physical register.
  LiveRegMap LiveVirtRegs;
  /// Stores assigned virtual registers present in the bundle MI.
  DenseMap<Register, MCPhysReg> BundleVirtRegsMap;

  DenseMap<unsigned, SmallVector<MachineOperand *, 2>> LiveDbgValueMap;
  /// List of DBG_VALUE that we encountered without the vreg being assigned
  /// because they were placed after the last use of the vreg.
  DenseMap<unsigned, SmallVector<MachineInstr *, 1>> DanglingDbgValues;

  /// Has a bit set for every virtual register for which it was determined
  /// that it is alive across blocks.
  BitVector MayLiveAcrossBlocks;
  /// State of a register unit.
  enum RegUnitState {
    /// A free register is not currently in use and can be allocated
    /// immediately without checking aliases.
    regFree,

    /// A pre-assigned register has been assigned before register allocation
    /// (e.g., setting up a call parameter).
    regPreAssigned,

    /// Used temporarily in reloadAtBegin() to mark register units that are
    /// live-in to the basic block.
    regLiveIn,

    /// A register state may also be a virtual register number, indicating
    /// that the physical register is currently allocated to a virtual
    /// register. In that case, LiveVirtRegs contains the inverse mapping.
  };

  /// Maps each physical register to a RegUnitState enum or virtual register.
  std::vector<unsigned> RegUnitStates;
  SmallVector<MachineInstr *, 32> Coalesced;

  /// Track register units that are used in the current instruction, and so
  /// cannot be allocated.
  ///
  /// In the first phase (tied defs/early clobber), we consider also physical
  /// uses, afterwards, we don't. If the lowest bit isn't set, it's a solely
  /// physical use (markPhysRegUsedInInstr), otherwise, it's a normal use. To
  /// avoid resetting the entire vector after every instruction, we track the
  /// instruction "generation" in the remaining 31 bits -- this means that if
  /// UsedInInstr[Idx] < InstrGen, the register unit is unused. InstrGen is
  /// never zero and always incremented by two.
  ///
  /// Don't allocate inline storage: the number of register units is typically
  /// quite large (e.g., AArch64 > 100, X86 > 200, AMDGPU > 1000).
  uint32_t InstrGen;
  SmallVector<unsigned, 0> UsedInInstr;
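
  // Illustrative example (editor's addition, not in the original source):
  // with InstrGen == 4, markPhysRegUsedInInstr() records 4 (lowest bit clear:
  // phys-only use) and markRegUsedInInstr() records 5 (lowest bit set: normal
  // use). Once InstrGen advances to 6, both stale entries compare as "unused"
  // without the vector ever being cleared.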
  SmallVector<unsigned, 8> DefOperandIndexes;
  // Register masks attached to the current instruction.
  SmallVector<const uint32_t *> RegMasks;

  // Assign index for each instruction to quickly determine dominance.
  InstrPosIndexes PosIndexes;
  void setPhysRegState(MCPhysReg PhysReg, unsigned NewState);
  bool isPhysRegFree(MCPhysReg PhysReg) const;

  /// Mark a physreg as used in this instruction.
  void markRegUsedInInstr(MCPhysReg PhysReg) {
    for (MCRegUnit Unit : TRI->regunits(PhysReg))
      UsedInInstr[Unit] = InstrGen | 1;
  }
  // Check if physreg is clobbered by instruction's regmask(s).
  bool isClobberedByRegMasks(MCPhysReg PhysReg) const {
    return llvm::any_of(RegMasks, [PhysReg](const uint32_t *Mask) {
      return MachineOperand::clobbersPhysReg(Mask, PhysReg);
    });
  }
  /// Check if a physreg or any of its aliases are used in this instruction.
  bool isRegUsedInInstr(MCPhysReg PhysReg, bool LookAtPhysRegUses) const {
    if (LookAtPhysRegUses && isClobberedByRegMasks(PhysReg))
      return true;
    for (MCRegUnit Unit : TRI->regunits(PhysReg))
      if (UsedInInstr[Unit] >= (InstrGen | !LookAtPhysRegUses))
        return true;
    return false;
  }
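
  // Illustrative note on isRegUsedInInstr() above (editor's addition, not in
  // the original source): when LookAtPhysRegUses is false the threshold is
  // InstrGen | 1, so entries equal to InstrGen (phys-only uses) are ignored;
  // when it is true the threshold is InstrGen itself, so phys-only uses count
  // as conflicts too.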
  /// Mark physical register as being used in a register use operand.
  /// This is only used by the special livethrough handling code.
  void markPhysRegUsedInInstr(MCPhysReg PhysReg) {
    for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
      assert(UsedInInstr[Unit] <= InstrGen && "non-phys use before phys use?");
      UsedInInstr[Unit] = InstrGen;
    }
  }
  /// Remove mark of physical register being used in the instruction.
  void unmarkRegUsedInInstr(MCPhysReg PhysReg) {
    for (MCRegUnit Unit : TRI->regunits(PhysReg))
      UsedInInstr[Unit] = 0;
  }

  enum : unsigned {
    spillClean = 50,
    spillDirty = 100,
    spillPrefBonus = 20,
    spillImpossible = ~0u
  };
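
  // Illustrative note (editor's addition, not in the original source):
  // calcSpillCost() below returns spillClean when the vreg already has a
  // stack slot or is known live-out (a spill is needed anyway), spillDirty
  // otherwise, and allocVirtReg() subtracts spillPrefBonus from the cost of
  // hinted registers.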
public:
  bool ClearVirtRegs;

  bool runOnMachineFunction(MachineFunction &MF);

private:
  void allocateBasicBlock(MachineBasicBlock &MBB);
  void addRegClassDefCounts(MutableArrayRef<unsigned> RegClassDefCounts,
                            Register Reg) const;

  void findAndSortDefOperandIndexes(const MachineInstr &MI);

  void allocateInstruction(MachineInstr &MI);
  void handleDebugValue(MachineInstr &MI);
  void handleBundle(MachineInstr &MI);

  bool usePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
  bool definePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
  bool displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
  void freePhysReg(MCPhysReg PhysReg);

  unsigned calcSpillCost(MCPhysReg PhysReg) const;
  LiveRegMap::iterator findLiveVirtReg(Register VirtReg) {
    return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
  }

  LiveRegMap::const_iterator findLiveVirtReg(Register VirtReg) const {
    return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
  }
  void assignVirtToPhysReg(MachineInstr &MI, LiveReg &, MCPhysReg PhysReg);
  void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint,
                    bool LookAtPhysRegUses = false);
  void allocVirtRegUndef(MachineOperand &MO);
  void assignDanglingDebugValues(MachineInstr &Def, Register VirtReg,
                                 MCPhysReg Reg);
  bool defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
                                Register VirtReg);
  bool defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
                     bool LookAtPhysRegUses = false);
  bool useVirtReg(MachineInstr &MI, MachineOperand &MO, Register VirtReg);
  MachineBasicBlock::iterator
  getMBBBeginInsertionPoint(MachineBasicBlock &MBB,
                            SmallSet<Register, 2> &PrologLiveIns) const;

  void reloadAtBegin(MachineBasicBlock &MBB);
  bool setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg);

  Register traceCopies(Register VirtReg) const;
  Register traceCopyChain(Register Reg) const;

  bool shouldAllocateRegister(const Register Reg) const;
  int getStackSpaceFor(Register VirtReg);
  void spill(MachineBasicBlock::iterator Before, Register VirtReg,
             MCPhysReg AssignedReg, bool Kill, bool LiveOut);
  void reload(MachineBasicBlock::iterator Before, Register VirtReg,
              MCPhysReg PhysReg);

  bool mayLiveOut(Register VirtReg);
  bool mayLiveIn(Register VirtReg);

  void dumpState() const;
};

class RegAllocFast : public MachineFunctionPass {
  RegAllocFastImpl Impl;

public:
  static char ID;

  RegAllocFast(const RegAllocFilterFunc F = nullptr, bool ClearVirtRegs_ = true)
      : MachineFunctionPass(ID), Impl(F, ClearVirtRegs_) {}
  bool runOnMachineFunction(MachineFunction &MF) override {
    return Impl.runOnMachineFunction(MF);
  }

  StringRef getPassName() const override { return "Fast Register Allocator"; }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoPHIs);
  }
  MachineFunctionProperties getSetProperties() const override {
    if (Impl.ClearVirtRegs) {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    return MachineFunctionProperties();
  }

  MachineFunctionProperties getClearedProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::IsSSA);
  }
};

} // end anonymous namespace
char RegAllocFast::ID = 0;

INITIALIZE_PASS(RegAllocFast, "regallocfast", "Fast Register Allocator", false,
                false)
bool RegAllocFastImpl::shouldAllocateRegister(const Register Reg) const {
  assert(Reg.isVirtual());
  if (!ShouldAllocateRegisterImpl)
    return true;

  return ShouldAllocateRegisterImpl(*TRI, *MRI, Reg);
}
void RegAllocFastImpl::setPhysRegState(MCPhysReg PhysReg, unsigned NewState) {
  for (MCRegUnit Unit : TRI->regunits(PhysReg))
    RegUnitStates[Unit] = NewState;
}

bool RegAllocFastImpl::isPhysRegFree(MCPhysReg PhysReg) const {
  for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
    if (RegUnitStates[Unit] != regFree)
      return false;
  }
  return true;
}
/// This allocates space for the specified virtual register to be held on the
/// stack.
int RegAllocFastImpl::getStackSpaceFor(Register VirtReg) {
  // Find the location Reg would belong...
  int SS = StackSlotForVirtReg[VirtReg];
  // Already has space allocated?
  if (SS != -1)
    return SS;

  // Allocate a new stack object for this spill location...
  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  unsigned Size = TRI->getSpillSize(RC);
  Align Alignment = TRI->getSpillAlign(RC);
  int FrameIdx = MFI->CreateSpillStackObject(Size, Alignment);

  // Assign the slot.
  StackSlotForVirtReg[VirtReg] = FrameIdx;
  return FrameIdx;
}
static bool dominates(InstrPosIndexes &PosIndexes, const MachineInstr &A,
                      const MachineInstr &B) {
  uint64_t IndexA, IndexB;
  PosIndexes.getIndex(A, IndexA);
  // If indexing B reassigned all indexes, A's cached index is stale; re-query.
  if (LLVM_UNLIKELY(PosIndexes.getIndex(B, IndexB)))
    PosIndexes.getIndex(A, IndexA);
  return IndexA < IndexB;
}
/// Returns false if \p VirtReg is known to not live out of the current block.
bool RegAllocFastImpl::mayLiveOut(Register VirtReg) {
  if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg))) {
    // Cannot be live-out if there are no successors.
    return !MBB->succ_empty();
  }

  const MachineInstr *SelfLoopDef = nullptr;

  // If this block loops back to itself, it is necessary to check whether the
  // use comes after the def.
  if (MBB->isSuccessor(MBB)) {
    // Find the first def in the self loop MBB.
    for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
      if (DefInst.getParent() != MBB) {
        MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
        return true;
      } else {
        if (!SelfLoopDef || dominates(PosIndexes, DefInst, *SelfLoopDef))
          SelfLoopDef = &DefInst;
      }
    }
    if (!SelfLoopDef) {
      MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
      return true;
    }
  }

  // See if the first \p Limit uses of the register are all in the current
  // block.
  static const unsigned Limit = 8;
  unsigned C = 0;
  for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(VirtReg)) {
    if (UseInst.getParent() != MBB || ++C >= Limit) {
      MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
      // Cannot be live-out if there are no successors.
      return !MBB->succ_empty();
    }

    if (SelfLoopDef) {
      // Try to handle some simple cases to avoid spilling and reloading every
      // value inside a self looping block.
      if (SelfLoopDef == &UseInst ||
          !dominates(PosIndexes, *SelfLoopDef, UseInst)) {
        MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
        return true;
      }
    }
  }

  return false;
}
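
// Illustrative note on the self-loop case (editor's addition, not in the
// original source): a use dominated by the in-block def consumes the value
// computed in the same iteration, so it does not force liveness across the
// back edge; a use not dominated by the def must see the previous iteration's
// value, so the vreg is conservatively marked as possibly live across blocks.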
/// Returns false if \p VirtReg is known to not be live into the current block.
bool RegAllocFastImpl::mayLiveIn(Register VirtReg) {
  if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg)))
    return !MBB->pred_empty();

  // See if the first \p Limit defs of the register are all in the current
  // block.
  static const unsigned Limit = 8;
  unsigned C = 0;
  for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
    if (DefInst.getParent() != MBB || ++C >= Limit) {
      MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
      return !MBB->pred_empty();
    }
  }

  return false;
}
/// Insert spill instruction for \p AssignedReg before \p Before. Update
/// DBG_VALUEs with \p VirtReg operands with the stack slot.
void RegAllocFastImpl::spill(MachineBasicBlock::iterator Before,
                             Register VirtReg, MCPhysReg AssignedReg, bool Kill,
                             bool LiveOut) {
  LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI) << " in "
                    << printReg(AssignedReg, TRI));
  int FI = getStackSpaceFor(VirtReg);
  LLVM_DEBUG(dbgs() << " to stack slot #" << FI << '\n');

  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, TRI,
                           VirtReg);
  ++NumStores;

  MachineBasicBlock::iterator FirstTerm = MBB->getFirstTerminator();
  // When we spill a virtual register, we will have spill instructions behind
  // every definition of it, meaning we can switch all the DBG_VALUEs over
  // to just reference the stack slot.
  SmallVectorImpl<MachineOperand *> &LRIDbgOperands = LiveDbgValueMap[VirtReg];
  SmallMapVector<MachineInstr *, SmallVector<const MachineOperand *>, 2>
      SpilledOperandsMap;
  for (MachineOperand *MO : LRIDbgOperands)
    SpilledOperandsMap[MO->getParent()].push_back(MO);
  for (auto MISpilledOperands : SpilledOperandsMap) {
    MachineInstr &DBG = *MISpilledOperands.first;
    // We don't have enough support for tracking operands of DBG_VALUE_LISTs.
    if (DBG.isDebugValueList())
      continue;
    MachineInstr *NewDV = buildDbgValueForSpill(
        *MBB, Before, *MISpilledOperands.first, FI, MISpilledOperands.second);
    assert(NewDV->getParent() == MBB && "dangling parent pointer");
    LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV);

    if (LiveOut) {
      // We need to insert a DBG_VALUE at the end of the block if the spill
      // slot is live out, but there is another use of the value after the
      // spill. This will allow LiveDebugValues to see the correct live out
      // value to propagate to the successors.
      MachineInstr *ClonedDV = MBB->getParent()->CloneMachineInstr(NewDV);
      MBB->insert(FirstTerm, ClonedDV);
      LLVM_DEBUG(dbgs() << "Cloning debug info due to live out spill\n");
    }

    // Rewrite unassigned dbg_values to use the stack slot.
    // TODO We can potentially do this for list debug values as well if we know
    // how the dbg_values are getting unassigned.
    if (DBG.isNonListDebugValue()) {
      MachineOperand &MO = DBG.getDebugOperand(0);
      if (MO.isReg() && MO.getReg() == 0) {
        updateDbgValueForSpill(DBG, FI, 0);
      }
    }
  }
  // Now that this register has been spilled, there should not be any
  // DBG_VALUE pointing to this register because they all point to the
  // spilled value now.
  LRIDbgOperands.clear();
}
/// Insert reload instruction for \p PhysReg before \p Before.
void RegAllocFastImpl::reload(MachineBasicBlock::iterator Before,
                              Register VirtReg, MCPhysReg PhysReg) {
  LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
                    << printReg(PhysReg, TRI) << '\n');
  int FI = getStackSpaceFor(VirtReg);
  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  TII->loadRegFromStackSlot(*MBB, Before, PhysReg, FI, &RC, TRI, VirtReg);
  ++NumLoads;
}
/// Get basic block begin insertion point.
/// This is not just MBB.begin() because surprisingly we have EH_LABEL
/// instructions marking the begin of a basic block. This means we must insert
/// new instructions after such labels...
MachineBasicBlock::iterator RegAllocFastImpl::getMBBBeginInsertionPoint(
    MachineBasicBlock &MBB, SmallSet<Register, 2> &PrologLiveIns) const {
  MachineBasicBlock::iterator I = MBB.begin();
  while (I != MBB.end()) {
    if (I->isLabel()) {
      ++I;
      continue;
    }

    // Most reloads should be inserted after prolog instructions.
    if (!TII->isBasicBlockPrologue(*I))
      break;

    // However if a prolog instruction reads a register that needs to be
    // reloaded, the reload should be inserted before the prolog.
    for (MachineOperand &MO : I->operands()) {
      if (MO.isReg())
        PrologLiveIns.insert(MO.getReg());
    }

    ++I;
  }

  return I;
}
/// Reload all currently assigned virtual registers.
void RegAllocFastImpl::reloadAtBegin(MachineBasicBlock &MBB) {
  if (LiveVirtRegs.empty())
    return;

  for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
    MCPhysReg Reg = P.PhysReg;
    // Set state to live-in. This possibly overrides mappings to virtual
    // registers but we don't care anymore at this point.
    setPhysRegState(Reg, regLiveIn);
  }

  SmallSet<Register, 2> PrologLiveIns;

  // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
  // of spilling here is deterministic, if arbitrary.
  MachineBasicBlock::iterator InsertBefore =
      getMBBBeginInsertionPoint(MBB, PrologLiveIns);
  for (const LiveReg &LR : LiveVirtRegs) {
    MCPhysReg PhysReg = LR.PhysReg;
    if (PhysReg == 0 || LR.Error)
      continue;

    MCRegister FirstUnit = *TRI->regunits(PhysReg).begin();
    if (RegUnitStates[FirstUnit] == regLiveIn)
      continue;

    assert((&MBB != &MBB.getParent()->front() || IgnoreMissingDefs) &&
           "no reload in start block. Missing vreg def?");

    if (PrologLiveIns.count(PhysReg)) {
      // FIXME: Theoretically this should use an insert point skipping labels
      // but I'm not sure how labels should interact with prolog instruction
      // that need reloads.
      reload(MBB.begin(), LR.VirtReg, PhysReg);
    } else
      reload(InsertBefore, LR.VirtReg, PhysReg);
  }
  LiveVirtRegs.clear();
}
/// Handle the direct use of a physical register. Check that the register is
/// not used by a virtreg. Kill the physreg, marking it free. This may add
/// implicit kills to MO->getParent() and invalidate MO.
bool RegAllocFastImpl::usePhysReg(MachineInstr &MI, MCPhysReg Reg) {
  assert(Register::isPhysicalRegister(Reg) && "expected physreg");
  bool displacedAny = displacePhysReg(MI, Reg);
  setPhysRegState(Reg, regPreAssigned);
  markRegUsedInInstr(Reg);
  return displacedAny;
}
bool RegAllocFastImpl::definePhysReg(MachineInstr &MI, MCPhysReg Reg) {
  bool displacedAny = displacePhysReg(MI, Reg);
  setPhysRegState(Reg, regPreAssigned);
  markRegUsedInInstr(Reg);
  return displacedAny;
}
/// Mark PhysReg as reserved or free after spilling any virtregs. This is very
/// similar to defineVirtReg except the physreg is reserved instead of
/// allocated.
bool RegAllocFastImpl::displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg) {
  bool displacedAny = false;

  for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
    switch (unsigned VirtReg = RegUnitStates[Unit]) {
    default: {
      LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
      assert(LRI != LiveVirtRegs.end() && "datastructures in sync");
      MachineBasicBlock::iterator ReloadBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      reload(ReloadBefore, VirtReg, LRI->PhysReg);

      setPhysRegState(LRI->PhysReg, regFree);
      LRI->PhysReg = 0;
      LRI->Reloaded = true;
      displacedAny = true;
      break;
    }
    case regPreAssigned:
      RegUnitStates[Unit] = regFree;
      displacedAny = true;
      break;
    case regFree:
      break;
    }
  }
  return displacedAny;
}
void RegAllocFastImpl::freePhysReg(MCPhysReg PhysReg) {
  LLVM_DEBUG(dbgs() << "Freeing " << printReg(PhysReg, TRI) << ':');

  MCRegister FirstUnit = *TRI->regunits(PhysReg).begin();
  switch (unsigned VirtReg = RegUnitStates[FirstUnit]) {
  case regFree:
    LLVM_DEBUG(dbgs() << '\n');
    return;
  case regPreAssigned:
    LLVM_DEBUG(dbgs() << '\n');
    setPhysRegState(PhysReg, regFree);
    return;
  default: {
    LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
    assert(LRI != LiveVirtRegs.end());
    LLVM_DEBUG(dbgs() << ' ' << printReg(LRI->VirtReg, TRI) << '\n');
    setPhysRegState(LRI->PhysReg, regFree);
    LRI->PhysReg = 0;
  }
    return;
  }
}
/// Return the cost of spilling clearing out PhysReg and aliases so it is free
/// for allocation. Returns 0 when PhysReg is free or disabled with all aliases
/// disabled - it can be allocated directly.
/// \returns spillImpossible when PhysReg or an alias can't be spilled.
unsigned RegAllocFastImpl::calcSpillCost(MCPhysReg PhysReg) const {
  for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
    switch (unsigned VirtReg = RegUnitStates[Unit]) {
    case regFree:
      break;
    case regPreAssigned:
      LLVM_DEBUG(dbgs() << "Cannot spill pre-assigned "
                        << printReg(PhysReg, TRI) << '\n');
      return spillImpossible;
    default: {
      bool SureSpill = StackSlotForVirtReg[VirtReg] != -1 ||
                       findLiveVirtReg(VirtReg)->LiveOut;
      return SureSpill ? spillClean : spillDirty;
    }
    }
  }
  return 0;
}
void RegAllocFastImpl::assignDanglingDebugValues(MachineInstr &Definition,
                                                 Register VirtReg,
                                                 MCPhysReg Reg) {
  auto UDBGValIter = DanglingDbgValues.find(VirtReg);
  if (UDBGValIter == DanglingDbgValues.end())
    return;

  SmallVectorImpl<MachineInstr *> &Dangling = UDBGValIter->second;
  for (MachineInstr *DbgValue : Dangling) {
    assert(DbgValue->isDebugValue());
    if (!DbgValue->hasDebugOperandForReg(VirtReg))
      continue;

    // Test whether the physreg survives from the definition to the DBG_VALUE.
    MCPhysReg SetToReg = Reg;
    unsigned Limit = 20;
    for (MachineBasicBlock::iterator I = std::next(Definition.getIterator()),
                                     E = DbgValue->getIterator();
         I != E; ++I) {
      if (I->modifiesRegister(Reg, TRI) || --Limit == 0) {
        LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
                          << '\n');
        SetToReg = 0;
        break;
      }
    }

    for (MachineOperand &MO : DbgValue->getDebugOperandsForReg(VirtReg)) {
      MO.setReg(SetToReg);
      if (SetToReg != 0)
        MO.setIsRenamable();
    }
  }
  Dangling.clear();
}
/// This method updates local state so that we know that PhysReg is the
/// proper container for VirtReg now. The physical register must not be used
/// for anything else when this is called.
void RegAllocFastImpl::assignVirtToPhysReg(MachineInstr &AtMI, LiveReg &LR,
                                           MCPhysReg PhysReg) {
  Register VirtReg = LR.VirtReg;
  LLVM_DEBUG(dbgs() << "Assigning " << printReg(VirtReg, TRI) << " to "
                    << printReg(PhysReg, TRI) << '\n');
  assert(LR.PhysReg == 0 && "Already assigned a physreg");
  assert(PhysReg != 0 && "Trying to assign no register");
  LR.PhysReg = PhysReg;
  setPhysRegState(PhysReg, VirtReg);

  assignDanglingDebugValues(AtMI, VirtReg, PhysReg);
}
static bool isCoalescable(const MachineInstr &MI) { return MI.isFullCopy(); }
Register RegAllocFastImpl::traceCopyChain(Register Reg) const {
  static const unsigned ChainLengthLimit = 3;
  unsigned C = 0;
  do {
    if (Reg.isPhysical())
      return Reg;
    assert(Reg.isVirtual());

    MachineInstr *VRegDef = MRI->getUniqueVRegDef(Reg);
    if (!VRegDef || !isCoalescable(*VRegDef))
      return 0;
    Reg = VRegDef->getOperand(1).getReg();
  } while (++C <= ChainLengthLimit);
  return 0;
}
/// Check if any of \p VirtReg's definitions is a copy. If it is follow the
/// chain of copies to check whether we reach a physical register we can
/// coalesce with.
Register RegAllocFastImpl::traceCopies(Register VirtReg) const {
  static const unsigned DefLimit = 3;
  unsigned C = 0;
  for (const MachineInstr &MI : MRI->def_instructions(VirtReg)) {
    if (isCoalescable(MI)) {
      Register Reg = MI.getOperand(1).getReg();
      Reg = traceCopyChain(Reg);
      if (Reg.isValid())
        return Reg;
    }

    if (++C >= DefLimit)
      break;
  }
  return Register();
}
/// Allocates a physical register for VirtReg.
void RegAllocFastImpl::allocVirtReg(MachineInstr &MI, LiveReg &LR,
                                    Register Hint0, bool LookAtPhysRegUses) {
  const Register VirtReg = LR.VirtReg;
  assert(LR.PhysReg == 0);

  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg)
                    << " in class " << TRI->getRegClassName(&RC)
                    << " with hint " << printReg(Hint0, TRI) << '\n');

  // Take hint when possible.
  if (Hint0.isPhysical() && MRI->isAllocatable(Hint0) && RC.contains(Hint0) &&
      !isRegUsedInInstr(Hint0, LookAtPhysRegUses)) {
    // Take hint if the register is currently free.
    if (isPhysRegFree(Hint0)) {
      LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
                        << '\n');
      assignVirtToPhysReg(MI, LR, Hint0);
      return;
    } else {
      LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint0, TRI)
                        << " occupied\n");
    }
  } else {
    Hint0 = Register();
  }

  // Try other hint.
  Register Hint1 = traceCopies(VirtReg);
  if (Hint1.isPhysical() && MRI->isAllocatable(Hint1) && RC.contains(Hint1) &&
      !isRegUsedInInstr(Hint1, LookAtPhysRegUses)) {
    // Take hint if the register is currently free.
    if (isPhysRegFree(Hint1)) {
      LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
                        << '\n');
      assignVirtToPhysReg(MI, LR, Hint1);
      return;
    } else {
      LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint1, TRI)
                        << " occupied\n");
    }
  } else {
    Hint1 = Register();
  }

  MCPhysReg BestReg = 0;
  unsigned BestCost = spillImpossible;
  ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
  for (MCPhysReg PhysReg : AllocationOrder) {
    LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' ');
    if (isRegUsedInInstr(PhysReg, LookAtPhysRegUses)) {
      LLVM_DEBUG(dbgs() << "already used in instr.\n");
      continue;
    }

    unsigned Cost = calcSpillCost(PhysReg);
    LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << '\n');
    // Immediately take a register with cost 0.
    if (Cost == 0) {
      assignVirtToPhysReg(MI, LR, PhysReg);
      return;
    }

    if (PhysReg == Hint0 || PhysReg == Hint1)
      Cost -= spillPrefBonus;

    if (Cost < BestCost) {
      BestReg = PhysReg;
      BestCost = Cost;
    }
  }

  if (!BestReg) {
    // Nothing we can do: Report an error and keep going with an invalid
    // allocation.
    if (MI.isInlineAsm())
      MI.emitError("inline assembly requires more registers than available");
    else
      MI.emitError("ran out of registers during register allocation");

    LR.Error = true;
    LR.PhysReg = 0;
    return;
  }

  displacePhysReg(MI, BestReg);
  assignVirtToPhysReg(MI, LR, BestReg);
}
void RegAllocFastImpl::allocVirtRegUndef(MachineOperand &MO) {
  assert(MO.isUndef() && "expected undef use");
  Register VirtReg = MO.getReg();
  assert(VirtReg.isVirtual() && "Expected virtreg");
  if (!shouldAllocateRegister(VirtReg))
    return;

  LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
  MCPhysReg PhysReg;
  if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
    PhysReg = LRI->PhysReg;
  } else {
    const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
    ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
    assert(!AllocationOrder.empty() && "Allocation order must not be empty");
    PhysReg = AllocationOrder[0];
  }

  unsigned SubRegIdx = MO.getSubReg();
  if (SubRegIdx != 0) {
    PhysReg = TRI->getSubReg(PhysReg, SubRegIdx);
    MO.setSubReg(0);
  }
  MO.setReg(PhysReg);
  MO.setIsRenamable(true);
}
/// Variation of defineVirtReg() with special handling for livethrough regs
/// (tied or earlyclobber) that may interfere with preassigned uses.
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::defineLiveThroughVirtReg(MachineInstr &MI,
                                                unsigned OpNum,
                                                Register VirtReg) {
  if (!shouldAllocateRegister(VirtReg))
    return false;
  LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
  if (LRI != LiveVirtRegs.end()) {
    MCPhysReg PrevReg = LRI->PhysReg;
    if (PrevReg != 0 && isRegUsedInInstr(PrevReg, true)) {
      LLVM_DEBUG(dbgs() << "Need new assignment for " << printReg(PrevReg, TRI)
                        << " (tied/earlyclobber resolution)\n");
      freePhysReg(PrevReg);
      LRI->PhysReg = 0;
      allocVirtReg(MI, *LRI, 0, true);
      MachineBasicBlock::iterator InsertBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      LLVM_DEBUG(dbgs() << "Copy " << printReg(LRI->PhysReg, TRI) << " to "
                        << printReg(PrevReg, TRI) << '\n');
      BuildMI(*MBB, InsertBefore, MI.getDebugLoc(),
              TII->get(TargetOpcode::COPY), PrevReg)
          .addReg(LRI->PhysReg, llvm::RegState::Kill);
    }
    MachineOperand &MO = MI.getOperand(OpNum);
    if (MO.getSubReg() && !MO.isUndef()) {
      LRI->LastUse = &MI;
    }
  }
  return defineVirtReg(MI, OpNum, VirtReg, true);
}
/// Allocates a register for VirtReg definition. Typically the register is
/// already assigned from a use of the virtreg, however we still need to
/// perform an allocation if:
/// - It is a dead definition without any uses.
/// - The value is live out and all uses are in different basic blocks.
///
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::defineVirtReg(MachineInstr &MI, unsigned OpNum,
                                     Register VirtReg, bool LookAtPhysRegUses) {
  assert(VirtReg.isVirtual() && "Not a virtual register");
  if (!shouldAllocateRegister(VirtReg))
    return false;
  MachineOperand &MO = MI.getOperand(OpNum);
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  if (New) {
    if (!MO.isDead()) {
      if (mayLiveOut(VirtReg)) {
        LRI->LiveOut = true;
      } else {
        // It is a dead def without the dead flag; add the flag now.
        MO.setIsDead(true);
      }
    }
  }
  if (LRI->PhysReg == 0) {
    allocVirtReg(MI, *LRI, 0, LookAtPhysRegUses);
    // If no physical register is available for LRI, we assign one at random
    // and bail out of this function immediately.
    if (LRI->Error) {
      const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
      ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
      if (AllocationOrder.empty())
        return setPhysReg(MI, MO, MCRegister::NoRegister);
      return setPhysReg(MI, MO, *AllocationOrder.begin());
    }
  } else {
    assert(!isRegUsedInInstr(LRI->PhysReg, LookAtPhysRegUses) &&
           "TODO: preassign mismatch");
    LLVM_DEBUG(dbgs() << "In def of " << printReg(VirtReg, TRI)
                      << " use existing assignment to "
                      << printReg(LRI->PhysReg, TRI) << '\n');
  }

  MCPhysReg PhysReg = LRI->PhysReg;
  if (LRI->Reloaded || LRI->LiveOut) {
    if (!MI.isImplicitDef()) {
      MachineBasicBlock::iterator SpillBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      LLVM_DEBUG(dbgs() << "Spill Reason: LO: " << LRI->LiveOut
                        << " RL: " << LRI->Reloaded << '\n');
      bool Kill = LRI->LastUse == nullptr;
      spill(SpillBefore, VirtReg, PhysReg, Kill, LRI->LiveOut);

      // We need to place additional spills for each indirect destination of an
      // INLINEASM_BR.
      if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) {
        int FI = StackSlotForVirtReg[VirtReg];
        const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
        for (MachineOperand &MO : MI.operands()) {
          if (MO.isMBB()) {
            MachineBasicBlock *Succ = MO.getMBB();
            TII->storeRegToStackSlot(*Succ, Succ->begin(), PhysReg, Kill, FI,
                                     &RC, TRI, VirtReg);
            ++NumStores;
            Succ->addLiveIn(PhysReg);
          }
        }
      }
    }

    LRI->LastUse = nullptr;
  }
  LRI->LiveOut = false;
  LRI->Reloaded = false;
  if (MI.getOpcode() == TargetOpcode::BUNDLE) {
    BundleVirtRegsMap[VirtReg] = PhysReg;
  }
  markRegUsedInInstr(PhysReg);
  return setPhysReg(MI, MO, PhysReg);
}
/// Allocates a register for a VirtReg use.
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::useVirtReg(MachineInstr &MI, MachineOperand &MO,
                                  Register VirtReg) {
  assert(VirtReg.isVirtual() && "Not a virtual register");
  if (!shouldAllocateRegister(VirtReg))
    return false;
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  if (New) {
    if (!MO.isKill()) {
      if (mayLiveOut(VirtReg)) {
        LRI->LiveOut = true;
      } else {
        // It is a last (killing) use without the kill flag; add the flag now.
        MO.setIsKill(true);
      }
    }
  } else {
    assert((!MO.isKill() || LRI->LastUse == &MI) && "Invalid kill flag");
  }

  // If necessary allocate a register.
  if (LRI->PhysReg == 0) {
    assert(!MO.isTied() && "tied op should be allocated");
    Register Hint;
    if (MI.isCopy() && MI.getOperand(1).getSubReg() == 0) {
      Hint = MI.getOperand(0).getReg();
      if (Hint.isVirtual()) {
        assert(!shouldAllocateRegister(Hint));
        Hint = Register();
      } else {
        assert(Hint.isPhysical() &&
               "Copy destination should already be assigned");
      }
    }
    allocVirtReg(MI, *LRI, Hint, false);
    if (LRI->Error) {
      const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
      ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
      if (AllocationOrder.empty())
        return setPhysReg(MI, MO, MCRegister::NoRegister);
      return setPhysReg(MI, MO, *AllocationOrder.begin());
    }
  }

  LRI->LastUse = &MI;

  if (MI.getOpcode() == TargetOpcode::BUNDLE) {
    BundleVirtRegsMap[VirtReg] = LRI->PhysReg;
  }
  markRegUsedInInstr(LRI->PhysReg);
  return setPhysReg(MI, MO, LRI->PhysReg);
}
/// Changes operand OpNum in MI to refer to PhysReg, considering subregs.
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::setPhysReg(MachineInstr &MI, MachineOperand &MO,
                                  MCPhysReg PhysReg) {
  if (!MO.getSubReg()) {
    MO.setReg(PhysReg);
    MO.setIsRenamable(true);
    return false;
  }

  // Handle subregister index.
  MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : MCRegister());
  MO.setIsRenamable(true);
  // Note: We leave the subreg number around a little longer in case of defs.
  // This is so that the register freeing logic in allocateInstruction can still
  // recognize this as subregister defs. The code there will clear the number.
  if (!MO.isDef())
    MO.setSubReg(0);

  // A kill flag implies killing the full register. Add corresponding super
  // register kill.
  if (MO.isKill()) {
    MI.addRegisterKilled(PhysReg, TRI, true);
    // Conservatively assume implicit MOs were re-arranged
    return true;
  }

  // A <def,read-undef> of a sub-register requires an implicit def of the full
  // register.
  if (MO.isDef() && MO.isUndef()) {
    if (MO.isDead())
      MI.addRegisterDead(PhysReg, TRI, true);
    else
      MI.addRegisterDefined(PhysReg, TRI);
    // Conservatively assume implicit MOs were re-arranged
    return true;
  }
  return false;
}
#ifndef NDEBUG

void RegAllocFastImpl::dumpState() const {
  for (unsigned Unit = 1, UnitE = TRI->getNumRegUnits(); Unit != UnitE;
       ++Unit) {
    switch (unsigned VirtReg = RegUnitStates[Unit]) {
    case regFree:
      break;
    case regPreAssigned:
      dbgs() << " " << printRegUnit(Unit, TRI) << "[P]";
      break;
    case regLiveIn:
      llvm_unreachable("Should not have regLiveIn in map");
    default: {
      dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(VirtReg);
      LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
      assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry");
      if (I->LiveOut || I->Reloaded) {
        dbgs() << '[';
        if (I->LiveOut)
          dbgs() << 'O';
        if (I->Reloaded)
          dbgs() << 'R';
        dbgs() << ']';
      }
      assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present");
      break;
    }
    }
  }
  dbgs() << '\n';
  // Check that LiveVirtRegs is the inverse.
  for (const LiveReg &LR : LiveVirtRegs) {
    Register VirtReg = LR.VirtReg;
    assert(VirtReg.isVirtual() && "Bad map key");
    MCPhysReg PhysReg = LR.PhysReg;
    if (PhysReg != 0) {
      assert(Register::isPhysicalRegister(PhysReg) && "mapped to physreg");
      for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
        assert(RegUnitStates[Unit] == VirtReg && "inverse map valid");
      }
    }
  }
}

#endif
/// Count number of defs consumed from each register class by \p Reg
void RegAllocFastImpl::addRegClassDefCounts(
    MutableArrayRef<unsigned> RegClassDefCounts, Register Reg) const {
  assert(RegClassDefCounts.size() == TRI->getNumRegClasses());

  if (Reg.isVirtual()) {
    if (!shouldAllocateRegister(Reg))
      return;
    const TargetRegisterClass *OpRC = MRI->getRegClass(Reg);
    for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
         RCIdx != RCIdxEnd; ++RCIdx) {
      const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
      // FIXME: Consider aliasing sub/super registers.
      if (OpRC->hasSubClassEq(IdxRC))
        ++RegClassDefCounts[RCIdx];
    }

    return;
  }

  for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
       RCIdx != RCIdxEnd; ++RCIdx) {
    const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
    for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
      if (IdxRC->contains(*Alias)) {
        ++RegClassDefCounts[RCIdx];
        break;
      }
    }
  }
}
/// Compute \ref DefOperandIndexes so it contains the indices of "def" operands
/// that are to be allocated. Those are ordered in a way that small classes,
/// early clobbers and livethroughs are allocated first.
void RegAllocFastImpl::findAndSortDefOperandIndexes(const MachineInstr &MI) {
  DefOperandIndexes.clear();

  LLVM_DEBUG(dbgs() << "Need to assign livethroughs\n");
  for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (MO.readsReg()) {
      if (Reg.isPhysical()) {
        LLVM_DEBUG(dbgs() << "mark extra used: " << printReg(Reg, TRI) << '\n');
        markPhysRegUsedInInstr(Reg);
      }
    }

    if (MO.isDef() && Reg.isVirtual() && shouldAllocateRegister(Reg))
      DefOperandIndexes.push_back(I);
  }

  // Most instructions only have one virtual def, so there's no point in
  // computing the possible number of defs for every register class.
  if (DefOperandIndexes.size() <= 1)
    return;

  // Track number of defs which may consume a register from the class. This is
  // used to assign registers for possibly-too-small classes first. Example:
  // defs are eax, 3 * gr32_abcd, 2 * gr32 => we want to assign the gr32_abcd
  // registers first so that the gr32 don't use the gr32_abcd registers before
  // we assign these.
  SmallVector<unsigned> RegClassDefCounts(TRI->getNumRegClasses(), 0);

  for (const MachineOperand &MO : MI.operands())
    if (MO.isReg() && MO.isDef())
      addRegClassDefCounts(RegClassDefCounts, MO.getReg());

  llvm::sort(DefOperandIndexes, [&](unsigned I0, unsigned I1) {
    const MachineOperand &MO0 = MI.getOperand(I0);
    const MachineOperand &MO1 = MI.getOperand(I1);
    Register Reg0 = MO0.getReg();
    Register Reg1 = MO1.getReg();
    const TargetRegisterClass &RC0 = *MRI->getRegClass(Reg0);
    const TargetRegisterClass &RC1 = *MRI->getRegClass(Reg1);

    // Identify regclasses that are easy to use up completely just in this
    // instruction.
    unsigned ClassSize0 = RegClassInfo.getOrder(&RC0).size();
    unsigned ClassSize1 = RegClassInfo.getOrder(&RC1).size();

    bool SmallClass0 = ClassSize0 < RegClassDefCounts[RC0.getID()];
    bool SmallClass1 = ClassSize1 < RegClassDefCounts[RC1.getID()];
    if (SmallClass0 > SmallClass1)
      return true;
    if (SmallClass0 < SmallClass1)
      return false;

    // Allocate early clobbers and livethrough operands first.
    bool Livethrough0 = MO0.isEarlyClobber() || MO0.isTied() ||
                        (MO0.getSubReg() == 0 && !MO0.isUndef());
    bool Livethrough1 = MO1.isEarlyClobber() || MO1.isTied() ||
                        (MO1.getSubReg() == 0 && !MO1.isUndef());
    if (Livethrough0 > Livethrough1)
      return true;
    if (Livethrough0 < Livethrough1)
      return false;

    // Tie-break rule: operand index.
    return I0 < I1;
  });
}
// Returns true if MO is tied and the operand it's tied to is not Undef (not
// Undef is not the same thing as Def).
static bool isTiedToNotUndef(const MachineOperand &MO) {
  if (!MO.isTied())
    return false;
  const MachineInstr &MI = *MO.getParent();
  unsigned TiedIdx = MI.findTiedOperandIdx(MI.getOperandNo(&MO));
  const MachineOperand &TiedMO = MI.getOperand(TiedIdx);
  return !TiedMO.isUndef();
}
void RegAllocFastImpl::allocateInstruction(MachineInstr &MI) {
  // The basic algorithm here is:
  // 1. Mark registers of def operands as free
  // 2. Allocate registers to use operands and place reload instructions for
  //    registers displaced by the allocation.
  //
  // However we need to handle some corner cases:
  // - pre-assigned defs and uses need to be handled before the other def/use
  //   operands are processed to avoid the allocation heuristics clashing with
  //   the pre-assignment.
  // - The "free def operands" step has to come last instead of first for tied
  //   operands and early-clobbers.

  InstrGen += 2;
  // In the event we ever get more than 2**31 instructions...
  if (LLVM_UNLIKELY(InstrGen == 0)) {
    UsedInInstr.assign(UsedInInstr.size(), 0);
    InstrGen = 2;
  }
  RegMasks.clear();
  BundleVirtRegsMap.clear();
  // Scan for special cases; Apply pre-assigned register defs to state.
  bool HasPhysRegUse = false;
  bool HasRegMask = false;
  bool HasVRegDef = false;
  bool HasDef = false;
  bool HasEarlyClobber = false;
  bool NeedToAssignLiveThroughs = false;
  for (MachineOperand &MO : MI.operands()) {
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      if (Reg.isVirtual()) {
        if (!shouldAllocateRegister(Reg))
          continue;
        if (MO.isDef()) {
          HasDef = true;
          HasVRegDef = true;
          if (MO.isEarlyClobber()) {
            HasEarlyClobber = true;
            NeedToAssignLiveThroughs = true;
          }
          if (isTiedToNotUndef(MO) || (MO.getSubReg() != 0 && !MO.isUndef()))
            NeedToAssignLiveThroughs = true;
        }
      } else if (Reg.isPhysical()) {
        if (!MRI->isReserved(Reg)) {
          if (MO.isDef()) {
            HasDef = true;
            bool displacedAny = definePhysReg(MI, Reg);
            if (MO.isEarlyClobber())
              HasEarlyClobber = true;
            if (!displacedAny)
              MO.setIsDead(true);
          }
          if (MO.readsReg())
            HasPhysRegUse = true;
        }
      }
    } else if (MO.isRegMask()) {
      HasRegMask = true;
      RegMasks.push_back(MO.getRegMask());
    }
  }
  // Allocate virtreg defs.
  if (HasDef) {
    if (HasVRegDef) {
      // Note that Implicit MOs can get re-arranged by defineVirtReg(), so loop
      // multiple times to ensure no operand is missed.
      bool ReArrangedImplicitOps = true;

      // Special handling for early clobbers, tied operands or subregister
      // defs: Compared to "normal" defs these:
      // - Must not use a register that is pre-assigned for a use operand.
      // - In order to solve tricky inline assembly constraints we change the
      //   heuristic to figure out a good operand order before doing
      //   assignments.
      if (NeedToAssignLiveThroughs) {
        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          findAndSortDefOperandIndexes(MI);
          for (unsigned OpIdx : DefOperandIndexes) {
            MachineOperand &MO = MI.getOperand(OpIdx);
            LLVM_DEBUG(dbgs() << "Allocating " << MO << '\n');
            Register Reg = MO.getReg();
            if (MO.isEarlyClobber() || isTiedToNotUndef(MO) ||
                (MO.getSubReg() && !MO.isUndef())) {
              ReArrangedImplicitOps = defineLiveThroughVirtReg(MI, OpIdx, Reg);
            } else {
              ReArrangedImplicitOps = defineVirtReg(MI, OpIdx, Reg);
            }
            // Implicit operands of MI were re-arranged,
            // re-compute DefOperandIndexes.
            if (ReArrangedImplicitOps)
              break;
          }
        }
      } else {
        // Assign virtual register defs.
        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          for (MachineOperand &MO : MI.operands()) {
            if (!MO.isReg() || !MO.isDef())
              continue;
            Register Reg = MO.getReg();
            if (Reg.isVirtual()) {
              ReArrangedImplicitOps =
                  defineVirtReg(MI, MI.getOperandNo(&MO), Reg);
              if (ReArrangedImplicitOps)
                break;
            }
          }
        }
      }
    }
    // Free registers occupied by defs.
    // Iterate operands in reverse order, so we see the implicit super register
    // defs first (we added them earlier in case of <def,read-undef>).
    for (MachineOperand &MO : reverse(MI.operands())) {
      if (!MO.isReg() || !MO.isDef())
        continue;

      Register Reg = MO.getReg();

      // subreg defs don't free the full register. We left the subreg number
      // around as a marker in setPhysReg() to recognize this case here.
      if (Reg.isPhysical() && MO.getSubReg() != 0) {
        MO.setSubReg(0);
        continue;
      }

      assert((!MO.isTied() || !isClobberedByRegMasks(MO.getReg())) &&
             "tied def assigned to clobbered register");

      // Do not free tied operands and early clobbers.
      if (isTiedToNotUndef(MO) || MO.isEarlyClobber())
        continue;
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      assert(Reg.isPhysical());
      if (MRI->isReserved(Reg))
        continue;
      freePhysReg(Reg);
      unmarkRegUsedInInstr(Reg);
    }
  }
  // Displace clobbered registers.
  if (HasRegMask) {
    assert(!RegMasks.empty() && "expected RegMask");
    // MRI bookkeeping.
    for (const auto *RM : RegMasks)
      MRI->addPhysRegsUsedFromRegMask(RM);

    // Displace clobbered registers.
    for (const LiveReg &LR : LiveVirtRegs) {
      MCPhysReg PhysReg = LR.PhysReg;
      if (PhysReg != 0 && isClobberedByRegMasks(PhysReg))
        displacePhysReg(MI, PhysReg);
    }
  }

  // Apply pre-assigned register uses to state.
  if (HasPhysRegUse) {
    for (MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.readsReg())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isPhysical())
        continue;
      if (MRI->isReserved(Reg))
        continue;
      if (!usePhysReg(MI, Reg))
        MO.setIsKill(true);
    }
  }
  // Allocate virtreg uses and insert reloads as necessary.
  // Implicit MOs can get moved/removed by useVirtReg(), so loop multiple
  // times to ensure no operand is missed.
  bool HasUndefUse = false;
  bool ReArrangedImplicitMOs = true;
  while (ReArrangedImplicitMOs) {
    ReArrangedImplicitMOs = false;
    for (MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.isUse())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      if (MO.isUndef()) {
        HasUndefUse = true;
        continue;
      }

      // Populate MayLiveAcrossBlocks in case the use block is allocated before
      // the def block (removing the vreg uses).
      mayLiveIn(Reg);

      assert(!MO.isInternalRead() && "Bundles not supported");
      assert(MO.readsReg() && "reading use");
      ReArrangedImplicitMOs = useVirtReg(MI, MO, Reg);
      if (ReArrangedImplicitMOs)
        break;
    }
  }

  // Allocate undef operands. This is a separate step because in a situation
  // like ` = OP undef %X, %X` both operands need the same register assigned,
  // so we should perform the normal assignment first.
  if (HasUndefUse) {
    for (MachineOperand &MO : MI.all_uses()) {
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      assert(MO.isUndef() && "Should only have undef virtreg uses left");
      allocVirtRegUndef(MO);
    }
  }
  // Free early clobbers.
  if (HasEarlyClobber) {
    for (MachineOperand &MO : reverse(MI.all_defs())) {
      if (!MO.isEarlyClobber())
        continue;
      assert(!MO.getSubReg() && "should be already handled in def processing");

      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      assert(Reg.isPhysical() && "should have register assigned");

      // We sometimes get odd situations like:
      //    early-clobber %x0 = INSTRUCTION %x0
      // which is semantically questionable as the early-clobber should
      // apply before the use. But in practice we consider the use to
      // happen before the early clobber now. Don't free the early clobber
      // register in this case.
      if (MI.readsRegister(Reg, TRI))
        continue;

      freePhysReg(Reg);
    }
  }

  LLVM_DEBUG(dbgs() << "<< " << MI);
  if (MI.isCopy() && MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getNumOperands() == 2) {
    LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n");
    Coalesced.push_back(&MI);
  }
}
void RegAllocFastImpl::handleDebugValue(MachineInstr &MI) {
  // Ignore DBG_VALUEs that aren't based on virtual registers. These are
  // mostly constants and frame indices.
  assert(MI.isDebugValue() && "not a DBG_VALUE*");
  for (const auto &MO : MI.debug_operands()) {
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg.isVirtual())
      continue;
    if (!shouldAllocateRegister(Reg))
      continue;

    // Already spilled to a stackslot?
    int SS = StackSlotForVirtReg[Reg];
    if (SS != -1) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      updateDbgValueForSpill(MI, SS, Reg);
      LLVM_DEBUG(dbgs() << "Rewrite DBG_VALUE for spilled memory: " << MI);
      continue;
    }

    // See if this virtual register has already been allocated to a physical
    // register or spilled to a stack slot.
    LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
    SmallVector<MachineOperand *> DbgOps;
    for (MachineOperand &Op : MI.getDebugOperandsForReg(Reg))
      DbgOps.push_back(&Op);

    if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
      // Update every use of Reg within MI.
      for (auto &RegMO : DbgOps)
        setPhysReg(MI, *RegMO, LRI->PhysReg);
    } else {
      DanglingDbgValues[Reg].push_back(&MI);
    }

    // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
    // that future spills of Reg will have DBG_VALUEs.
    LiveDbgValueMap[Reg].append(DbgOps.begin(), DbgOps.end());
  }
}
void RegAllocFastImpl::handleBundle(MachineInstr &MI) {
  MachineBasicBlock::instr_iterator BundledMI = MI.getIterator();
  ++BundledMI;
  while (BundledMI->isBundledWithPred()) {
    for (MachineOperand &MO : BundledMI->operands()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      DenseMap<Register, MCPhysReg>::iterator DI;
      DI = BundleVirtRegsMap.find(Reg);
      assert(DI != BundleVirtRegsMap.end() && "Unassigned virtual register");

      setPhysReg(MI, MO, DI->second);
    }

    ++BundledMI;
  }
}
void RegAllocFastImpl::allocateBasicBlock(MachineBasicBlock &MBB) {
  this->MBB = &MBB;
  LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);

  PosIndexes.unsetInitialized();
  RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
  assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");

  for (const auto &LiveReg : MBB.liveouts())
    setPhysRegState(LiveReg.PhysReg, regPreAssigned);

  Coalesced.clear();

  // Traverse block in reverse order allocating instructions one by one.
  for (MachineInstr &MI : reverse(MBB)) {
    LLVM_DEBUG(dbgs() << "\n>> " << MI << "Regs:"; dumpState());

    // Special handling for debug values. Note that they are not allowed to
    // affect codegen of the other instructions in any way.
    if (MI.isDebugValue()) {
      handleDebugValue(MI);
      continue;
    }

    allocateInstruction(MI);

    // Once BUNDLE header is assigned registers, same assignments need to be
    // done for bundled MIs.
    if (MI.getOpcode() == TargetOpcode::BUNDLE) {
      handleBundle(MI);
    }
  }

  LLVM_DEBUG(dbgs() << "Begin Regs:"; dumpState());

  // Spill all physical registers holding virtual registers now.
  LLVM_DEBUG(dbgs() << "Loading live registers at begin of block.\n");
  reloadAtBegin(MBB);

  // Erase all the coalesced copies. We are delaying it until now because
  // LiveVirtRegs might refer to the instrs.
  for (MachineInstr *MI : Coalesced)
    MBB.erase(MI);
  NumCoalesced += Coalesced.size();

  for (auto &UDBGPair : DanglingDbgValues) {
    for (MachineInstr *DbgValue : UDBGPair.second) {
      assert(DbgValue->isDebugValue() && "expected DBG_VALUE");
      // Nothing to do if the vreg was spilled in the meantime.
      if (!DbgValue->hasDebugOperandForReg(UDBGPair.first))
        continue;
      LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
                        << '\n');
      DbgValue->setDebugValueUndef();
    }
  }
  DanglingDbgValues.clear();

  LLVM_DEBUG(MBB.dump());
}
bool RegAllocFastImpl::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
                    << "********** Function: " << MF.getName() << '\n');
  MRI = &MF.getRegInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  TRI = STI.getRegisterInfo();
  TII = STI.getInstrInfo();
  MFI = &MF.getFrameInfo();
  MRI->freezeReservedRegs();
  RegClassInfo.runOnMachineFunction(MF);
  unsigned NumRegUnits = TRI->getNumRegUnits();
  InstrGen = 0;
  UsedInInstr.assign(NumRegUnits, 0);

  // initialize the virtual->physical register map to have a 'null'
  // mapping for all virtual registers
  unsigned NumVirtRegs = MRI->getNumVirtRegs();
  StackSlotForVirtReg.resize(NumVirtRegs);
  LiveVirtRegs.setUniverse(NumVirtRegs);
  MayLiveAcrossBlocks.clear();
  MayLiveAcrossBlocks.resize(NumVirtRegs);

  // Loop over all of the basic blocks, eliminating virtual register references
  for (MachineBasicBlock &MBB : MF)
    allocateBasicBlock(MBB);

  if (ClearVirtRegs) {
    // All machine operands and other references to virtual registers have been
    // replaced. Remove the virtual registers.
    MRI->clearVirtRegs();
  }

  StackSlotForVirtReg.clear();
  LiveDbgValueMap.clear();
  return true;
}
PreservedAnalyses RegAllocFastPass::run(MachineFunction &MF,
                                        MachineFunctionAnalysisManager &) {
  MFPropsModifier _(*this, MF);
  RegAllocFastImpl Impl(Opts.Filter, Opts.ClearVRegs);
  bool Changed = Impl.runOnMachineFunction(MF);
  if (!Changed)
    return PreservedAnalyses::all();
  auto PA = getMachineFunctionPassPreservedAnalyses();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}
void RegAllocFastPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  bool PrintFilterName = Opts.FilterName != "all";
  bool PrintNoClearVRegs = !Opts.ClearVRegs;
  bool PrintSemicolon = PrintFilterName && PrintNoClearVRegs;

  OS << "regallocfast";
  if (PrintFilterName || PrintNoClearVRegs) {
    OS << '<';
    if (PrintFilterName)
      OS << "filter=" << Opts.FilterName;
    if (PrintSemicolon)
      OS << ';';
    if (PrintNoClearVRegs)
      OS << "no-clear-vregs";
    OS << '>';
  }
}
FunctionPass *llvm::createFastRegisterAllocator() { return new RegAllocFast(); }

FunctionPass *llvm::createFastRegisterAllocator(RegAllocFilterFunc Ftor,
                                                bool ClearVirtRegs) {
  return new RegAllocFast(Ftor, ClearVirtRegs);
}