//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/Spiller.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "regalloc"
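// Note: this file shares the "regalloc" DEBUG_TYPE with the register
// allocators, so -debug-only=regalloc also enables the spiller's debug output.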
STATISTIC(NumSpilledRanges,   "Number of spilled live ranges");
STATISTIC(NumSnippets,        "Number of spilled snippets");
STATISTIC(NumSpills,          "Number of spills inserted");
STATISTIC(NumSpillsRemoved,   "Number of spills removed");
STATISTIC(NumReloads,         "Number of reloads inserted");
STATISTIC(NumReloadsRemoved,  "Number of reloads removed");
STATISTIC(NumFolded,          "Number of folded stack accesses");
STATISTIC(NumFoldedLoads,     "Number of folded loads");
STATISTIC(NumRemats,          "Number of rematerialized defs for spilling");
static cl::opt<bool>
RestrictStatepointRemat("restrict-statepoint-remat",
                        cl::init(false), cl::Hidden,
                        cl::desc("Restrict remat for statepoint operands"));
namespace {

class HoistSpillHelper : private LiveRangeEdit::Delegate {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  MachineDominatorTree &MDT;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  InsertPointAnalysis IPA;

  // Map from StackSlot to the LiveInterval of the original register.
  // Note the LiveInterval of the original register may have been deleted
  // after it is spilled. We keep a copy here to track the range where
  // spills can be moved.
  DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;

  // Map from pair of (StackSlot and Original VNI) to a set of spills which
  // have the same stackslot and have equal values defined by Original VNI.
  // These spills are mergeable and are hoist candidates.
  using MergeableSpillsMap =
      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
  MergeableSpillsMap MergeableSpills;

  /// This is the map from original register to a set containing all its
  /// siblings. To hoist a spill to another BB, we need to find out a live
  /// sibling there and use it as the source of the new spill.
  DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;
  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                     MachineBasicBlock &BB, Register &LiveReg);

  void rmRedundantSpills(
      SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void getVisitOrders(
      MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineDomTreeNode *> &Orders,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
                      SmallPtrSet<MachineInstr *, 16> &Spills,
                      SmallVectorImpl<MachineInstr *> &SpillsToRm,
                      DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);
public:
  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
                   VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervalsWrapperPass>().getLIS()),
        LSS(pass.getAnalysis<LiveStacks>()),
        MDT(pass.getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree()),
        VRM(vrm), MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(
            pass.getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI()),
        IPA(LIS, mf.getNumBlockIDs()) {}

  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                            unsigned Original);
  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
  void hoistAllSpills();
  void LRE_DidCloneVirtReg(Register, Register) override;
};
class InlineSpiller : public Spiller {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  MachineDominatorTree &MDT;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit = nullptr;
  LiveInterval *StackInt = nullptr;
  int StackSlot;
  Register Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<Register, 8> RegsToSpill;

  // All registers that were replaced by the spiller through some other method,
  // e.g. rematerialization.
  SmallVector<Register, 8> RegsReplaced;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  // For bundled copies, this will only include the first header copy.
  SmallPtrSet<MachineInstr *, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo *, 8> UsedValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr *, 8> DeadDefs;

  // Object records spills information and does the hoisting.
  HoistSpillHelper HSpiller;

  // Live range weight calculator.
  VirtRegAuxInfo &VRAI;

  ~InlineSpiller() override = default;
public:
  InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
                VirtRegAuxInfo &VRAI)
      : MF(MF), LIS(Pass.getAnalysis<LiveIntervalsWrapperPass>().getLIS()),
        LSS(Pass.getAnalysis<LiveStacks>()),
        MDT(Pass.getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree()),
        VRM(VRM), MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
        TRI(*MF.getSubtarget().getRegisterInfo()),
        MBFI(
            Pass.getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI()),
        HSpiller(Pass, MF, VRM), VRAI(VRAI) {}

  void spill(LiveRangeEdit &) override;
  ArrayRef<Register> getSpilledRegs() override { return RegsToSpill; }
  ArrayRef<Register> getReplacedRegs() override { return RegsReplaced; }
  void postOptimization() override;
private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }

  bool isSibling(Register Reg);
  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval *, VNInfo *);
  bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, Register Reg);
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
                         MachineInstr *LoadMI = nullptr);
  void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
  void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);

  void spillAroundUses(Register Reg);
  void spillAll();
};

} // end anonymous namespace
Spiller::~Spiller() = default;

void Spiller::anchor() {}
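// anchor() is defined out-of-line here so the Spiller vtable is emitted in
// this translation unit instead of in every file that includes Spiller.h.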
Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
                                   MachineFunction &MF, VirtRegMap &VRM,
                                   VirtRegAuxInfo &VRAI) {
  return new InlineSpiller(Pass, MF, VRM, VRAI);
}
//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance for
// spill slots which can be important in tight loops.
/// isCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return Register().
static Register isCopyOf(const MachineInstr &MI, Register Reg,
                         const TargetInstrInfo &TII) {
  if (!TII.isCopyInstr(MI))
    return Register();

  const MachineOperand &DstOp = MI.getOperand(0);
  const MachineOperand &SrcOp = MI.getOperand(1);

  // TODO: Probably only worth allowing subreg copies with undef dests.
  if (DstOp.getSubReg() != SrcOp.getSubReg())
    return Register();

  if (DstOp.getReg() == Reg)
    return SrcOp.getReg();
  if (SrcOp.getReg() == Reg)
    return DstOp.getReg();

  return Register();
}
/// Check for a copy bundle as formed by SplitKit.
static Register isCopyOfBundle(const MachineInstr &FirstMI, Register Reg,
                               const TargetInstrInfo &TII) {
  if (!FirstMI.isBundled())
    return isCopyOf(FirstMI, Reg, TII);

  assert(!FirstMI.isBundledWithPred() && FirstMI.isBundledWithSucc() &&
         "expected to see first instruction in bundle");

  Register SnipReg;
  MachineBasicBlock::const_instr_iterator I = FirstMI.getIterator();
  while (I->isBundledWithSucc()) {
    const MachineInstr &MI = *I;
    auto CopyInst = TII.isCopyInstr(MI);
    if (!CopyInst)
      return Register();

    const MachineOperand &DstOp = *CopyInst->Destination;
    const MachineOperand &SrcOp = *CopyInst->Source;
    if (DstOp.getReg() == Reg) {
      if (!SnipReg)
        SnipReg = SrcOp.getReg();
      else if (SnipReg != SrcOp.getReg())
        return Register();
    } else if (SrcOp.getReg() == Reg) {
      if (!SnipReg)
        SnipReg = DstOp.getReg();
      else if (SnipReg != DstOp.getReg())
        return Register();
    }

    ++I;
  }

  return SnipReg;
}
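/// Ensure LiveIntervals has an interval computed for every virtual register
/// defined by \p MI; getInterval() computes the interval on demand, so the
/// calls below are made purely for that side effect.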
static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
  for (const MachineOperand &MO : MI.all_defs())
    if (MO.getReg().isVirtual())
      LIS.getInterval(MO.getReg());
}
/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  Register Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills.
  // Exception is done for statepoint instructions which will fold fills
  // into their operands.
  // We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %snip = STATEPOINT %snip in var arg area
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (!LIS.intervalIsInOneMBB(SnipLI))
    return false;

  // Number of defs should not exceed 2, not accounting defs coming from
  // statepoint instructions.
  unsigned NumValNums = SnipLI.getNumValNums();
  for (auto *VNI : SnipLI.vnis()) {
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (MI->getOpcode() == TargetOpcode::STATEPOINT)
      --NumValNums;
  }
  if (NumValNums > 2)
    return false;

  MachineInstr *UseMI = nullptr;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_bundle_nodbg_iterator
           RI = MRI.reg_bundle_nodbg_begin(SnipLI.reg()),
           E = MRI.reg_bundle_nodbg_end();
       RI != E;) {
    MachineInstr &MI = *RI++;

    // Allow copies to/from Reg.
    if (isCopyOfBundle(MI, Reg, TII))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    if (StatepointOpers::isFoldableReg(&MI, SnipLI.reg()))
      continue;

    // Allow a single additional instruction.
    if (UseMI && &MI != UseMI)
      return false;
    UseMI = &MI;
  }
  return true;
}
/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  Register Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();
  RegsReplaced.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
    Register SnipReg = isCopyOfBundle(MI, Reg, TII);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(&MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}
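/// A sibling is a virtual register that was split from the same original
/// register as the one currently being spilled, and therefore shares its
/// stack slot.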
bool InlineSpiller::isSibling(Register Reg) {
  return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
}
/// It is beneficial to spill to earlier place in the same BB in case
/// as follows:
/// There is an alternative def earlier in the same MBB.
/// Hoist the spill as far as possible in SpillMBB. This can ease
/// register pressure:
///
///   x = def
///   y = use x
///   s = copy x
///
/// Hoisting the spill of s to immediately after the def removes the
/// interference between x and y:
///
///   x = def
///   spill x
///   y = use x
///
/// This hoist only helps when the copy kills its source.
bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
                                       MachineInstr &CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
#ifndef NDEBUG
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
#endif

  Register SrcReg = CopyMI.getOperand(1).getReg();
  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
  LiveQueryResult SrcQ = SrcLI.Query(Idx);
  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
    return false;

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
                    << *StackInt << '\n');

  // We are going to spill SrcVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SrcLI, SrcVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
  MachineBasicBlock::iterator MII;
  if (SrcVNI->isPHIDef())
    MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin(), SrcReg);
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  MachineInstrSpan MIS(MII, MBB);
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
                          MRI.getRegClass(SrcReg), &TRI, Register());
  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
    getVDefInterval(MI, LIS);
  --MII; // Point to store instruction.
  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);

  // If only one store instruction is required for the spill, add it to the
  // mergeable list. On X86 AMX, two instructions are required to store, so we
  // disable merging in that case.
  if (MIS.begin() == MII)
    HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
  ++NumSpills;
  return true;
}
/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval *, VNInfo *>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    std::tie(LI, VNI) = WorkList.pop_back_val();
    Register Reg = LI->reg();
    LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
                      << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineInstr &MI :
         llvm::make_early_inc_range(MRI.use_nodbg_bundles(Reg))) {
      if (!MI.mayStore() && !TII.isCopyInstr(MI))
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (Register DstReg = isCopyOfBundle(MI, Reg, TII)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI.setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(&MI);
        ++NumSpillsRemoved;
        if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
          --NumSpills;
      }
    }
  } while (!WorkList.empty());
}
//===----------------------------------------------------------------------===//
//                            Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval *, VNInfo *>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    std::tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI).second)
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock *P : MBB->predecessors()) {
        VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}
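/// Return true when rematerializing the value used by \p MI for \p VReg is
/// guaranteed to still leave the allocator able to assign the result; for
/// STATEPOINTs with many vreg operands this cannot always be guaranteed.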
bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
                                                     MachineInstr &MI) {
  if (!RestrictStatepointRemat)
    return true;
  // Here's a quick explanation of the problem we're trying to handle here:
  // * There are some pseudo instructions with more vreg uses than there are
  //   physical registers on the machine.
  // * This is normally handled by spilling the vreg, and folding the reload
  //   into the user instruction. (Thus decreasing the number of used vregs
  //   until the remainder can be assigned to physregs.)
  // * However, since we may try to spill vregs in any order, we can end up
  //   trying to spill each operand to the instruction, and then rematting it
  //   instead. When that happens, the new live intervals (for the remats) are
  //   expected to be trivially assignable (i.e. RS_Done). However, since we
  //   may have more remats than physregs, we're guaranteed to fail to assign
  //   one.
  // At the moment, we only handle this for STATEPOINTs since they're the only
  // pseudo op where we've seen this. If we start seeing other instructions
  // with the same problem, we need to revisit this.
  if (MI.getOpcode() != TargetOpcode::STATEPOINT)
    return true;
  // For STATEPOINTs we allow re-materialization for fixed arguments only,
  // hoping that the number of physical registers is enough to cover all fixed
  // arguments. If it is not true we need to revisit it.
  for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
                EndIdx = MI.getNumOperands();
       Idx < EndIdx; ++Idx) {
    MachineOperand &MO = MI.getOperand(Idx);
    if (MO.isReg() && MO.getReg() == VReg)
      return false;
  }
  return true;
}
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
  // Analyze instruction.
  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
  VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);

  if (!RI.Reads)
    return false;

  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (MachineOperand &MO : MI.all_uses())
      if (MO.getReg() == VirtReg.reg())
        MO.setIsUndef();
    LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
    return true;
  }

  if (SnippetCopies.count(&MI))
    return false;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
  LiveRangeEdit::Remat RM(ParentVNI);
  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);

  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  if (RI.Tied) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
    return false;
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // If we can't guarantee that we'll be able to actually assign the new vreg,
  // we can't remat.
  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // Allocate a new register for the remat.
  Register NewVReg = Edit->createFrom(Original);

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx =
      Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);

  // We take the DebugLoc from MI, since OrigMI may be attributed to a
  // different source location.
  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
  NewMI->setDebugLoc(MI.getDebugLoc());

  LLVM_DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t'
                    << *LIS.getInstructionFromIndex(DefIdx));

  for (const auto &OpPair : Ops) {
    MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  LLVM_DEBUG(dbgs() << "\t        " << UseIdx << '\t' << MI << '\n');

  ++NumRemats;
  return true;
}
/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  if (!Edit->anyRematerializable())
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (Register Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
      // Debug values are not allowed to affect codegen.
      if (MI.isDebugValue())
        continue;

      assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
             "instruction that isn't a DBG_VALUE");

      anyRemat |= reMaterializeFor(LI, MI);
    }
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (Register Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (VNInfo *VNI : LI.vnis()) {
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
      // If MI is a bundle header, also try removing copies inside the bundle,
      // otherwise the verifier would complain "live range continues after dead
      // def flag".
      if (MI->isBundledWithSucc() && !MI->isBundledWithPred()) {
        MachineBasicBlock::instr_iterator BeginIt = MI->getIterator(),
                                          EndIt = MI->getParent()->instr_end();
        ++BeginIt; // Skip MI that was already handled.

        bool OnlyDeadCopies = true;
        for (MachineBasicBlock::instr_iterator It = BeginIt;
             It != EndIt && It->isBundledWithPred(); ++It) {
          auto DestSrc = TII.isCopyInstr(*It);
          bool IsCopyToDeadReg =
              DestSrc && DestSrc->Destination->getReg() == Reg;
          if (!IsCopyToDeadReg) {
            OnlyDeadCopies = false;
            break;
          }
        }
        if (OnlyDeadCopies) {
          for (MachineBasicBlock::instr_iterator It = BeginIt;
               It != EndIt && It->isBundledWithPred(); ++It) {
            It->addRegisterDead(Reg, &TRI);
            LLVM_DEBUG(dbgs() << "All defs dead: " << *It);
            DeadDefs.push_back(&*It);
          }
        }
      }
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted too.
  if (DeadDefs.empty())
    return;
  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);

  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
  // removed, PHI VNI are still left in the LiveInterval.
  // So to get rid of unused reg, we need to check whether it has non-dbg
  // reference instead of whether it has non-empty interval.
  unsigned ResultPos = 0;
  for (Register Reg : RegsToSpill) {
    if (MRI.reg_nodbg_empty(Reg)) {
      Edit->eraseVirtReg(Reg);
      RegsReplaced.push_back(Reg);
      continue;
    }

    assert(LIS.hasInterval(Reg) &&
           (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
           "Empty and not used live-range?!");

    RegsToSpill[ResultPos++] = Reg;
  }
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  LLVM_DEBUG(dbgs() << RegsToSpill.size()
                    << " registers to spill after remat.\n");
}
//===----------------------------------------------------------------------===//
//                                 Spilling
//===----------------------------------------------------------------------===//
/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
  int FI = 0;
  Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
  bool IsLoad = InstrReg;
  if (!IsLoad)
    InstrReg = TII.isStoreToStackSlot(*MI, FI);

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  if (!IsLoad)
    HSpiller.rmFromMergeableSpills(*MI, StackSlot);

  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(*MI);
  MI->eraseFromParent();

  if (IsLoad) {
    ++NumReloadsRemoved;
    --NumReloads;
  } else {
    ++NumSpillsRemoved;
    --NumSpills;
  }

  return true;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Dump the range of instructions from B to E with their slot indexes.
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
                                               MachineBasicBlock::iterator E,
                                               LiveIntervals const &LIS,
                                               const char *const header,
                                               Register VReg = Register()) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    NextLine = ' ';
    SlotIndent = ' ';
  }

  dbgs() << '\t' << header << ": " << NextLine;

  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
    SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();

    // If a register was passed in and this instruction has it as a
    // destination that is marked as an early clobber, print the
    // early-clobber slot index.
    if (VReg) {
      MachineOperand *MO = I->findRegisterDefOperand(VReg, /*TRI=*/nullptr);
      if (MO && MO->isEarlyClobber())
        Idx = Idx.getRegSlot(true);
    }

    dbgs() << SlotIndent << Idx << '\t' << *I;
  }
}
#endif
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from AnalyzeVirtRegInBundle().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;

  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = TII.isCopyInstr(*MI).has_value();
  Register ImpReg;

  // TII::foldMemoryOperand will do what we need here for statepoint
  // (fold load into use and remove corresponding def). We will replace
  // uses of removed def with loads (spillAroundUses).
  // For that to work we need to untie def and use to pass it through
  // foldMemoryOperand and signal foldPatchpoint that it is allowed to
  // fold them.
  bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;

  // Spill subregs if the target allows it.
  // We always want to spill subregs for stackmap/patchpoint pseudos.
  bool SpillSubRegs = TII.isSubregFoldable() ||
                      MI->getOpcode() == TargetOpcode::STATEPOINT ||
                      MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                      MI->getOpcode() == TargetOpcode::STACKMAP;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (const auto &OpPair : Ops) {
    unsigned Idx = OpPair.second;
    assert(MI == OpPair.first && "Instruction conflict during operand folding");
    MachineOperand &MO = MI->getOperand(Idx);

    // No point restoring an undef read, and we'll produce an invalid live
    // range anyway.
    // TODO: Is this really the correct way to handle undef tied uses?
    if (MO.isUse() && !MO.readsReg() && !MO.isTied())
      continue;

    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }

    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  // If we only have implicit uses, we won't be able to fold that.
  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
  if (FoldOps.empty())
    return false;
  MachineInstrSpan MIS(MI, MI->getParent());

  SmallVector<std::pair<unsigned, unsigned>> TiedOps;
  if (UntieRegs)
    for (unsigned Idx : FoldOps) {
      MachineOperand &MO = MI->getOperand(Idx);
      if (!MO.isTied())
        continue;
      unsigned Tied = MI->findTiedOperandIdx(Idx);
      if (MO.isUse())
        TiedOps.emplace_back(Tied, Idx);
      else {
        assert(MO.isDef() && "Tied to not use and def?");
        TiedOps.emplace_back(Idx, Tied);
      }
      MI->untieRegOperand(Idx);
    }

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
             : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
  if (!FoldMI) {
    // Folding failed; restore the tied operands.
    for (auto Tied : TiedOps)
      MI->tieOperands(Tied.first, Tied.second);
    return false;
  }
  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    Register Reg = MO->getReg();
    if (!Reg || Reg.isVirtual() || MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
    if (RI.FullyDefined)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
  }

  int FI;
  if (TII.isStoreToStackSlot(*MI, FI) &&
      HSpiller.rmFromMergeableSpills(*MI, FI))
    --NumSpills;
  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
  // Update the call site info.
  if (MI->isCandidateForCallSiteEntry())
    MI->getMF()->moveCallSiteInfo(MI, FoldMI);
  // If we've folded a store into an instruction labelled with debug-info,
  // record a substitution from the old operand to the memory operand. Handle
  // the simple common case where operand 0 is the one being folded, plus when
  // the destination operand is also a tied def. More values could be
  // substituted / preserved with more analysis.
  if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
    auto MakeSubstitution = [this, FoldMI, MI, &Ops]() {
      // Substitute old operand zero to the new instructions memory operand.
      unsigned OldOperandNum = Ops[0].second;
      unsigned NewNum = FoldMI->getDebugInstrNum();
      unsigned OldNum = MI->getDebugInstrNum();
      MF.makeDebugValueSubstitution({OldNum, OldOperandNum},
                                    {NewNum, MachineFunction::DebugOperandMemNumber});
    };

    const MachineOperand &Op0 = MI->getOperand(Ops[0].second);
    if (Ops.size() == 1 && Op0.isDef()) {
      MakeSubstitution();
    } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
               Op0.getReg() == MI->getOperand(1).getReg()) {
      MakeSubstitution();
    }
  } else if (MI->peekDebugInstrNum()) {
    // This is a debug-labelled instruction, but the operand being folded isn't
    // at operand zero. Most likely this means it's a load being folded in.
    // Substitute any register defs from operand zero up to the one being
    // folded -- past that point, we don't know what the new operand indexes
    // will be.
    MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
  }

  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineInstr &MI : MIS)
    if (&MI != FoldMI)
      LIS.InsertMachineInstrInMaps(MI);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->removeOperand(i - 1);
    }

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                                "folded"));

  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0) {
    ++NumSpills;
    // If only one store instruction is required for the spill, add it to the
    // mergeable list. On X86 AMX, two instructions are required to store, so
    // we disable merging in that case.
    if (std::distance(MIS.begin(), MIS.end()) <= 1)
      HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
  } else
    ++NumReloads;
  return true;
}
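/// insertReload - Insert a reload of NewVReg before MI.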
void InlineSpiller::insertReload(Register NewVReg,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
                           MRI.getRegClass(NewVReg), &TRI, Register());

  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
                                                NewVReg));
  ++NumReloads;
}
/// Check if \p Def fully defines a VReg with an undefined value.
/// If that's the case, that means the value of VReg is actually
/// undef, and the store to the spill slot can be omitted.
static bool isRealSpill(const MachineInstr &Def) {
  if (!Def.isImplicitDef())
    return true;

  // We can say that the VReg defined by Def is undef, only if it is
  // fully defined by Def. Otherwise, some of the lanes may not be
  // undef and the value of the VReg matters.
  return Def.getOperand(0).getSubReg();
}
/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  // Spills are not terminators, so inserting spills after terminators will
  // violate invariants in MachineVerifier.
  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  MachineBasicBlock::iterator SpillBefore = std::next(MI);
  bool IsRealSpill = isRealSpill(*MI);

  if (IsRealSpill)
    TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
                            MRI.getRegClass(NewVReg), &TRI, Register());
  else
    // Don't spill undef value.
    // Anything works for undef, in particular keeping the memory
    // uninitialized is a viable option and it saves code size and
    // run time.
    BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
        .addReg(NewVReg, getKillRegState(isKill));

  MachineBasicBlock::iterator Spill = std::next(MI);
  LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
  for (const MachineInstr &MI : make_range(Spill, MIS.end()))
    getVDefInterval(MI, LIS);

  LLVM_DEBUG(
      dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
  ++NumSpills;
  // If only one store instruction is required for the spill, add it to the
  // mergeable list. On X86 AMX, two instructions are required to store, so we
  // disable merging in that case.
  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
    HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
}
/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(Register Reg) {
  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
    // Debug values are not allowed to affect codegen.
    if (MI.isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      MachineBasicBlock *MBB = MI.getParent();
      LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI);
      buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg);
      MBB->erase(MI);
      continue;
    }

    assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
           "instruction that isn't a DBG_VALUE");

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(&MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(&MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
    VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    Register SibReg = isCopyOfBundle(MI, Reg, TII);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI);
        SnippetCopies.insert(&MI);
        continue;
      }
      if (RI.Writes) {
        if (hoistSpillInsideBB(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI.getOperand(0).setIsDead();
          DeadDefs.push_back(&MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Create a new virtual register for spill/fill.
    // FIXME: Infer regclass from instruction alone.
    Register NewVReg = Edit->createFrom(Reg);

    if (RI.Reads)
      insertReload(NewVReg, Idx, &MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (const auto &OpPair : Ops) {
      MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n');

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes)
      if (hasLiveDef)
        insertSpill(NewVReg, true, &MI);
  }
}
/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (Register Reg : RegsToSpill)
    StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
                                     StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (Register Reg : RegsToSpill) {
    spillAroundUses(Reg);
    // Assign all of the spilled registers to the slot so that
    // LiveDebugVariables knows about these locations later on.
    if (VRM.getStackSlot(Reg) == VirtRegMap::NO_STACK_SLOT)
      VRM.assignVirt2StackSlot(Reg, StackSlot);
  }

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
  }

  // Finally delete the SnippetCopies.
  for (Register Reg : RegsToSpill) {
    for (MachineInstr &MI :
         llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      LIS.getSlotIndexes()->removeSingleMachineInstrFromMaps(MI);
      MI.eraseFromBundle();
    }
  }

  // Delete all spilled registers.
  for (Register Reg : RegsToSpill)
    Edit->eraseVirtReg(Reg);
}
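/// spill - The main entry point: spill \p edit's live range to the stack,
/// together with all connected snippets, rematerializing uses where possible.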
void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!Register::isStackSlot(edit.getReg()) &&
         "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = nullptr;

  LLVM_DEBUG(dbgs() << "Inline spilling "
                    << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
                    << ':' << edit.getParent() << "\nFrom original "
                    << printReg(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, VRAI);
}

/// Optimizations after all the reg selections and spills are done.
void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
/// When a spill is inserted, add the spill to MergeableSpills map.
void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                                            unsigned Original) {
  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
  LiveInterval &OrigLI = LIS.getInterval(Original);
  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
  // LiveInterval may be cleared after all its references are spilled.
  if (!StackSlotToOrigLI.contains(StackSlot)) {
    auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
    LI->assign(OrigLI, Allocator);
    StackSlotToOrigLI[StackSlot] = std::move(LI);
  }
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  MergeableSpills[MIdx].insert(&Spill);
}
/// When a spill is removed, remove the spill from MergeableSpills map.
/// Return true if the spill is removed successfully.
bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
                                             int StackSlot) {
  auto It = StackSlotToOrigLI.find(StackSlot);
  if (It == StackSlotToOrigLI.end())
    return false;
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  return MergeableSpills[MIdx].erase(&Spill);
}
/// Check BB to see if it is a possible target BB to place a hoisted spill,
/// i.e., there should be a living sibling of OrigReg at the insert point.
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                                     MachineBasicBlock &BB, Register &LiveReg) {
  SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
  // The original def could be after the last insert point in the root block,
  // we can't hoist to here.
  if (Idx < OrigVNI.def) {
    // TODO: We could be better here. If LI is not alive in landing pad
    // we could hoist spill after LIP.
    LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
    return false;
  }
  Register OrigReg = OrigLI.reg();
  SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");

  for (const Register &SibReg : Siblings) {
    LiveInterval &LI = LIS.getInterval(SibReg);
    VNInfo *VNI = LI.getVNInfoAt(Idx);
    if (VNI) {
      LiveReg = SibReg;
      return true;
    }
  }
  return false;
}
/// Remove redundant spills in the same BB. Save those redundant spills in
/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
void HoistSpillHelper::rmRedundantSpills(
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // For each spill seen, check SpillBBToSpill[] and see if its BB already has
  // another spill inside. If a BB contains more than one spill, only keep the
  // earlier spill with smaller SlotIndex.
  for (auto *const CurrentSpill : Spills) {
    MachineBasicBlock *Block = CurrentSpill->getParent();
    MachineDomTreeNode *Node = MDT.getNode(Block);
    MachineInstr *PrevSpill = SpillBBToSpill[Node];
    if (PrevSpill) {
      SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
      SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
      MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
      MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
      SpillsToRm.push_back(SpillToRm);
      SpillBBToSpill[MDT.getNode(Block)] = SpillToKeep;
    } else {
      SpillBBToSpill[MDT.getNode(Block)] = CurrentSpill;
    }
  }
  for (auto *const SpillToRm : SpillsToRm)
    Spills.erase(SpillToRm);
}
/// Starting from \p Root find a top-down traversal order of the dominator
/// tree to visit all basic blocks containing the elements of \p Spills.
/// Redundant spills will be found and put into \p SpillsToRm at the same
/// time. \p SpillBBToSpill will be populated as part of the process and
/// maps a basic block to the first store occurring in the basic block.
/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
void HoistSpillHelper::getVisitOrders(
    MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineDomTreeNode *> &Orders,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // The set contains all the possible BB nodes to which we may hoist
  // original spills.
  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
  // Save the BB nodes on the path from the first BB node containing
  // non-redundant spill to the Root node.
  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
  // All the spills to be hoisted must originate from a single def instruction
  // to the OrigReg. It means the def instruction should dominate all the spills
  // to be hoisted. We choose the BB where the def instruction is located as
  // the Root.
  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
  // For every node on the dominator tree with a spill, walk up the dominator
  // tree towards the Root node until it is reached. If there is another node
  // containing a spill in the middle of the path, the spill seen earlier will
  // be redundant and the node containing it will be removed. All the nodes on
  // the path starting from the first node with non-redundant spill to the Root
  // node will be added to the WorkSet, which will contain all the possible
  // locations where spills may be hoisted to after the loop below is done.
  for (auto *const Spill : Spills) {
    MachineBasicBlock *Block = Spill->getParent();
    MachineDomTreeNode *Node = MDT[Block];
    MachineInstr *SpillToRm = nullptr;
    while (Node != RootIDomNode) {
      // If Node dominates Block, and it already contains a spill, the spill in
      // Block will be redundant.
      if (Node != MDT[Block] && SpillBBToSpill[Node]) {
        SpillToRm = SpillBBToSpill[MDT[Block]];
        break;
        /// If we see the Node already in WorkSet, the path from the Node to
        /// the Root node must already be traversed by another spill.
        /// Then no need to repeat.
      } else if (WorkSet.count(Node)) {
        break;
      } else {
        NodesOnPath.insert(Node);
      }
      Node = Node->getIDom();
    }
    if (SpillToRm) {
      SpillsToRm.push_back(SpillToRm);
    } else {
      // Add a BB containing the original spills to SpillsToKeep -- i.e.,
      // set the initial status before hoisting starts. The value of BBs
      // containing original spills is set to 0, in order to discriminate
      // them from BBs containing hoisted spills which will be inserted into
      // SpillsToKeep later during hoisting.
      SpillsToKeep[MDT[Block]] = 0;
      WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
    }
    NodesOnPath.clear();
  }

  // Sort the nodes in WorkSet in top-down order and save the nodes
  // in Orders. Orders will be used for hoisting in runHoistSpills.
  unsigned idx = 0;
  Orders.push_back(MDT.getNode(Root));
  do {
    MachineDomTreeNode *Node = Orders[idx++];
    for (MachineDomTreeNode *Child : Node->children()) {
      if (WorkSet.count(Child))
        Orders.push_back(Child);
    }
  } while (idx != Orders.size());
  assert(Orders.size() == WorkSet.size() &&
         "Orders should have the same size as WorkSet");

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++)
    LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
  LLVM_DEBUG(dbgs() << "\n");
#endif
}
/// Try to hoist spills according to BB hotness. The spills to be removed will
/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
/// \p SpillsToIns.
void HoistSpillHelper::runHoistSpills(
    LiveInterval &OrigLI, VNInfo &OrigVNI,
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
  // Visit order of dominator tree nodes.
  SmallVector<MachineDomTreeNode *, 32> Orders;
  // SpillsToKeep contains all the nodes where spills are to be inserted
  // during hoisting. If the spill to be inserted is an original spill
  // (not a hoisted one), the value of the map entry is 0. If the spill
  // is a hoisted spill, the value of the map entry is the VReg to be used
  // as the source of the spill.
  DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
  // Map from BB to the first spill inside of it.
  DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;

  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);

  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
                 SpillBBToSpill);

  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
  // nodes set and the cost of all the spills inside those nodes.
  // The nodes set are the locations where spills are to be inserted
  // in the subtree of current node.
  using NodesCostPair =
      std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;

  // Iterate Orders set in reverse order, which will be a bottom-up order
  // in the dominator tree. Once we visit a dom tree node, we know its
  // children have already been visited and the spill locations in the
  // subtrees of all the children have been determined.
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++) {
    MachineBasicBlock *Block = (*RIt)->getBlock();

    // If Block contains an original spill, simply continue.
    if (SpillsToKeep.contains(*RIt) && !SpillsToKeep[*RIt]) {
      SpillsInSubTreeMap[*RIt].first.insert(*RIt);
      // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
      SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
      continue;
    }

    // Collect spills in subtree of current node (*RIt) to
    // SpillsInSubTreeMap[*RIt].first.
    for (MachineDomTreeNode *Child : (*RIt)->children()) {
      if (!SpillsInSubTreeMap.contains(Child))
        continue;
      // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
      // should be placed before getting the begin and end iterators of
      // SpillsInSubTreeMap[Child].first, or else the iterators may be
      // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time
      // and the map grows and then the original buckets in the map are moved.
      SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
          SpillsInSubTreeMap[*RIt].first;
      BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
      SubTreeCost += SpillsInSubTreeMap[Child].second;
      auto BI = SpillsInSubTreeMap[Child].first.begin();
      auto EI = SpillsInSubTreeMap[Child].first.end();
      SpillsInSubTree.insert(BI, EI);
      SpillsInSubTreeMap.erase(Child);
    }

    SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
        SpillsInSubTreeMap[*RIt].first;
    BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
    // No spills in subtree, simply continue.
    if (SpillsInSubTree.empty())
      continue;

    // Check whether Block is a possible candidate to insert spill.
    Register LiveReg;
    if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
      continue;

    // If there are multiple spills that could be merged, bias a little
    // to hoist the spill.
    BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
                                       ? BranchProbability(9, 10)
                                       : BranchProbability(1, 1);
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      // Hoist: Move spills to current Block.
      for (auto *const SpillBB : SpillsInSubTree) {
        // When SpillBB is a BB that contains an original spill, insert the
        // spill to SpillsToRm.
        if (SpillsToKeep.contains(SpillBB) && !SpillsToKeep[SpillBB]) {
          MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
          SpillsToRm.push_back(SpillToRm);
        }
        // SpillBB will not contain spill anymore, remove it from SpillsToKeep.
        SpillsToKeep.erase(SpillBB);
      }
      // Current Block is the BB containing the new hoisted spill. Add it to
      // SpillsToKeep. LiveReg is the source of the new spill.
      SpillsToKeep[*RIt] = LiveReg;
      LLVM_DEBUG({
        dbgs() << "spills in BB: ";
        for (const auto Rspill : SpillsInSubTree)
          dbgs() << Rspill->getBlock()->getNumber() << " ";
        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
               << "\n";
      });
      SpillsInSubTree.clear();
      SpillsInSubTree.insert(*RIt);
      SubTreeCost = MBFI.getBlockFreq(Block);
    }
  }
  // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
  // save them to SpillsToIns.
  for (const auto &Ent : SpillsToKeep) {
    if (Ent.second)
      SpillsToIns[Ent.first->getBlock()] = Ent.second;
  }
}
/// For spills with equal values, remove redundant spills and hoist those left
/// to less hot spots.
///
/// Spills with equal values will be collected into the same set in
/// MergeableSpills when spill is inserted. These equal spills originate from
/// the same defining instruction and are dominated by the instruction.
/// Before hoisting all the equal spills, redundant spills inside the same
/// BB are first marked to be deleted. Then starting from the spills left, walk
/// up on the dominator tree towards the Root node where the define instruction
/// is located, mark the dominated spills to be deleted along the way and
/// collect the BB nodes on the path from non-dominated spills to the define
/// instruction into a WorkSet. The nodes in WorkSet are the candidate places
/// where we are considering to hoist the spills. We iterate the WorkSet in
/// bottom-up order, and for each node, we will decide whether to hoist spills
/// inside its subtree to that node. In this way, we can get benefit locally
/// even if hoisting all the equal spills to one cold place is impossible.
void HoistSpillHelper::hoistAllSpills() {
  SmallVector<Register, 4> NewVRegs;
  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);

  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    Register Reg = Register::index2VirtReg(i);
    Register Original = VRM.getPreSplitReg(Reg);
    if (!MRI.def_empty(Reg))
      Virt2SiblingsMap[Original].insert(Reg);
  }

  // Each entry in MergeableSpills contains a spill set with equal values.
  for (auto &Ent : MergeableSpills) {
    int Slot = Ent.first.first;
    LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
    VNInfo *OrigVNI = Ent.first.second;
    SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
    if (Ent.second.empty())
      continue;

    LLVM_DEBUG({
      dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
             << "Equal spills in BB: ";
      for (const auto spill : EqValSpills)
        dbgs() << spill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // SpillsToRm is the spill set to be removed from EqValSpills.
    SmallVector<MachineInstr *, 16> SpillsToRm;
    // SpillsToIns is the spill set to be newly inserted after hoisting.
    DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;

    runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);

    LLVM_DEBUG({
      dbgs() << "Finally inserted spills in BB: ";
      for (const auto &Ispill : SpillsToIns)
        dbgs() << Ispill.first->getNumber() << " ";
      dbgs() << "\nFinally removed spills in BB: ";
      for (const auto Rspill : SpillsToRm)
        dbgs() << Rspill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // Stack live range update.
    LiveInterval &StackIntvl = LSS.getInterval(Slot);
    if (!SpillsToIns.empty() || !SpillsToRm.empty())
      StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
                                     StackIntvl.getValNumInfo(0));

    // Insert hoisted spills.
    for (auto const &Insert : SpillsToIns) {
      MachineBasicBlock *BB = Insert.first;
      Register LiveReg = Insert.second;
      MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
      MachineInstrSpan MIS(MII, BB);
      TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
                              MRI.getRegClass(LiveReg), &TRI, Register());
      LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
      for (const MachineInstr &MI : make_range(MIS.begin(), MII))
        getVDefInterval(MI, LIS);
      ++NumSpills;
    }

    // Remove redundant spills or change them to dead instructions.
    NumSpills -= SpillsToRm.size();
    for (auto *const RMEnt : SpillsToRm) {
      RMEnt->setDesc(TII.get(TargetOpcode::KILL));
      for (unsigned i = RMEnt->getNumOperands(); i; --i) {
        MachineOperand &MO = RMEnt->getOperand(i - 1);
        if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
          RMEnt->removeOperand(i - 1);
      }
    }
    Edit.eliminateDeadDefs(SpillsToRm, {});
  }
}
/// For VirtReg clone, the \p New register should have the same physreg or
/// stackslot as the \p Old register.
void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
  if (VRM.hasPhys(Old))
    VRM.assignVirt2Phys(New, VRM.getPhys(Old));
  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
    VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
  else
    llvm_unreachable("VReg should be assigned either physreg or stackslot");
  if (VRM.hasShape(Old))
    VRM.assignVirt2Shape(New, VRM.getShape(Old));
}