//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#include "SplitKit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/Spiller.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumSpilledRanges,   "Number of spilled live ranges");
STATISTIC(NumSnippets,        "Number of spilled snippets");
STATISTIC(NumSpills,          "Number of spills inserted");
STATISTIC(NumSpillsRemoved,   "Number of spills removed");
STATISTIC(NumReloads,         "Number of reloads inserted");
STATISTIC(NumReloadsRemoved,  "Number of reloads removed");
STATISTIC(NumFolded,          "Number of folded stack accesses");
STATISTIC(NumFoldedLoads,     "Number of folded loads");
STATISTIC(NumRemats,          "Number of rematerialized defs for spilling");

static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
                                     cl::desc("Disable inline spill hoisting"));
static cl::opt<bool>
RestrictStatepointRemat("restrict-statepoint-remat",
                        cl::init(false), cl::Hidden,
                        cl::desc("Restrict remat for statepoint operands"));

namespace {

class HoistSpillHelper : private LiveRangeEdit::Delegate {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  InsertPointAnalysis IPA;

  // Map from StackSlot to the LiveInterval of the original register.
  // Note the LiveInterval of the original register may have been deleted
  // after it is spilled. We keep a copy here to track the range where
  // spills can be moved.
  DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;

  // Map from a pair of (StackSlot, Original VNI) to a set of spills which
  // have the same stack slot and have equal values defined by Original VNI.
  // These spills are mergeable and are hoist candidates.
  using MergeableSpillsMap =
      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
  MergeableSpillsMap MergeableSpills;

  /// This is the map from original register to a set containing all its
  /// siblings. To hoist a spill to another BB, we need to find a live
  /// sibling there and use it as the source of the new spill.
  DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;

  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                     MachineBasicBlock &BB, Register &LiveReg);

  void rmRedundantSpills(
      SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void getVisitOrders(
      MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineDomTreeNode *> &Orders,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
                      SmallPtrSet<MachineInstr *, 16> &Spills,
                      SmallVectorImpl<MachineInstr *> &SpillsToRm,
                      DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);

public:
  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
                   VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
        IPA(LIS, mf.getNumBlockIDs()) {}

  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                            unsigned Original);
  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
  void hoistAllSpills();
  void LRE_DidCloneVirtReg(Register, Register) override;
};

class InlineSpiller : public Spiller {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit = nullptr;
  LiveInterval *StackInt = nullptr;
  int StackSlot;
  Register Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<Register, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  // For bundled copies, this will only include the first header copy.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  // Object that records spill information and does the hoisting.
  HoistSpillHelper HSpiller;

  // Live range weight calculator.
  VirtRegAuxInfo &VRAI;

  ~InlineSpiller() override = default;

public:
  InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
                VirtRegAuxInfo &VRAI)
      : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()),
        LSS(Pass.getAnalysis<LiveStacks>()),
        MDT(Pass.getAnalysis<MachineDominatorTree>()),
        Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM),
        MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
        TRI(*MF.getSubtarget().getRegisterInfo()),
        MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()),
        HSpiller(Pass, MF, VRM), VRAI(VRAI) {}

  void spill(LiveRangeEdit &) override;
  void postOptimization() override;

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }

  bool isSibling(Register Reg);
  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, Register Reg);
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
                         MachineInstr *LoadMI = nullptr);
  void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
  void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);

  void spillAroundUses(Register Reg);
  void spillAll();
};

} // end anonymous namespace

Spiller::~Spiller() = default;

void Spiller::anchor() {}

Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
                                   MachineFunction &MF, VirtRegMap &VRM,
                                   VirtRegAuxInfo &VRAI) {
  return new InlineSpiller(Pass, MF, VRM, VRAI);
}

//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance
// for spill slots, which can be important in tight loops.

/// isCopyOf - If MI is a COPY to or from Reg, return the other register;
/// otherwise return an invalid register.
static Register isCopyOf(const MachineInstr &MI, Register Reg,
                         const TargetInstrInfo &TII) {
  if (!TII.isCopyInstr(MI))
    return Register();

  const MachineOperand &DstOp = MI.getOperand(0);
  const MachineOperand &SrcOp = MI.getOperand(1);

  // TODO: Probably only worth allowing subreg copies with undef dests.
  if (DstOp.getSubReg() != SrcOp.getSubReg())
    return Register();
  if (DstOp.getReg() == Reg)
    return SrcOp.getReg();
  if (SrcOp.getReg() == Reg)
    return DstOp.getReg();
  return Register();
}

/// Check for a copy bundle as formed by SplitKit.
static Register isCopyOfBundle(const MachineInstr &FirstMI, Register Reg,
                               const TargetInstrInfo &TII) {
  if (!FirstMI.isBundled())
    return isCopyOf(FirstMI, Reg, TII);

  assert(!FirstMI.isBundledWithPred() && FirstMI.isBundledWithSucc() &&
         "expected to see first instruction in bundle");

  Register SnipReg;
  MachineBasicBlock::const_instr_iterator I = FirstMI.getIterator();
  while (I->isBundledWithSucc()) {
    const MachineInstr &MI = *I;
    auto CopyInst = TII.isCopyInstr(MI);
    if (!CopyInst)
      return Register();

    const MachineOperand &DstOp = *CopyInst->Destination;
    const MachineOperand &SrcOp = *CopyInst->Source;
    if (DstOp.getReg() == Reg) {
      if (!SnipReg)
        SnipReg = SrcOp.getReg();
      else if (SnipReg != SrcOp.getReg())
        return Register();
    } else if (SrcOp.getReg() == Reg) {
      if (!SnipReg)
        SnipReg = DstOp.getReg();
      else if (SnipReg != DstOp.getReg())
        return Register();
    }

    ++I;
  }

  // Every instruction in the bundle was a copy between Reg and SnipReg, so
  // report the consistent other register.
  return SnipReg;
}

static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
  for (const MachineOperand &MO : MI.all_defs())
    if (MO.getReg().isVirtual())
      LIS.getInterval(MO.getReg());
}

/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  Register Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills.
  // An exception is made for statepoint instructions, which will fold fills
  // into their operands.
  // We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %snip = STATEPOINT %snip in var arg area
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (!LIS.intervalIsInOneMBB(SnipLI))
    return false;

  // The number of defs should not exceed 2, not counting defs coming from
  // statepoint instructions.
  unsigned NumValNums = SnipLI.getNumValNums();
  for (auto *VNI : SnipLI.vnis()) {
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (MI->getOpcode() == TargetOpcode::STATEPOINT)
      --NumValNums;
  }
  if (NumValNums > 2)
    return false;

  MachineInstr *UseMI = nullptr;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_bundle_nodbg_iterator
           RI = MRI.reg_bundle_nodbg_begin(SnipLI.reg()),
           E = MRI.reg_bundle_nodbg_end();
       RI != E;) {
    MachineInstr &MI = *RI++;

    // Allow copies to/from Reg.
    if (isCopyOfBundle(MI, Reg, TII))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    if (StatepointOpers::isFoldableReg(&MI, SnipLI.reg()))
      continue;

    // Allow a single additional instruction.
    if (UseMI && &MI != UseMI)
      return false;
    UseMI = &MI;
  }
  return true;
}

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  Register Reg = Edit->getReg();

  // The main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
    Register SnipReg = isCopyOfBundle(MI, Reg, TII);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(&MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}

bool InlineSpiller::isSibling(Register Reg) {
  return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
}

/// It is beneficial to spill to an earlier place in the same BB in the
/// following case:
/// There is an alternative def earlier in the same MBB.
/// Hoist the spill as far as possible in SpillMBB. This can ease
/// register pressure:
///
///   x = def
///   y = use x
///   s = copy x
///
/// Hoisting the spill of s to immediately after the def removes the
/// interference between x and y:
///
///   x = def
///   spill x
///   y = use killed x
///
/// This hoist only helps when the copy kills its source.
///
bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
                                       MachineInstr &CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
#ifndef NDEBUG
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
#endif

  Register SrcReg = CopyMI.getOperand(1).getReg();
  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
  LiveQueryResult SrcQ = SrcLI.Query(Idx);
  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
    return false;

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
                    << *StackInt << '\n');

  // We are going to spill SrcVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SrcLI, SrcVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
  MachineBasicBlock::iterator MII;
  if (SrcVNI->isPHIDef())
    MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  MachineInstrSpan MIS(MII, MBB);
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
                          MRI.getRegClass(SrcReg), &TRI, Register());
  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
    getVDefInterval(MI, LIS);
  --MII; // Point to store instruction.
  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);

  // If only one store instruction is required for the spill, add it to the
  // mergeable list. On X86, AMX spills require two instructions to store, so
  // merging is disabled for that case.
  if (MIS.begin() == MII)
    HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
  ++NumSpills;
  return true;
}

/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    std::tie(LI, VNI) = WorkList.pop_back_val();
    Register Reg = LI->reg();
    LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
                      << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineInstr &MI :
         llvm::make_early_inc_range(MRI.use_nodbg_bundles(Reg))) {
      if (!MI.mayStore() && !TII.isCopyInstr(MI))
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (Register DstReg = isCopyOfBundle(MI, Reg, TII)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");

          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI.setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(&MI);
        ++NumSpillsRemoved;
        if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
          --NumSpills;
      }
    }
  } while (!WorkList.empty());
}

//===----------------------------------------------------------------------===//
//                            Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    std::tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI).second)
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock *P : MBB->predecessors()) {
        VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}

bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
                                                     MachineInstr &MI) {
  if (!RestrictStatepointRemat)
    return true;
  // Here's a quick explanation of the problem we're trying to handle:
  // * There are some pseudo instructions with more vreg uses than there are
  //   physical registers on the machine.
  // * This is normally handled by spilling the vreg, and folding the reload
  //   into the user instruction. (Thus decreasing the number of used vregs
  //   until the remainder can be assigned to physregs.)
  // * However, since we may try to spill vregs in any order, we can end up
  //   trying to spill each operand to the instruction, and then rematting it
  //   instead. When that happens, the new live intervals (for the remats) are
  //   expected to be trivially assignable (i.e. RS_Done). However, since we
  //   may have more remats than physregs, we're guaranteed to fail to assign
  //   one.
  // At the moment, we only handle this for STATEPOINTs since they're the only
  // pseudo op where we've seen this. If we start seeing other instructions
  // with the same problem, we need to revisit this.
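  // Illustrative sketch (assumed numbers, not from this source): on a target
  // with 32 allocatable GPRs, a STATEPOINT carrying 40 live vreg operands
  // that all get rematted at the statepoint would create 40 tiny live
  // intervals overlapping one instruction -- more values than physregs, so
  // at least one remat interval could never be assigned.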
  if (MI.getOpcode() != TargetOpcode::STATEPOINT)
    return true;
  // For STATEPOINTs we allow re-materialization for fixed arguments only,
  // hoping that the number of physical registers is enough to cover all the
  // fixed arguments. If that turns out not to be true, we need to revisit it.
  for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
                EndIdx = MI.getNumOperands();
       Idx < EndIdx; ++Idx) {
    MachineOperand &MO = MI.getOperand(Idx);
    if (MO.isReg() && MO.getReg() == VReg)
      return false;
  }
  return true;
}

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
  // Analyze instruction.
  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
  VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);

  if (!RI.Reads)
    return false;

  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (MachineOperand &MO : MI.all_uses())
      if (MO.getReg() == VirtReg.reg())
        MO.setIsUndef();
    LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
    return true;
  }

  if (SnippetCopies.count(&MI))
    return false;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
  LiveRangeEdit::Remat RM(ParentVNI);
  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);

  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  if (RI.Tied) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
    return false;
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // If we can't guarantee that we'll be able to actually assign the new vreg,
  // we can't remat.
  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // Allocate a new register for the remat.
  Register NewVReg = Edit->createFrom(Original);

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx =
      Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);

  // We take the DebugLoc from MI, since OrigMI may be attributed to a
  // different source location.
  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
  NewMI->setDebugLoc(MI.getDebugLoc());

  (void)DefIdx;
  LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
                    << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands.
  for (const auto &OpPair : Ops) {
    MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');

  ++NumRemats;
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  if (!Edit->anyRematerializable())
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (Register Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
      // Debug values are not allowed to affect codegen.
      if (MI.isDebugValue())
        continue;

      assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
             "instruction that isn't a DBG_VALUE");

      anyRemat |= reMaterializeFor(LI, MI);
    }
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (Register Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (VNInfo *VNI : LI.vnis()) {
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
      // If MI is a bundle header, also try removing copies inside the bundle,
      // otherwise the verifier would complain "live range continues after dead
      // def flag".
      if (MI->isBundledWithSucc() && !MI->isBundledWithPred()) {
        MachineBasicBlock::instr_iterator BeginIt = MI->getIterator(),
                                          EndIt = MI->getParent()->instr_end();
        ++BeginIt; // Skip MI that was already handled.

        bool OnlyDeadCopies = true;
        for (MachineBasicBlock::instr_iterator It = BeginIt;
             It != EndIt && It->isBundledWithPred(); ++It) {

          auto DestSrc = TII.isCopyInstr(*It);
          bool IsCopyToDeadReg =
              DestSrc && DestSrc->Destination->getReg() == Reg;
          if (!IsCopyToDeadReg) {
            OnlyDeadCopies = false;
            break;
          }
        }
        if (OnlyDeadCopies) {
          for (MachineBasicBlock::instr_iterator It = BeginIt;
               It != EndIt && It->isBundledWithPred(); ++It) {
            It->addRegisterDead(Reg, &TRI);
            LLVM_DEBUG(dbgs() << "All defs dead: " << *It);
            DeadDefs.push_back(&*It);
          }
        }
      }
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);

  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
  // LiveIntervals::removeVRegDefAt is used. However, after all non-PHI VNIs
  // are removed, PHI VNIs are still left in the LiveInterval.
  // So to get rid of unused regs, we need to check whether they have non-dbg
  // references instead of whether they have non-empty intervals.
  unsigned ResultPos = 0;
  for (Register Reg : RegsToSpill) {
    if (MRI.reg_nodbg_empty(Reg)) {
      Edit->eraseVirtReg(Reg);
      continue;
    }

    assert(LIS.hasInterval(Reg) &&
           (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
           "Empty and not used live-range?!");

    RegsToSpill[ResultPos++] = Reg;
  }
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  LLVM_DEBUG(dbgs() << RegsToSpill.size()
                    << " registers to spill after remat.\n");
}

//===----------------------------------------------------------------------===//
//                                 Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
  int FI = 0;
  Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
  bool IsLoad = InstrReg;
  if (!IsLoad)
    InstrReg = TII.isStoreToStackSlot(*MI, FI);

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  if (!IsLoad)
    HSpiller.rmFromMergeableSpills(*MI, StackSlot);

  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(*MI);
  MI->eraseFromParent();

  if (IsLoad) {
    ++NumReloadsRemoved;
    --NumReloads;
  } else {
    ++NumSpillsRemoved;
    --NumSpills;
  }

  return true;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
// Dump the range of instructions from B to E with their slot indexes.
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
                                               MachineBasicBlock::iterator E,
                                               LiveIntervals const &LIS,
                                               const char *const header,
                                               Register VReg = Register()) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    NextLine = ' ';
    SlotIndent = ' ';
  }

  dbgs() << '\t' << header << ": " << NextLine;

  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
    SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();

    // If a register was passed in and this instruction has it as a
    // destination that is marked as an early clobber, print the
    // early-clobber slot index.
    if (VReg) {
      MachineOperand *MO = I->findRegisterDefOperand(VReg);
      if (MO && MO->isEarlyClobber())
        Idx = Idx.getRegSlot(true);
    }

    dbgs() << SlotIndent << Idx << '\t' << *I;
  }
}
#endif

/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from AnalyzeVirtRegInBundle().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = TII.isCopyInstr(*MI).has_value();
  Register ImpReg;

  // TII::foldMemoryOperand will do what we need here for statepoint
  // (fold load into use and remove corresponding def). We will replace
  // uses of removed def with loads (spillAroundUses).
  // For that to work we need to untie def and use to pass it through
  // foldMemoryOperand and signal foldPatchpoint that it is allowed to
  // fold them.
  bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;

  // Spill subregs if the target allows it.
  // We always want to spill subregs for stackmap/patchpoint pseudos.
  bool SpillSubRegs = TII.isSubregFoldable() ||
                      MI->getOpcode() == TargetOpcode::STATEPOINT ||
                      MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                      MI->getOpcode() == TargetOpcode::STACKMAP;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (const auto &OpPair : Ops) {
    unsigned Idx = OpPair.second;
    assert(MI == OpPair.first && "Instruction conflict during operand folding");
    MachineOperand &MO = MI->getOperand(Idx);

    // No point restoring an undef read, and we'll produce an invalid live
    // interval.
    // TODO: Is this really the correct way to handle undef tied uses?
    if (MO.isUse() && !MO.readsReg() && !MO.isTied())
      continue;

    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }

    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  // If we only have implicit uses, we won't be able to fold that.
  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
  if (FoldOps.empty())
    return false;

  MachineInstrSpan MIS(MI, MI->getParent());

  SmallVector<std::pair<unsigned, unsigned>> TiedOps;
  if (UntieRegs)
    for (unsigned Idx : FoldOps) {
      MachineOperand &MO = MI->getOperand(Idx);
      if (!MO.isTied())
        continue;
      unsigned Tied = MI->findTiedOperandIdx(Idx);
      if (MO.isUse())
        TiedOps.emplace_back(Tied, Idx);
      else {
        assert(MO.isDef() && "Tied to not use and def?");
        TiedOps.emplace_back(Idx, Tied);
      }
      MI->untieRegOperand(Idx);
    }

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
             : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
  if (!FoldMI) {
    // Re-tie operands.
    for (auto Tied : TiedOps)
      MI->tieOperands(Tied.first, Tied.second);
    return false;
  }

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    Register Reg = MO->getReg();
    if (!Reg || Reg.isVirtual() || MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
    if (RI.FullyDefined)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
  }

  int FI;
  if (TII.isStoreToStackSlot(*MI, FI) &&
      HSpiller.rmFromMergeableSpills(*MI, FI))
    --NumSpills;
  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
  // Update the call site info.
  if (MI->isCandidateForCallSiteEntry())
    MI->getMF()->moveCallSiteInfo(MI, FoldMI);

  // If we've folded a store into an instruction labelled with debug-info,
  // record a substitution from the old operand to the memory operand. Handle
  // the simple common case where operand 0 is the one being folded, plus when
  // the destination operand is also a tied def. More values could be
  // substituted / preserved with more analysis.
  if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
    // Helper lambda.
    auto MakeSubstitution = [this, FoldMI, MI, &Ops]() {
      // Substitute old operand zero to the new instruction's memory operand.
      unsigned OldOperandNum = Ops[0].second;
      unsigned NewNum = FoldMI->getDebugInstrNum();
      unsigned OldNum = MI->getDebugInstrNum();
      MF.makeDebugValueSubstitution({OldNum, OldOperandNum},
                                    {NewNum, MachineFunction::DebugOperandMemNumber});
    };

    const MachineOperand &Op0 = MI->getOperand(Ops[0].second);
    if (Ops.size() == 1 && Op0.isDef()) {
      MakeSubstitution();
    } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
               Op0.getReg() == MI->getOperand(1).getReg()) {
      MakeSubstitution();
    }
  } else if (MI->peekDebugInstrNum()) {
    // This is a debug-labelled instruction, but the operand being folded isn't
    // at operand zero. Most likely this means it's a load being folded in.
    // Substitute any register defs from operand zero up to the one being
    // folded -- past that point, we don't know what the new operand indexes
    // will be.
    MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
  }

  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineInstr &MI : MIS)
    if (&MI != FoldMI)
      LIS.InsertMachineInstrInMaps(MI);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->removeOperand(i - 1);
    }

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                                "folded"));

  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0) {
    ++NumSpills;
    // If only one store instruction is required for the spill, add it to the
    // mergeable list. On X86, AMX spills require two instructions to store,
    // so merging is disabled for that case.
    if (std::distance(MIS.begin(), MIS.end()) <= 1)
      HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
  } else
    ++NumReloads;
  return true;
}

void InlineSpiller::insertReload(Register NewVReg,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
                           MRI.getRegClass(NewVReg), &TRI, Register());

  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
                                                NewVReg));
  ++NumReloads;
}

/// Check if \p Def fully defines a VReg with an undefined value.
/// If that's the case, that means the value of VReg is actually
/// not relevant.
static bool isRealSpill(const MachineInstr &Def) {
  if (!Def.isImplicitDef())
    return true;

  // We can say that the VReg defined by Def is undef, only if it is
  // fully defined by Def. Otherwise, some of the lanes may not be
  // undef and the value of the VReg matters.
  return Def.getOperand(0).getSubReg();
}

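// Illustrative examples for isRealSpill (assumed MIR, not from this file's
// tests):
//   %0 = IMPLICIT_DEF            ; subreg index is 0, the whole value is
//                                ; undef, so this is not a real spill and
//                                ; the store can be replaced by a KILL.
//   undef %0.sub0 = IMPLICIT_DEF ; only one lane is undef; the other lanes
//                                ; may hold meaningful data, so a real
//                                ; store is still required.
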
/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  // Spills are not terminators, so inserting spills after terminators would
  // violate invariants in MachineVerifier.
  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  MachineBasicBlock::iterator SpillBefore = std::next(MI);
  bool IsRealSpill = isRealSpill(*MI);

  if (IsRealSpill)
    TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
                            MRI.getRegClass(NewVReg), &TRI, Register());
  else
    // Don't spill an undef value.
    // Anything works for undef; in particular, keeping the memory
    // uninitialized is a viable option, and it saves code size and
    // run time.
    BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
        .addReg(NewVReg, getKillRegState(isKill));

  MachineBasicBlock::iterator Spill = std::next(MI);
  LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
  for (const MachineInstr &MI : make_range(Spill, MIS.end()))
    getVDefInterval(MI, LIS);

  LLVM_DEBUG(
      dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
  ++NumSpills;
  // If only one store instruction is required for the spill, add it to the
  // mergeable list. On X86, AMX spills require two instructions to store, so
  // merging is disabled for that case.
  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
    HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
}

/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(Register Reg) {
  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
    // Debug values are not allowed to affect codegen.
    if (MI.isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      MachineBasicBlock *MBB = MI.getParent();
      LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI);
      buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg);
      MBB->erase(MI);
      continue;
    }

    assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
           "instruction that isn't a DBG_VALUE");

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(&MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(&MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    Register SibReg = isCopyOfBundle(MI, Reg, TII);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI);
        SnippetCopies.insert(&MI);
        continue;
      }
      if (RI.Writes) {
        if (hoistSpillInsideBB(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI.getOperand(0).setIsDead();
          DeadDefs.push_back(&MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Create a new virtual register for spill/fill.
    // FIXME: Infer regclass from instruction alone.
    Register NewVReg = Edit->createFrom(Reg);

    if (RI.Reads)
      insertReload(NewVReg, Idx, &MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (const auto &OpPair : Ops) {
      MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n');

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes)
      if (hasLiveDef)
        insertSpill(NewVReg, true, &MI);
  }
}

/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (Register Reg : RegsToSpill)
    StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
                                     StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (Register Reg : RegsToSpill)
    spillAroundUses(Reg);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
  }

  // Finally delete the SnippetCopies.
  for (Register Reg : RegsToSpill) {
    for (MachineInstr &MI :
         llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      LIS.getSlotIndexes()->removeSingleMachineInstrFromMaps(MI);
      MI.eraseFromBundle();
    }
  }

  // Delete all spilled registers.
  for (Register Reg : RegsToSpill)
    Edit->eraseVirtReg(Reg);
}

void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!Register::isStackSlot(edit.getReg()) &&
         "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = nullptr;

  LLVM_DEBUG(dbgs() << "Inline spilling "
                    << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
                    << ':' << edit.getParent() << "\nFrom original "
                    << printReg(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, VRAI);
}

/// Optimizations after all the reg selections and spills are done.
void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }

/// When a spill is inserted, add the spill to MergeableSpills map.
void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                                            unsigned Original) {
  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
  LiveInterval &OrigLI = LIS.getInterval(Original);
  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
  // LiveInterval may be cleared after all its references are spilled.
  if (!StackSlotToOrigLI.contains(StackSlot)) {
    auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
    LI->assign(OrigLI, Allocator);
    StackSlotToOrigLI[StackSlot] = std::move(LI);
  }
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  MergeableSpills[MIdx].insert(&Spill);
}

/// When a spill is removed, remove the spill from MergeableSpills map.
/// Return true if the spill is removed successfully.
bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
                                             int StackSlot) {
  auto It = StackSlotToOrigLI.find(StackSlot);
  if (It == StackSlotToOrigLI.end())
    return false;
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  return MergeableSpills[MIdx].erase(&Spill);
}

/// Check BB to see if it is a possible target BB to place a hoisted spill,
/// i.e., there should be a living sibling of OrigReg at the insert point.
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                                     MachineBasicBlock &BB, Register &LiveReg) {
  SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
  // The original def could be after the last insert point in the root block,
  // in which case we can't hoist to here.
  if (Idx < OrigVNI.def) {
    // TODO: We could do better here. If LI is not alive in the landing pad,
    // we could hoist the spill after the LIP.
    LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
    return false;
  }
  Register OrigReg = OrigLI.reg();
  SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");

  for (const Register &SibReg : Siblings) {
    LiveInterval &LI = LIS.getInterval(SibReg);
    VNInfo *VNI = LI.getVNInfoAt(Idx);
    if (VNI) {
      LiveReg = SibReg;
      return true;
    }
  }
  return false;
}

/// Remove redundant spills in the same BB. Save those redundant spills in
/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
void HoistSpillHelper::rmRedundantSpills(
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // For each spill seen, check SpillBBToSpill[] and see if its BB already has
  // another spill inside. If a BB contains more than one spill, only keep the
  // earlier spill with the smaller SlotIndex.
  for (auto *const CurrentSpill : Spills) {
    MachineBasicBlock *Block = CurrentSpill->getParent();
    MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
    MachineInstr *PrevSpill = SpillBBToSpill[Node];
    if (PrevSpill) {
      SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
      SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
      MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
      MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
      SpillsToRm.push_back(SpillToRm);
      SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
    } else {
      SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
    }
  }
  for (auto *const SpillToRm : SpillsToRm)
    Spills.erase(SpillToRm);
}

/// Starting from \p Root find a top-down traversal order of the dominator
/// tree to visit all basic blocks containing the elements of \p Spills.
/// Redundant spills will be found and put into \p SpillsToRm at the same
/// time. \p SpillBBToSpill will be populated as part of the process and
/// maps a basic block to the first store occurring in the basic block.
/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
void HoistSpillHelper::getVisitOrders(
    MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineDomTreeNode *> &Orders,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // The set contains all the possible BB nodes to which we may hoist
  // original spills.
  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
  // Save the BB nodes on the path from the first BB node containing a
  // non-redundant spill to the Root node.
  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
  // All the spills to be hoisted must originate from a single def instruction
  // of the OrigReg. That means the def instruction dominates all the spills
  // to be hoisted. We choose the BB where the def instruction is located as
  // the Root.
  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
  // For every node on the dominator tree with a spill, walk up the dominator
  // tree towards the Root node until it is reached. If there is another node
  // containing a spill in the middle of the path, the previously seen spill
  // is redundant and the node containing it will be removed. All the nodes on
  // the path starting from the first node with a non-redundant spill to the
  // Root node will be added to the WorkSet, which will contain all the
  // possible locations where spills may be hoisted to after the loop below
  // is done.
  for (auto *const Spill : Spills) {
    MachineBasicBlock *Block = Spill->getParent();
    MachineDomTreeNode *Node = MDT[Block];
    MachineInstr *SpillToRm = nullptr;
    while (Node != RootIDomNode) {
      // If Node dominates Block, and it already contains a spill, the spill in
      // Block will be redundant.
      if (Node != MDT[Block] && SpillBBToSpill[Node]) {
        SpillToRm = SpillBBToSpill[MDT[Block]];
        break;
        /// If we see the Node already in WorkSet, the path from the Node to
        /// the Root node must already be traversed by another spill.
        /// Then there is no need to repeat.
      } else if (WorkSet.count(Node)) {
        break;
      } else {
        NodesOnPath.insert(Node);
      }
      Node = Node->getIDom();
    }
    if (SpillToRm) {
      SpillsToRm.push_back(SpillToRm);
    } else {
      // Add a BB containing the original spills to SpillsToKeep -- i.e.,
      // set the initial status before hoisting starts. The value of BBs
      // containing original spills is set to 0, in order to distinguish them
      // from BBs containing hoisted spills, which will be inserted into
      // SpillsToKeep later during hoisting.
      SpillsToKeep[MDT[Block]] = 0;
      WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
    }
    NodesOnPath.clear();
  }

  // Sort the nodes in WorkSet in top-down order and save the nodes
  // in Orders. Orders will be used for hoisting in runHoistSpills.
  unsigned idx = 0;
  Orders.push_back(MDT.getBase().getNode(Root));
  do {
    MachineDomTreeNode *Node = Orders[idx++];
    for (MachineDomTreeNode *Child : Node->children()) {
      if (WorkSet.count(Child))
        Orders.push_back(Child);
    }
  } while (idx != Orders.size());
  assert(Orders.size() == WorkSet.size() &&
         "Orders have different size with WorkSet");

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++)
    LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
  LLVM_DEBUG(dbgs() << "\n");
#endif
}

1494 /// Try to hoist spills according to BB hotness. The spills to removed will
1495 /// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1496 /// \p SpillsToIns.
1497 void HoistSpillHelper::runHoistSpills(
1498 LiveInterval &OrigLI, VNInfo &OrigVNI,
1499 SmallPtrSet<MachineInstr *, 16> &Spills,
1500 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1501 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
1502 // Visit order of dominator tree nodes.
1503 SmallVector<MachineDomTreeNode *, 32> Orders;
1504 // SpillsToKeep contains all the nodes where spills are to be inserted
1505 // during hoisting. If the spill to be inserted is an original spill
1506 // (not a hoisted one), the value of the map entry is 0. If the spill
1507 // is a hoisted spill, the value of the map entry is the VReg to be used
1508 // as the source of the spill.
1509 DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
1510 // Map from BB to the first spill inside of it.
1511 DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;
1513 rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1515 MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1516 getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
1517 SpillBBToSpill);
  // SpillsInSubTreeMap maps a dom tree node to a pair of a node set and the
  // total cost of all the spills inside those nodes. The node set holds the
  // locations where spills are to be inserted in the subtree of the current
  // node.
  using NodesCostPair =
      std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;
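  // For example, if the subtree rooted at N currently holds pending spills
  // in blocks B1 and B2, then SpillsInSubTreeMap[N] is
  // ({B1, B2}, freq(B1) + freq(B2)).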
  // Iterate Orders in reverse, which is a bottom-up order in the dominator
  // tree. Once we visit a dom tree node, we know its children have already
  // been visited and the spill locations in the subtrees of all the children
  // have been determined.
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++) {
    MachineBasicBlock *Block = (*RIt)->getBlock();

    // If Block contains an original spill, simply continue.
    if (SpillsToKeep.contains(*RIt) && !SpillsToKeep[*RIt]) {
      SpillsInSubTreeMap[*RIt].first.insert(*RIt);
      // SpillsInSubTreeMap[*RIt].second contains the cost of the spill.
      SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
      continue;
    }
    // Collect the spills in the subtree of the current node (*RIt) into
    // SpillsInSubTreeMap[*RIt].first.
    for (MachineDomTreeNode *Child : (*RIt)->children()) {
      if (!SpillsInSubTreeMap.contains(Child))
        continue;
      // The statement "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
      // must come before taking the begin and end iterators of
      // SpillsInSubTreeMap[Child].first; otherwise those iterators may be
      // invalidated when SpillsInSubTreeMap[*RIt] is created for the first
      // time, the map grows, and the original buckets are moved.
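      // (In other words, DenseMap::operator[] can trigger a rehash that
      // relocates every existing entry; materializing the *RIt entry first
      // keeps the Child entry, and the iterators into it, stable.)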
      SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
          SpillsInSubTreeMap[*RIt].first;
      BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
      SubTreeCost += SpillsInSubTreeMap[Child].second;
      auto BI = SpillsInSubTreeMap[Child].first.begin();
      auto EI = SpillsInSubTreeMap[Child].first.end();
      SpillsInSubTree.insert(BI, EI);
      SpillsInSubTreeMap.erase(Child);
    }
    SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
        SpillsInSubTreeMap[*RIt].first;
    BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
    // No spills in the subtree, simply continue.
    if (SpillsInSubTree.empty())
      continue;

    // Check whether Block is a possible candidate to insert a spill.
    Register LiveReg;
    if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
      continue;
    // If there are multiple spills that could be merged, bias slightly in
    // favor of hoisting the spill.
    BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
                                       ? BranchProbability(9, 10)
                                       : BranchProbability(1, 1);
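    // Illustrative numbers: two subtree spills in blocks of frequency 60 and
    // 50 give SubTreeCost == 110, so hoisting into a block of frequency 100
    // pays off (110 > 100 * 9/10 == 90); a single spill only moves if the
    // candidate block is strictly colder (margin 1/1).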
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      // Hoist: Move spills to the current Block.
      for (auto *const SpillBB : SpillsInSubTree) {
        // When SpillBB is a BB that contains an original spill, insert that
        // spill into SpillsToRm.
        if (SpillsToKeep.contains(SpillBB) && !SpillsToKeep[SpillBB]) {
          MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
          SpillsToRm.push_back(SpillToRm);
        }
        // SpillBB will no longer contain a spill, so remove it from
        // SpillsToKeep.
        SpillsToKeep.erase(SpillBB);
      }
      // The current Block is the BB containing the new hoisted spill. Add it
      // to SpillsToKeep. LiveReg is the source of the new spill.
      SpillsToKeep[*RIt] = LiveReg;
      LLVM_DEBUG({
        dbgs() << "spills in BB: ";
        for (const auto Rspill : SpillsInSubTree)
          dbgs() << Rspill->getBlock()->getNumber() << " ";
        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
               << "\n";
      });
      SpillsInSubTree.clear();
      SpillsInSubTree.insert(*RIt);
      SubTreeCost = MBFI.getBlockFreq(Block);
    }
  }
  // For spills in SpillsToKeep with LiveReg set (i.e., not original spills),
  // save them to SpillsToIns.
  for (const auto &Ent : SpillsToKeep) {
    if (Ent.second)
      SpillsToIns[Ent.first->getBlock()] = Ent.second;
  }
}
/// For spills with equal values, remove redundant spills and hoist those left
/// to less hot spots.
///
/// Spills with equal values are collected into the same set in
/// MergeableSpills as each spill is inserted. These equal spills originate
/// from the same defining instruction and are dominated by it. Before
/// hoisting all the equal spills, redundant spills inside the same BB are
/// first marked to be deleted. Then, starting from the spills left, walk up
/// the dominator tree towards the Root node where the defining instruction
/// is located, mark the dominated spills to be deleted along the way, and
/// collect the BB nodes on the path from the non-dominated spills to the
/// defining instruction into a WorkSet. The nodes in WorkSet are the
/// candidate places where we consider hoisting the spills. We iterate the
/// WorkSet in bottom-up order, and for each node we decide whether to hoist
/// the spills inside its subtree to that node. In this way, we can get a
/// local benefit even if hoisting all the equal spills to one cold place is
/// impossible.
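///
/// A small illustrative example (block frequencies in parentheses, and B1
/// assumed to be a legal insertion point):
///
///          Root (100, def)
///          /            \
///      B1 (10)        B2 (50)
///      /     \
///   B3 (60) B4 (70)
///
/// With original spills in B3, B4 and B2, the bottom-up walk merges the B3
/// and B4 spills into the colder B1 (60 + 70 > 10 * 9/10), while the spill
/// in B2 stays put because lifting everything into the hot Root would not
/// pay (10 + 50 < 100 * 9/10).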
void HoistSpillHelper::hoistAllSpills() {
  SmallVector<Register, 4> NewVRegs;
  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);

  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    Register Reg = Register::index2VirtReg(i);
    Register Original = VRM.getPreSplitReg(Reg);
    if (!MRI.def_empty(Reg))
      Virt2SiblingsMap[Original].insert(Reg);
  }
  // Each entry in MergeableSpills contains a spill set with equal values.
  for (auto &Ent : MergeableSpills) {
    int Slot = Ent.first.first;
    LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
    VNInfo *OrigVNI = Ent.first.second;
    SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
    if (Ent.second.empty())
      continue;

    LLVM_DEBUG({
      dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
             << "Equal spills in BB: ";
      for (const auto spill : EqValSpills)
        dbgs() << spill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });
    // SpillsToRm is the spill set to be removed from EqValSpills.
    SmallVector<MachineInstr *, 16> SpillsToRm;
    // SpillsToIns is the spill set to be newly inserted after hoisting.
    DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;

    runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
    LLVM_DEBUG({
      dbgs() << "Finally inserted spills in BB: ";
      for (const auto &Ispill : SpillsToIns)
        dbgs() << Ispill.first->getNumber() << " ";
      dbgs() << "\nFinally removed spills in BB: ";
      for (const auto Rspill : SpillsToRm)
        dbgs() << Rspill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });
    // Stack live range update.
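    // If any spill moved or disappeared, conservatively merge the segments of
    // OrigVNI from the original live interval into the stack slot interval so
    // the slot stays live across all old and new spill points.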
    LiveInterval &StackIntvl = LSS.getInterval(Slot);
    if (!SpillsToIns.empty() || !SpillsToRm.empty())
      StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
                                     StackIntvl.getValNumInfo(0));
    // Insert hoisted spills.
    for (auto const &Insert : SpillsToIns) {
      MachineBasicBlock *BB = Insert.first;
      Register LiveReg = Insert.second;
      MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
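      // storeRegToStackSlot may expand into more than one instruction on some
      // targets; MIS records everything inserted before MII so that all new
      // instructions can be indexed in LiveIntervals below.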
      MachineInstrSpan MIS(MII, BB);
      TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
                              MRI.getRegClass(LiveReg), &TRI, Register());
      LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
      for (const MachineInstr &MI : make_range(MIS.begin(), MII))
        getVDefInterval(MI, LIS);
      ++NumSpills;
    }
    // Remove redundant spills or change them to dead instructions.
    NumSpills -= SpillsToRm.size();
    for (auto *const RMEnt : SpillsToRm) {
      RMEnt->setDesc(TII.get(TargetOpcode::KILL));
      for (unsigned i = RMEnt->getNumOperands(); i; --i) {
        MachineOperand &MO = RMEnt->getOperand(i - 1);
        if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
          RMEnt->removeOperand(i - 1);
      }
    }
    Edit.eliminateDeadDefs(SpillsToRm, std::nullopt);
  }
}
/// For a VirtReg clone, the \p New register should have the same physreg or
/// stackslot as the \p Old register.
void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
  if (VRM.hasPhys(Old))
    VRM.assignVirt2Phys(New, VRM.getPhys(Old));
  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
    VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
  else
    llvm_unreachable("VReg should be assigned either physreg or stackslot");
  if (VRM.hasShape(Old))
    VRM.assignVirt2Shape(New, VRM.getShape(Old));
}