//===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee-saved registers, and emitting prolog & epilog code for the
// function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "prologepilog"

using MBBVector = SmallVector<MachineBasicBlock *, 4>;

STATISTIC(NumLeafFuncWithSpills, "Number of leaf functions with CSRs");
STATISTIC(NumFuncSeen, "Number of functions seen in PEI");

namespace {

class PEI : public MachineFunctionPass {
public:
  static char ID;

  PEI() : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  /// frame indexes with appropriate references.
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  RegScavenger *RS = nullptr;

  // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
  // stack frame indexes.
  unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  unsigned MaxCSFrameIndex = 0;

  // Save and Restore blocks of the current function. Typically there is a
  // single save block, unless Windows EH funclets are involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

  // Flag to control whether to use the register scavenger to resolve
  // frame index materialization registers. Set according to
  // TRI->requiresFrameIndexScavenging() for the current function.
  bool FrameIndexVirtualScavenging = false;

  // Flag to control whether the scavenger should be passed even though
  // FrameIndexVirtualScavenging is used.
  bool FrameIndexEliminationScavenging = false;

  // Emit remarks.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  void calculateCallFrameInfo(MachineFunction &MF);
  void calculateSaveRestoreBlocks(MachineFunction &MF);
  void spillCalleeSavedRegs(MachineFunction &MF);

  void calculateFrameObjectOffsets(MachineFunction &MF);
  void replaceFrameIndices(MachineFunction &MF);
  void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                           int &SPAdj);
  // Frame indices in debug values are encoded in a target independent
  // way with simply the frame index and offset rather than any
  // target-specific addressing mode.
  bool replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
                                   unsigned OpIdx, int SPAdj = 0);
  // Does the same as replaceFrameIndices but using a backward MIR walk and
  // a backward register scavenger walk. Does not yet support call sequence
  // processing.
  void replaceFrameIndicesBackward(MachineBasicBlock *BB, MachineFunction &MF,
                                   int &SPAdj);

  void insertPrologEpilogCode(MachineFunction &MF);
  void insertZeroCallUsedRegs(MachineFunction &MF);
};

} // end anonymous namespace

char PEI::ID = 0;
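
// Pass identifier exported through llvm::PrologEpilogCodeInserterID so that
// targets and pass-ordering code can refer to this pass without naming the
// anonymous-namespace PEI class directly.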
char &llvm::PrologEpilogCodeInserterID = PEI::ID;

INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
                    "Prologue/Epilogue Insertion & Frame Finalization", false,
                    false)

MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
  return new PEI();
}

STATISTIC(NumBytesStackSpace,
          "Number of bytes used for stack in all functions");

void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// StackObjSet - A set of stack object indexes
using StackObjSet = SmallSetVector<int, 8>;

using SavedDbgValuesMap =
    SmallDenseMap<MachineBasicBlock *, SmallVector<MachineInstr *, 4>, 4>;

/// Stash DBG_VALUEs that describe parameters and which are placed at the start
/// of the block. Later on, after the prologue code has been emitted, the
/// stashed DBG_VALUEs will be reinserted at the start of the block.
static void stashEntryDbgValues(MachineBasicBlock &MBB,
                                SavedDbgValuesMap &EntryDbgValues) {
  SmallVector<const MachineInstr *, 4> FrameIndexValues;

  for (auto &MI : MBB) {
    if (!MI.isDebugInstr())
      break;
    if (!MI.isDebugValue() || !MI.getDebugVariable()->isParameter())
      continue;
    if (any_of(MI.debug_operands(),
               [](const MachineOperand &MO) { return MO.isFI(); })) {
      // We can only emit valid locations for frame indices after the frame
      // setup, so do not stash them away.
      FrameIndexValues.push_back(&MI);
      continue;
    }
    const DILocalVariable *Var = MI.getDebugVariable();
    const DIExpression *Expr = MI.getDebugExpression();
    auto Overlaps = [Var, Expr](const MachineInstr *DV) {
      return Var == DV->getDebugVariable() &&
             Expr->fragmentsOverlap(DV->getDebugExpression());
    };
    // See if the debug value overlaps with any preceding debug value that will
    // not be stashed. If that is the case, then we can't stash this value, as
    // we would then reorder the values at reinsertion.
    if (llvm::none_of(FrameIndexValues, Overlaps))
      EntryDbgValues[&MBB].push_back(&MI);
  }

  // Remove stashed debug values from the block.
  if (EntryDbgValues.count(&MBB))
    for (auto *MI : EntryDbgValues[&MBB])
      MI->removeFromParent();
}

/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
bool PEI::runOnMachineFunction(MachineFunction &MF) {
  NumFuncSeen++;
  const Function &F = MF.getFunction();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr;
  FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();

  // Calculate the MaxCallFrameSize and AdjustsStack variables for the
  // function's frame information. Also eliminates call frame pseudo
  // instructions.
  calculateCallFrameInfo(MF);

  // Determine placement of CSR spill/restore code and prolog/epilog code:
  // place all spills in the entry block, all restores in return blocks.
  calculateSaveRestoreBlocks(MF);

  // Stash away DBG_VALUEs that should not be moved by insertion of prolog
  // code.
  SavedDbgValuesMap EntryDbgValues;
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    stashEntryDbgValues(*SaveBlock, EntryDbgValues);

  // Handle CSR spilling and restoring, for targets that need it.
  if (MF.getTarget().usesPhysRegsForValues())
    spillCalleeSavedRegs(MF);

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameFinalized(MF, RS);

  // Calculate actual frame offsets for all abstract stack objects...
  calculateFrameObjectOffsets(MF);

  // Add prolog and epilog code to the function. This function is required
  // to align the stack frame as necessary for any stack variables or
  // called functions. Because of this, calculateCalleeSavedRegisters()
  // must be called before this function in order to set the AdjustsStack
  // and MaxCallFrameSize variables.
  if (!F.hasFnAttribute(Attribute::Naked))
    insertPrologEpilogCode(MF);

  // Reinsert stashed debug values at the start of the entry blocks.
  for (auto &I : EntryDbgValues)
    I.first->insert(I.first->begin(), I.second.begin(), I.second.end());

  // Allow the target machine to make final modifications to the function
  // before the frame indices are replaced.
  TFI->processFunctionBeforeFrameIndicesReplaced(MF, RS);

  // Replace all MO_FrameIndex operands with physical register references
  // and actual offsets.
  replaceFrameIndices(MF);

  // If register scavenging is needed, as we've enabled doing it as a
  // post-pass, scavenge the virtual registers that frame index elimination
  // inserted.
  if (TRI->requiresRegisterScavenging(MF) && FrameIndexVirtualScavenging)
    scavengeFrameVirtualRegs(MF, *RS);

  // Warn on stack size when it exceeds the given limit.
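  // For example, IR containing
  //   attributes #0 = { "warn-stack-size"="512" }
  // (an illustrative threshold; clang typically emits this attribute for
  // -Wframe-larger-than=512) makes the code below diagnose this function once
  // its final frame size exceeds 512 bytes.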
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();

  unsigned Threshold = UINT_MAX;
  if (MF.getFunction().hasFnAttribute("warn-stack-size")) {
    bool Failed = MF.getFunction()
                      .getFnAttribute("warn-stack-size")
                      .getValueAsString()
                      .getAsInteger(10, Threshold);
    // Verifier should have caught this.
    assert(!Failed && "Invalid warn-stack-size fn attr value");
    (void)Failed;
  }

  uint64_t UnsafeStackSize = MFI.getUnsafeStackSize();
  if (MF.getFunction().hasFnAttribute(Attribute::SafeStack))
    StackSize += UnsafeStackSize;

  if (StackSize > Threshold) {
    DiagnosticInfoStackSize DiagStackSize(F, StackSize, Threshold, DS_Warning);
    F.getContext().diagnose(DiagStackSize);
    int64_t SpillSize = 0;
    for (int Idx = MFI.getObjectIndexBegin(), End = MFI.getObjectIndexEnd();
         Idx != End; ++Idx) {
      if (MFI.isSpillSlotObjectIndex(Idx))
        SpillSize += MFI.getObjectSize(Idx);
    }

    [[maybe_unused]] float SpillPct =
        static_cast<float>(SpillSize) / static_cast<float>(StackSize);
    LLVM_DEBUG(
        dbgs() << formatv("{0}/{1} ({3:P}) spills, {2}/{1} ({4:P}) variables",
                          SpillSize, StackSize, StackSize - SpillSize, SpillPct,
                          1.0f - SpillPct));
    if (UnsafeStackSize != 0) {
      LLVM_DEBUG(dbgs() << formatv(", {0}/{2} ({1:P}) unsafe stack",
                                   UnsafeStackSize,
                                   static_cast<float>(UnsafeStackSize) /
                                       static_cast<float>(StackSize),
                                   StackSize));
    }
    LLVM_DEBUG(dbgs() << "\n");
  }

  ORE->emit([&]() {
    return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
                                             MF.getFunction().getSubprogram(),
                                             &MF.front())
           << ore::NV("NumStackBytes", StackSize) << " stack bytes in function";
  });

  delete RS;
  SaveBlocks.clear();
  RestoreBlocks.clear();
  MFI.setSavePoint(nullptr);
  MFI.setRestorePoint(nullptr);
  return true;
}

/// Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallFrameInfo(MachineFunction &MF) {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned MaxCallFrameSize = 0;
  bool AdjustsStack = MFI.adjustsStack();

  // Get the function call frame set-up and tear-down instruction opcodes.
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  for (MachineBasicBlock &BB : MF)
    for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I)
      if (TII.isFrameInstr(*I)) {
        unsigned Size = TII.getFrameSize(*I);
        if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
        AdjustsStack = true;
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
        // Some inline asm's need a stack frame, as indicated by operand 1.
        unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
        if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
          AdjustsStack = true;
      }

  assert(!MFI.isMaxCallFrameSizeComputed() ||
         (MFI.getMaxCallFrameSize() >= MaxCallFrameSize &&
          !(AdjustsStack && !MFI.adjustsStack())));
  MFI.setAdjustsStack(AdjustsStack);
  MFI.setMaxCallFrameSize(MaxCallFrameSize);

  for (MachineBasicBlock::iterator I : FrameSDOps) {
    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    if (TFI->canSimplifyCallFramePseudos(MF))
      TFI->eliminateCallFramePseudoInstr(MF, *I->getParent(), I);
  }
}

/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Even when we do not change any CSR, we still want to insert the
  // prologue and epilogue of the function.
  // So set the save points for those.

  // Use the points found by shrink-wrapping, if any.
  if (MFI.getSavePoint()) {
    SaveBlocks.push_back(MFI.getSavePoint());
    assert(MFI.getRestorePoint() && "Both restore and save must be set");
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    // If RestoreBlock does not have any successor and is not a return block
    // then the end point is unreachable and we do not need to insert any
    // epilogue.
    if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
      RestoreBlocks.push_back(RestoreBlock);
    return;
  }

  // Save refs to entry and return blocks.
  SaveBlocks.push_back(&MF.front());
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry())
      SaveBlocks.push_back(&MBB);
    if (MBB.isReturnBlock())
      RestoreBlocks.push_back(&MBB);
  }
}

static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
  if (SavedRegs.empty())
    return;

  const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();
  BitVector CSMask(SavedRegs.size());

  for (unsigned i = 0; CSRegs[i]; ++i)
    CSMask.set(CSRegs[i]);
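
  // Collect the registers that actually need their own spill slot: a register
  // is skipped when a super-register from the callee-saved list is already
  // being saved, since spilling the super-register also preserves it.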
  std::vector<CalleeSavedInfo> CSI;
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    if (SavedRegs.test(Reg)) {
      bool SavedSuper = false;
      for (const MCPhysReg &SuperReg : RegInfo->superregs(Reg)) {
        // Some backends set all aliases for some registers as saved, such as
        // Mips's $fp, so they appear in SavedRegs but not CSRegs.
        if (SavedRegs.test(SuperReg) && CSMask.test(SuperReg)) {
          SavedSuper = true;
          break;
        }
      }

      if (!SavedSuper)
        CSI.push_back(CalleeSavedInfo(Reg));
    }
  }

  const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = F.getFrameInfo();
  if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI, MinCSFrameIndex,
                                        MaxCSFrameIndex)) {
    // If the target doesn't implement this, use generic code.

    if (CSI.empty())
      return; // Early exit if no callee saved registers are modified!

    unsigned NumFixedSpillSlots;
    const TargetFrameLowering::SpillSlot *FixedSpillSlots =
        TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);

    // Now that we know which registers need to be saved and restored, allocate
    // stack slots for them.
    for (auto &CS : CSI) {
      // If the target has spilled this register to another register, we don't
      // need to allocate a stack slot.
      if (CS.isSpilledToReg())
        continue;

      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

      int FrameIdx;
      if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
        CS.setFrameIdx(FrameIdx);
        continue;
      }

      // Check to see if this physreg must be spilled to a particular stack
      // slot on this target.
      const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
      while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
             FixedSlot->Reg != Reg)
        ++FixedSlot;

      unsigned Size = RegInfo->getSpillSize(*RC);
      if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
        // Nope, just spill it anywhere convenient.
        Align Alignment = RegInfo->getSpillAlign(*RC);
        // We may not be able to satisfy the desired alignment specification of
        // the TargetRegisterClass if the stack alignment is smaller. Use the
        // min.
        Alignment = std::min(Alignment, TFI->getStackAlign());
        FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
        if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
        if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
      } else {
        // Spill it to the stack where we must.
        FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
      }

      CS.setFrameIdx(FrameIdx);
    }
  }

  MFI.setCalleeSavedInfo(CSI);
}

/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Visited will contain all the basic blocks that are in the region
  // where the callee saved registers are alive:
  // - Anything that is not Save or Restore -> LiveThrough.
  // - Save -> LiveIn.
  // - Restore -> LiveOut.
  // The live-out is not attached to the block, so no need to keep
  // Restore in this set.
  SmallPtrSet<MachineBasicBlock *, 8> Visited;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  MachineBasicBlock *Entry = &MF.front();
  MachineBasicBlock *Save = MFI.getSavePoint();

  if (!Save)
    Save = Entry;
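
  // Seed the search with the entry block (when it is not itself the save
  // point) and with the restore point; the flood fill below then visits the
  // blocks that lie outside the save/restore region.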
  if (Entry != Save) {
    WorkList.push_back(Entry);
    Visited.insert(Entry);
  }
  Visited.insert(Save);

  MachineBasicBlock *Restore = MFI.getRestorePoint();
  if (Restore)
    // By construction Restore cannot be visited, otherwise it
    // means there exists a path to Restore that does not go
    // through Save.
    WorkList.push_back(Restore);

  while (!WorkList.empty()) {
    const MachineBasicBlock *CurBB = WorkList.pop_back_val();
    // By construction, the region that is after the save point is
    // dominated by the Save and post-dominated by the Restore.
    if (CurBB == Save && Save != Restore)
      continue;
    // Enqueue all the successors not already visited.
    // Those are by construction either before Save or after Restore.
    for (MachineBasicBlock *SuccBB : CurBB->successors())
      if (Visited.insert(SuccBB).second)
        WorkList.push_back(SuccBB);
  }

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (const CalleeSavedInfo &I : CSI) {
    for (MachineBasicBlock *MBB : Visited) {
      MCPhysReg Reg = I.getReg();
      // Add the callee-saved register as live-in.
      // It's killed at the spill.
      if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
        MBB->addLiveIn(Reg);
    }
    // If the callee-saved register is spilled to another register rather than
    // spilling to stack, the destination register has to be marked as live for
    // each MBB between the prologue and epilogue so that it is not clobbered
    // before it is reloaded in the epilogue. The Visited set contains all
    // blocks outside of the region delimited by prologue/epilogue.
    if (I.isSpilledToReg()) {
      for (MachineBasicBlock &MBB : MF) {
        if (Visited.count(&MBB))
          continue;
        MCPhysReg DstReg = I.getDstReg();
        if (!MBB.isLiveIn(DstReg))
          MBB.addLiveIn(DstReg);
      }
    }
  }
}

/// Insert spill code for the callee-saved registers used in the function.
static void insertCSRSaves(MachineBasicBlock &SaveBlock,
                           ArrayRef<CalleeSavedInfo> CSI) {
  MachineFunction &MF = *SaveBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineBasicBlock::iterator I = SaveBlock.begin();
  if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CS : CSI) {
      // Insert the spill to the stack frame.
      unsigned Reg = CS.getReg();

      if (CS.isSpilledToReg()) {
        BuildMI(SaveBlock, I, DebugLoc(),
                TII.get(TargetOpcode::COPY), CS.getDstReg())
            .addReg(Reg, getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
                                TRI, Register());
      }
    }
  }
}

/// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              std::vector<CalleeSavedInfo> &CSI) {
  MachineFunction &MF = *RestoreBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  // Restore all registers immediately before the return and any
  // terminators that precede it.
  MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();

  if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CI : reverse(CSI)) {
      unsigned Reg = CI.getReg();
      if (CI.isSpilledToReg()) {
        BuildMI(RestoreBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY), Reg)
            .addReg(CI.getDstReg(), getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC,
                                 TRI, Register());
        assert(I != RestoreBlock.begin() &&
               "loadRegFromStackSlot didn't insert any code!");
        // Insert in reverse order. loadRegFromStackSlot can insert
        // multiple instructions.
      }
    }
  }
}

void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
  // We can't list this requirement in getRequiredProperties because some
  // targets (WebAssembly) use virtual registers past this point, and the pass
  // pipeline is set up without giving the passes a chance to look at the
  // TargetMachine.
  // FIXME: Find a way to express this in getRequiredProperties.
  assert(MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  const Function &F = MF.getFunction();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(MF, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

  // Add the code to save and restore the callee saved registers.
  if (!F.hasFnAttribute(Attribute::Naked)) {
    MFI.setCalleeSavedInfoValid(true);

    std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
    if (!CSI.empty()) {
      if (!MFI.hasCalls())
        NumLeafFuncWithSpills++;

      for (MachineBasicBlock *SaveBlock : SaveBlocks)
        insertCSRSaves(*SaveBlock, CSI);

      // Update the live-in information of all the blocks up to the save point.
      updateLiveness(MF);

      for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
        insertCSRRestores(*RestoreBlock, CSI);
    }
  }
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, int64_t &Offset,
                                     Align &MaxAlign) {
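  // For example, on a downward-growing stack an 8-byte object with 8-byte
  // alignment requested while Offset is currently 4 ends up at Offset 16
  // (4 + 8, rounded up to the alignment), i.e. the object is placed at SP[-16].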
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  Align Alignment = MFI.getObjectAlign(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Alignment);

  // Adjust to alignment boundary.
  Offset = alignTo(Offset, Alignment);

  if (StackGrowsDown) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

/// Compute which bytes of fixed and callee-save stack area are unused and keep
/// track of them in StackBytesFree.
static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
  // Avoid undefined int64_t -> int conversion below in extreme case.
  if (FixedCSEnd > std::numeric_limits<int>::max())
    return;

  StackBytesFree.resize(FixedCSEnd, true);

  SmallVector<int, 16> AllocatedFrameSlots;
  // Add fixed objects.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
    // StackSlot scavenging is only implemented for the default stack.
    if (MFI.getStackID(i) == TargetStackID::Default)
      AllocatedFrameSlots.push_back(i);
  // Add callee-save objects if there are any.
  if (MinCSFrameIndex <= MaxCSFrameIndex) {
    for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
      if (MFI.getStackID(i) == TargetStackID::Default)
        AllocatedFrameSlots.push_back(i);
  }

  for (int i : AllocatedFrameSlots) {
    // These are converted from int64_t, but they should always fit in int
    // because of the FixedCSEnd check above.
    int ObjOffset = MFI.getObjectOffset(i);
    int ObjSize = MFI.getObjectSize(i);
    int ObjStart, ObjEnd;
    if (StackGrowsDown) {
      // ObjOffset is negative when StackGrowsDown is true.
      ObjStart = -ObjOffset - ObjSize;
      ObjEnd = -ObjOffset;
    } else {
      ObjStart = ObjOffset;
      ObjEnd = ObjOffset + ObjSize;
    }
    // Ignore fixed holes that are in the previous stack frame.
    if (ObjEnd > 0)
      StackBytesFree.reset(ObjStart, ObjEnd);
  }
}

/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range. Return true if the allocation was successful.
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, Align MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // Clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none().
    StackBytesFree.clear();
    return false;
  }

  Align ObjAlign = MFI.getObjectAlign(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
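  // Walk the free bytes of the fixed/callee-save area looking for the first
  // suitably aligned run of ObjSize bytes that is entirely unused.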
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << ObjStart << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << FreeStart << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                  SmallSet<int, 16> &ProtectedObjs,
                                  MachineFrameInfo &MFI, bool StackGrowsDown,
                                  int64_t &Offset, Align &MaxAlign) {
  for (int i : UnassignedObjs) {
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  bool StackGrowsDown =
      TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0
         && "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

#ifdef EXPENSIVE_CHECKS
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
    if (!MFI.isDeadObjectIndex(i) &&
        MFI.getStackID(i) == TargetStackID::Default)
      assert(MFI.getObjectAlign(i) <= MFI.getMaxAlign() &&
             "MaxAlignment is invalid");
#endif

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // Adjust 'Offset' to point to the end of last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at lower address of
      // the object -- which is given by offset. For down growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the start pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  Align MaxAlign = MFI.getMaxAlign();
  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (MaxCSFrameIndex >= MinCSFrameIndex) {
    for (unsigned i = 0; i <= MaxCSFrameIndex - MinCSFrameIndex; ++i) {
      unsigned FrameIndex =
          StackGrowsDown ? MinCSFrameIndex + i : MaxCSFrameIndex - i;

      // Only allocate objects on the default stack.
      if (MFI.getStackID(FrameIndex) != TargetStackID::Default)
        continue;

      // TODO: should this just be if (MFI.isDeadObjectIndex(FrameIndex))
      if (!StackGrowsDown && MFI.isDeadObjectIndex(FrameIndex))
        continue;

      AdjustStackOffset(MFI, FrameIndex, StackGrowsDown, Offset, MaxAlign);
    }
  }

  assert(MaxAlign == MFI.getMaxAlign() &&
         "MFI.getMaxAlign should already account for all callee-saved "
         "registers without a fixed stack slot");

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots = TFI.allocateScavengingFrameIndexesNearIncomingSP(MF);
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    Align Alignment = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Alignment);

    LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
                        << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block.
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Alignment, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
  if (const WinEHFuncInfo *FuncInfo = MF.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.hasStackProtectorIndex()) {
    int StackProtectorFI = MFI.getStackProtectorIndex();
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    // If we need a stack protector, we need to make sure that
    // LocalStackSlotPass didn't already allocate a slot for it.
    // If we are told to use the LocalStackAllocationBlock, the stack protector
    // is expected to be already pre-allocated.
    if (MFI.getStackID(StackProtectorFI) != TargetStackID::Default) {
      // If the stack protector isn't on the default stack then it's up to the
      // target to set the stack offset.
      assert(MFI.getObjectOffset(StackProtectorFI) != 0 &&
             "Offset of stack protector on non-default stack expected to be "
             "already set.");
      assert(!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex()) &&
             "Stack protector on non-default stack expected to not be "
             "pre-allocated by LocalStackSlotPass.");
    } else if (!MFI.getUseLocalStackAllocationBlock()) {
      AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset,
                        MaxAlign);
    } else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) {
      llvm_unreachable(
          "Stack protector not pre-allocated by LocalStackSlotPass.");
    }

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (StackProtectorFI == (int)i || EHRegNodeFrameIndex == (int)i)
        continue;
      // Only allocate objects on the default stack.
      if (MFI.getStackID(i) != TargetStackID::Default)
        continue;

      switch (MFI.getObjectSSPLayout(i)) {
      case MachineFrameInfo::SSPLK_None:
        continue;
      case MachineFrameInfo::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    // We expect **all** the protected stack objects to be pre-allocated by
    // LocalStackSlotPass. If it turns out that PEI still has to allocate some
    // of them, we may end up messing up the expected order of the objects.
    if (MFI.getUseLocalStackAllocationBlock() &&
        !(LargeArrayObjs.empty() && SmallArrayObjs.empty() &&
          AddrOfObjs.empty()))
      llvm_unreachable("Found protected stack objects not pre-allocated by "
                       "LocalStackSlotPass.");

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i || EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign);

  // Give the targets a chance to order the objects the way they like it.
  if (MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MF.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(MF, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so
  // we can use the holes when allocating later stack objects. Only do this if
  // the stack protector isn't being used, the target requests it, and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(MF))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
                          FixedCSEnd, StackBytesFree);

  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(MF))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    Align StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->hasStackRealignment(MF) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlign();
    else
      StackAlign = TFI.getTransientStackAlign();

    // If the frame pointer is eliminated, all frame offsets will be relative to
    // SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    int64_t OffsetBeforeAlignment = Offset;
    Offset = alignTo(Offset, StackAlign);

    // If we have increased the offset to fulfill the alignment constraints,
    // then the scavenging spill slots may become harder to reach from the
    // stack pointer, float them so they stay close.
    if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS &&
        !EarlyScavengingSlots) {
      SmallVector<int, 2> SFIs;
      RS->getScavengingFrameIndices(SFIs);
      LLVM_DEBUG(if (!SFIs.empty()) llvm::dbgs()
                     << "Adjusting emergency spill slots!\n";);
      int64_t Delta = Offset - OffsetBeforeAlignment;
      for (int SFI : SFIs) {
        LLVM_DEBUG(llvm::dbgs()
                   << "Adjusting offset of emergency spill slot #" << SFI
                   << " from " << MFI.getObjectOffset(SFI););
        MFI.setObjectOffset(SFI, MFI.getObjectOffset(SFI) - Delta);
        LLVM_DEBUG(llvm::dbgs() << " to " << MFI.getObjectOffset(SFI) << "\n";);
      }
    }
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
void PEI::insertPrologEpilogCode(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(MF, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(MF, *RestoreBlock);

  // Zero call used registers before restoring callee-saved registers.
  insertZeroCallUsedRegs(MF);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(MF, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it. This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (MF.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(MF, *SaveBlock);
  }

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (MF.getFunction().getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(MF, *SaveBlock);
}

/// insertZeroCallUsedRegs - Zero out call used registers.
void PEI::insertZeroCallUsedRegs(MachineFunction &MF) {
  const Function &F = MF.getFunction();

  if (!F.hasFnAttribute("zero-call-used-regs"))
    return;
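
  // The attribute value mirrors clang's -fzero-call-used-regs= option; for
  // example "zero-call-used-regs"="used-gpr" zeroes only the general-purpose
  // registers this function actually used, immediately before returning.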

  using namespace ZeroCallUsedRegs;

  ZeroCallUsedRegsKind ZeroRegsKind =
      StringSwitch<ZeroCallUsedRegsKind>(
          F.getFnAttribute("zero-call-used-regs").getValueAsString())
          .Case("skip", ZeroCallUsedRegsKind::Skip)
          .Case("used-gpr-arg", ZeroCallUsedRegsKind::UsedGPRArg)
          .Case("used-gpr", ZeroCallUsedRegsKind::UsedGPR)
          .Case("used-arg", ZeroCallUsedRegsKind::UsedArg)
          .Case("used", ZeroCallUsedRegsKind::Used)
          .Case("all-gpr-arg", ZeroCallUsedRegsKind::AllGPRArg)
          .Case("all-gpr", ZeroCallUsedRegsKind::AllGPR)
          .Case("all-arg", ZeroCallUsedRegsKind::AllArg)
          .Case("all", ZeroCallUsedRegsKind::All);

  if (ZeroRegsKind == ZeroCallUsedRegsKind::Skip)
    return;
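
  // The remaining kinds are combinations of three orthogonal bit flags:
  // restrict zeroing to general-purpose registers, to registers the function
  // actually used, and/or to argument registers.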
  const bool OnlyGPR = static_cast<unsigned>(ZeroRegsKind) & ONLY_GPR;
  const bool OnlyUsed = static_cast<unsigned>(ZeroRegsKind) & ONLY_USED;
  const bool OnlyArg = static_cast<unsigned>(ZeroRegsKind) & ONLY_ARG;

  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const BitVector AllocatableSet(TRI.getAllocatableSet(MF));

  // Mark all used registers.
  BitVector UsedRegs(TRI.getNumRegs());
  if (OnlyUsed)
    for (const MachineBasicBlock &MBB : MF)
      for (const MachineInstr &MI : MBB) {
        // Skip debug instructions.
        if (MI.isDebugInstr())
          continue;

        for (const MachineOperand &MO : MI.operands()) {
          if (!MO.isReg())
            continue;

          MCRegister Reg = MO.getReg();
          if (AllocatableSet[Reg] && !MO.isImplicit() &&
              (MO.isDef() || MO.isUse()))
            UsedRegs.set(Reg);
        }
      }

  // Get a list of registers that are used.
  BitVector LiveIns(TRI.getNumRegs());
  for (const MachineBasicBlock::RegisterMaskPair &LI : MF.front().liveins())
    LiveIns.set(LI.PhysReg);

  BitVector RegsToZero(TRI.getNumRegs());
  for (MCRegister Reg : AllocatableSet.set_bits()) {
    // Skip over fixed registers.
    if (TRI.isFixedRegister(MF, Reg))
      continue;

    // Want only general purpose registers.
    if (OnlyGPR && !TRI.isGeneralPurposeRegister(MF, Reg))
      continue;

    // Want only used registers.
    if (OnlyUsed && !UsedRegs[Reg])
      continue;

    // Want only registers used for arguments.
    if (OnlyArg) {
      if (OnlyUsed) {
        if (!LiveIns[Reg])
          continue;
      } else if (!TRI.isArgumentRegister(MF, Reg)) {
        continue;
      }
    }

    RegsToZero.set(Reg);
  }

  // Don't clear registers that are live when leaving the function.
  for (const MachineBasicBlock &MBB : MF)
    for (const MachineInstr &MI : MBB.terminators()) {
      if (!MI.isReturn())
        continue;

      for (const auto &MO : MI.operands()) {
        if (!MO.isReg())
          continue;

        MCRegister Reg = MO.getReg();

        // This picks up sibling registers (e.g. %al -> %ah).
        for (MCRegUnit Unit : TRI.regunits(Reg))
          RegsToZero.reset(Unit);

        for (MCPhysReg SReg : TRI.sub_and_superregs_inclusive(Reg))
          RegsToZero.reset(SReg);
      }
    }

  // Don't need to clear registers that are used/clobbered by terminating
  // instructions.
  for (const MachineBasicBlock &MBB : MF) {
    if (!MBB.isReturnBlock())
      continue;

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();
    for (MachineBasicBlock::const_iterator I = MBBI, E = MBB.end(); I != E;
         ++I) {
      for (const MachineOperand &MO : I->operands()) {
        if (!MO.isReg())
          continue;

        for (const MCPhysReg &Reg :
             TRI.sub_and_superregs_inclusive(MO.getReg()))
          RegsToZero.reset(Reg);
      }
    }
  }

  // Don't clear registers that must be preserved.
  for (const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
       MCPhysReg CSReg = *CSRegs; ++CSRegs)
    for (MCRegister Reg : TRI.sub_and_superregs_inclusive(CSReg))
      RegsToZero.reset(Reg);

  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
  for (MachineBasicBlock &MBB : MF)
    if (MBB.isReturnBlock())
      TFI.emitZeroCallUsedRegs(RegsToZero, MBB);
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
void PEI::replaceFrameIndices(MachineFunction &MF) {
  const auto &ST = MF.getSubtarget();
  const TargetFrameLowering &TFI = *ST.getFrameLowering();
  if (!TFI.needsFrameIndexResolution(MF))
    return;

  const TargetRegisterInfo *TRI = ST.getRegisterInfo();

  // Allow the target to determine this after knowing the frame size.
  FrameIndexEliminationScavenging = (RS && !FrameIndexVirtualScavenging) ||
      TRI->requiresFrameIndexReplacementScavenging(MF);
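
  // SPAdj tracks the net stack-pointer adjustment that is outstanding (from an
  // open call-frame setup sequence) at the current point; it is seeded from
  // each block's DFS predecessor so frame-index offsets can account for it.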
  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(MF.getNumBlockIDs());
  df_iterator_default_set<MachineBasicBlock*> Reachable;

  // Iterate over the reachable blocks in DFS order.
  for (auto DFI = df_ext_begin(&MF, Reachable), DFE = df_ext_end(&MF, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, MF, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

  // Handle the unreachable blocks.
  for (auto &BB : MF) {
    if (Reachable.count(&BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(&BB, MF, SPAdj);
  }
}

bool PEI::replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
                                      unsigned OpIdx, int SPAdj) {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  if (MI.isDebugValue()) {
    MachineOperand &Op = MI.getOperand(OpIdx);
    assert(MI.isDebugOperand(&Op) &&
           "Frame indices can only appear as a debug operand in a DBG_VALUE*"
           " machine instruction");
    Register Reg;
    unsigned FrameIdx = Op.getIndex();
    unsigned Size = MF.getFrameInfo().getObjectSize(FrameIdx);

    StackOffset Offset = TFI->getFrameIndexReference(MF, FrameIdx, Reg);
    Op.ChangeToRegister(Reg, false /*isDef*/);

    const DIExpression *DIExpr = MI.getDebugExpression();

    // If we have a direct DBG_VALUE, and its location expression isn't
    // currently complex, then adding an offset will morph it into a
    // complex location that is interpreted as being a memory address.
    // This changes a pointer-valued variable to dereference that pointer,
    // which is incorrect. Fix by adding DW_OP_stack_value.

    if (MI.isNonListDebugValue()) {
      unsigned PrependFlags = DIExpression::ApplyOffset;
      if (!MI.isIndirectDebugValue() && !DIExpr->isComplex())
        PrependFlags |= DIExpression::StackValue;

      // If we have a DBG_VALUE that is indirect and has an Implicit location
      // expression, we need to insert a deref before prepending a Memory
      // location expression. Also after doing this we change the DBG_VALUE
      // to be direct.
      if (MI.isIndirectDebugValue() && DIExpr->isImplicit()) {
        SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
        bool WithStackValue = true;
        DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
        // Make the DBG_VALUE direct.
        MI.getDebugOffset().ChangeToRegister(0, false);
      }
      DIExpr = TRI.prependOffsetExpression(DIExpr, PrependFlags, Offset);
    } else {
      // The debug operand at DebugOpIndex was a frame index at offset
      // `Offset`; now the operand has been replaced with the frame
      // register, we must add Offset with `register x, plus Offset`.
      unsigned DebugOpIndex = MI.getDebugOperandIndex(&Op);
      SmallVector<uint64_t, 3> Ops;
      TRI.getOffsetOpcodes(Offset, Ops);
      DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, DebugOpIndex);
    }
    MI.getDebugExpressionOp().setMetadata(DIExpr);
    return true;
  }

  if (MI.isDebugPHI()) {
    // Allow stack ref to continue onwards.
    return true;
  }

  // TODO: This code should be commoned with the code for
  // PATCHPOINT. There's no good reason for the difference in
  // implementation other than historical accident. The only
  // remaining difference is the unconditional use of the stack
  // pointer as the base register.
  if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
    assert((!MI.isDebugValue() || OpIdx == 0) &&
           "Frame indices can only appear as the first operand of a "
           "DBG_VALUE machine instruction");
    Register Reg;
    MachineOperand &Offset = MI.getOperand(OpIdx + 1);
    StackOffset refOffset = TFI->getFrameIndexReferencePreferSP(
        MF, MI.getOperand(OpIdx).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
    assert(!refOffset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    Offset.setImm(Offset.getImm() + refOffset.getFixed() + SPAdj);
    MI.getOperand(OpIdx).ChangeToRegister(Reg, false /*isDef*/);
    return true;
  }
  return false;
}

void PEI::replaceFrameIndicesBackward(MachineBasicBlock *BB,
                                      MachineFunction &MF, int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");

  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  RegScavenger *LocalRS = FrameIndexEliminationScavenging ? RS : nullptr;
  if (LocalRS)
    LocalRS->enterBasicBlockEnd(*BB);

  for (MachineInstr &MI : make_early_inc_range(reverse(*BB))) {
    if (TII.isFrameInstr(MI)) {
      TFI.eliminateCallFramePseudoInstr(MF, *BB, &MI);
      continue;
    }

    // Step backwards to get the liveness state at (immediately after) MI.
    if (LocalRS)
      LocalRS->backward(MI);

    for (unsigned i = 0; i != MI.getNumOperands(); ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      if (replaceFrameIndexDebugInstr(MF, MI, i, SPAdj))
        continue;

      // Eliminate this FrameIndex operand.
      //
      // Save and restore the scavenger's position around the call to
      // eliminateFrameIndex in case it erases MI and invalidates the iterator.
      MachineBasicBlock::iterator Save;
      if (LocalRS)
        Save = std::next(LocalRS->getCurrentPosition());
      bool Removed = TRI.eliminateFrameIndex(MI, SPAdj, i, RS);
      if (LocalRS)
        LocalRS->skipTo(std::prev(Save));

      if (Removed)
        break;
    }
  }
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                              int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  if (TRI.supportsBackwardScavenger())
    return replaceFrameIndicesBackward(BB, MF, SPAdj);

  if (RS && FrameIndexEliminationScavenging)
    RS->enterBasicBlock(*BB);

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
    if (TII.isFrameInstr(*I)) {
      InsideCallSequence = TII.isFrameSetup(*I);
      SPAdj += TII.getSPAdjust(*I);
      I = TFI->eliminateCallFramePseudoInstr(MF, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      if (replaceFrameIndexDebugInstr(MF, MI, i, SPAdj))
        continue;

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i,
                              FrameIndexEliminationScavenging ? RS : nullptr);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end()) ++I;

    // Update register states.
    if (RS && FrameIndexEliminationScavenging && DidFinishLoop)
      RS->forward(MI);
  }
}