//===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee saved registers, and for emitting prolog & epilog code for the
// function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
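// At a high level, runOnMachineFunction works through the following phases:
// computing call frame information, choosing the save and restore blocks,
// spilling callee-saved registers, assigning frame object offsets, emitting
// prologue and epilogue code, and finally rewriting abstract frame-index
// operands into concrete register-plus-offset references.
//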
//===----------------------------------------------------------------------===//
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "prologepilog"

using MBBVector = SmallVector<MachineBasicBlock *, 4>;

STATISTIC(NumLeafFuncWithSpills, "Number of leaf functions with CSRs");
STATISTIC(NumFuncSeen, "Number of functions seen in PEI");

namespace {

class PEI : public MachineFunctionPass {
public:
  static char ID;

  PEI() : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  /// frame indexes with appropriate references.
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  RegScavenger *RS = nullptr;

  // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
  // stack frame indexes.
  unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  unsigned MaxCSFrameIndex = 0;

  // Save and Restore blocks of the current function. Typically there is a
  // single save block, unless Windows EH funclets are involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

  // Flag to control whether to use the register scavenger to resolve
  // frame index materialization registers. Set according to
  // TRI->requiresFrameIndexScavenging() for the current function.
  bool FrameIndexVirtualScavenging = false;

  // Flag to control whether the scavenger should be passed even though
  // FrameIndexVirtualScavenging is used.
  bool FrameIndexEliminationScavenging = false;

  MachineOptimizationRemarkEmitter *ORE = nullptr;

  void calculateCallFrameInfo(MachineFunction &MF);
  void calculateSaveRestoreBlocks(MachineFunction &MF);
  void spillCalleeSavedRegs(MachineFunction &MF);

  void calculateFrameObjectOffsets(MachineFunction &MF);
  void replaceFrameIndices(MachineFunction &MF);
  void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                           int &SPAdj);
  // Frame indices in debug values are encoded in a target independent
  // way with simply the frame index and offset rather than any
  // target-specific addressing mode.
  bool replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
                                   unsigned OpIdx, int SPAdj = 0);
  // Does the same as replaceFrameIndices but using the backward MIR walk and
  // backward register scavenger walk.
  void replaceFrameIndicesBackward(MachineFunction &MF);
  void replaceFrameIndicesBackward(MachineBasicBlock *BB, MachineFunction &MF,
                                   int &SPAdj);

  void insertPrologEpilogCode(MachineFunction &MF);
  void insertZeroCallUsedRegs(MachineFunction &MF);
};

} // end anonymous namespace

char PEI::ID = 0;

char &llvm::PrologEpilogCodeInserterID = PEI::ID;

INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
                    "Prologue/Epilogue Insertion & Frame Finalization", false,
                    false)

MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
  return new PEI();
}

STATISTIC(NumBytesStackSpace,
          "Number of bytes used for stack in all functions");

void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineLoopInfoWrapperPass>();
  AU.addPreserved<MachineDominatorTreeWrapperPass>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// StackObjSet - A set of stack object indexes
using StackObjSet = SmallSetVector<int, 8>;

using SavedDbgValuesMap =
    SmallDenseMap<MachineBasicBlock *, SmallVector<MachineInstr *, 4>, 4>;

/// Stash DBG_VALUEs that describe parameters and which are placed at the start
/// of the block. Later on, after the prologue code has been emitted, the
/// stashed DBG_VALUEs will be reinserted at the start of the block.
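/// Stashing keeps these parameter locations ahead of the prologue code that is
/// inserted at the start of the block; DBG_VALUEs that use a frame index are
/// left in place because their locations only become valid once the frame has
/// been set up.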
static void stashEntryDbgValues(MachineBasicBlock &MBB,
                                SavedDbgValuesMap &EntryDbgValues) {
  SmallVector<const MachineInstr *, 4> FrameIndexValues;

  for (auto &MI : MBB) {
    if (!MI.isDebugInstr())
      break;
    if (!MI.isDebugValue() || !MI.getDebugVariable()->isParameter())
      continue;
    if (any_of(MI.debug_operands(),
               [](const MachineOperand &MO) { return MO.isFI(); })) {
      // We can only emit valid locations for frame indices after the frame
      // setup, so do not stash them away.
      FrameIndexValues.push_back(&MI);
      continue;
    }
    const DILocalVariable *Var = MI.getDebugVariable();
    const DIExpression *Expr = MI.getDebugExpression();
    auto Overlaps = [Var, Expr](const MachineInstr *DV) {
      return Var == DV->getDebugVariable() &&
             Expr->fragmentsOverlap(DV->getDebugExpression());
    };
    // See if the debug value overlaps with any preceding debug value that will
    // not be stashed. If that is the case, then we can't stash this value, as
    // we would then reorder the values at reinsertion.
    if (llvm::none_of(FrameIndexValues, Overlaps))
      EntryDbgValues[&MBB].push_back(&MI);
  }

  // Remove stashed debug values from the block.
  if (auto It = EntryDbgValues.find(&MBB); It != EntryDbgValues.end())
    for (auto *MI : It->second)
      MI->removeFromParent();
}

/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
bool PEI::runOnMachineFunction(MachineFunction &MF) {
  NumFuncSeen++;
  const Function &F = MF.getFunction();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr;
  FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();

  // Spill frame pointer and/or base pointer registers if they are clobbered.
  // It is placed before call frame instruction elimination so it will not mess
  // with stack arguments.
  TFI->spillFPBP(MF);

  // Calculate the MaxCallFrameSize value for the function's frame
  // information. Also eliminates call frame pseudo instructions.
  calculateCallFrameInfo(MF);

  // Determine placement of CSR spill/restore code and prolog/epilog code:
  // place all spills in the entry block, all restores in return blocks.
  calculateSaveRestoreBlocks(MF);

  // Stash away DBG_VALUEs that should not be moved by insertion of prolog
  // code.
  SavedDbgValuesMap EntryDbgValues;
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    stashEntryDbgValues(*SaveBlock, EntryDbgValues);

  // Handle CSR spilling and restoring, for targets that need it.
  if (MF.getTarget().usesPhysRegsForValues())
    spillCalleeSavedRegs(MF);

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameFinalized(MF, RS);

  // Calculate actual frame offsets for all abstract stack objects...
  calculateFrameObjectOffsets(MF);

  // Add prolog and epilog code to the function. This function is required
  // to align the stack frame as necessary for any stack variables or
  // called functions. Because of this, calculateCalleeSavedRegisters()
  // must be called before this function in order to set the AdjustsStack
  // and MaxCallFrameSize variables.
  if (!F.hasFnAttribute(Attribute::Naked))
    insertPrologEpilogCode(MF);

  // Reinsert stashed debug values at the start of the entry blocks.
  for (auto &I : EntryDbgValues)
    I.first->insert(I.first->begin(), I.second.begin(), I.second.end());

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameIndicesReplaced(MF, RS);

  // Replace all MO_FrameIndex operands with physical register references
  // and actual offsets.
  if (TFI->needsFrameIndexResolution(MF)) {
    // Allow the target to determine this after knowing the frame size.
    FrameIndexEliminationScavenging =
        (RS && !FrameIndexVirtualScavenging) ||
        TRI->requiresFrameIndexReplacementScavenging(MF);

    if (TRI->eliminateFrameIndicesBackwards())
      replaceFrameIndicesBackward(MF);
    else
      replaceFrameIndices(MF);
  }

  // If register scavenging is needed, as we've enabled doing it as a
  // post-pass, scavenge the virtual registers that frame index elimination
  // inserted.
  if (TRI->requiresRegisterScavenging(MF) && FrameIndexVirtualScavenging)
    scavengeFrameVirtualRegs(MF, *RS);

  // Warn on stack size when we exceed the given limit.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();

  uint64_t Threshold = TFI->getStackThreshold();
  if (MF.getFunction().hasFnAttribute("warn-stack-size")) {
    bool Failed = MF.getFunction()
                      .getFnAttribute("warn-stack-size")
                      .getValueAsString()
                      .getAsInteger(10, Threshold);
    // Verifier should have caught this.
    assert(!Failed && "Invalid warn-stack-size fn attr value");
    (void)Failed;
  }
  uint64_t UnsafeStackSize = MFI.getUnsafeStackSize();
  if (MF.getFunction().hasFnAttribute(Attribute::SafeStack))
    StackSize += UnsafeStackSize;

  if (StackSize > Threshold) {
    DiagnosticInfoStackSize DiagStackSize(F, StackSize, Threshold, DS_Warning);
    F.getContext().diagnose(DiagStackSize);
    int64_t SpillSize = 0;
    for (int Idx = MFI.getObjectIndexBegin(), End = MFI.getObjectIndexEnd();
         Idx != End; ++Idx) {
      if (MFI.isSpillSlotObjectIndex(Idx))
        SpillSize += MFI.getObjectSize(Idx);
    }

    [[maybe_unused]] float SpillPct =
        static_cast<float>(SpillSize) / static_cast<float>(StackSize);
    LLVM_DEBUG(
        dbgs() << formatv("{0}/{1} ({3:P}) spills, {2}/{1} ({4:P}) variables",
                          SpillSize, StackSize, StackSize - SpillSize, SpillPct,
                          1.0f - SpillPct));
    if (UnsafeStackSize != 0) {
      LLVM_DEBUG(dbgs() << formatv(", {0}/{2} ({1:P}) unsafe stack",
                                   UnsafeStackSize,
                                   static_cast<float>(UnsafeStackSize) /
                                       static_cast<float>(StackSize),
                                   StackSize));
    }
    LLVM_DEBUG(dbgs() << "\n");
  }

  ORE->emit([&]() {
    return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
                                             MF.getFunction().getSubprogram(),
                                             &MF.front())
           << ore::NV("NumStackBytes", StackSize)
           << " stack bytes in function '"
           << ore::NV("Function", MF.getFunction().getName()) << "'";
  });

  // Emit any remarks implemented for the target, based on final frame layout.
  TFI->emitRemarks(MF, ORE);

  delete RS;
  SaveBlocks.clear();
  RestoreBlocks.clear();
  MFI.setSavePoint(nullptr);
  MFI.setRestorePoint(nullptr);
  return true;
}

/// Calculate the MaxCallFrameSize variable for the function's frame
/// information and eliminate call frame pseudo instructions.
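/// The pseudos in question are the target's call frame setup and destroy
/// opcodes (for example, ADJCALLSTACKDOWN/ADJCALLSTACKUP on several targets),
/// which bracket the stack adjustment made around each call.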
void PEI::calculateCallFrameInfo(MachineFunction &MF) {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Get the function call frame set-up and tear-down instruction opcode
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  // (Re-)Compute the MaxCallFrameSize.
  [[maybe_unused]] uint64_t MaxCFSIn =
      MFI.isMaxCallFrameSizeComputed() ? MFI.getMaxCallFrameSize() : UINT64_MAX;
  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  MFI.computeMaxCallFrameSize(MF, &FrameSDOps);
  assert(MFI.getMaxCallFrameSize() <= MaxCFSIn &&
         "Recomputing MaxCFS gave a larger value.");
  assert((FrameSDOps.empty() || MF.getFrameInfo().adjustsStack()) &&
         "AdjustsStack not set in presence of a frame pseudo instruction.");

  if (TFI->canSimplifyCallFramePseudos(MF)) {
    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    for (MachineBasicBlock::iterator I : FrameSDOps)
      TFI->eliminateCallFramePseudoInstr(MF, *I->getParent(), I);

    // We can't track the call frame size after call frame pseudos have been
    // eliminated. Set it to zero everywhere to keep MachineVerifier happy.
    for (MachineBasicBlock &MBB : MF)
      MBB.setCallFrameSize(0);
  }
}

/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Even when we do not change any CSR, we still want to insert the
  // prologue and epilogue of the function.
  // So set the save points for those.

  // Use the points found by shrink-wrapping, if any.
  if (MFI.getSavePoint()) {
    SaveBlocks.push_back(MFI.getSavePoint());
    assert(MFI.getRestorePoint() && "Both restore and save must be set");
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    // If RestoreBlock does not have any successor and is not a return block
    // then the end point is unreachable and we do not need to insert any
    // epilogue.
    if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
      RestoreBlocks.push_back(RestoreBlock);
    return;
  }

  // Save refs to entry and return blocks.
  SaveBlocks.push_back(&MF.front());
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry())
      SaveBlocks.push_back(&MBB);
    if (MBB.isReturnBlock())
      RestoreBlocks.push_back(&MBB);
  }
}

static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
  if (SavedRegs.empty())
    return;

  const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();
  BitVector CSMask(SavedRegs.size());

  for (unsigned i = 0; CSRegs[i]; ++i)
    CSMask.set(CSRegs[i]);

  std::vector<CalleeSavedInfo> CSI;
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    if (SavedRegs.test(Reg)) {
      bool SavedSuper = false;
      for (const MCPhysReg &SuperReg : RegInfo->superregs(Reg)) {
        // Some backends set all aliases for some registers as saved, such as
        // Mips's $fp, so they appear in SavedRegs but not CSRegs.
        if (SavedRegs.test(SuperReg) && CSMask.test(SuperReg)) {
          SavedSuper = true;
          break;
        }
      }

      if (!SavedSuper)
        CSI.push_back(CalleeSavedInfo(Reg));
    }
  }

  const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = F.getFrameInfo();
  if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI, MinCSFrameIndex,
                                        MaxCSFrameIndex)) {
    // If target doesn't implement this, use generic code.

    if (CSI.empty())
      return; // Early exit if no callee saved registers are modified!

    unsigned NumFixedSpillSlots;
    const TargetFrameLowering::SpillSlot *FixedSpillSlots =
        TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);

    // Now that we know which registers need to be saved and restored, allocate
    // stack slots for them.
    for (auto &CS : CSI) {
      // If the target has spilled this register to another register, we don't
      // need to allocate a stack slot.
      if (CS.isSpilledToReg())
        continue;

      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

      int FrameIdx;
      if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
        CS.setFrameIdx(FrameIdx);
        continue;
      }

      // Check to see if this physreg must be spilled to a particular stack
      // slot on this target.
      const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
      while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
             FixedSlot->Reg != Reg)
        ++FixedSlot;

      unsigned Size = RegInfo->getSpillSize(*RC);
      if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
        // Nope, just spill it anywhere convenient.
        Align Alignment = RegInfo->getSpillAlign(*RC);
        // We may not be able to satisfy the desired alignment specification of
        // the TargetRegisterClass if the stack alignment is smaller. Use the
        // min.
        Alignment = std::min(Alignment, TFI->getStackAlign());
        FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
        if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
        if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
      } else {
        // Spill it to the stack where we must.
        FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
      }

      CS.setFrameIdx(FrameIdx);
    }
  }

  MFI.setCalleeSavedInfo(CSI);
}

/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Visited will contain all the basic blocks that are in the region
  // where the callee saved registers are alive:
  // - Anything that is not Save or Restore -> LiveThrough.
  // - Save -> LiveIn.
  // - Restore -> LiveOut.
  // The live-out is not attached to the block, so no need to keep
  // Restore in this set.
  SmallPtrSet<MachineBasicBlock *, 8> Visited;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  MachineBasicBlock *Entry = &MF.front();
  MachineBasicBlock *Save = MFI.getSavePoint();

  if (!Save)
    Save = Entry;

  if (Entry != Save) {
    WorkList.push_back(Entry);
    Visited.insert(Entry);
  }
  Visited.insert(Save);

  MachineBasicBlock *Restore = MFI.getRestorePoint();
  if (Restore)
    // By construction Restore cannot be visited, otherwise it
    // means there exists a path to Restore that does not go
    // through Save.
    WorkList.push_back(Restore);

  while (!WorkList.empty()) {
    const MachineBasicBlock *CurBB = WorkList.pop_back_val();
    // By construction, the region that is after the save point is
    // dominated by the Save and post-dominated by the Restore.
    if (CurBB == Save && Save != Restore)
      continue;
    // Enqueue all the successors not already visited.
    // Those are by construction either before Save or after Restore.
    for (MachineBasicBlock *SuccBB : CurBB->successors())
      if (Visited.insert(SuccBB).second)
        WorkList.push_back(SuccBB);
  }

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (const CalleeSavedInfo &I : CSI) {
    for (MachineBasicBlock *MBB : Visited) {
      MCPhysReg Reg = I.getReg();
      // Add the callee-saved register as live-in.
      // It's killed at the spill.
      if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
        MBB->addLiveIn(Reg);
    }
    // If callee-saved register is spilled to another register rather than
    // spilling to stack, the destination register has to be marked as live for
    // each MBB between the prologue and epilogue so that it is not clobbered
    // before it is reloaded in the epilogue. The Visited set contains all
    // blocks outside of the region delimited by prologue/epilogue.
    if (I.isSpilledToReg()) {
      for (MachineBasicBlock &MBB : MF) {
        if (Visited.count(&MBB))
          continue;
        MCPhysReg DstReg = I.getDstReg();
        if (!MBB.isLiveIn(DstReg))
          MBB.addLiveIn(DstReg);
      }
    }
  }
}

/// Insert spill code for the callee-saved registers used in the function.
static void insertCSRSaves(MachineBasicBlock &SaveBlock,
                           ArrayRef<CalleeSavedInfo> CSI) {
  MachineFunction &MF = *SaveBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineBasicBlock::iterator I = SaveBlock.begin();
  if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CS : CSI) {
      // Insert the spill to the stack frame.
      unsigned Reg = CS.getReg();

      if (CS.isSpilledToReg()) {
        BuildMI(SaveBlock, I, DebugLoc(),
                TII.get(TargetOpcode::COPY), CS.getDstReg())
            .addReg(Reg, getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
                                TRI, Register());
      }
    }
  }
}

/// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              std::vector<CalleeSavedInfo> &CSI) {
  MachineFunction &MF = *RestoreBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  // Restore all registers immediately before the return and any
  // terminators that precede it.
  MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();

  if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CI : reverse(CSI)) {
      unsigned Reg = CI.getReg();
      if (CI.isSpilledToReg()) {
        BuildMI(RestoreBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY), Reg)
            .addReg(CI.getDstReg(), getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC,
                                 TRI, Register());
        assert(I != RestoreBlock.begin() &&
               "loadRegFromStackSlot didn't insert any code!");
        // Insert in reverse order. loadRegFromStackSlot can insert
        // multiple instructions.
        --I;
      }
    }
  }
}

void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
  // We can't list this requirement in getRequiredProperties because some
  // targets (WebAssembly) use virtual registers past this point, and the pass
  // pipeline is set up without giving the passes a chance to look at the
  // TargetMachine.
  // FIXME: Find a way to express this in getRequiredProperties.
  assert(MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  const Function &F = MF.getFunction();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(MF, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

  // Add the code to save and restore the callee saved registers.
  if (!F.hasFnAttribute(Attribute::Naked)) {
    MFI.setCalleeSavedInfoValid(true);

    std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
    if (!CSI.empty()) {
      if (!MFI.hasCalls())
        NumLeafFuncWithSpills++;

      for (MachineBasicBlock *SaveBlock : SaveBlocks)
        insertCSRSaves(*SaveBlock, CSI);

      // Update the live-in information of all the blocks up to the save point.
      updateLiveness(MF);

      for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
        insertCSRRestores(*RestoreBlock, CSI);
    }
  }
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
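/// For a down-growing stack the running Offset is the distance from the
/// incoming stack pointer, so, for example, an 8-byte object with 8-byte
/// alignment placed when Offset is 20 advances Offset to alignTo(20 + 8, 8)
/// = 32 and the object is assigned frame offset -32.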
static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, int64_t &Offset,
                                     Align &MaxAlign) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  Align Alignment = MFI.getObjectAlign(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Alignment);

  // Adjust to alignment boundary.
  Offset = alignTo(Offset, Alignment);

  if (StackGrowsDown) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

/// Compute which bytes of fixed and callee-save stack area are unused and keep
/// track of them in StackBytesFree.
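/// StackBytesFree is indexed by a byte's distance from the start of the local
/// area in the direction of stack growth (the negated object offset when the
/// stack grows down); a set bit marks a byte not covered by any fixed or
/// callee-save object.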
static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
  // Avoid undefined int64_t -> int conversion below in extreme case.
  if (FixedCSEnd > std::numeric_limits<int>::max())
    return;

  StackBytesFree.resize(FixedCSEnd, true);

  SmallVector<int, 16> AllocatedFrameSlots;
  // Add fixed objects.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
    // StackSlot scavenging is only implemented for the default stack.
    if (MFI.getStackID(i) == TargetStackID::Default)
      AllocatedFrameSlots.push_back(i);
  // Add callee-save objects if there are any.
  if (MinCSFrameIndex <= MaxCSFrameIndex) {
    for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
      if (MFI.getStackID(i) == TargetStackID::Default)
        AllocatedFrameSlots.push_back(i);
  }

  for (int i : AllocatedFrameSlots) {
    // These are converted from int64_t, but they should always fit in int
    // because of the FixedCSEnd check above.
    int ObjOffset = MFI.getObjectOffset(i);
    int ObjSize = MFI.getObjectSize(i);
    int ObjStart, ObjEnd;
    if (StackGrowsDown) {
      // ObjOffset is negative when StackGrowsDown is true.
      ObjStart = -ObjOffset - ObjSize;
      ObjEnd = -ObjOffset;
    } else {
      ObjStart = ObjOffset;
      ObjEnd = ObjOffset + ObjSize;
    }
    // Ignore fixed holes that are in the previous stack frame.
    if (ObjEnd > 0)
      StackBytesFree.reset(ObjStart, ObjEnd);
  }
}

/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range. Return true if the allocation was successful.
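/// Candidate holes come from StackBytesFree as computed by
/// computeFreeStackSlots; an object is only placed into a hole whose bytes are
/// all free and whose position satisfies the object's alignment.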
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, Align MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none()
    StackBytesFree.clear();
    return false;
  }

  Align ObjAlign = MFI.getObjectAlign(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << ObjStart << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << FreeStart << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                  SmallSet<int, 16> &ProtectedObjs,
                                  MachineFrameInfo &MFI, bool StackGrowsDown,
                                  int64_t &Offset, Align &MaxAlign) {

  for (int i : UnassignedObjs) {
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
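/// Objects are laid out in the following order: fixed objects (already
/// placed), callee-saved spill slots, optionally the register-scavenger slots
/// (when they must sit near the incoming SP), the pre-allocated local block,
/// the stack protector and the objects it guards, the EH registration node,
/// and finally the remaining objects, before the frame size is rounded up to
/// the required alignment.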
void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  bool StackGrowsDown =
      TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0
         && "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

#ifdef EXPENSIVE_CHECKS
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
    if (!MFI.isDeadObjectIndex(i) &&
        MFI.getStackID(i) == TargetStackID::Default)
      assert(MFI.getObjectAlign(i) <= MFI.getMaxAlign() &&
             "MaxAlignment is invalid");
#endif

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // Adjust 'Offset' to point to the end of last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at lower address of
      // the object -- which is given by offset. For down growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the start pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  Align MaxAlign = MFI.getMaxAlign();
  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (MaxCSFrameIndex >= MinCSFrameIndex) {
    for (unsigned i = 0; i <= MaxCSFrameIndex - MinCSFrameIndex; ++i) {
      unsigned FrameIndex =
          StackGrowsDown ? MinCSFrameIndex + i : MaxCSFrameIndex - i;

      // Only allocate objects on the default stack.
      if (MFI.getStackID(FrameIndex) != TargetStackID::Default)
        continue;

      // TODO: should this just be if (MFI.isDeadObjectIndex(FrameIndex))
      if (!StackGrowsDown && MFI.isDeadObjectIndex(FrameIndex))
        continue;

      AdjustStackOffset(MFI, FrameIndex, StackGrowsDown, Offset, MaxAlign);
    }
  }

  assert(MaxAlign == MFI.getMaxAlign() &&
         "MFI.getMaxAlign should already account for all callee-saved "
         "registers without a fixed stack slot");

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots =
      TFI.allocateScavengingFrameIndexesNearIncomingSP(MF);
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    Align Alignment = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Alignment);

    LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
                        << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Alignment, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
  if (const WinEHFuncInfo *FuncInfo = MF.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.hasStackProtectorIndex()) {
    int StackProtectorFI = MFI.getStackProtectorIndex();
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    // If we need a stack protector, we need to make sure that
    // LocalStackSlotPass didn't already allocate a slot for it.
    // If we are told to use the LocalStackAllocationBlock, the stack protector
    // is expected to be already pre-allocated.
    if (MFI.getStackID(StackProtectorFI) != TargetStackID::Default) {
      // If the stack protector isn't on the default stack then it's up to the
      // target to set the stack offset.
      assert(MFI.getObjectOffset(StackProtectorFI) != 0 &&
             "Offset of stack protector on non-default stack expected to be "
             "already set.");
      assert(!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex()) &&
             "Stack protector on non-default stack expected to not be "
             "pre-allocated by LocalStackSlotPass.");
    } else if (!MFI.getUseLocalStackAllocationBlock()) {
      AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset,
                        MaxAlign);
    } else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) {
      llvm_unreachable(
          "Stack protector not pre-allocated by LocalStackSlotPass.");
    }

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (StackProtectorFI == (int)i || EHRegNodeFrameIndex == (int)i)
        continue;
      // Only allocate objects on the default stack.
      if (MFI.getStackID(i) != TargetStackID::Default)
        continue;

      switch (MFI.getObjectSSPLayout(i)) {
      case MachineFrameInfo::SSPLK_None:
        continue;
      case MachineFrameInfo::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    // We expect **all** the protected stack objects to be pre-allocated by
    // LocalStackSlotPass. If it turns out that PEI still has to allocate some
    // of them, we may end up messing up the expected order of the objects.
    if (MFI.getUseLocalStackAllocationBlock() &&
        !(LargeArrayObjs.empty() && SmallArrayObjs.empty() &&
          AddrOfObjs.empty()))
      llvm_unreachable("Found protected stack objects not pre-allocated by "
                       "LocalStackSlotPass.");

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i || EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign);

  // Give the targets a chance to order the objects the way they like it.
  if (MF.getTarget().getOptLevel() != CodeGenOptLevel::None &&
      MF.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(MF, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so
  // we can use the holes when allocating later stack objects. Only do this if
  // stack protector isn't being used and the target requests it and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      MF.getTarget().getOptLevel() != CodeGenOptLevel::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(MF))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
                          FixedCSEnd, StackBytesFree);

  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(MF))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    Align StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->hasStackRealignment(MF) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlign();
    else
      StackAlign = TFI.getTransientStackAlign();

    // If the frame pointer is eliminated, all frame offsets will be relative to
    // SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    int64_t OffsetBeforeAlignment = Offset;
    Offset = alignTo(Offset, StackAlign);

    // If we have increased the offset to fulfill the alignment constraints,
    // then the scavenging spill slots may become harder to reach from the
    // stack pointer, float them so they stay close.
    if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS &&
        !EarlyScavengingSlots) {
      SmallVector<int, 2> SFIs;
      RS->getScavengingFrameIndices(SFIs);
      LLVM_DEBUG(if (!SFIs.empty()) llvm::dbgs()
                     << "Adjusting emergency spill slots!\n";);
      int64_t Delta = Offset - OffsetBeforeAlignment;
      for (int SFI : SFIs) {
        LLVM_DEBUG(llvm::dbgs()
                       << "Adjusting offset of emergency spill slot #" << SFI
                       << " from " << MFI.getObjectOffset(SFI););
        MFI.setObjectOffset(SFI, MFI.getObjectOffset(SFI) - Delta);
        LLVM_DEBUG(llvm::dbgs() << " to " << MFI.getObjectOffset(SFI) << "\n";);
      }
    }
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
void PEI::insertPrologEpilogCode(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(MF, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(MF, *RestoreBlock);

  // Zero call used registers before restoring callee-saved registers.
  insertZeroCallUsedRegs(MF);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(MF, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it. This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (MF.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(MF, *SaveBlock);
  }

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (MF.getFunction().getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(MF, *SaveBlock);
}

/// insertZeroCallUsedRegs - Zero out call used registers.
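/// This implements the "zero-call-used-regs" function attribute (Clang's
/// -fzero-call-used-regs), zeroing the selected set of registers in each
/// return block.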
void PEI::insertZeroCallUsedRegs(MachineFunction &MF) {
  const Function &F = MF.getFunction();

  if (!F.hasFnAttribute("zero-call-used-regs"))
    return;

  using namespace ZeroCallUsedRegs;

  ZeroCallUsedRegsKind ZeroRegsKind =
      StringSwitch<ZeroCallUsedRegsKind>(
          F.getFnAttribute("zero-call-used-regs").getValueAsString())
          .Case("skip", ZeroCallUsedRegsKind::Skip)
          .Case("used-gpr-arg", ZeroCallUsedRegsKind::UsedGPRArg)
          .Case("used-gpr", ZeroCallUsedRegsKind::UsedGPR)
          .Case("used-arg", ZeroCallUsedRegsKind::UsedArg)
          .Case("used", ZeroCallUsedRegsKind::Used)
          .Case("all-gpr-arg", ZeroCallUsedRegsKind::AllGPRArg)
          .Case("all-gpr", ZeroCallUsedRegsKind::AllGPR)
          .Case("all-arg", ZeroCallUsedRegsKind::AllArg)
          .Case("all", ZeroCallUsedRegsKind::All);

  if (ZeroRegsKind == ZeroCallUsedRegsKind::Skip)
    return;

  const bool OnlyGPR = static_cast<unsigned>(ZeroRegsKind) & ONLY_GPR;
  const bool OnlyUsed = static_cast<unsigned>(ZeroRegsKind) & ONLY_USED;
  const bool OnlyArg = static_cast<unsigned>(ZeroRegsKind) & ONLY_ARG;

  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const BitVector AllocatableSet(TRI.getAllocatableSet(MF));

  // Mark all used registers.
  BitVector UsedRegs(TRI.getNumRegs());
  if (OnlyUsed)
    for (const MachineBasicBlock &MBB : MF)
      for (const MachineInstr &MI : MBB) {
        // skip debug instructions
        if (MI.isDebugInstr())
          continue;

        for (const MachineOperand &MO : MI.operands()) {
          if (!MO.isReg())
            continue;

          MCRegister Reg = MO.getReg();
          if (AllocatableSet[Reg.id()] && !MO.isImplicit() &&
              (MO.isDef() || MO.isUse()))
            UsedRegs.set(Reg.id());
        }
      }

  // Get a list of registers that are used.
  BitVector LiveIns(TRI.getNumRegs());
  for (const MachineBasicBlock::RegisterMaskPair &LI : MF.front().liveins())
    LiveIns.set(LI.PhysReg);

  BitVector RegsToZero(TRI.getNumRegs());
  for (MCRegister Reg : AllocatableSet.set_bits()) {
    // Skip over fixed registers.
    if (TRI.isFixedRegister(MF, Reg))
      continue;

    // Want only general purpose registers.
    if (OnlyGPR && !TRI.isGeneralPurposeRegister(MF, Reg))
      continue;

    // Want only used registers.
    if (OnlyUsed && !UsedRegs[Reg.id()])
      continue;

    // Want only registers used for arguments.
    if (OnlyArg) {
      if (OnlyUsed) {
        if (!LiveIns[Reg.id()])
          continue;
      } else if (!TRI.isArgumentRegister(MF, Reg)) {
        continue;
      }
    }

    RegsToZero.set(Reg.id());
  }

  // Don't clear registers that are live when leaving the function.
  for (const MachineBasicBlock &MBB : MF)
    for (const MachineInstr &MI : MBB.terminators()) {
      if (!MI.isReturn())
        continue;

      for (const auto &MO : MI.operands()) {
        if (!MO.isReg())
          continue;

        MCRegister Reg = MO.getReg();
        if (!Reg)
          continue;

        // This picks up sibling registers (e.g. %al -> %ah).
        for (MCRegUnit Unit : TRI.regunits(Reg))
          RegsToZero.reset(Unit);

        for (MCPhysReg SReg : TRI.sub_and_superregs_inclusive(Reg))
          RegsToZero.reset(SReg);
      }
    }

  // Don't need to clear registers that are used/clobbered by terminating
  // instructions.
  for (const MachineBasicBlock &MBB : MF) {
    if (!MBB.isReturnBlock())
      continue;

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();
    for (MachineBasicBlock::const_iterator I = MBBI, E = MBB.end(); I != E;
         ++I) {
      for (const MachineOperand &MO : I->operands()) {
        if (!MO.isReg())
          continue;

        MCRegister Reg = MO.getReg();
        if (!Reg)
          continue;

        for (const MCPhysReg Reg : TRI.sub_and_superregs_inclusive(Reg))
          RegsToZero.reset(Reg);
      }
    }
  }

  // Don't clear registers that must be preserved.
  for (const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
       MCPhysReg CSReg = *CSRegs; ++CSRegs)
    for (MCRegister Reg : TRI.sub_and_superregs_inclusive(CSReg))
      RegsToZero.reset(Reg.id());

  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
  for (MachineBasicBlock &MBB : MF)
    if (MBB.isReturnBlock())
      TFI.emitZeroCallUsedRegs(RegsToZero, MBB);
}

/// Replace all FrameIndex operands with physical register references and
/// actual offsets.
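///
/// This variant walks each block backwards so that the register scavenger,
/// when one is in use, can recompute liveness on the fly while frame indices
/// are eliminated.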
void PEI::replaceFrameIndicesBackward(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  for (auto &MBB : MF) {
    int SPAdj = 0;
    if (!MBB.succ_empty()) {
      // Get the SP adjustment for the end of MBB from the start of any of its
      // successors. They should all be the same.
      assert(all_of(MBB.successors(), [&MBB](const MachineBasicBlock *Succ) {
        return Succ->getCallFrameSize() ==
               (*MBB.succ_begin())->getCallFrameSize();
      }));
      const MachineBasicBlock &FirstSucc = **MBB.succ_begin();
      SPAdj = TFI.alignSPAdjust(FirstSucc.getCallFrameSize());
      if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp)
        SPAdj = -SPAdj;
    }

    replaceFrameIndicesBackward(&MBB, MF, SPAdj);

    // We can't track the call frame size after call frame pseudos have been
    // eliminated. Set it to zero everywhere to keep MachineVerifier happy.
    MBB.setCallFrameSize(0);
  }
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
void PEI::replaceFrameIndices(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  for (auto &MBB : MF) {
    int SPAdj = TFI.alignSPAdjust(MBB.getCallFrameSize());
    if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp)
      SPAdj = -SPAdj;

    replaceFrameIndices(&MBB, MF, SPAdj);

    // We can't track the call frame size after call frame pseudos have been
    // eliminated. Set it to zero everywhere to keep MachineVerifier happy.
    MBB.setCallFrameSize(0);
  }
}
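
// Frame-index operands of debug instructions (DBG_VALUE, DBG_PHI) and of
// STATEPOINT are rewritten here in a target-independent way; all other
// instructions go through TargetRegisterInfo::eliminateFrameIndex.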
bool PEI::replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
                                      unsigned OpIdx, int SPAdj) {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  if (MI.isDebugValue()) {

    MachineOperand &Op = MI.getOperand(OpIdx);
    assert(MI.isDebugOperand(&Op) &&
           "Frame indices can only appear as a debug operand in a DBG_VALUE*"
           " machine instruction");

    unsigned FrameIdx = Op.getIndex();
    unsigned Size = MF.getFrameInfo().getObjectSize(FrameIdx);
    Register Reg;
    StackOffset Offset = TFI->getFrameIndexReference(MF, FrameIdx, Reg);
    Op.ChangeToRegister(Reg, false /*isDef*/);

    const DIExpression *DIExpr = MI.getDebugExpression();

    // If we have a direct DBG_VALUE, and its location expression isn't
    // currently complex, then adding an offset will morph it into a
    // complex location that is interpreted as being a memory address.
    // This changes a pointer-valued variable to dereference that pointer,
    // which is incorrect. Fix by adding DW_OP_stack_value.

    if (MI.isNonListDebugValue()) {
      unsigned PrependFlags = DIExpression::ApplyOffset;
      if (!MI.isIndirectDebugValue() && !DIExpr->isComplex())
        PrependFlags |= DIExpression::StackValue;

      // If we have a DBG_VALUE that is indirect and has an Implicit location
      // expression, we need to insert a deref before prepending a Memory
      // location expression. Also after doing this we change the DBG_VALUE
      // to be direct.
      if (MI.isIndirectDebugValue() && DIExpr->isImplicit()) {
        SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
        bool WithStackValue = true;
        DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
        // Make the DBG_VALUE direct.
        MI.getDebugOffset().ChangeToRegister(0, false);
      }
      DIExpr = TRI.prependOffsetExpression(DIExpr, PrependFlags, Offset);
    } else {
      // The debug operand at DebugOpIndex was a frame index at offset
      // `Offset`; now the operand has been replaced with the frame
      // register, we must add Offset with `register x, plus Offset`.
      unsigned DebugOpIndex = MI.getDebugOperandIndex(&Op);
      SmallVector<uint64_t, 3> Ops;
      TRI.getOffsetOpcodes(Offset, Ops);
      DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, DebugOpIndex);
    }
    MI.getDebugExpressionOp().setMetadata(DIExpr);
    return true;
  }

  if (MI.isDebugPHI()) {
    // Allow stack ref to continue onwards.
    return true;
  }

  // TODO: This code should be commoned with the code for
  // PATCHPOINT. There's no good reason for the difference in
  // implementation other than historical accident. The only
  // remaining difference is the unconditional use of the stack
  // pointer as the base register.
  if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
    assert((!MI.isDebugValue() || OpIdx == 0) &&
           "Frame indices can only appear as the first operand of a "
           "DBG_VALUE machine instruction");
    Register Reg;
    MachineOperand &Offset = MI.getOperand(OpIdx + 1);
    StackOffset refOffset = TFI->getFrameIndexReferencePreferSP(
        MF, MI.getOperand(OpIdx).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
    assert(!refOffset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    Offset.setImm(Offset.getImm() + refOffset.getFixed() + SPAdj);
    MI.getOperand(OpIdx).ChangeToRegister(Reg, false /*isDef*/);
    return true;
  }
  return false;
}

void PEI::replaceFrameIndicesBackward(MachineBasicBlock *BB,
                                      MachineFunction &MF, int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");

  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  RegScavenger *LocalRS = FrameIndexEliminationScavenging ? RS : nullptr;
  if (LocalRS)
    LocalRS->enterBasicBlockEnd(*BB);

  for (MachineBasicBlock::iterator I = BB->end(); I != BB->begin();) {
    MachineInstr &MI = *std::prev(I);

    if (TII.isFrameInstr(MI)) {
      SPAdj -= TII.getSPAdjust(MI);
      TFI.eliminateCallFramePseudoInstr(MF, *BB, &MI);
      continue;
    }

    // Step backwards to get the liveness state at (immediately after) MI.
    if (LocalRS)
      LocalRS->backward(I);

    bool RemovedMI = false;
    for (const auto &[Idx, Op] : enumerate(MI.operands())) {
      if (!Op.isFI())
        continue;

      if (replaceFrameIndexDebugInstr(MF, MI, Idx, SPAdj))
        continue;

      // Eliminate this FrameIndex operand.
      RemovedMI = TRI.eliminateFrameIndex(MI, SPAdj, Idx, LocalRS);
      if (RemovedMI)
        break;
    }

    if (!RemovedMI)
      --I;
  }
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                              int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
    if (TII.isFrameInstr(*I)) {
      InsideCallSequence = TII.isFrameSetup(*I);
      SPAdj += TII.getSPAdjust(*I);
      I = TFI->eliminateCallFramePseudoInstr(MF, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      if (replaceFrameIndexDebugInstr(MF, MI, i, SPAdj))
        continue;

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end())
      ++I;
  }
}