//===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee saved registers, and for emitting prolog & epilog code for the
// function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "prologepilog"

using MBBVector = SmallVector<MachineBasicBlock *, 4>;

STATISTIC(NumLeafFuncWithSpills, "Number of leaf functions with CSRs");
STATISTIC(NumFuncSeen, "Number of functions seen in PEI");

namespace {

class PEI : public MachineFunctionPass {
public:
  static char ID;

  PEI() : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  /// frame indexes with appropriate references.
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  RegScavenger *RS;

  // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
  // stack frame indexes.
  unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  unsigned MaxCSFrameIndex = 0;

  // Save and Restore blocks of the current function. Typically there is a
  // single save block, unless Windows EH funclets are involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

  // Flag to control whether to use the register scavenger to resolve
  // frame index materialization registers. Set according to
  // TRI->requiresFrameIndexScavenging() for the current function.
  bool FrameIndexVirtualScavenging;

  // Flag to control whether the scavenger should be passed even though
  // FrameIndexVirtualScavenging is used.
  bool FrameIndexEliminationScavenging;

  MachineOptimizationRemarkEmitter *ORE = nullptr;

  void calculateCallFrameInfo(MachineFunction &MF);
  void calculateSaveRestoreBlocks(MachineFunction &MF);
  void spillCalleeSavedRegs(MachineFunction &MF);

  void calculateFrameObjectOffsets(MachineFunction &MF);
  void replaceFrameIndices(MachineFunction &MF);
  void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                           int &SPAdj);
  void insertPrologEpilogCode(MachineFunction &MF);
};

} // end anonymous namespace

char PEI::ID = 0;

char &llvm::PrologEpilogCodeInserterID = PEI::ID;

INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
                    "Prologue/Epilogue Insertion & Frame Finalization", false,
                    false)

MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
  return new PEI();
}

STATISTIC(NumBytesStackSpace,
          "Number of bytes used for stack in all functions");

void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// StackObjSet - A set of stack object indexes
using StackObjSet = SmallSetVector<int, 8>;

using SavedDbgValuesMap =
    SmallDenseMap<MachineBasicBlock *, SmallVector<MachineInstr *, 4>, 4>;

/// Stash DBG_VALUEs that describe parameters and which are placed at the start
/// of the block. Later on, after the prologue code has been emitted, the
/// stashed DBG_VALUEs will be reinserted at the start of the block.
static void stashEntryDbgValues(MachineBasicBlock &MBB,
                                SavedDbgValuesMap &EntryDbgValues) {
  SmallVector<const MachineInstr *, 4> FrameIndexValues;

  for (auto &MI : MBB) {
    if (!MI.isDebugInstr())
      break;
    if (!MI.isDebugValue() || !MI.getDebugVariable()->isParameter())
      continue;
    if (any_of(MI.debug_operands(),
               [](const MachineOperand &MO) { return MO.isFI(); })) {
      // We can only emit valid locations for frame indices after the frame
      // setup, so do not stash them away.
      FrameIndexValues.push_back(&MI);
      continue;
    }
    const DILocalVariable *Var = MI.getDebugVariable();
    const DIExpression *Expr = MI.getDebugExpression();
    auto Overlaps = [Var, Expr](const MachineInstr *DV) {
      return Var == DV->getDebugVariable() &&
             Expr->fragmentsOverlap(DV->getDebugExpression());
    };
    // See if the debug value overlaps with any preceding debug value that will
    // not be stashed. If that is the case, then we can't stash this value, as
    // we would then reorder the values at reinsertion.
    if (llvm::none_of(FrameIndexValues, Overlaps))
      EntryDbgValues[&MBB].push_back(&MI);
  }

  // Remove stashed debug values from the block.
  if (EntryDbgValues.count(&MBB))
    for (auto *MI : EntryDbgValues[&MBB])
      MI->removeFromParent();
}

/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
bool PEI::runOnMachineFunction(MachineFunction &MF) {
  NumFuncSeen++;
  const Function &F = MF.getFunction();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr;
  FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();

  // Calculate the MaxCallFrameSize and AdjustsStack variables for the
  // function's frame information. Also eliminates call frame pseudo
  // instructions.
  calculateCallFrameInfo(MF);

  // Determine placement of CSR spill/restore code and prolog/epilog code:
  // place all spills in the entry block, all restores in return blocks.
  calculateSaveRestoreBlocks(MF);

  // Stash away DBG_VALUEs that should not be moved by insertion of prolog code.
  SavedDbgValuesMap EntryDbgValues;
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    stashEntryDbgValues(*SaveBlock, EntryDbgValues);

  // Handle CSR spilling and restoring, for targets that need it.
  if (MF.getTarget().usesPhysRegsForValues())
    spillCalleeSavedRegs(MF);

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameFinalized(MF, RS);

  // Calculate actual frame offsets for all abstract stack objects...
  calculateFrameObjectOffsets(MF);

  // Add prolog and epilog code to the function. This function is required
  // to align the stack frame as necessary for any stack variables or
  // called functions. Because of this, calculateCalleeSavedRegisters()
  // must be called before this function in order to set the AdjustsStack
  // and MaxCallFrameSize variables.
  if (!F.hasFnAttribute(Attribute::Naked))
    insertPrologEpilogCode(MF);

  // Reinsert stashed debug values at the start of the entry blocks.
  for (auto &I : EntryDbgValues)
    I.first->insert(I.first->begin(), I.second.begin(), I.second.end());

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameIndicesReplaced(MF, RS);

  // Replace all MO_FrameIndex operands with physical register references
  // and actual offsets.
  replaceFrameIndices(MF);

  // If register scavenging is needed, as we've enabled doing it as a
  // post-pass, scavenge the virtual registers that frame index elimination
  // inserted.
  if (TRI->requiresRegisterScavenging(MF) && FrameIndexVirtualScavenging)
    scavengeFrameVirtualRegs(MF, *RS);

  // Warn on stack size when we exceed the given limit.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();

  unsigned Threshold = UINT_MAX;
  if (MF.getFunction().hasFnAttribute("warn-stack-size")) {
    bool Failed = MF.getFunction()
                      .getFnAttribute("warn-stack-size")
                      .getValueAsString()
                      .getAsInteger(10, Threshold);
    // Verifier should have caught this.
    assert(!Failed && "Invalid warn-stack-size fn attr value");
    (void)Failed;
  }
  if (StackSize > Threshold) {
    DiagnosticInfoStackSize DiagStackSize(F, StackSize, Threshold, DS_Warning);
    F.getContext().diagnose(DiagStackSize);
  }
  ORE->emit([&]() {
    return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
                                             MF.getFunction().getSubprogram(),
                                             &MF.front())
           << ore::NV("NumStackBytes", StackSize) << " stack bytes in function";
  });

  delete RS;
  SaveBlocks.clear();
  RestoreBlocks.clear();
  MFI.setSavePoint(nullptr);
  MFI.setRestorePoint(nullptr);
  return true;
}

/// Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallFrameInfo(MachineFunction &MF) {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned MaxCallFrameSize = 0;
  bool AdjustsStack = MFI.adjustsStack();

  // Get the function call frame set-up and tear-down instruction opcode
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  for (MachineBasicBlock &BB : MF)
    for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I)
      if (TII.isFrameInstr(*I)) {
        unsigned Size = TII.getFrameSize(*I);
        if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
        AdjustsStack = true;
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
        // Some inline asm's need a stack frame, as indicated by operand 1.
        unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
        if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
          AdjustsStack = true;
      }

  assert(!MFI.isMaxCallFrameSizeComputed() ||
         (MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
          MFI.adjustsStack() == AdjustsStack));
  MFI.setAdjustsStack(AdjustsStack);
  MFI.setMaxCallFrameSize(MaxCallFrameSize);

  for (MachineBasicBlock::iterator I : FrameSDOps) {
    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    if (TFI->canSimplifyCallFramePseudos(MF))
      TFI->eliminateCallFramePseudoInstr(MF, *I->getParent(), I);
  }
}

/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Even when we do not change any CSR, we still want to insert the
  // prologue and epilogue of the function.
  // So set the save points for those.

  // Use the points found by shrink-wrapping, if any.
  if (MFI.getSavePoint()) {
    SaveBlocks.push_back(MFI.getSavePoint());
    assert(MFI.getRestorePoint() && "Both restore and save must be set");
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    // If RestoreBlock does not have any successor and is not a return block
    // then the end point is unreachable and we do not need to insert any
    // epilogue.
    if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
      RestoreBlocks.push_back(RestoreBlock);
    return;
  }

  // Save refs to entry and return blocks.
  SaveBlocks.push_back(&MF.front());
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry())
      SaveBlocks.push_back(&MBB);
    if (MBB.isReturnBlock())
      RestoreBlocks.push_back(&MBB);
  }
}

static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
  if (SavedRegs.empty())
    return;

  const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();
  BitVector CSMask(SavedRegs.size());

  for (unsigned i = 0; CSRegs[i]; ++i)
    CSMask.set(CSRegs[i]);

  std::vector<CalleeSavedInfo> CSI;
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    if (SavedRegs.test(Reg)) {
      bool SavedSuper = false;
      for (const MCPhysReg &SuperReg : RegInfo->superregs(Reg)) {
        // Some backends set all aliases for some registers as saved, such as
        // Mips's $fp, so they appear in SavedRegs but not CSRegs.
        if (SavedRegs.test(SuperReg) && CSMask.test(SuperReg)) {
          SavedSuper = true;
          break;
        }
      }

      if (!SavedSuper)
        CSI.push_back(CalleeSavedInfo(Reg));
    }
  }

  const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = F.getFrameInfo();
  if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI, MinCSFrameIndex,
                                        MaxCSFrameIndex)) {
    // If target doesn't implement this, use generic code.

    if (CSI.empty())
      return; // Early exit if no callee saved registers are modified!

    unsigned NumFixedSpillSlots;
    const TargetFrameLowering::SpillSlot *FixedSpillSlots =
        TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);

    // Now that we know which registers need to be saved and restored, allocate
    // stack slots for them.
    for (auto &CS : CSI) {
      // If the target has spilled this register to another register, we don't
      // need to allocate a stack slot.
      if (CS.isSpilledToReg())
        continue;

      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

      int FrameIdx;
      if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
        CS.setFrameIdx(FrameIdx);
        continue;
      }

      // Check to see if this physreg must be spilled to a particular stack slot
      // on this target.
      const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
      while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
             FixedSlot->Reg != Reg)
        ++FixedSlot;

      unsigned Size = RegInfo->getSpillSize(*RC);
      if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
        // Nope, just spill it anywhere convenient.
        Align Alignment = RegInfo->getSpillAlign(*RC);
        // We may not be able to satisfy the desired alignment specification of
        // the TargetRegisterClass if the stack alignment is smaller. Use the
        // min.
        Alignment = std::min(Alignment, TFI->getStackAlign());
        FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
        if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
        if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
      } else {
        // Spill it to the stack where we must.
        FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
      }

      CS.setFrameIdx(FrameIdx);
    }
  }

  MFI.setCalleeSavedInfo(CSI);
}

/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Visited will contain all the basic blocks that are in the region
  // where the callee saved registers are alive:
  // - Anything that is not Save or Restore -> LiveThrough.
  // - Save -> LiveIn.
  // - Restore -> LiveOut.
  // The live-out is not attached to the block, so no need to keep
  // Restore in this set.
  SmallPtrSet<MachineBasicBlock *, 8> Visited;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  MachineBasicBlock *Entry = &MF.front();
  MachineBasicBlock *Save = MFI.getSavePoint();

  if (!Save)
    Save = Entry;

  if (Entry != Save) {
    WorkList.push_back(Entry);
    Visited.insert(Entry);
  }
  Visited.insert(Save);

  MachineBasicBlock *Restore = MFI.getRestorePoint();
  if (Restore)
    // By construction Restore cannot be visited, otherwise it
    // means there exists a path to Restore that does not go
    // through Save.
    WorkList.push_back(Restore);

  while (!WorkList.empty()) {
    const MachineBasicBlock *CurBB = WorkList.pop_back_val();
    // By construction, the region that is after the save point is
    // dominated by the Save and post-dominated by the Restore.
    if (CurBB == Save && Save != Restore)
      continue;
    // Enqueue all the successors not already visited.
    // Those are by construction either before Save or after Restore.
    for (MachineBasicBlock *SuccBB : CurBB->successors())
      if (Visited.insert(SuccBB).second)
        WorkList.push_back(SuccBB);
  }

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (const CalleeSavedInfo &I : CSI) {
    for (MachineBasicBlock *MBB : Visited) {
      MCPhysReg Reg = I.getReg();
      // Add the callee-saved register as live-in.
      // It's killed at the spill.
      if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
        MBB->addLiveIn(Reg);
    }
    // If callee-saved register is spilled to another register rather than
    // spilling to stack, the destination register has to be marked as live for
    // each MBB between the prologue and epilogue so that it is not clobbered
    // before it is reloaded in the epilogue. The Visited set contains all
    // blocks outside of the region delimited by prologue/epilogue.
    if (I.isSpilledToReg()) {
      for (MachineBasicBlock &MBB : MF) {
        if (Visited.count(&MBB))
          continue;
        MCPhysReg DstReg = I.getDstReg();
        if (!MBB.isLiveIn(DstReg))
          MBB.addLiveIn(DstReg);
      }
    }
  }
}

/// Insert spill code for the callee-saved registers used in the function.
static void insertCSRSaves(MachineBasicBlock &SaveBlock,
                           ArrayRef<CalleeSavedInfo> CSI) {
  MachineFunction &MF = *SaveBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineBasicBlock::iterator I = SaveBlock.begin();
  if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CS : CSI) {
      // Insert the spill to the stack frame.
      unsigned Reg = CS.getReg();

      if (CS.isSpilledToReg()) {
        BuildMI(SaveBlock, I, DebugLoc(),
                TII.get(TargetOpcode::COPY), CS.getDstReg())
            .addReg(Reg, getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
                                TRI);
      }
    }
  }
}

/// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              std::vector<CalleeSavedInfo> &CSI) {
  MachineFunction &MF = *RestoreBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  // Restore all registers immediately before the return and any
  // terminators that precede it.
  MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();

  if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CI : reverse(CSI)) {
      unsigned Reg = CI.getReg();
      if (CI.isSpilledToReg()) {
        BuildMI(RestoreBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY), Reg)
            .addReg(CI.getDstReg(), getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC,
                                 TRI);
        assert(I != RestoreBlock.begin() &&
               "loadRegFromStackSlot didn't insert any code!");
        // Insert in reverse order. loadRegFromStackSlot can insert
        // multiple instructions.
        --I;
      }
    }
  }
}

void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
  // We can't list this requirement in getRequiredProperties because some
  // targets (WebAssembly) use virtual registers past this point, and the pass
  // pipeline is set up without giving the passes a chance to look at the
  // TargetMachine.
  // FIXME: Find a way to express this in getRequiredProperties.
  assert(MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  const Function &F = MF.getFunction();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(MF, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

  // Add the code to save and restore the callee saved registers.
  if (!F.hasFnAttribute(Attribute::Naked)) {
    MFI.setCalleeSavedInfoValid(true);

    std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
    if (!CSI.empty()) {
      if (!MFI.hasCalls())
        NumLeafFuncWithSpills++;

      for (MachineBasicBlock *SaveBlock : SaveBlocks)
        insertCSRSaves(*SaveBlock, CSI);

      // Update the live-in information of all the blocks up to the save point.
      updateLiveness(MF);

      for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
        insertCSRRestores(*RestoreBlock, CSI);
    }
  }
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, int64_t &Offset,
                                     Align &MaxAlign, unsigned Skew) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  Align Alignment = MFI.getObjectAlign(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Alignment);

  // Adjust to alignment boundary.
  Offset = alignTo(Offset, Alignment, Skew);

  if (StackGrowsDown) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

/// Compute which bytes of fixed and callee-save stack area are unused and keep
/// track of them in StackBytesFree.
static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
  // Avoid undefined int64_t -> int conversion below in extreme case.
  if (FixedCSEnd > std::numeric_limits<int>::max())
    return;

  StackBytesFree.resize(FixedCSEnd, true);

  SmallVector<int, 16> AllocatedFrameSlots;
  // Add fixed objects.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
    // StackSlot scavenging is only implemented for the default stack.
    if (MFI.getStackID(i) == TargetStackID::Default)
      AllocatedFrameSlots.push_back(i);
  // Add callee-save objects if there are any.
  if (MinCSFrameIndex <= MaxCSFrameIndex) {
    for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
      if (MFI.getStackID(i) == TargetStackID::Default)
        AllocatedFrameSlots.push_back(i);
  }

  for (int i : AllocatedFrameSlots) {
    // These are converted from int64_t, but they should always fit in int
    // because of the FixedCSEnd check above.
    int ObjOffset = MFI.getObjectOffset(i);
    int ObjSize = MFI.getObjectSize(i);
    int ObjStart, ObjEnd;
    if (StackGrowsDown) {
      // ObjOffset is negative when StackGrowsDown is true.
      ObjStart = -ObjOffset - ObjSize;
      ObjEnd = -ObjOffset;
    } else {
      ObjStart = ObjOffset;
      ObjEnd = ObjOffset + ObjSize;
    }
    // Ignore fixed holes that are in the previous stack frame.
    if (ObjEnd > 0)
      StackBytesFree.reset(ObjStart, ObjEnd);
  }
}

/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range. Return true if the allocation was successful.
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, Align MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none()
    StackBytesFree.clear();
    return false;
  }

  Align ObjAlign = MFI.getObjectAlign(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << ObjStart << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << FreeStart << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                  SmallSet<int, 16> &ProtectedObjs,
                                  MachineFrameInfo &MFI, bool StackGrowsDown,
                                  int64_t &Offset, Align &MaxAlign,
                                  unsigned Skew) {

  for (int i : UnassignedObjs) {
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  bool StackGrowsDown =
      TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0
         && "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

  // Skew to be applied to alignment.
  unsigned Skew = TFI.getStackAlignmentSkew(MF);

#ifdef EXPENSIVE_CHECKS
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
    if (!MFI.isDeadObjectIndex(i) &&
        MFI.getStackID(i) == TargetStackID::Default)
      assert(MFI.getObjectAlign(i) <= MFI.getMaxAlign() &&
             "MaxAlignment is invalid");
#endif

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // Adjust 'Offset' to point to the end of last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    if (MFI.getStackID(i) !=
        TargetStackID::Default) // Only allocate objects on the default stack.
      continue;

    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at lower address of
      // the object -- which is given by offset. For down growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the start pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (StackGrowsDown && MaxCSFrameIndex >= MinCSFrameIndex) {
    for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
      if (MFI.getStackID(i) !=
          TargetStackID::Default) // Only allocate objects on the default stack.
        continue;

      // If the stack grows down, we need to add the size to find the lowest
      // address of the object.
      Offset += MFI.getObjectSize(i);

      // Adjust to alignment boundary
      Offset = alignTo(Offset, MFI.getObjectAlign(i), Skew);

      LLVM_DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << -Offset << "]\n");
      MFI.setObjectOffset(i, -Offset); // Set the computed offset
    }
  } else if (MaxCSFrameIndex >= MinCSFrameIndex) {
    // Be careful about underflow in comparisons against MinCSFrameIndex.
    for (unsigned i = MaxCSFrameIndex; i != MinCSFrameIndex - 1; --i) {
      if (MFI.getStackID(i) !=
          TargetStackID::Default) // Only allocate objects on the default stack.
        continue;

      if (MFI.isDeadObjectIndex(i))
        continue;

      // Adjust to alignment boundary
      Offset = alignTo(Offset, MFI.getObjectAlign(i), Skew);

      LLVM_DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << Offset << "]\n");
      MFI.setObjectOffset(i, Offset);
      Offset += MFI.getObjectSize(i);
    }
  }

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;
  Align MaxAlign = MFI.getMaxAlign();

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots =
      TFI.allocateScavengingFrameIndexesNearIncomingSP(MF);
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    Align Alignment = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Alignment, Skew);

    LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
                        << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Alignment, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
  if (const WinEHFuncInfo *FuncInfo = MF.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.hasStackProtectorIndex()) {
    int StackProtectorFI = MFI.getStackProtectorIndex();
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    // If we need a stack protector, we need to make sure that
    // LocalStackSlotPass didn't already allocate a slot for it.
    // If we are told to use the LocalStackAllocationBlock, the stack protector
    // is expected to be already pre-allocated.
    if (MFI.getStackID(StackProtectorFI) != TargetStackID::Default) {
      // If the stack protector isn't on the default stack then it's up to the
      // target to set the stack offset.
      assert(MFI.getObjectOffset(StackProtectorFI) != 0 &&
             "Offset of stack protector on non-default stack expected to be "
             "already set.");
      assert(!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex()) &&
             "Stack protector on non-default stack expected to not be "
             "pre-allocated by LocalStackSlotPass.");
    } else if (!MFI.getUseLocalStackAllocationBlock()) {
      AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset, MaxAlign,
                        Skew);
    } else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) {
      llvm_unreachable(
          "Stack protector not pre-allocated by LocalStackSlotPass.");
    }

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (StackProtectorFI == (int)i || EHRegNodeFrameIndex == (int)i)
        continue;
      if (MFI.getStackID(i) !=
          TargetStackID::Default) // Only allocate objects on the default stack.
        continue;

      switch (MFI.getObjectSSPLayout(i)) {
      case MachineFrameInfo::SSPLK_None:
        continue;
      case MachineFrameInfo::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    // We expect **all** the protected stack objects to be pre-allocated by
    // LocalStackSlotPass. If it turns out that PEI still has to allocate some
    // of them, we may end up messing up the expected order of the objects.
    if (MFI.getUseLocalStackAllocationBlock() &&
        !(LargeArrayObjs.empty() && SmallArrayObjs.empty() &&
          AddrOfObjs.empty()))
      llvm_unreachable("Found protected stack objects not pre-allocated by "
                       "LocalStackSlotPass.");

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i || EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;
    if (MFI.getStackID(i) !=
        TargetStackID::Default) // Only allocate objects on the default stack.
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign, Skew);

  // Give the targets a chance to order the objects the way they like it.
  if (MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MF.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(MF, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so we
  // can use the holes when allocating later stack objects. Only do this if
  // stack protector isn't being used and the target requests it and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(MF))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
                          FixedCSEnd, StackBytesFree);

  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(MF))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    Align StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->hasStackRealignment(MF) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlign();
    else
      StackAlign = TFI.getTransientStackAlign();

    // If the frame pointer is eliminated, all frame offsets will be relative to
    // SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    int64_t OffsetBeforeAlignment = Offset;
    Offset = alignTo(Offset, StackAlign, Skew);

    // If we have increased the offset to fulfill the alignment constraints,
    // then the scavenging spill slots may become harder to reach from the
    // stack pointer, float them so they stay close.
    if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS &&
        !EarlyScavengingSlots) {
      SmallVector<int, 2> SFIs;
      RS->getScavengingFrameIndices(SFIs);
      LLVM_DEBUG(if (!SFIs.empty()) llvm::dbgs()
                     << "Adjusting emergency spill slots!\n";);
      int64_t Delta = Offset - OffsetBeforeAlignment;
      for (int SFI : SFIs) {
        LLVM_DEBUG(llvm::dbgs()
                       << "Adjusting offset of emergency spill slot #" << SFI
                       << " from " << MFI.getObjectOffset(SFI););
        MFI.setObjectOffset(SFI, MFI.getObjectOffset(SFI) - Delta);
        LLVM_DEBUG(llvm::dbgs() << " to " << MFI.getObjectOffset(SFI) << "\n";);
      }
    }
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
void PEI::insertPrologEpilogCode(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(MF, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(MF, *RestoreBlock);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(MF, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it. This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (MF.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(MF, *SaveBlock);
    // Record that there are split-stack functions, so we will emit a
    // special section to tell the linker.
    MF.getMMI().setHasSplitStack(true);
  } else
    MF.getMMI().setHasNosplitStack(true);

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (MF.getFunction().getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(MF, *SaveBlock);
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
void PEI::replaceFrameIndices(MachineFunction &MF) {
  const auto &ST = MF.getSubtarget();
  const TargetFrameLowering &TFI = *ST.getFrameLowering();
  if (!TFI.needsFrameIndexResolution(MF))
    return;

  const TargetRegisterInfo *TRI = ST.getRegisterInfo();

  // Allow the target to determine this after knowing the frame size.
  FrameIndexEliminationScavenging = (RS && !FrameIndexVirtualScavenging) ||
      TRI->requiresFrameIndexReplacementScavenging(MF);

  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(MF.getNumBlockIDs());
  df_iterator_default_set<MachineBasicBlock *> Reachable;

  // Iterate over the reachable blocks in DFS order.
  for (auto DFI = df_ext_begin(&MF, Reachable), DFE = df_ext_end(&MF, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, MF, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

  // Handle the unreachable blocks.
  for (auto &BB : MF) {
    if (Reachable.count(&BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(&BB, MF, SPAdj);
  }
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                              int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  if (RS && FrameIndexEliminationScavenging)
    RS->enterBasicBlock(*BB);

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
    if (TII.isFrameInstr(*I)) {
      InsideCallSequence = TII.isFrameSetup(*I);
      SPAdj += TII.getSPAdjust(*I);
      I = TFI->eliminateCallFramePseudoInstr(MF, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      // Frame indices in debug values are encoded in a target independent
      // way with simply the frame index and offset rather than any
      // target-specific addressing mode.
      if (MI.isDebugValue()) {
        MachineOperand &Op = MI.getOperand(i);
        assert(
            MI.isDebugOperand(&Op) &&
            "Frame indices can only appear as a debug operand in a DBG_VALUE*"
            " machine instruction");

        Register Reg;
        unsigned FrameIdx = Op.getIndex();
        unsigned Size = MF.getFrameInfo().getObjectSize(FrameIdx);

        StackOffset Offset =
            TFI->getFrameIndexReference(MF, FrameIdx, Reg);
        Op.ChangeToRegister(Reg, false /*isDef*/);

        const DIExpression *DIExpr = MI.getDebugExpression();

        // If we have a direct DBG_VALUE, and its location expression isn't
        // currently complex, then adding an offset will morph it into a
        // complex location that is interpreted as being a memory address.
        // This changes a pointer-valued variable to dereference that pointer,
        // which is incorrect. Fix by adding DW_OP_stack_value.

        if (MI.isNonListDebugValue()) {
          unsigned PrependFlags = DIExpression::ApplyOffset;
          if (!MI.isIndirectDebugValue() && !DIExpr->isComplex())
            PrependFlags |= DIExpression::StackValue;

          // If we have a DBG_VALUE that is indirect and has an Implicit
          // location expression, we need to insert a deref before prepending
          // a Memory location expression. Also after doing this we change the
          // DBG_VALUE to be direct.
          if (MI.isIndirectDebugValue() && DIExpr->isImplicit()) {
            SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
            bool WithStackValue = true;
            DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
            // Make the DBG_VALUE direct.
            MI.getDebugOffset().ChangeToRegister(0, false);
          }
          DIExpr = TRI.prependOffsetExpression(DIExpr, PrependFlags, Offset);
        } else {
          // The debug operand at DebugOpIndex was a frame index at offset
          // `Offset`; now the operand has been replaced with the frame
          // register, we must add Offset with `register x, plus Offset`.
          unsigned DebugOpIndex = MI.getDebugOperandIndex(&Op);
          SmallVector<uint64_t, 3> Ops;
          TRI.getOffsetOpcodes(Offset, Ops);
          DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, DebugOpIndex);
        }
        MI.getDebugExpressionOp().setMetadata(DIExpr);
        continue;
      } else if (MI.isDebugPHI()) {
        // Allow stack ref to continue onwards.
        continue;
      }

      // TODO: This code should be commoned with the code for
      // PATCHPOINT. There's no good reason for the difference in
      // implementation other than historical accident. The only
      // remaining difference is the unconditional use of the stack
      // pointer as the base register.
      if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
        assert((!MI.isDebugValue() || i == 0) &&
               "Frame indices can only appear as the first operand of a "
               "DBG_VALUE machine instruction");
        Register Reg;
        MachineOperand &Offset = MI.getOperand(i + 1);
        StackOffset refOffset = TFI->getFrameIndexReferencePreferSP(
            MF, MI.getOperand(i).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
        assert(!refOffset.getScalable() &&
               "Frame offsets with a scalable component are not supported");
        Offset.setImm(Offset.getImm() + refOffset.getFixed() + SPAdj);
        MI.getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
        continue;
      }

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i,
                              FrameIndexEliminationScavenging ? RS : nullptr);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end()) ++I;

    // Update register states.
    if (RS && FrameIndexEliminationScavenging && DidFinishLoop)