//===- HexagonFrameLowering.cpp - Define frame lowering -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//
//===----------------------------------------------------------------------===//

#include "HexagonFrameLowering.h"
#include "HexagonBlockRanges.h"
#include "HexagonInstrInfo.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
#include "MCTargetDesc/HexagonBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <map>
#include <utility>
#include <vector>

#define DEBUG_TYPE "hexagon-pei"

// Hexagon stack frame layout as defined by the ABI:
//
//                                                       Incoming arguments
//                                                       passed via stack
//                                                                      |
//                                                                      |
//        SP during function's                 FP during function's     |
//    +-- runtime (top of stack)               runtime (bottom) --+     |
//    |                                                           |     |
//  --++---------------------+------------------+-----------------++-+-------
//    |  parameter area for  |  variable-size   |   fixed-size    |LR|  arg
//    |   called functions   |  local objects   |  local objects  |FP|
//  --+----------------------+------------------+-----------------+--+-------
//     <-    size known    -> <- size unknown -> <- size known  ->
//
//  Low address                                                 High address
//
//      <--- stack growth
//
//
// - In any circumstances, the outgoing function arguments are always accessi-
//   ble using the SP, and the incoming arguments are accessible using the FP.
// - If the local objects are not aligned, they can always be accessed using
//   the FP.
// - If there are no variable-sized objects, the local objects can always be
//   accessed using the SP, regardless of whether they are aligned or not.
//   (The alignment padding will be at the bottom of the stack (highest
//   address), and so the offset with respect to the SP will be known at
//   compile-time.)
//
// The only complication occurs if there are both local aligned objects and
// dynamically allocated (variable-sized) objects. The alignment pad will be
// placed between the FP and the local objects, thus preventing the use of the
// FP to access the local objects. At the same time, the variable-sized objects
// will be between the SP and the local objects, thus introducing an unknown
// distance from the SP to the locals.
//
// To avoid this problem, a new register is created that holds the aligned
// address of the bottom of the stack, referred to in the sources as AP
// (aligned pointer). The AP will be equal to "FP-p", where "p" is the smallest
// pad that aligns AP to the required boundary (a maximum of the alignments of
// all stack objects, fixed- and variable-sized). All local objects[1] will
// then use AP as the base pointer.
// [1] The exception is with "fixed" stack objects. "Fixed" stack objects get
// their name from being allocated at fixed locations on the stack, relative
// to the FP. In the presence of dynamic allocation and local alignment, such
// objects can only be accessed through the FP.
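//
// For example (illustrative numbers only): if FP happens to be 0x1008 and
// the maximum required alignment is 16, then p = 8 and AP = FP-8 = 0x1000.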
//
// Illustration of the AP:
//                                                                FP --+
//                                                                     |
// ---------------+---------------------+-----+-----------------------++-+--
//   Rest of the  | Local stack objects | Pad |  Fixed stack objects  |LR|
//   stack frame  | (aligned)           |     |  (CSR, spills, etc.)  |FP|
// ---------------+---------------------+-----+-----------------+-----+--+--
//                                      |<-- Multiple of the -->|
//                                           stack alignment    +-- AP
//
// The AP is set up at the beginning of the function. Since it is not a dedi-
// cated (reserved) register, it needs to be kept live throughout the function
// to be available as the base register for local object accesses.
// Normally, the address of a stack object is obtained by the pseudo-
// instruction PS_fi. To access local objects with the AP register present,
// a different pseudo-instruction needs to be used: PS_fia. The PS_fia takes
// one extra argument compared to PS_fi: the first input register is the AP
// register. This keeps the register live between its definition and its uses.
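//
// A rough illustration of the two forms (operands simplified for exposition;
// %stack.0 stands for some frame object):
//   %addr = PS_fi %stack.0, 0          // address of the object, no AP
//   %addr = PS_fia %ap, %stack.0, 0    // same address, based on the AP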
//
// The AP register is originally set up using pseudo-instruction PS_aligna:
//   AP = PS_aligna A
// where
//   A  - required stack alignment
// The alignment value must be the maximum of all alignments required by
// any stack object.
//
// The dynamic allocation uses a pseudo-instruction PS_alloca:
//   Rd = PS_alloca Rs, A
// where
//   Rd - address of the allocated space
//   Rs - minimum size (the actual allocated size can be larger to accommodate
//        alignment)
//   A  - required alignment
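//
// As a sketch of what the expansion accomplishes (simplified; the actual
// code is emitted by expandAlloca below and also accounts for the outgoing-
// argument area), "Rd = PS_alloca Rs, A" roughly amounts to:
//   r29 = sub(r29, Rs)     // carve out at least Rs bytes from the stack
//   r29 = and(r29, #-A)    // re-align the stack pointer
//   Rd  = r29              // Rd now addresses the allocated space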

using namespace llvm;

static cl::opt<bool> DisableDeallocRet("disable-hexagon-dealloc-ret",
    cl::Hidden, cl::desc("Disable Dealloc Return for Hexagon target"));

static cl::opt<unsigned> NumberScavengerSlots("number-scavenger-slots",
    cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2),
    cl::ZeroOrMore);

static cl::opt<int> SpillFuncThreshold("spill-func-threshold",
    cl::Hidden, cl::desc("Specify O2(not Os) spill func threshold"),
    cl::init(6), cl::ZeroOrMore);

static cl::opt<int> SpillFuncThresholdOs("spill-func-threshold-Os",
    cl::Hidden, cl::desc("Specify Os spill func threshold"),
    cl::init(1), cl::ZeroOrMore);

static cl::opt<bool> EnableStackOVFSanitizer("enable-stackovf-sanitizer",
    cl::Hidden, cl::desc("Enable runtime checks for stack overflow."),
    cl::init(false), cl::ZeroOrMore);

static cl::opt<bool> EnableShrinkWrapping("hexagon-shrink-frame",
    cl::init(true), cl::Hidden, cl::ZeroOrMore,
    cl::desc("Enable stack frame shrink wrapping"));

static cl::opt<unsigned> ShrinkLimit("shrink-frame-limit",
    cl::init(std::numeric_limits<unsigned>::max()), cl::Hidden, cl::ZeroOrMore,
    cl::desc("Max count of stack frame shrink-wraps"));

static cl::opt<bool> EnableSaveRestoreLong("enable-save-restore-long",
    cl::Hidden, cl::desc("Enable long calls for save-restore stubs."),
    cl::init(false), cl::ZeroOrMore);

static cl::opt<bool> EliminateFramePointer("hexagon-fp-elim", cl::init(true),
    cl::Hidden, cl::desc("Refrain from using FP whenever possible"));

static cl::opt<bool> OptimizeSpillSlots("hexagon-opt-spill", cl::Hidden,
    cl::init(true), cl::desc("Optimize spill slots"));

#ifndef NDEBUG
static cl::opt<unsigned> SpillOptMax("spill-opt-max", cl::Hidden,
    cl::init(std::numeric_limits<unsigned>::max()));
static unsigned SpillOptCount = 0;
#endif

namespace llvm {

void initializeHexagonCallFrameInformationPass(PassRegistry&);
FunctionPass *createHexagonCallFrameInformation();

} // end namespace llvm

namespace {

  class HexagonCallFrameInformation : public MachineFunctionPass {
  public:
    static char ID;

    HexagonCallFrameInformation() : MachineFunctionPass(ID) {
      PassRegistry &PR = *PassRegistry::getPassRegistry();
      initializeHexagonCallFrameInformationPass(PR);
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }
  };

  char HexagonCallFrameInformation::ID = 0;

} // end anonymous namespace

bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) {
  auto &HFI = *MF.getSubtarget<HexagonSubtarget>().getFrameLowering();
  bool NeedCFI = MF.getMMI().hasDebugInfo() ||
                 MF.getFunction().needsUnwindTableEntry();

  if (!NeedCFI)
    return false;
  HFI.insertCFIInstructions(MF);
  return true;
}

INITIALIZE_PASS(HexagonCallFrameInformation, "hexagon-cfi",
                "Hexagon call frame information", false, false)

FunctionPass *llvm::createHexagonCallFrameInformation() {
  return new HexagonCallFrameInformation();
}

/// Map a register pair Reg to the subregister that has the greater "number",
/// i.e. D3 (aka R7:6) will be mapped to R7, etc.
static unsigned getMax32BitSubRegister(unsigned Reg,
                                       const TargetRegisterInfo &TRI,
                                       bool hireg = true) {
  if (Reg < Hexagon::D0 || Reg > Hexagon::D15)
    return Reg;

  unsigned RegNo = 0;
  for (MCSubRegIterator SubRegs(Reg, &TRI); SubRegs.isValid(); ++SubRegs) {
    if (hireg) {
      if (*SubRegs > RegNo)
        RegNo = *SubRegs;
    } else {
      if (!RegNo || *SubRegs < RegNo)
        RegNo = *SubRegs;
    }
  }
  return RegNo;
}

/// Returns the callee saved register with the largest id in the vector.
static unsigned getMaxCalleeSavedReg(const std::vector<CalleeSavedInfo> &CSI,
                                     const TargetRegisterInfo &TRI) {
  static_assert(Hexagon::R1 > 0,
                "Assume physical registers are encoded as positive integers");
  if (CSI.empty())
    return 0;

  unsigned Max = getMax32BitSubRegister(CSI[0].getReg(), TRI);
  for (unsigned I = 1, E = CSI.size(); I < E; ++I) {
    unsigned Reg = getMax32BitSubRegister(CSI[I].getReg(), TRI);
    if (Reg > Max)
      Max = Reg;
  }
  return Max;
}

/// Checks if the basic block contains any instruction that needs a stack
/// frame to be already in place.
static bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR,
                            const HexagonRegisterInfo &HRI) {
  for (auto &I : MBB) {
    const MachineInstr *MI = &I;
    if (MI->isCall())
      return true;
    unsigned Opc = MI->getOpcode();
    switch (Opc) {
      case Hexagon::PS_alloca:
      case Hexagon::PS_aligna:
        return true;
      default:
        break;
    }
    // Check individual operands.
    for (const MachineOperand &MO : MI->operands()) {
      // While the presence of a frame index does not prove that a stack
      // frame will be required, all frame indexes should be within alloc-
      // frame/deallocframe. Otherwise, the code that translates a frame
      // index into an offset would have to be aware of the placement of
      // the frame creation/destruction instructions.
      if (MO.isFI())
        return true;
      if (MO.isReg()) {
        unsigned R = MO.getReg();
        // Virtual registers will need scavenging, which then may require
        // a stack slot.
        if (TargetRegisterInfo::isVirtualRegister(R))
          return true;
        for (MCSubRegIterator S(R, &HRI, true); S.isValid(); ++S)
          if (CSR[*S])
            return true;
        continue;
      }
      if (MO.isRegMask()) {
        // A regmask would normally have all callee-saved registers marked
        // as preserved, so this check would not be needed, but in case of
        // ever having other regmasks (for other calling conventions),
        // make sure they would be processed correctly.
        const uint32_t *BM = MO.getRegMask();
        for (int x = CSR.find_first(); x >= 0; x = CSR.find_next(x)) {
          unsigned R = x;
          // If this regmask does not preserve a CSR, a frame will be needed.
          if (!(BM[R/32] & (1u << (R%32))))
            return true;
        }
      }
    }
  }
  return false;
}

/// Returns true if MBB has a machine instruction that indicates a tail call
/// in the block.
static bool hasTailCall(const MachineBasicBlock &MBB) {
  MachineBasicBlock::const_iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;
  unsigned RetOpc = I->getOpcode();
  return RetOpc == Hexagon::PS_tailcall_i || RetOpc == Hexagon::PS_tailcall_r;
}

/// Returns true if MBB contains an instruction that returns.
static bool hasReturn(const MachineBasicBlock &MBB) {
  for (auto I = MBB.getFirstTerminator(), E = MBB.end(); I != E; ++I)
    if (I->isReturn())
      return true;
  return false;
}

/// Returns the "return" instruction from this block, or nullptr if there
/// isn't any.
static MachineInstr *getReturn(MachineBasicBlock &MBB) {
  for (auto &I : MBB)
    if (I.isReturn())
      return &I;
  return nullptr;
}

static bool isRestoreCall(unsigned Opc) {
  switch (Opc) {
    case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
    case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
    case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT:
    case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC:
    case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT:
    case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC:
    case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4:
    case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC:
      return true;
  }
  return false;
}

static inline bool isOptNone(const MachineFunction &MF) {
  return MF.getFunction().hasOptNone() ||
         MF.getTarget().getOptLevel() == CodeGenOpt::None;
}

static inline bool isOptSize(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return F.hasOptSize() && !F.hasMinSize();
}

static inline bool isMinSize(const MachineFunction &MF) {
  return MF.getFunction().hasMinSize();
}

/// Implements shrink-wrapping of the stack frame. By default, the stack
/// frame is created in the function entry block, and is cleaned up in every
/// block that returns. This function finds alternate blocks: one for the
/// frame setup (prolog) and one for the cleanup (epilog).
void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
      MachineBasicBlock *&PrologB, MachineBasicBlock *&EpilogB) const {
  static unsigned ShrinkCounter = 0;

  if (ShrinkLimit.getPosition()) {
    if (ShrinkCounter >= ShrinkLimit)
      return;
    ShrinkCounter++;
  }

  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  MachineDominatorTree MDT;
  MDT.runOnMachineFunction(MF);
  MachinePostDominatorTree MPT;
  MPT.runOnMachineFunction(MF);

  using UnsignedMap = DenseMap<unsigned, unsigned>;
  using RPOTType = ReversePostOrderTraversal<const MachineFunction *>;

  UnsignedMap RPO;
  RPOTType RPOT(&MF);
  unsigned RPON = 0;
  for (RPOTType::rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
    RPO[(*I)->getNumber()] = RPON++;

  // Don't process functions that have loops, at least for now. Placement
  // of prolog and epilog must take loop structure into account. For
  // simplicity don't do it right now.
  for (auto &I : MF) {
    unsigned BN = RPO[I.getNumber()];
    for (auto SI = I.succ_begin(), SE = I.succ_end(); SI != SE; ++SI) {
      // If a back-edge is found, return.
      if (RPO[(*SI)->getNumber()] <= BN)
        return;
    }
  }

  // Collect the set of blocks that need a stack frame to execute. Scan
  // each block for uses/defs of callee-saved registers, calls, etc.
  SmallVector<MachineBasicBlock*,16> SFBlocks;
  BitVector CSR(Hexagon::NUM_TARGET_REGS);
  for (const MCPhysReg *P = HRI.getCalleeSavedRegs(&MF); *P; ++P)
    for (MCSubRegIterator S(*P, &HRI, true); S.isValid(); ++S)
      CSR[*S] = true;

  for (auto &I : MF)
    if (needsStackFrame(I, CSR, HRI))
      SFBlocks.push_back(&I);

  LLVM_DEBUG({
    dbgs() << "Blocks needing SF: {";
    for (auto &B : SFBlocks)
      dbgs() << " " << printMBBReference(*B);
    dbgs() << " }\n";
  });
  // No frame needed?
  if (SFBlocks.empty())
    return;

  // Pick a common dominator and a common post-dominator.
  MachineBasicBlock *DomB = SFBlocks[0];
  for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
    DomB = MDT.findNearestCommonDominator(DomB, SFBlocks[i]);
    if (!DomB)
      break;
  }
  MachineBasicBlock *PDomB = SFBlocks[0];
  for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
    PDomB = MPT.findNearestCommonDominator(PDomB, SFBlocks[i]);
    if (!PDomB)
      break;
  }
  LLVM_DEBUG({
    dbgs() << "Computed dom block: ";
    if (DomB)
      dbgs() << printMBBReference(*DomB);
    else
      dbgs() << "<null>";
    dbgs() << ", computed pdom block: ";
    if (PDomB)
      dbgs() << printMBBReference(*PDomB);
    else
      dbgs() << "<null>";
    dbgs() << "\n";
  });
  if (!DomB || !PDomB)
    return;

  // Make sure that DomB dominates PDomB and PDomB post-dominates DomB.
  if (!MDT.dominates(DomB, PDomB)) {
    LLVM_DEBUG(dbgs() << "Dom block does not dominate pdom block\n");
    return;
  }
  if (!MPT.dominates(PDomB, DomB)) {
    LLVM_DEBUG(dbgs() << "PDom block does not post-dominate dom block\n");
    return;
  }

  // Finally, everything seems right.
  PrologB = DomB;
  EpilogB = PDomB;
}

/// Perform most of the PEI work here:
/// - saving/restoring of the callee-saved registers,
/// - stack frame creation and destruction.
/// Normally, this work is distributed among various functions, but doing it
/// in one place allows shrink-wrapping of the stack frame.
void HexagonFrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MachineBasicBlock *PrologB = &MF.front(), *EpilogB = nullptr;
  if (EnableShrinkWrapping)
    findShrunkPrologEpilog(MF, PrologB, EpilogB);

  bool PrologueStubs = false;
  insertCSRSpillsInBlock(*PrologB, CSI, HRI, PrologueStubs);
  insertPrologueInBlock(*PrologB, PrologueStubs);
  updateEntryPaths(MF, *PrologB);

  if (EpilogB) {
    insertCSRRestoresInBlock(*EpilogB, CSI, HRI);
    insertEpilogueInBlock(*EpilogB);
  } else {
    for (auto &B : MF)
      if (B.isReturnBlock())
        insertCSRRestoresInBlock(B, CSI, HRI);

    for (auto &B : MF)
      if (B.isReturnBlock())
        insertEpilogueInBlock(B);

    for (auto &B : MF) {
      if (B.empty())
        continue;
      MachineInstr *RetI = getReturn(B);
      if (!RetI || isRestoreCall(RetI->getOpcode()))
        continue;
      for (auto &R : CSI)
        RetI->addOperand(MachineOperand::CreateReg(R.getReg(), false, true));
    }
  }

  if (EpilogB) {
    // If there is an epilog block, it may not have a return instruction.
    // In such case, we need to add the callee-saved registers as live-ins
    // in all blocks on all paths from the epilog to any return block.
    unsigned MaxBN = MF.getNumBlockIDs();
    BitVector DoneT(MaxBN+1), DoneF(MaxBN+1), Path(MaxBN+1);
    updateExitPaths(*EpilogB, *EpilogB, DoneT, DoneF, Path);
  }
}

/// Returns true if the target can safely skip saving callee-saved registers
/// for noreturn nounwind functions.
bool HexagonFrameLowering::enableCalleeSaveSkip(
    const MachineFunction &MF) const {
  const auto &F = MF.getFunction();
  assert(F.hasFnAttribute(Attribute::NoReturn) &&
         F.hasFnAttribute(Attribute::NoUnwind) &&
         !F.hasFnAttribute(Attribute::UWTable));
  (void)F;

  // No need to save callee saved registers if the function does not return.
  return MF.getSubtarget<HexagonSubtarget>().noreturnStackElim();
}

// Helper function used to determine when to eliminate the stack frame for
// functions marked as noreturn and when the noreturn-stack-elim options are
// specified. When both these conditions are true, then a FP may not be needed
// if the function makes a call. It is very similar to enableCalleeSaveSkip,
// but it is used to check whether the allocframe can be eliminated as well.
static bool enableAllocFrameElim(const MachineFunction &MF) {
  const auto &F = MF.getFunction();
  const auto &MFI = MF.getFrameInfo();
  const auto &HST = MF.getSubtarget<HexagonSubtarget>();
  assert(!MFI.hasVarSizedObjects() &&
         !HST.getRegisterInfo()->needsStackRealignment(MF));
  return F.hasFnAttribute(Attribute::NoReturn) &&
         F.hasFnAttribute(Attribute::NoUnwind) &&
         !F.hasFnAttribute(Attribute::UWTable) && HST.noreturnStackElim() &&
         MFI.getStackSize() == 0;
}

void HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &MBB,
      bool PrologueStubs) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();
  auto &HRI = *HST.getRegisterInfo();

  unsigned MaxAlign = std::max(MFI.getMaxAlignment(), getStackAlignment());

  // Calculate the total stack frame size.
  // Get the number of bytes to allocate from the FrameInfo.
  unsigned FrameSize = MFI.getStackSize();
  // Round up the max call frame size to the max alignment on the stack.
  unsigned MaxCFA = alignTo(MFI.getMaxCallFrameSize(), MaxAlign);
  MFI.setMaxCallFrameSize(MaxCFA);

  FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);
  MFI.setStackSize(FrameSize);

  bool AlignStack = (MaxAlign > getStackAlignment());

  // Get the number of bytes to allocate from the FrameInfo.
  unsigned NumBytes = MFI.getStackSize();
  unsigned SP = HRI.getStackRegister();
  unsigned MaxCF = MFI.getMaxCallFrameSize();
  MachineBasicBlock::iterator InsertPt = MBB.begin();

  SmallVector<MachineInstr *, 4> AdjustRegs;
  for (auto &MBB : MF)
    for (auto &MI : MBB)
      if (MI.getOpcode() == Hexagon::PS_alloca)
        AdjustRegs.push_back(&MI);

  for (auto MI : AdjustRegs) {
    assert((MI->getOpcode() == Hexagon::PS_alloca) && "Expected alloca");
    expandAlloca(MI, HII, SP, MaxCF);
    MI->eraseFromParent();
  }

  DebugLoc dl = MBB.findDebugLoc(InsertPt);

  if (hasFP(MF)) {
    insertAllocframe(MBB, InsertPt, NumBytes);
    if (AlignStack) {
      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_andir), SP)
          .addReg(SP)
          .addImm(-int64_t(MaxAlign));
    }
    // If the stack-checking is enabled, and we spilled the callee-saved
    // registers inline (i.e. did not use a spill function), then call
    // the stack checker directly.
    if (EnableStackOVFSanitizer && !PrologueStubs)
      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::PS_call_stk))
          .addExternalSymbol("__runtime_stack_check");
  } else if (NumBytes > 0) {
    assert(alignTo(NumBytes, 8) == NumBytes);
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
        .addReg(SP)
        .addImm(-int(NumBytes));
  }
}

void HexagonFrameLowering::insertEpilogueInBlock(MachineBasicBlock &MBB) const {
  MachineFunction &MF = *MBB.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();
  auto &HRI = *HST.getRegisterInfo();
  unsigned SP = HRI.getStackRegister();

  MachineBasicBlock::iterator InsertPt = MBB.getFirstTerminator();
  DebugLoc dl = MBB.findDebugLoc(InsertPt);

  if (!hasFP(MF)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    if (unsigned NumBytes = MFI.getStackSize()) {
      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
          .addReg(SP)
          .addImm(NumBytes);
    }
    return;
  }

  MachineInstr *RetI = getReturn(MBB);
  unsigned RetOpc = RetI ? RetI->getOpcode() : 0;

  // Handle EH_RETURN.
  if (RetOpc == Hexagon::EH_RETURN_JMPR) {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
        .addDef(Hexagon::D15)
        .addReg(Hexagon::R30);
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_add), SP)
        .addReg(SP)
        .addReg(Hexagon::R28);
    return;
  }

  // Check for RESTORE_DEALLOC_RET* tail call. Don't emit an extra dealloc-
  // frame instruction if we encounter it.
  if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4 ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC) {
    MachineBasicBlock::iterator It = RetI;
    ++It;
    // Delete all instructions after the RESTORE (except labels).
    while (It != MBB.end()) {
      if (!It->isLabel())
        It = MBB.erase(It);
      else
        ++It;
    }
    return;
  }

  // It is possible that the restoring code is a call to a library function.
  // All of the restore* functions include "deallocframe", so we need to make
  // sure that we don't add an extra one.
  bool NeedsDeallocframe = true;
  if (!MBB.empty() && InsertPt != MBB.begin()) {
    MachineBasicBlock::iterator PrevIt = std::prev(InsertPt);
    unsigned COpc = PrevIt->getOpcode();
    if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 ||
        COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC ||
        COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT ||
        COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC ||
        COpc == Hexagon::PS_call_nr || COpc == Hexagon::PS_callr_nr)
      NeedsDeallocframe = false;
  }

  if (!NeedsDeallocframe)
    return;
  // If the returning instruction is PS_jmpret, replace it with dealloc_return,
  // otherwise just add deallocframe. The function could be returning via a
  // tail call.
  if (RetOpc != Hexagon::PS_jmpret || DisableDeallocRet) {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
        .addDef(Hexagon::D15)
        .addReg(Hexagon::R30);
    return;
  }
  unsigned NewOpc = Hexagon::L4_return;
  MachineInstr *NewI = BuildMI(MBB, RetI, dl, HII.get(NewOpc))
                           .addDef(Hexagon::D15)
                           .addReg(Hexagon::R30);
  // Transfer the function live-out registers.
  NewI->copyImplicitOps(MF, *RetI);
  MBB.erase(RetI);
}

void HexagonFrameLowering::insertAllocframe(MachineBasicBlock &MBB,
      MachineBasicBlock::iterator InsertPt, unsigned NumBytes) const {
  MachineFunction &MF = *MBB.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();
  auto &HRI = *HST.getRegisterInfo();

  // Check for overflow.
  // Hexagon_TODO: Ugh! hardcoding. Is there an API that can be used?
  const unsigned int ALLOCFRAME_MAX = 16384;
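  // For example, with NumBytes == 20000 the allocframe immediate would not
  // fit, and the code below emits roughly:
  //   allocframe(#0)
  //   r29 = add(r29, #-20000)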

  // Create a dummy memory operand to avoid allocframe from being treated as
  // a volatile memory reference.
  auto *MMO = MF.getMachineMemOperand(MachinePointerInfo::getStack(MF, 0),
                                      MachineMemOperand::MOStore, 4, 4);

  DebugLoc dl = MBB.findDebugLoc(InsertPt);
  unsigned SP = HRI.getStackRegister();

  if (NumBytes >= ALLOCFRAME_MAX) {
    // Emit allocframe(#0).
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
        .addDef(SP)
        .addReg(SP)
        .addImm(0)
        .addMemOperand(MMO);

    // Subtract the size from the stack pointer.
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
        .addReg(SP)
        .addImm(-int(NumBytes));
  } else {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
        .addDef(SP)
        .addReg(SP)
        .addImm(NumBytes)
        .addMemOperand(MMO);
  }
}

void HexagonFrameLowering::updateEntryPaths(MachineFunction &MF,
      MachineBasicBlock &SaveB) const {
  SetVector<unsigned> Worklist;

  MachineBasicBlock &EntryB = MF.front();
  Worklist.insert(EntryB.getNumber());

  unsigned SaveN = SaveB.getNumber();
  auto &CSI = MF.getFrameInfo().getCalleeSavedInfo();

  for (unsigned i = 0; i < Worklist.size(); ++i) {
    unsigned BN = Worklist[i];
    MachineBasicBlock &MBB = *MF.getBlockNumbered(BN);
    for (auto &R : CSI)
      if (!MBB.isLiveIn(R.getReg()))
        MBB.addLiveIn(R.getReg());
    if (BN != SaveN)
      for (auto &SB : MBB.successors())
        Worklist.insert(SB->getNumber());
  }
}

bool HexagonFrameLowering::updateExitPaths(MachineBasicBlock &MBB,
      MachineBasicBlock &RestoreB, BitVector &DoneT, BitVector &DoneF,
      BitVector &Path) const {
  assert(MBB.getNumber() >= 0);
  unsigned BN = MBB.getNumber();
  if (Path[BN] || DoneF[BN])
    return false;
  if (DoneT[BN])
    return true;

  auto &CSI = MBB.getParent()->getFrameInfo().getCalleeSavedInfo();

  Path[BN] = true;
  bool ReachedExit = false;
  for (auto &SB : MBB.successors())
    ReachedExit |= updateExitPaths(*SB, RestoreB, DoneT, DoneF, Path);

  if (!MBB.empty() && MBB.back().isReturn()) {
    // Add implicit uses of all callee-saved registers to the reached
    // return instructions. This is to prevent the anti-dependency breaker
    // from renaming these registers.
    MachineInstr &RetI = MBB.back();
    if (!isRestoreCall(RetI.getOpcode()))
      for (auto &R : CSI)
        RetI.addOperand(MachineOperand::CreateReg(R.getReg(), false, true));
    ReachedExit = true;
  }

  // We don't want to add unnecessary live-ins to the restore block: since
  // the callee-saved registers are being defined in it, the entry of the
  // restore block cannot be on the path from the definitions to any exit.
  if (ReachedExit && &MBB != &RestoreB) {
    for (auto &R : CSI)
      if (!MBB.isLiveIn(R.getReg()))
        MBB.addLiveIn(R.getReg());
    DoneT[BN] = true;
  }
  if (!ReachedExit)
    DoneF[BN] = true;

  Path[BN] = false;
  return ReachedExit;
}

static Optional<MachineBasicBlock::iterator>
findCFILocation(MachineBasicBlock &B) {
  // The CFI instructions need to be inserted right after allocframe.
  // An exception to this is a situation where allocframe is bundled
  // with a call: then the CFI instructions need to be inserted before
  // the packet with the allocframe+call (in case the call throws an
  // exception).
  auto End = B.instr_end();

  for (MachineInstr &I : B) {
    MachineBasicBlock::iterator It = I.getIterator();
    if (!I.isBundle()) {
      if (I.getOpcode() == Hexagon::S2_allocframe)
        return std::next(It);
      continue;
    }
    // I is a bundle.
    bool HasCall = false, HasAllocFrame = false;
    auto T = It.getInstrIterator();
    while (++T != End && T->isBundled()) {
      if (T->getOpcode() == Hexagon::S2_allocframe)
        HasAllocFrame = true;
      else if (T->isCall())
        HasCall = true;
    }
    if (HasAllocFrame)
      return HasCall ? It : std::next(It);
  }
  return None;
}

void HexagonFrameLowering::insertCFIInstructions(MachineFunction &MF) const {
  for (auto &B : MF) {
    auto At = findCFILocation(B);
    if (At.hasValue())
      insertCFIInstructionsAt(B, At.getValue());
  }
}

void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB,
      MachineBasicBlock::iterator At) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();
  auto &HRI = *HST.getRegisterInfo();

  // If CFI instructions have debug information attached, something goes
  // wrong with the final assembly generation: the prolog_end is placed
  // in a wrong location.
  DebugLoc DL;
  const MCInstrDesc &CFID = HII.get(TargetOpcode::CFI_INSTRUCTION);

  MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
  bool HasFP = hasFP(MF);

  if (HasFP) {
    unsigned DwFPReg = HRI.getDwarfRegNum(HRI.getFrameRegister(), true);
    unsigned DwRAReg = HRI.getDwarfRegNum(HRI.getRARegister(), true);

    // Define CFA via an offset from the value of FP.
    //
    //  -8   -4    0 (SP)
    // --+----+----+---------------------
    //   | FP | LR |    increasing addresses -->
    // --+----+----+---------------------
    //   |         +-- Old SP (before allocframe)
    //   +-- New FP (after allocframe)
    //
    // MCCFIInstruction::createDefCfa subtracts the offset from the register.
    // MCCFIInstruction::createOffset takes the offset without sign change.
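    // In assembly, the three CFI instructions created below correspond
    // roughly to:
    //   .cfi_def_cfa r30, 8
    //   .cfi_offset r31, -4
    //   .cfi_offset r30, -8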
    auto DefCfa = MCCFIInstruction::createDefCfa(FrameLabel, DwFPReg, -8);
    BuildMI(MBB, At, DL, CFID)
        .addCFIIndex(MF.addFrameInst(DefCfa));
    // R31 (return addr) = CFA - 4
    auto OffR31 = MCCFIInstruction::createOffset(FrameLabel, DwRAReg, -4);
    BuildMI(MBB, At, DL, CFID)
        .addCFIIndex(MF.addFrameInst(OffR31));
    // R30 (frame ptr) = CFA - 8
    auto OffR30 = MCCFIInstruction::createOffset(FrameLabel, DwFPReg, -8);
    BuildMI(MBB, At, DL, CFID)
        .addCFIIndex(MF.addFrameInst(OffR30));
  }

  static unsigned int RegsToMove[] = {
    Hexagon::R1,  Hexagon::R0,  Hexagon::R3,  Hexagon::R2,
    Hexagon::R17, Hexagon::R16, Hexagon::R19, Hexagon::R18,
    Hexagon::R21, Hexagon::R20, Hexagon::R23, Hexagon::R22,
    Hexagon::R25, Hexagon::R24, Hexagon::R27, Hexagon::R26,
    Hexagon::D0,  Hexagon::D1,  Hexagon::D8,  Hexagon::D9,
    Hexagon::D10, Hexagon::D11, Hexagon::D12, Hexagon::D13,
    Hexagon::NoRegister
  };

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  for (unsigned i = 0; RegsToMove[i] != Hexagon::NoRegister; ++i) {
    unsigned Reg = RegsToMove[i];
    auto IfR = [Reg] (const CalleeSavedInfo &C) -> bool {
      return C.getReg() == Reg;
    };
    auto F = find_if(CSI, IfR);
    if (F == CSI.end())
      continue;

    int64_t Offset;
    if (HasFP) {
      // If the function has a frame pointer (i.e. has an allocframe),
      // then the CFA has been defined in terms of FP. Any offsets in
      // the following CFI instructions have to be defined relative
      // to FP, which points to the bottom of the stack frame.
      // The function getFrameIndexReference can still choose to use SP
      // for the offset calculation, so we cannot simply call it here.
      // Instead, get the offset (relative to the FP) directly.
      Offset = MFI.getObjectOffset(F->getFrameIdx());
    } else {
      unsigned FrameReg;
      Offset = getFrameIndexReference(MF, F->getFrameIdx(), FrameReg);
    }
    // Subtract 8 to make room for R30 and R31, which are added above.
    Offset -= 8;

    if (Reg < Hexagon::D0 || Reg > Hexagon::D15) {
      unsigned DwarfReg = HRI.getDwarfRegNum(Reg, true);
      auto OffReg = MCCFIInstruction::createOffset(FrameLabel, DwarfReg,
                                                   Offset);
      BuildMI(MBB, At, DL, CFID)
          .addCFIIndex(MF.addFrameInst(OffReg));
    } else {
      // Split the double regs into subregs, and generate the appropriate
      // cfi_offsets. The only reason we split double regs is that llvm-mc
      // does not understand paired registers for cfi_offset.
      // E.g. .cfi_offset r1:0, -64

      unsigned HiReg = HRI.getSubReg(Reg, Hexagon::isub_hi);
      unsigned LoReg = HRI.getSubReg(Reg, Hexagon::isub_lo);
      unsigned HiDwarfReg = HRI.getDwarfRegNum(HiReg, true);
      unsigned LoDwarfReg = HRI.getDwarfRegNum(LoReg, true);
      auto OffHi = MCCFIInstruction::createOffset(FrameLabel, HiDwarfReg,
                                                  Offset+4);
      BuildMI(MBB, At, DL, CFID)
          .addCFIIndex(MF.addFrameInst(OffHi));
      auto OffLo = MCCFIInstruction::createOffset(FrameLabel, LoDwarfReg,
                                                  Offset);
      BuildMI(MBB, At, DL, CFID)
          .addCFIIndex(MF.addFrameInst(OffLo));
    }
  }
}

bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    return false;

  auto &MFI = MF.getFrameInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  bool HasExtraAlign = HRI.needsStackRealignment(MF);
  bool HasAlloca = MFI.hasVarSizedObjects();

  // Insert ALLOCFRAME if we need to or at -O0 for the debugger. Think
  // that this shouldn't be required, but doing so now because gcc does and
  // gdb can't break at the start of the function without it. Will remove if
  // this turns out to be a gdb bug.
  //
  if (MF.getTarget().getOptLevel() == CodeGenOpt::None)
    return true;

  // By default we want to use SP (since it's always there). FP requires
  // some setup (i.e. ALLOCFRAME).
  // Both alloca and stack alignment modify the stack pointer by an
  // undetermined value, so we need to save it at the entry to the function
  // (i.e. use allocframe).
  if (HasAlloca || HasExtraAlign)
    return true;

  if (MFI.getStackSize() > 0) {
    // If FP-elimination is disabled, we have to use FP at this point.
    const TargetMachine &TM = MF.getTarget();
    if (TM.Options.DisableFramePointerElim(MF) || !EliminateFramePointer)
      return true;
    if (EnableStackOVFSanitizer)
      return true;
  }

  const auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  if ((MFI.hasCalls() && !enableAllocFrameElim(MF)) || HMFI.hasClobberLR())
    return true;

  return false;
}

enum SpillKind {
  SK_ToMem,
  SK_FromMem,
  SK_FromMemTailcall
};

static const char *getSpillFunctionFor(unsigned MaxReg, SpillKind SpillType,
      bool Stkchk = false) {
  const char *V4SpillToMemoryFunctions[] = {
    "__save_r16_through_r17",
    "__save_r16_through_r19",
    "__save_r16_through_r21",
    "__save_r16_through_r23",
    "__save_r16_through_r25",
    "__save_r16_through_r27" };

  const char *V4SpillToMemoryStkchkFunctions[] = {
    "__save_r16_through_r17_stkchk",
    "__save_r16_through_r19_stkchk",
    "__save_r16_through_r21_stkchk",
    "__save_r16_through_r23_stkchk",
    "__save_r16_through_r25_stkchk",
    "__save_r16_through_r27_stkchk" };

  const char *V4SpillFromMemoryFunctions[] = {
    "__restore_r16_through_r17_and_deallocframe",
    "__restore_r16_through_r19_and_deallocframe",
    "__restore_r16_through_r21_and_deallocframe",
    "__restore_r16_through_r23_and_deallocframe",
    "__restore_r16_through_r25_and_deallocframe",
    "__restore_r16_through_r27_and_deallocframe" };

  const char *V4SpillFromMemoryTailcallFunctions[] = {
    "__restore_r16_through_r17_and_deallocframe_before_tailcall",
    "__restore_r16_through_r19_and_deallocframe_before_tailcall",
    "__restore_r16_through_r21_and_deallocframe_before_tailcall",
    "__restore_r16_through_r23_and_deallocframe_before_tailcall",
    "__restore_r16_through_r25_and_deallocframe_before_tailcall",
    "__restore_r16_through_r27_and_deallocframe_before_tailcall"
  };

  const char **SpillFunc = nullptr;

  switch (SpillType) {
    case SK_ToMem:
      SpillFunc = Stkchk ? V4SpillToMemoryStkchkFunctions
                         : V4SpillToMemoryFunctions;
      break;
    case SK_FromMem:
      SpillFunc = V4SpillFromMemoryFunctions;
      break;
    case SK_FromMemTailcall:
      SpillFunc = V4SpillFromMemoryTailcallFunctions;
      break;
  }
  assert(SpillFunc && "Unknown spill kind");

  // Spill all callee-saved registers up to the highest register used.
  switch (MaxReg) {
    case Hexagon::R17:
      return SpillFunc[0];
    case Hexagon::R19:
      return SpillFunc[1];
    case Hexagon::R21:
      return SpillFunc[2];
    case Hexagon::R23:
      return SpillFunc[3];
    case Hexagon::R25:
      return SpillFunc[4];
    case Hexagon::R27:
      return SpillFunc[5];
    default:
      llvm_unreachable("Unhandled maximum callee save register");
  }
  return nullptr;
}

int HexagonFrameLowering::getFrameIndexReference(const MachineFunction &MF,
      int FI, unsigned &FrameReg) const {
  auto &MFI = MF.getFrameInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  int Offset = MFI.getObjectOffset(FI);
  bool HasAlloca = MFI.hasVarSizedObjects();
  bool HasExtraAlign = HRI.needsStackRealignment(MF);
  bool NoOpt = MF.getTarget().getOptLevel() == CodeGenOpt::None;

  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  unsigned FrameSize = MFI.getStackSize();
  unsigned SP = HRI.getStackRegister();
  unsigned FP = HRI.getFrameRegister();
  unsigned AP = HMFI.getStackAlignBasePhysReg();
  // It may happen that AP will be absent even when HasAlloca && HasExtraAlign
  // is true. HasExtraAlign may be set because of vector spills, without
  // aligned locals or aligned outgoing function arguments. Since vector
  // spills will ultimately be "unaligned", it is safe to use FP as the
  // base register.
  // In fact, in such a scenario the stack is actually not required to be
  // aligned, although it may end up being aligned anyway, since this
  // particular case is not easily detectable. The alignment will be
  // unnecessary, but not incorrect.
  // Unfortunately there is no quick way to verify that the above is
  // indeed the case (and that it's not a result of an error), so just
  // assume that missing AP will be replaced by FP.
  // (A better fix would be to rematerialize AP from FP and always align
  // vector spills.)
  if (AP == 0)
    AP = FP;

  bool UseFP = false, UseAP = false;  // Default: use SP (except at -O0).
  // Use FP at -O0, except when there are objects with extra alignment.
  // That additional alignment requirement may cause a pad to be inserted,
  // which will make it impossible to use FP to access objects located
  // past the pad.
  if (NoOpt && !HasExtraAlign)
    UseFP = true;
  if (MFI.isFixedObjectIndex(FI) || MFI.isObjectPreAllocated(FI)) {
    // Fixed and preallocated objects will be located before any padding
    // so FP must be used to access them.
    UseFP |= (HasAlloca || HasExtraAlign);
  } else {
    if (HasAlloca) {
      if (HasExtraAlign)
        UseAP = true;
      else
        UseFP = true;
    }
  }

  // If FP was picked, then there had better be FP.
  bool HasFP = hasFP(MF);
  assert((HasFP || !UseFP) && "This function must have frame pointer");

  // Having FP implies allocframe. Allocframe will store extra 8 bytes:
  // FP/LR. If the base register is used to access an object across these
  // 8 bytes, then the offset will need to be adjusted by 8.
  //
  // After allocframe:
  //                    HexagonISelLowering adds 8 to ---+
  //                    the offsets of all stack-based   |
  //                    arguments (*)                    |
  //                                                     |
  //   getObjectOffset < 0   0     8  getObjectOffset >= 8
  // ------------------------+-----+------------------------> increasing
  //     <local objects>     |FP/LR|    <input arguments>     addresses
  // -----------------+------+-----+------------------------>
  //                  |      |
  //    SP/AP point --+      +-- FP points here (**)
  //    somewhere on
  //    this side of FP/LR
  //
  // (*) See LowerFormalArguments. The FP/LR is assumed to be present.
  // (**) *FP == old-FP. FP+0..7 are the bytes of FP/LR.
  //
  // The lowering assumes that FP/LR is present, and so the offsets of
  // the formal arguments start at 8. If FP/LR is not there we need to
  // reduce the offset by 8.
  if (Offset > 0 && !HasFP)
    Offset -= 8;

  if (UseFP)
    FrameReg = FP;
  else if (UseAP)
    FrameReg = AP;
  else
    FrameReg = SP;

  // Calculate the actual offset in the instruction. If there is no FP
  // (in other words, no allocframe), then SP will not be adjusted (i.e.
  // there will be no SP -= FrameSize), so the frame size should not be
  // added to the calculated offset.
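  // For example (illustrative numbers): an object at getObjectOffset() == -16
  // in a frame with FrameSize == 24 would be addressed at SP+8 by an SP-based
  // access (24-16), but at FP-16 by an FP-based one.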
  int RealOffset = Offset;
  if (!UseFP && !UseAP)
    RealOffset = FrameSize+Offset;
  return RealOffset;
}

bool HexagonFrameLowering::insertCSRSpillsInBlock(MachineBasicBlock &MBB,
      const CSIVect &CSI, const HexagonRegisterInfo &HRI,
      bool &PrologueStubs) const {
  if (CSI.empty())
    return true;

  MachineBasicBlock::iterator MI = MBB.begin();
  PrologueStubs = false;
  MachineFunction &MF = *MBB.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();

  if (useSpillFunction(MF, CSI)) {
    PrologueStubs = true;
    unsigned MaxReg = getMaxCalleeSavedReg(CSI, HRI);
    bool StkOvrFlowEnabled = EnableStackOVFSanitizer;
    const char *SpillFun = getSpillFunctionFor(MaxReg, SK_ToMem,
                                               StkOvrFlowEnabled);
    auto &HTM = static_cast<const HexagonTargetMachine&>(MF.getTarget());
    bool IsPIC = HTM.isPositionIndependent();
    bool LongCalls = HST.useLongCalls() || EnableSaveRestoreLong;

    // Call spill function.
    DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc() : DebugLoc();
    unsigned SpillOpc;
    if (StkOvrFlowEnabled) {
      if (LongCalls)
        SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT_PIC
                         : Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT;
      else
        SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_PIC
                         : Hexagon::SAVE_REGISTERS_CALL_V4STK;
    } else {
      if (LongCalls)
        SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC
                         : Hexagon::SAVE_REGISTERS_CALL_V4_EXT;
      else
        SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_PIC
                         : Hexagon::SAVE_REGISTERS_CALL_V4;
    }

    MachineInstr *SaveRegsCall =
        BuildMI(MBB, MI, DL, HII.get(SpillOpc))
            .addExternalSymbol(SpillFun);

    // Add callee-saved registers as use.
    addCalleeSaveRegistersAsImpOperand(SaveRegsCall, CSI, false, true);
    // Add live in registers.
    for (unsigned I = 0; I < CSI.size(); ++I)
      MBB.addLiveIn(CSI[I].getReg());
    return true;
  }

  for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
    unsigned Reg = CSI[i].getReg();
    // Add live in registers. We treat eh_return callee saved register r0 - r3
    // specially. They are not really callee saved registers as they are not
    // supposed to be killed.
    bool IsKill = !HRI.isEHReturnCalleeSaveReg(Reg);
    int FI = CSI[i].getFrameIdx();
    const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
    HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, &HRI);
    if (IsKill)
      MBB.addLiveIn(Reg);
  }
  return true;
}

bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &MBB,
      const CSIVect &CSI, const HexagonRegisterInfo &HRI) const {
  if (CSI.empty())
    return false;

  MachineBasicBlock::iterator MI = MBB.getFirstTerminator();
  MachineFunction &MF = *MBB.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();

  if (useRestoreFunction(MF, CSI)) {
    bool HasTC = hasTailCall(MBB) || !hasReturn(MBB);
    unsigned MaxR = getMaxCalleeSavedReg(CSI, HRI);
    SpillKind Kind = HasTC ? SK_FromMemTailcall : SK_FromMem;
    const char *RestoreFn = getSpillFunctionFor(MaxR, Kind);
    auto &HTM = static_cast<const HexagonTargetMachine&>(MF.getTarget());
    bool IsPIC = HTM.isPositionIndependent();
    bool LongCalls = HST.useLongCalls() || EnableSaveRestoreLong;

    // Call spill function.
    DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc()
                                  : MBB.findDebugLoc(MBB.end());
    MachineInstr *DeallocCall = nullptr;

    if (HasTC) {
      unsigned RetOpc;
      if (LongCalls)
        RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC
                       : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT;
      else
        RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC
                       : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4;
      DeallocCall = BuildMI(MBB, MI, DL, HII.get(RetOpc))
                        .addExternalSymbol(RestoreFn);
    } else {
      // The block has a return.
      MachineBasicBlock::iterator It = MBB.getFirstTerminator();
      assert(It->isReturn() && std::next(It) == MBB.end());
      unsigned RetOpc;
      if (LongCalls)
        RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC
                       : Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT;
      else
        RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC
                       : Hexagon::RESTORE_DEALLOC_RET_JMP_V4;
      DeallocCall = BuildMI(MBB, It, DL, HII.get(RetOpc))
                        .addExternalSymbol(RestoreFn);
      // Transfer the function live-out registers.
      DeallocCall->copyImplicitOps(MF, *It);
    }
    addCalleeSaveRegistersAsImpOperand(DeallocCall, CSI, true, false);
    return true;
  }

  for (unsigned i = 0; i < CSI.size(); ++i) {
    unsigned Reg = CSI[i].getReg();
    const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
    int FI = CSI[i].getFrameIdx();
    HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI);
  }

  return true;
}

MachineBasicBlock::iterator HexagonFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  MachineInstr &MI = *I;
  unsigned Opc = MI.getOpcode();
  (void)Opc; // Silence compiler warning.
  assert((Opc == Hexagon::ADJCALLSTACKDOWN || Opc == Hexagon::ADJCALLSTACKUP) &&
         "Cannot handle this call frame pseudo instruction");
  return MBB.erase(I);
}

void HexagonFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  // If this function uses aligned stack and also has variable sized stack
  // objects, then we need to map all spill slots to fixed positions, so that
  // they can be accessed through FP. Otherwise they would have to be accessed
  // via AP, which may not be available at the particular place in the program.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool HasAlloca = MFI.hasVarSizedObjects();
  bool NeedsAlign = (MFI.getMaxAlignment() > getStackAlignment());

  if (!HasAlloca || !NeedsAlign)
    return;

  unsigned LFS = MFI.getLocalFrameSize();
  for (int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (!MFI.isSpillSlotObjectIndex(i) || MFI.isDeadObjectIndex(i))
      continue;
    unsigned S = MFI.getObjectSize(i);
    // Reduce the alignment to at most 8. This will require unaligned vector
    // stores if they happen here.
    unsigned A = std::max(MFI.getObjectAlignment(i), 8U);
    MFI.setObjectAlignment(i, 8);
    LFS = alignTo(LFS+S, A);
    MFI.mapLocalFrameObject(i, -LFS);
  }

  MFI.setLocalFrameSize(LFS);
  unsigned A = MFI.getLocalFrameMaxAlign();
  assert(A <= 8 && "Unexpected local frame alignment");
  if (A == 0)
    MFI.setLocalFrameMaxAlign(8);
  MFI.setUseLocalStackAllocationBlock(true);

  // Set the physical aligned-stack base address register.
  unsigned AP = 0;
  if (const MachineInstr *AI = getAlignaInstr(MF))
    AP = AI->getOperand(0).getReg();
  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  HMFI.setStackAlignBasePhysReg(AP);
}

/// Returns true if there are no caller-saved registers available in class RC.
static bool needToReserveScavengingSpillSlots(MachineFunction &MF,
      const HexagonRegisterInfo &HRI, const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  auto IsUsed = [&HRI,&MRI] (unsigned Reg) -> bool {
    for (MCRegAliasIterator AI(Reg, &HRI, true); AI.isValid(); ++AI)
      if (MRI.isPhysRegUsed(*AI))
        return true;
    return false;
  };

  // Check for an unused caller-saved register. Callee-saved registers
  // have become pristine by now.
  for (const MCPhysReg *P = HRI.getCallerSavedRegs(&MF, RC); *P; ++P)
    if (!IsUsed(*P))
      return false;

  // All caller-saved registers are used.
  return true;
}

#ifndef NDEBUG
static void dump_registers(BitVector &Regs, const TargetRegisterInfo &TRI) {
  dbgs() << '{';
  for (int x = Regs.find_first(); x >= 0; x = Regs.find_next(x)) {
    unsigned R = x;
    dbgs() << ' ' << printReg(R, &TRI);
  }
  dbgs() << " }";
}
#endif

bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
      const TargetRegisterInfo *TRI, std::vector<CalleeSavedInfo> &CSI) const {
  LLVM_DEBUG(dbgs() << __func__ << " on " << MF.getName() << '\n');
  MachineFrameInfo &MFI = MF.getFrameInfo();
  BitVector SRegs(Hexagon::NUM_TARGET_REGS);

  // Generate a set of unique, callee-saved registers (SRegs), where each
  // register in the set is maximal in terms of sub-/super-register relation,
  // i.e. for each R in SRegs, no proper super-register of R is also in SRegs.
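  // For instance, if both R16 and R17 are callee-saved and neither is
  // reserved, the set will end up containing their pair D8 (i.e. R17:16)
  // rather than R16 and R17 individually (see step (3) below).
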
1437 // (1) For each callee-saved register, add that register and all of its
1438 // sub-registers to SRegs.
1439 LLVM_DEBUG(dbgs() << "Initial CS registers: {");
1440 for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
1441 unsigned R = CSI[i].getReg();
1442 LLVM_DEBUG(dbgs() << ' ' << printReg(R, TRI));
1443 for (MCSubRegIterator SR(R, TRI, true); SR.isValid(); ++SR)
1444 SRegs[*SR] = true;
1446 LLVM_DEBUG(dbgs() << " }\n");
1447 LLVM_DEBUG(dbgs() << "SRegs.1: "; dump_registers(SRegs, *TRI);
1448 dbgs() << "\n");
1450 // (2) For each reserved register, remove that register and all of its
1451 // sub- and super-registers from SRegs.
1452 BitVector Reserved = TRI->getReservedRegs(MF);
1453 for (int x = Reserved.find_first(); x >= 0; x = Reserved.find_next(x)) {
1454 unsigned R = x;
1455 for (MCSuperRegIterator SR(R, TRI, true); SR.isValid(); ++SR)
1456 SRegs[*SR] = false;
1458 LLVM_DEBUG(dbgs() << "Res: "; dump_registers(Reserved, *TRI);
1459 dbgs() << "\n");
1460 LLVM_DEBUG(dbgs() << "SRegs.2: "; dump_registers(SRegs, *TRI);
1461 dbgs() << "\n");
1463 // (3) Collect all registers that have at least one sub-register in SRegs,
1464 // and also have no sub-registers that are reserved. These will be the can-
1465 // didates for saving as a whole instead of their individual sub-registers.
1466 // (Saving R17:16 instead of R16 is fine, but only if R17 was not reserved.)
1467 BitVector TmpSup(Hexagon::NUM_TARGET_REGS);
1468 for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
1469 unsigned R = x;
1470 for (MCSuperRegIterator SR(R, TRI); SR.isValid(); ++SR)
1471 TmpSup[*SR] = true;
1473 for (int x = TmpSup.find_first(); x >= 0; x = TmpSup.find_next(x)) {
1474 unsigned R = x;
1475 for (MCSubRegIterator SR(R, TRI, true); SR.isValid(); ++SR) {
1476 if (!Reserved[*SR])
1477 continue;
1478 TmpSup[R] = false;
1479 break;
1482 LLVM_DEBUG(dbgs() << "TmpSup: "; dump_registers(TmpSup, *TRI);
1483 dbgs() << "\n");
1485 // (4) Include all super-registers found in (3) into SRegs.
1486 SRegs |= TmpSup;
1487 LLVM_DEBUG(dbgs() << "SRegs.4: "; dump_registers(SRegs, *TRI);
1488 dbgs() << "\n");
1490 // (5) For each register R in SRegs, if any super-register of R is in SRegs,
1491 // remove R from SRegs.
1492 for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
1493 unsigned R = x;
1494 for (MCSuperRegIterator SR(R, TRI); SR.isValid(); ++SR) {
1495 if (!SRegs[*SR])
1496 continue;
1497 SRegs[R] = false;
1498 break;
1501 LLVM_DEBUG(dbgs() << "SRegs.5: "; dump_registers(SRegs, *TRI);
1502 dbgs() << "\n");
1504 // Now, for each register that has a fixed stack slot, create the stack
1505 // object for it.
1506 CSI.clear();
1508 using SpillSlot = TargetFrameLowering::SpillSlot;
1510 unsigned NumFixed;
1511 int MinOffset = 0; // CS offsets are negative.
1512 const SpillSlot *FixedSlots = getCalleeSavedSpillSlots(NumFixed);
1513 for (const SpillSlot *S = FixedSlots; S != FixedSlots+NumFixed; ++S) {
1514 if (!SRegs[S->Reg])
1515 continue;
1516 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(S->Reg);
1517 int FI = MFI.CreateFixedSpillStackObject(TRI->getSpillSize(*RC), S->Offset);
1518 MinOffset = std::min(MinOffset, S->Offset);
1519 CSI.push_back(CalleeSavedInfo(S->Reg, FI));
1520 SRegs[S->Reg] = false;
1523 // There can be some registers that don't have fixed slots. For example,
1524 // we need to store R0-R3 in functions with exception handling. For each
1525 // such register, create a non-fixed stack object.
1526 for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
1527 unsigned R = x;
1528 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(R);
1529 unsigned Size = TRI->getSpillSize(*RC);
1530 int Off = MinOffset - Size;
1531 unsigned Align = std::min(TRI->getSpillAlignment(*RC), getStackAlignment());
1532 assert(isPowerOf2_32(Align));
1533 Off &= -Align;
1534 int FI = MFI.CreateFixedSpillStackObject(Size, Off);
1535 MinOffset = std::min(MinOffset, Off);
1536 CSI.push_back(CalleeSavedInfo(R, FI));
1537 SRegs[R] = false;
1540 LLVM_DEBUG({
1541 dbgs() << "CS information: {";
1542 for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
1543 int FI = CSI[i].getFrameIdx();
1544 int Off = MFI.getObjectOffset(FI);
1545 dbgs() << ' ' << printReg(CSI[i].getReg(), TRI) << ":fi#" << FI << ":sp";
1546 if (Off >= 0)
1547 dbgs() << '+';
1548 dbgs() << Off;
1550 dbgs() << " }\n";
1553 #ifndef NDEBUG
1554 // Verify that all registers were handled.
1555 bool MissedReg = false;
1556 for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
1557 unsigned R = x;
1558 dbgs() << printReg(R, TRI) << ' ';
1559 MissedReg = true;
1560 }
1561 if (MissedReg)
1562 llvm_unreachable("...there are unhandled callee-saved registers!");
1563 #endif
1565 return true;
1566 }
1568 bool HexagonFrameLowering::expandCopy(MachineBasicBlock &B,
1569 MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1570 const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1571 MachineInstr *MI = &*It;
1572 DebugLoc DL = MI->getDebugLoc();
1573 unsigned DstR = MI->getOperand(0).getReg();
1574 unsigned SrcR = MI->getOperand(1).getReg();
1575 if (!Hexagon::ModRegsRegClass.contains(DstR) ||
1576 !Hexagon::ModRegsRegClass.contains(SrcR))
1577 return false;
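// A transfer between two modifier registers cannot be done directly;
// route it through an integer register (illustrative registers):
// m0 = COPY m1 becomes TmpR = COPY m1
// m0 = COPY TmpR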
1579 unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1580 BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR).add(MI->getOperand(1));
1581 BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR)
1582 .addReg(TmpR, RegState::Kill);
1584 NewRegs.push_back(TmpR);
1585 B.erase(It);
1586 return true;
1587 }
1589 bool HexagonFrameLowering::expandStoreInt(MachineBasicBlock &B,
1590 MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1591 const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1592 MachineInstr *MI = &*It;
1593 if (!MI->getOperand(0).isFI())
1594 return false;
1596 DebugLoc DL = MI->getDebugLoc();
1597 unsigned Opc = MI->getOpcode();
1598 unsigned SrcR = MI->getOperand(2).getReg();
1599 bool IsKill = MI->getOperand(2).isKill();
1600 int FI = MI->getOperand(0).getIndex();
1602 // TmpR = C2_tfrpr SrcR if SrcR is a predicate register
1603 // TmpR = A2_tfrcrr SrcR if SrcR is a modifier register
1604 unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1605 unsigned TfrOpc = (Opc == Hexagon::STriw_pred) ? Hexagon::C2_tfrpr
1606 : Hexagon::A2_tfrcrr;
1607 BuildMI(B, It, DL, HII.get(TfrOpc), TmpR)
1608 .addReg(SrcR, getKillRegState(IsKill));
1610 // S2_storeri_io FI, 0, TmpR
1611 BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))
1612 .addFrameIndex(FI)
1613 .addImm(0)
1614 .addReg(TmpR, RegState::Kill)
1615 .cloneMemRefs(*MI);
1617 NewRegs.push_back(TmpR);
1618 B.erase(It);
1619 return true;
1620 }
1622 bool HexagonFrameLowering::expandLoadInt(MachineBasicBlock &B,
1623 MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1624 const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1625 MachineInstr *MI = &*It;
1626 if (!MI->getOperand(1).isFI())
1627 return false;
1629 DebugLoc DL = MI->getDebugLoc();
1630 unsigned Opc = MI->getOpcode();
1631 unsigned DstR = MI->getOperand(0).getReg();
1632 int FI = MI->getOperand(1).getIndex();
1634 // TmpR = L2_loadri_io FI, 0
1635 unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1636 BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)
1637 .addFrameIndex(FI)
1638 .addImm(0)
1639 .cloneMemRefs(*MI);
1641 // DstR = C2_tfrrp TmpR if DstR is a predicate register
1642 // DstR = A2_tfrrcr TmpR if DstR is a modifier register
1643 unsigned TfrOpc = (Opc == Hexagon::LDriw_pred) ? Hexagon::C2_tfrrp
1644 : Hexagon::A2_tfrrcr;
1645 BuildMI(B, It, DL, HII.get(TfrOpc), DstR)
1646 .addReg(TmpR, RegState::Kill);
1648 NewRegs.push_back(TmpR);
1649 B.erase(It);
1650 return true;
1651 }
1653 bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B,
1654 MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1655 const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1656 MachineInstr *MI = &*It;
1657 if (!MI->getOperand(0).isFI())
1658 return false;
1660 DebugLoc DL = MI->getDebugLoc();
1661 unsigned SrcR = MI->getOperand(2).getReg();
1662 bool IsKill = MI->getOperand(2).isKill();
1663 int FI = MI->getOperand(0).getIndex();
1664 auto *RC = &Hexagon::HvxVRRegClass;
1666 // Insert transfer to general vector register.
1667 // TmpR0 = A2_tfrsi 0x01010101
1668 // TmpR1 = V6_vandqrt Qx, TmpR0
1669 // store FI, 0, TmpR1
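// Roughly speaking, the 0x01010101 splat makes V6_vandqrt place a 0x01
// byte in every lane whose predicate bit is set (0x00 otherwise), so the
// predicate can be stored like an ordinary vector.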
1670 unsigned TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1671 unsigned TmpR1 = MRI.createVirtualRegister(RC);
1673 BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
1674 .addImm(0x01010101);
1676 BuildMI(B, It, DL, HII.get(Hexagon::V6_vandqrt), TmpR1)
1677 .addReg(SrcR, getKillRegState(IsKill))
1678 .addReg(TmpR0, RegState::Kill);
1680 auto *HRI = B.getParent()->getSubtarget<HexagonSubtarget>().getRegisterInfo();
1681 HII.storeRegToStackSlot(B, It, TmpR1, true, FI, RC, HRI);
1682 expandStoreVec(B, std::prev(It), MRI, HII, NewRegs);
1684 NewRegs.push_back(TmpR0);
1685 NewRegs.push_back(TmpR1);
1686 B.erase(It);
1687 return true;
1688 }
1690 bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &B,
1691 MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1692 const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1693 MachineInstr *MI = &*It;
1694 if (!MI->getOperand(1).isFI())
1695 return false;
1697 DebugLoc DL = MI->getDebugLoc();
1698 unsigned DstR = MI->getOperand(0).getReg();
1699 int FI = MI->getOperand(1).getIndex();
1700 auto *RC = &Hexagon::HvxVRRegClass;
1702 // TmpR0 = A2_tfrsi 0x01010101
1703 // TmpR1 = load FI, 0
1704 // DstR = V6_vandvrt TmpR1, TmpR0
1705 unsigned TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1706 unsigned TmpR1 = MRI.createVirtualRegister(RC);
1708 BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
1709 .addImm(0x01010101);
1710 MachineFunction &MF = *B.getParent();
1711 auto *HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1712 HII.loadRegFromStackSlot(B, It, TmpR1, FI, RC, HRI);
1713 expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);
1715 BuildMI(B, It, DL, HII.get(Hexagon::V6_vandvrt), DstR)
1716 .addReg(TmpR1, RegState::Kill)
1717 .addReg(TmpR0, RegState::Kill);
1719 NewRegs.push_back(TmpR0);
1720 NewRegs.push_back(TmpR1);
1721 B.erase(It);
1722 return true;
1723 }
1725 bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B,
1726 MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1727 const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1728 MachineFunction &MF = *B.getParent();
1729 auto &MFI = MF.getFrameInfo();
1730 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1731 MachineInstr *MI = &*It;
1732 if (!MI->getOperand(0).isFI())
1733 return false;
1735 // It is possible that the double vector being stored is only partially
1736 // defined. From the point of view of the liveness tracking, it is ok to
1737 // store it as a whole, but if we break it up we may end up storing a
1738 // register that is entirely undefined.
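// For example (hypothetical), if only the low half V0 of a pair W0 has
// been defined at this point, its high half V1 is undefined, and the
// liveness computed below lets us skip the store of that half.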
1739 LivePhysRegs LPR(HRI);
1740 LPR.addLiveIns(B);
1741 SmallVector<std::pair<MCPhysReg, const MachineOperand*>,2> Clobbers;
1742 for (auto R = B.begin(); R != It; ++R) {
1743 Clobbers.clear();
1744 LPR.stepForward(*R, Clobbers);
1745 }
1747 DebugLoc DL = MI->getDebugLoc();
1748 unsigned SrcR = MI->getOperand(2).getReg();
1749 unsigned SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo);
1750 unsigned SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi);
1751 bool IsKill = MI->getOperand(2).isKill();
1752 int FI = MI->getOperand(0).getIndex();
1754 unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1755 unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
1756 unsigned HasAlign = MFI.getObjectAlignment(FI);
1757 unsigned StoreOpc;
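// Use the aligned store only when the slot's alignment permits it; e.g.
// (hypothetical), with 64-byte vectors a slot aligned to just 4 bytes
// forces the unaligned V6_vS32Ub_ai form for both halves.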
1759 // Store low part.
1760 if (LPR.contains(SrcLo)) {
1761 StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
1762 : Hexagon::V6_vS32Ub_ai;
1763 BuildMI(B, It, DL, HII.get(StoreOpc))
1764 .addFrameIndex(FI)
1765 .addImm(0)
1766 .addReg(SrcLo, getKillRegState(IsKill))
1767 .cloneMemRefs(*MI);
1768 }
1770 // Store high part.
1771 if (LPR.contains(SrcHi)) {
1772 StoreOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vS32b_ai
1773 : Hexagon::V6_vS32Ub_ai;
1774 BuildMI(B, It, DL, HII.get(StoreOpc))
1775 .addFrameIndex(FI)
1776 .addImm(Size)
1777 .addReg(SrcHi, getKillRegState(IsKill))
1778 .cloneMemRefs(*MI);
1779 }
1781 B.erase(It);
1782 return true;
1783 }
1785 bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &B,
1786 MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1787 const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1788 MachineFunction &MF = *B.getParent();
1789 auto &MFI = MF.getFrameInfo();
1790 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1791 MachineInstr *MI = &*It;
1792 if (!MI->getOperand(1).isFI())
1793 return false;
1795 DebugLoc DL = MI->getDebugLoc();
1796 unsigned DstR = MI->getOperand(0).getReg();
1797 unsigned DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi);
1798 unsigned DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo);
1799 int FI = MI->getOperand(1).getIndex();
1801 unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1802 unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
1803 unsigned HasAlign = MFI.getObjectAlignment(FI);
1804 unsigned LoadOpc;
1806 // Load low part.
1807 LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
1808 : Hexagon::V6_vL32Ub_ai;
1809 BuildMI(B, It, DL, HII.get(LoadOpc), DstLo)
1810 .addFrameIndex(FI)
1811 .addImm(0)
1812 .cloneMemRefs(*MI);
1814 // Load high part.
1815 LoadOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vL32b_ai
1816 : Hexagon::V6_vL32Ub_ai;
1817 BuildMI(B, It, DL, HII.get(LoadOpc), DstHi)
1818 .addFrameIndex(FI)
1819 .addImm(Size)
1820 .cloneMemRefs(*MI);
1822 B.erase(It);
1823 return true;
1824 }
1826 bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &B,
1827 MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1828 const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1829 MachineFunction &MF = *B.getParent();
1830 auto &MFI = MF.getFrameInfo();
1831 MachineInstr *MI = &*It;
1832 if (!MI->getOperand(0).isFI())
1833 return false;
1835 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1836 DebugLoc DL = MI->getDebugLoc();
1837 unsigned SrcR = MI->getOperand(2).getReg();
1838 bool IsKill = MI->getOperand(2).isKill();
1839 int FI = MI->getOperand(0).getIndex();
1841 unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
1842 unsigned HasAlign = MFI.getObjectAlignment(FI);
1843 unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
1844 : Hexagon::V6_vS32Ub_ai;
1845 BuildMI(B, It, DL, HII.get(StoreOpc))
1846 .addFrameIndex(FI)
1847 .addImm(0)
1848 .addReg(SrcR, getKillRegState(IsKill))
1849 .cloneMemRefs(*MI);
1851 B.erase(It);
1852 return true;
1853 }
1855 bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B,
1856 MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1857 const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1858 MachineFunction &MF = *B.getParent();
1859 auto &MFI = MF.getFrameInfo();
1860 MachineInstr *MI = &*It;
1861 if (!MI->getOperand(1).isFI())
1862 return false;
1864 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1865 DebugLoc DL = MI->getDebugLoc();
1866 unsigned DstR = MI->getOperand(0).getReg();
1867 int FI = MI->getOperand(1).getIndex();
1869 unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
1870 unsigned HasAlign = MFI.getObjectAlignment(FI);
1871 unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
1872 : Hexagon::V6_vL32Ub_ai;
1873 BuildMI(B, It, DL, HII.get(LoadOpc), DstR)
1874 .addFrameIndex(FI)
1875 .addImm(0)
1876 .cloneMemRefs(*MI);
1878 B.erase(It);
1879 return true;
1880 }
1882 bool HexagonFrameLowering::expandSpillMacros(MachineFunction &MF,
1883 SmallVectorImpl<unsigned> &NewRegs) const {
1884 auto &HII = *MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
1885 MachineRegisterInfo &MRI = MF.getRegInfo();
1886 bool Changed = false;
1888 for (auto &B : MF) {
1889 // Traverse the basic block.
1890 MachineBasicBlock::iterator NextI;
1891 for (auto I = B.begin(), E = B.end(); I != E; I = NextI) {
1892 MachineInstr *MI = &*I;
1893 NextI = std::next(I);
1894 unsigned Opc = MI->getOpcode();
1896 switch (Opc) {
1897 case TargetOpcode::COPY:
1898 Changed |= expandCopy(B, I, MRI, HII, NewRegs);
1899 break;
1900 case Hexagon::STriw_pred:
1901 case Hexagon::STriw_ctr:
1902 Changed |= expandStoreInt(B, I, MRI, HII, NewRegs);
1903 break;
1904 case Hexagon::LDriw_pred:
1905 case Hexagon::LDriw_ctr:
1906 Changed |= expandLoadInt(B, I, MRI, HII, NewRegs);
1907 break;
1908 case Hexagon::PS_vstorerq_ai:
1909 Changed |= expandStoreVecPred(B, I, MRI, HII, NewRegs);
1910 break;
1911 case Hexagon::PS_vloadrq_ai:
1912 Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs);
1913 break;
1914 case Hexagon::PS_vloadrw_ai:
1915 case Hexagon::PS_vloadrwu_ai:
1916 Changed |= expandLoadVec2(B, I, MRI, HII, NewRegs);
1917 break;
1918 case Hexagon::PS_vstorerw_ai:
1919 case Hexagon::PS_vstorerwu_ai:
1920 Changed |= expandStoreVec2(B, I, MRI, HII, NewRegs);
1921 break;
1922 }
1923 }
1924 }
1926 return Changed;
1927 }
1929 void HexagonFrameLowering::determineCalleeSaves(MachineFunction &MF,
1930 BitVector &SavedRegs,
1931 RegScavenger *RS) const {
1932 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1934 SavedRegs.resize(HRI.getNumRegs());
1936 // If we have a function containing __builtin_eh_return we want to spill and
1937 // restore all callee saved registers. Pretend that they are used.
1938 if (MF.getInfo<HexagonMachineFunctionInfo>()->hasEHReturn())
1939 for (const MCPhysReg *R = HRI.getCalleeSavedRegs(&MF); *R; ++R)
1940 SavedRegs.set(*R);
1942 // Replace predicate register pseudo spill code.
1943 SmallVector<unsigned,8> NewRegs;
1944 expandSpillMacros(MF, NewRegs);
1945 if (OptimizeSpillSlots && !isOptNone(MF))
1946 optimizeSpillSlots(MF, NewRegs);
1948 // We need to reserve a spill slot if scavenging could potentially require
1949 // spilling a scavenged register.
1950 if (!NewRegs.empty() || mayOverflowFrameOffset(MF)) {
1951 MachineFrameInfo &MFI = MF.getFrameInfo();
1952 MachineRegisterInfo &MRI = MF.getRegInfo();
1953 SetVector<const TargetRegisterClass*> SpillRCs;
1954 // Reserve an int register in any case, because it could be used to hold
1955 // the stack offset in case it does not fit into a spill instruction.
1956 SpillRCs.insert(&Hexagon::IntRegsRegClass);
1958 for (unsigned VR : NewRegs)
1959 SpillRCs.insert(MRI.getRegClass(VR));
1961 for (auto *RC : SpillRCs) {
1962 if (!needToReserveScavengingSpillSlots(MF, HRI, RC))
1963 continue;
1964 unsigned Num = RC == &Hexagon::IntRegsRegClass ? NumberScavengerSlots : 1;
1965 unsigned S = HRI.getSpillSize(*RC), A = HRI.getSpillAlignment(*RC);
1966 for (unsigned i = 0; i < Num; i++) {
1967 int NewFI = MFI.CreateSpillStackObject(S, A);
1968 RS->addScavengingFrameIndex(NewFI);
1969 }
1970 }
1971 }
1973 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
1974 }
1976 unsigned HexagonFrameLowering::findPhysReg(MachineFunction &MF,
1977 HexagonBlockRanges::IndexRange &FIR,
1978 HexagonBlockRanges::InstrIndexMap &IndexMap,
1979 HexagonBlockRanges::RegToRangeMap &DeadMap,
1980 const TargetRegisterClass *RC) const {
1981 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1982 auto &MRI = MF.getRegInfo();
1984 auto isDead = [&FIR,&DeadMap] (unsigned Reg) -> bool {
1985 auto F = DeadMap.find({Reg,0});
1986 if (F == DeadMap.end())
1987 return false;
1988 for (auto &DR : F->second)
1989 if (DR.contains(FIR))
1990 return true;
1991 return false;
1992 };
1994 for (unsigned Reg : RC->getRawAllocationOrder(MF)) {
1995 bool Dead = true;
1996 for (auto R : HexagonBlockRanges::expandToSubRegs({Reg,0}, MRI, HRI)) {
1997 if (isDead(R.Reg))
1998 continue;
1999 Dead = false;
2000 break;
2001 }
2002 if (Dead)
2003 return Reg;
2004 }
2005 return 0;
2006 }
2008 void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
2009 SmallVectorImpl<unsigned> &VRegs) const {
2010 auto &HST = MF.getSubtarget<HexagonSubtarget>();
2011 auto &HII = *HST.getInstrInfo();
2012 auto &HRI = *HST.getRegisterInfo();
2013 auto &MRI = MF.getRegInfo();
2014 HexagonBlockRanges HBR(MF);
2016 using BlockIndexMap =
2017 std::map<MachineBasicBlock *, HexagonBlockRanges::InstrIndexMap>;
2018 using BlockRangeMap =
2019 std::map<MachineBasicBlock *, HexagonBlockRanges::RangeList>;
2020 using IndexType = HexagonBlockRanges::IndexType;
2022 struct SlotInfo {
2023 BlockRangeMap Map;
2024 unsigned Size = 0;
2025 const TargetRegisterClass *RC = nullptr;
2027 SlotInfo() = default;
2028 };
2030 BlockIndexMap BlockIndexes;
2031 SmallSet<int,4> BadFIs;
2032 std::map<int,SlotInfo> FIRangeMap;
2034 // Accumulate register classes: get a common class for a pre-existing
2035 // class HaveRC and a new class NewRC. Return nullptr if a common class
2036 // cannot be found, otherwise return the resulting class. If HaveRC is
2037 // nullptr, assume that it is still unset.
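// E.g. (illustrative): IntRegs combined with IntRegs stays IntRegs,
// while IntRegs combined with DoubleRegs, where neither contains the
// other, yields nullptr and the slot is treated as unoptimizable.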
2038 auto getCommonRC =
2039 [](const TargetRegisterClass *HaveRC,
2040 const TargetRegisterClass *NewRC) -> const TargetRegisterClass * {
2041 if (HaveRC == nullptr || HaveRC == NewRC)
2042 return NewRC;
2043 // Different classes, both non-null. Pick the more general one.
2044 if (HaveRC->hasSubClassEq(NewRC))
2045 return HaveRC;
2046 if (NewRC->hasSubClassEq(HaveRC))
2047 return NewRC;
2048 return nullptr;
2049 };
2051 // Scan all blocks in the function. Check all occurrences of frame indexes,
2052 // and collect relevant information.
2053 for (auto &B : MF) {
2054 std::map<int,IndexType> LastStore, LastLoad;
2055 // Emplace appears not to be supported in gcc 4.7.2-4.
2056 //auto P = BlockIndexes.emplace(&B, HexagonBlockRanges::InstrIndexMap(B));
2057 auto P = BlockIndexes.insert(
2058 std::make_pair(&B, HexagonBlockRanges::InstrIndexMap(B)));
2059 auto &IndexMap = P.first->second;
2060 LLVM_DEBUG(dbgs() << "Index map for " << printMBBReference(B) << "\n"
2061 << IndexMap << '\n');
2063 for (auto &In : B) {
2064 int LFI, SFI;
2065 bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In);
2066 bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In);
2067 if (Load && Store) {
2068 // If it's both a load and a store, then we won't handle it.
2069 BadFIs.insert(LFI);
2070 BadFIs.insert(SFI);
2071 continue;
2072 }
2073 // Check for register classes of the register used as the source for
2074 // the store, and the register used as the destination for the load.
2075 // Also, only accept base+imm_offset addressing modes. Other addressing
2076 // modes can have side-effects (post-increments, etc.). For stack
2077 // slots they are very unlikely, so there is not much loss due to
2078 // this restriction.
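// E.g. (illustrative): "memw(r29+#8) = r1" uses base+imm_offset and is
// acceptable, while a post-increment form such as "memw(r1++#4) = r0"
// would disqualify the slot.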
2079 if (Load || Store) {
2080 int TFI = Load ? LFI : SFI;
2081 unsigned AM = HII.getAddrMode(In);
2082 SlotInfo &SI = FIRangeMap[TFI];
2083 bool Bad = (AM != HexagonII::BaseImmOffset);
2084 if (!Bad) {
2085 // If the addressing mode is ok, check the register class.
2086 unsigned OpNum = Load ? 0 : 2;
2087 auto *RC = HII.getRegClass(In.getDesc(), OpNum, &HRI, MF);
2088 RC = getCommonRC(SI.RC, RC);
2089 if (RC == nullptr)
2090 Bad = true;
2091 else
2092 SI.RC = RC;
2093 }
2094 if (!Bad) {
2095 // Check sizes.
2096 unsigned S = HII.getMemAccessSize(In);
2097 if (SI.Size != 0 && SI.Size != S)
2098 Bad = true;
2099 else
2100 SI.Size = S;
2101 }
2102 if (!Bad) {
2103 for (auto *Mo : In.memoperands()) {
2104 if (!Mo->isVolatile() && !Mo->isAtomic())
2105 continue;
2106 Bad = true;
2107 break;
2108 }
2109 }
2110 if (Bad)
2111 BadFIs.insert(TFI);
2112 }
2114 // Locate uses of frame indices.
2115 for (unsigned i = 0, n = In.getNumOperands(); i < n; ++i) {
2116 const MachineOperand &Op = In.getOperand(i);
2117 if (!Op.isFI())
2118 continue;
2119 int FI = Op.getIndex();
2120 // Make sure that the following operand is an immediate and that
2121 // it is 0. This is the offset in the stack object.
2122 if (i+1 >= n || !In.getOperand(i+1).isImm() ||
2123 In.getOperand(i+1).getImm() != 0)
2124 BadFIs.insert(FI);
2125 if (BadFIs.count(FI))
2126 continue;
2128 IndexType Index = IndexMap.getIndex(&In);
2129 if (Load) {
2130 if (LastStore[FI] == IndexType::None)
2131 LastStore[FI] = IndexType::Entry;
2132 LastLoad[FI] = Index;
2133 } else if (Store) {
2134 HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B];
2135 if (LastStore[FI] != IndexType::None)
2136 RL.add(LastStore[FI], LastLoad[FI], false, false);
2137 else if (LastLoad[FI] != IndexType::None)
2138 RL.add(IndexType::Entry, LastLoad[FI], false, false);
2139 LastLoad[FI] = IndexType::None;
2140 LastStore[FI] = Index;
2141 } else {
2142 BadFIs.insert(FI);
2143 }
2144 }
2145 }
2147 for (auto &I : LastLoad) {
2148 IndexType LL = I.second;
2149 if (LL == IndexType::None)
2150 continue;
2151 auto &RL = FIRangeMap[I.first].Map[&B];
2152 IndexType &LS = LastStore[I.first];
2153 if (LS != IndexType::None)
2154 RL.add(LS, LL, false, false);
2155 else
2156 RL.add(IndexType::Entry, LL, false, false);
2157 LS = IndexType::None;
2158 }
2159 for (auto &I : LastStore) {
2160 IndexType LS = I.second;
2161 if (LS == IndexType::None)
2162 continue;
2163 auto &RL = FIRangeMap[I.first].Map[&B];
2164 RL.add(LS, IndexType::None, false, false);
2165 }
2166 }
2168 LLVM_DEBUG({
2169 for (auto &P : FIRangeMap) {
2170 dbgs() << "fi#" << P.first;
2171 if (BadFIs.count(P.first))
2172 dbgs() << " (bad)";
2173 dbgs() << " RC: ";
2174 if (P.second.RC != nullptr)
2175 dbgs() << HRI.getRegClassName(P.second.RC) << '\n';
2176 else
2177 dbgs() << "<null>\n";
2178 for (auto &R : P.second.Map)
2179 dbgs() << " " << printMBBReference(*R.first) << " { " << R.second
2180 << "}\n";
2181 }
2182 });
2184 // When a slot is loaded from in a block without being stored to in the
2185 // same block, it is live-on-entry to this block. To avoid CFG analysis,
2186 // consider this slot to be live-on-exit from all blocks.
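// E.g. (hypothetical): if some block loads fi#2 before any store to it,
// fi#2 is treated as live-on-exit everywhere, so the last store to it
// in each block must be preserved.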
2187 SmallSet<int,4> LoxFIs;
2189 std::map<MachineBasicBlock*,std::vector<int>> BlockFIMap;
2191 for (auto &P : FIRangeMap) {
2192 // P = pair(FI, map: BB->RangeList)
2193 if (BadFIs.count(P.first))
2194 continue;
2195 for (auto &B : MF) {
2196 auto F = P.second.Map.find(&B);
2197 // F = pair(BB, RangeList)
2198 if (F == P.second.Map.end() || F->second.empty())
2199 continue;
2200 HexagonBlockRanges::IndexRange &IR = F->second.front();
2201 if (IR.start() == IndexType::Entry)
2202 LoxFIs.insert(P.first);
2203 BlockFIMap[&B].push_back(P.first);
2204 }
2205 }
2207 LLVM_DEBUG({
2208 dbgs() << "Block-to-FI map (* -- live-on-exit):\n";
2209 for (auto &P : BlockFIMap) {
2210 auto &FIs = P.second;
2211 if (FIs.empty())
2212 continue;
2213 dbgs() << " " << printMBBReference(*P.first) << ": {";
2214 for (auto I : FIs) {
2215 dbgs() << " fi#" << I;
2216 if (LoxFIs.count(I))
2217 dbgs() << '*';
2218 }
2219 dbgs() << " }\n";
2220 }
2221 });
2223 #ifndef NDEBUG
2224 bool HasOptLimit = SpillOptMax.getPosition();
2225 #endif
2227 // Eliminate loads first; once all loads from a slot are gone, eliminate its stores as well.
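// E.g. (illustrative): for a range "store fi#0 <- r3 ... r5 = load fi#0"
// with r8 dead across it, the store is replaced by "r8 = COPY r3" and the
// load by "r5 = COPY r8" (the store is kept if fi#0 is live-on-exit).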
2228 for (auto &B : MF) {
2229 auto F = BlockIndexes.find(&B);
2230 assert(F != BlockIndexes.end());
2231 HexagonBlockRanges::InstrIndexMap &IM = F->second;
2232 HexagonBlockRanges::RegToRangeMap LM = HBR.computeLiveMap(IM);
2233 HexagonBlockRanges::RegToRangeMap DM = HBR.computeDeadMap(IM, LM);
2234 LLVM_DEBUG(dbgs() << printMBBReference(B) << " dead map\n"
2235 << HexagonBlockRanges::PrintRangeMap(DM, HRI));
2237 for (auto FI : BlockFIMap[&B]) {
2238 if (BadFIs.count(FI))
2239 continue;
2240 LLVM_DEBUG(dbgs() << "Working on fi#" << FI << '\n');
2241 HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B];
2242 for (auto &Range : RL) {
2243 LLVM_DEBUG(dbgs() << "--Examining range:" << RL << '\n');
2244 if (!IndexType::isInstr(Range.start()) ||
2245 !IndexType::isInstr(Range.end()))
2246 continue;
2247 MachineInstr &SI = *IM.getInstr(Range.start());
2248 MachineInstr &EI = *IM.getInstr(Range.end());
2249 assert(SI.mayStore() && "Unexpected start instruction");
2250 assert(EI.mayLoad() && "Unexpected end instruction");
2251 MachineOperand &SrcOp = SI.getOperand(2);
2253 HexagonBlockRanges::RegisterRef SrcRR = { SrcOp.getReg(),
2254 SrcOp.getSubReg() };
2255 auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI, MF);
2256 // The this-> is needed to unconfuse MSVC.
2257 unsigned FoundR = this->findPhysReg(MF, Range, IM, DM, RC);
2258 LLVM_DEBUG(dbgs() << "Replacement reg:" << printReg(FoundR, &HRI)
2259 << '\n');
2260 if (FoundR == 0)
2261 continue;
2262 #ifndef NDEBUG
2263 if (HasOptLimit) {
2264 if (SpillOptCount >= SpillOptMax)
2265 return;
2266 SpillOptCount++;
2267 }
2268 #endif
2270 // Generate the copy-in: "FoundR = COPY SrcR" at the store location.
2271 MachineBasicBlock::iterator StartIt = SI.getIterator(), NextIt;
2272 MachineInstr *CopyIn = nullptr;
2273 if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) {
2274 const DebugLoc &DL = SI.getDebugLoc();
2275 CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR)
2276 .add(SrcOp);
2277 }
2279 ++StartIt;
2280 // Check if this is a last store and the FI is live-on-exit.
2281 if (LoxFIs.count(FI) && (&Range == &RL.back())) {
2282 // Update store's source register.
2283 if (unsigned SR = SrcOp.getSubReg())
2284 SrcOp.setReg(HRI.getSubReg(FoundR, SR));
2285 else
2286 SrcOp.setReg(FoundR);
2287 SrcOp.setSubReg(0);
2288 // We are keeping this register live.
2289 SrcOp.setIsKill(false);
2290 } else {
2291 B.erase(&SI);
2292 IM.replaceInstr(&SI, CopyIn);
2293 }
2295 auto EndIt = std::next(EI.getIterator());
2296 for (auto It = StartIt; It != EndIt; It = NextIt) {
2297 MachineInstr &MI = *It;
2298 NextIt = std::next(It);
2299 int TFI;
2300 if (!HII.isLoadFromStackSlot(MI, TFI) || TFI != FI)
2301 continue;
2302 unsigned DstR = MI.getOperand(0).getReg();
2303 assert(MI.getOperand(0).getSubReg() == 0);
2304 MachineInstr *CopyOut = nullptr;
2305 if (DstR != FoundR) {
2306 DebugLoc DL = MI.getDebugLoc();
2307 unsigned MemSize = HII.getMemAccessSize(MI);
2308 assert(HII.getAddrMode(MI) == HexagonII::BaseImmOffset);
2309 unsigned CopyOpc = TargetOpcode::COPY;
2310 if (HII.isSignExtendingLoad(MI))
2311 CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth;
2312 else if (HII.isZeroExtendingLoad(MI))
2313 CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth;
2314 CopyOut = BuildMI(B, It, DL, HII.get(CopyOpc), DstR)
2315 .addReg(FoundR, getKillRegState(&MI == &EI));
2316 }
2317 IM.replaceInstr(&MI, CopyOut);
2318 B.erase(It);
2319 }
2321 // Update the dead map.
2322 HexagonBlockRanges::RegisterRef FoundRR = { FoundR, 0 };
2323 for (auto RR : HexagonBlockRanges::expandToSubRegs(FoundRR, MRI, HRI))
2324 DM[RR].subtract(Range);
2325 } // for Range in range list
2326 } // for FI
2327 } // for B
2328 }
2330 void HexagonFrameLowering::expandAlloca(MachineInstr *AI,
2331 const HexagonInstrInfo &HII, unsigned SP, unsigned CF) const {
2332 MachineBasicBlock &MB = *AI->getParent();
2333 DebugLoc DL = AI->getDebugLoc();
2334 unsigned A = AI->getOperand(2).getImm();
2336 // Have
2337 // Rd = alloca Rs, #A
2339 // If Rs and Rd are different registers, use this sequence:
2340 // Rd = sub(r29, Rs)
2341 // r29 = sub(r29, Rs)
2342 // Rd = and(Rd, #-A) ; if necessary
2343 // r29 = and(r29, #-A) ; if necessary
2344 // Rd = add(Rd, #CF) ; CF size aligned to at most A
2345 // otherwise, do
2346 // Rd = sub(r29, Rs)
2347 // Rd = and(Rd, #-A) ; if necessary
2348 // r29 = Rd
2349 // Rd = add(Rd, #CF) ; CF size aligned to at most A
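// E.g. (hypothetical values): "r1 = alloca r2, #16" with CF = 8 and
// Rd != Rs expands to:
// r1 = sub(r29, r2)
// r29 = sub(r29, r2)
// r1 = and(r1, #-16)
// r29 = and(r29, #-16)
// r1 = add(r1, #8)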
2351 MachineOperand &RdOp = AI->getOperand(0);
2352 MachineOperand &RsOp = AI->getOperand(1);
2353 unsigned Rd = RdOp.getReg(), Rs = RsOp.getReg();
2355 // Rd = sub(r29, Rs)
2356 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), Rd)
2357 .addReg(SP)
2358 .addReg(Rs);
2359 if (Rs != Rd) {
2360 // r29 = sub(r29, Rs)
2361 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), SP)
2362 .addReg(SP)
2363 .addReg(Rs);
2364 }
2365 if (A > 8) {
2366 // Rd = and(Rd, #-A)
2367 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), Rd)
2368 .addReg(Rd)
2369 .addImm(-int64_t(A));
2370 if (Rs != Rd)
2371 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), SP)
2372 .addReg(SP)
2373 .addImm(-int64_t(A));
2374 }
2375 if (Rs == Rd) {
2376 // r29 = Rd
2377 BuildMI(MB, AI, DL, HII.get(TargetOpcode::COPY), SP)
2378 .addReg(Rd);
2379 }
2380 if (CF > 0) {
2381 // Rd = add(Rd, #CF)
2382 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_addi), Rd)
2383 .addReg(Rd)
2384 .addImm(CF);
2385 }
2386 }
2388 bool HexagonFrameLowering::needsAligna(const MachineFunction &MF) const {
2389 const MachineFrameInfo &MFI = MF.getFrameInfo();
2390 if (!MFI.hasVarSizedObjects())
2391 return false;
2392 unsigned MaxA = MFI.getMaxAlignment();
2393 if (MaxA <= getStackAlignment())
2394 return false;
2395 return true;
2396 }
2398 const MachineInstr *HexagonFrameLowering::getAlignaInstr(
2399 const MachineFunction &MF) const {
2400 for (auto &B : MF)
2401 for (auto &I : B)
2402 if (I.getOpcode() == Hexagon::PS_aligna)
2403 return &I;
2404 return nullptr;
2405 }
2407 /// Adds all callee-saved registers as implicit uses or defs to the
2408 /// instruction.
2409 void HexagonFrameLowering::addCalleeSaveRegistersAsImpOperand(MachineInstr *MI,
2410 const CSIVect &CSI, bool IsDef, bool IsKill) const {
2411 // Add the callee-saved registers as implicit uses or defs, per IsDef.
2412 for (auto &R : CSI)
2413 MI->addOperand(MachineOperand::CreateReg(R.getReg(), IsDef, true, IsKill));
2414 }
2416 /// Determine whether the callee-saved register saves and restores should
2417 /// be generated via inline code. If this function returns "true", inline
2418 /// code will be generated. If this function returns "false", additional
2419 /// checks are performed, which may still lead to the inline code.
2420 bool HexagonFrameLowering::shouldInlineCSR(const MachineFunction &MF,
2421 const CSIVect &CSI) const {
2422 if (MF.getInfo<HexagonMachineFunctionInfo>()->hasEHReturn())
2423 return true;
2424 if (!hasFP(MF))
2425 return true;
2426 if (!isOptSize(MF) && !isMinSize(MF))
2427 if (MF.getTarget().getOptLevel() > CodeGenOpt::Default)
2428 return true;
2430 // Check if CSI only has double registers, and if the registers form
2431 // a contiguous block starting from D8.
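// E.g.: {D8, D9, D10} is contiguous from D8, so the common save/restore
// routines remain an option; {D8, D10} leaves a gap and forces inline
// code.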
2432 BitVector Regs(Hexagon::NUM_TARGET_REGS);
2433 for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
2434 unsigned R = CSI[i].getReg();
2435 if (!Hexagon::DoubleRegsRegClass.contains(R))
2436 return true;
2437 Regs[R] = true;
2438 }
2439 int F = Regs.find_first();
2440 if (F != Hexagon::D8)
2441 return true;
2442 while (F >= 0) {
2443 int N = Regs.find_next(F);
2444 if (N >= 0 && N != F+1)
2445 return true;
2446 F = N;
2447 }
2449 return false;
2450 }
2452 bool HexagonFrameLowering::useSpillFunction(const MachineFunction &MF,
2453 const CSIVect &CSI) const {
2454 if (shouldInlineCSR(MF, CSI))
2455 return false;
2456 unsigned NumCSI = CSI.size();
2457 if (NumCSI <= 1)
2458 return false;
2460 unsigned Threshold = isOptSize(MF) ? SpillFuncThresholdOs
2461 : SpillFuncThreshold;
2462 return Threshold < NumCSI;
2463 }
2465 bool HexagonFrameLowering::useRestoreFunction(const MachineFunction &MF,
2466 const CSIVect &CSI) const {
2467 if (shouldInlineCSR(MF, CSI))
2468 return false;
2469 // The restore functions do a bit more than just restoring registers.
2470 // The non-returning versions will go back directly to the caller's
2471 // caller, others will clean up the stack frame in preparation for
2472 // a tail call. Using them can still save code size even if only one
2473 // register is getting restored. Make the decision based on -Oz:
2474 // using -Os will use inline restore for a single register.
2475 if (isMinSize(MF))
2476 return true;
2477 unsigned NumCSI = CSI.size();
2478 if (NumCSI <= 1)
2479 return false;
2481 unsigned Threshold = isOptSize(MF) ? SpillFuncThresholdOs-1
2482 : SpillFuncThreshold;
2483 return Threshold < NumCSI;
2484 }
2486 bool HexagonFrameLowering::mayOverflowFrameOffset(MachineFunction &MF) const {
2487 unsigned StackSize = MF.getFrameInfo().estimateStackSize(MF);
2488 auto &HST = MF.getSubtarget<HexagonSubtarget>();
2489 // A fairly simplistic guess as to whether a potential load/store to a
2490 // stack location could require an extra register.
2491 if (HST.useHVXOps() && StackSize > 256)
2492 return true;
2494 // Check if the function has store-immediate instructions that access
2495 // the stack. Since the offset field is not extendable, if the stack
2496 // size exceeds the offset limit (6 bits, shifted), the stores will
2497 // require a new base register.
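// E.g.: the check below models a word store-immediate as reaching byte
// offsets up to 63*4 = 252, so an estimated stack size of 300 with
// MinLS == 2 fails isUInt<6>(300 >> 2) and reports a possible overflow.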
2498 bool HasImmStack = false;
2499 unsigned MinLS = ~0u; // Log_2 of the memory access size.
2501 for (const MachineBasicBlock &B : MF) {
2502 for (const MachineInstr &MI : B) {
2503 unsigned LS = 0;
2504 switch (MI.getOpcode()) {
2505 case Hexagon::S4_storeirit_io:
2506 case Hexagon::S4_storeirif_io:
2507 case Hexagon::S4_storeiri_io:
2508 ++LS;
2509 LLVM_FALLTHROUGH;
2510 case Hexagon::S4_storeirht_io:
2511 case Hexagon::S4_storeirhf_io:
2512 case Hexagon::S4_storeirh_io:
2513 ++LS;
2514 LLVM_FALLTHROUGH;
2515 case Hexagon::S4_storeirbt_io:
2516 case Hexagon::S4_storeirbf_io:
2517 case Hexagon::S4_storeirb_io:
2518 if (MI.getOperand(0).isFI())
2519 HasImmStack = true;
2520 MinLS = std::min(MinLS, LS);
2521 break;
2522 }
2523 }
2524 }
2526 if (HasImmStack)
2527 return !isUInt<6>(StackSize >> MinLS);
2529 return false;
2530 }