//===-- XCoreFrameLowering.cpp - Frame info for XCore Target --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains XCore frame information that doesn't fit anywhere else
// cleanly...
//
//===----------------------------------------------------------------------===//

#include "XCoreFrameLowering.h"
#include "XCore.h"
#include "XCoreInstrInfo.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm> // std::sort

using namespace llvm;

static const unsigned FramePtr = XCore::R10;
static const int MaxImmU16 = (1<<16) - 1;
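// XCore stack instructions such as EXTSP, LDAWSP, STWSP and RETSP have a short
// form taking a 6-bit unsigned immediate and a long form taking a 16-bit
// unsigned immediate, so a single SP adjustment is capped at MaxImmU16 words;
// the helpers below pick between the *_u6/_ru6 and *_lu6/_lru6 opcode variants.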

// helper functions. FIXME: Eliminate.
static inline bool isImmU6(unsigned val) {
  return val < (1 << 6);
}

static inline bool isImmU16(unsigned val) {
  return val < (1 << 16);
}

// Helper structure with compare function for handling stack slots.
namespace {
struct StackSlotInfo {
  int FI;
  int Offset;
  unsigned Reg;
  StackSlotInfo(int f, int o, int r) : FI(f), Offset(o), Reg(r) {}
};
} // end anonymous namespace

static bool CompareSSIOffset(const StackSlotInfo& a, const StackSlotInfo& b) {
  return a.Offset < b.Offset;
}

static void EmitDefCfaRegister(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MBBI,
                               const DebugLoc &dl, const TargetInstrInfo &TII,
                               MachineFunction &MF, unsigned DRegNum) {
  unsigned CFIIndex = MF.addFrameInst(
      MCCFIInstruction::createDefCfaRegister(nullptr, DRegNum));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

static void EmitDefCfaOffset(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MBBI,
                             const DebugLoc &dl, const TargetInstrInfo &TII,
                             int Offset) {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex =
      MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, -Offset));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

static void EmitCfiOffset(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
                          const TargetInstrInfo &TII, unsigned DRegNum,
                          int Offset) {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex = MF.addFrameInst(
      MCCFIInstruction::createOffset(nullptr, DRegNum, Offset));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

/// The SP register is moved in steps of 'MaxImmU16' towards the bottom of the
/// frame. During these steps, it may be necessary to spill registers.
/// IfNeededExtSP emits the necessary EXTSP instructions to move the SP only
/// as far as to make 'OffsetFromTop' reachable using an STWSP_lru6.
/// \param OffsetFromTop the spill offset from the top of the frame.
/// \param [in,out] Adjusted the current SP offset from the top of the frame.
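/// For example (illustrative numbers): with FrameSize = 70000 words,
/// Adjusted = 0 and a first spill at OffsetFromTop = 1, a single
/// EXTSP_lu6 65535 is emitted and Adjusted becomes 65535; the final call with
/// OffsetFromTop == FrameSize then emits EXTSP_lu6 4465 to complete the
/// adjustment.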
static void IfNeededExtSP(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
                          const TargetInstrInfo &TII, int OffsetFromTop,
                          int &Adjusted, int FrameSize, bool emitFrameMoves) {
  while (OffsetFromTop > Adjusted) {
    assert(Adjusted < FrameSize && "OffsetFromTop is beyond FrameSize");
    int remaining = FrameSize - Adjusted;
    int OpImm = (remaining > MaxImmU16) ? MaxImmU16 : remaining;
    int Opcode = isImmU6(OpImm) ? XCore::EXTSP_u6 : XCore::EXTSP_lu6;
    BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(OpImm);
    Adjusted += OpImm;
    if (emitFrameMoves)
      EmitDefCfaOffset(MBB, MBBI, dl, TII, Adjusted*4);
  }
}

/// The SP register is moved in steps of 'MaxImmU16' towards the top of the
/// frame. During these steps, it may be necessary to re-load registers.
/// IfNeededLDAWSP emits the necessary LDAWSP instructions to move the SP only
/// as far as to make 'OffsetFromTop' reachable using an LDAWSP_lru6.
/// \param OffsetFromTop the spill offset from the top of the frame.
/// \param [in,out] RemainingAdj the current SP offset from the top of the
/// frame.
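/// For example (illustrative numbers): restoring a spill at OffsetFromTop = 1
/// with RemainingAdj = 70000 first emits LDAWSP_lru6 65535, leaving
/// RemainingAdj = 4465 so the subsequent LDWSP reload offset fits in a 16-bit
/// immediate.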
static void IfNeededLDAWSP(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
                           const TargetInstrInfo &TII, int OffsetFromTop,
                           int &RemainingAdj) {
  while (OffsetFromTop < RemainingAdj - MaxImmU16) {
    assert(RemainingAdj && "OffsetFromTop is beyond FrameSize");
    int OpImm = (RemainingAdj > MaxImmU16) ? MaxImmU16 : RemainingAdj;
    int Opcode = isImmU6(OpImm) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
    BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(OpImm);
    RemainingAdj -= OpImm;
  }
}

/// Creates an ordered list of registers that are spilled
/// during the emitPrologue/emitEpilogue.
/// Registers are ordered according to their frame offset.
/// As offsets are negative, the largest offsets will be first.
static void GetSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
                         MachineFrameInfo &MFI, XCoreFunctionInfo *XFI,
                         bool fetchLR, bool fetchFP) {
  if (fetchLR) {
    int Offset = MFI.getObjectOffset(XFI->getLRSpillSlot());
    SpillList.push_back(StackSlotInfo(XFI->getLRSpillSlot(),
                                      Offset,
                                      XCore::LR));
  }
  if (fetchFP) {
    int Offset = MFI.getObjectOffset(XFI->getFPSpillSlot());
    SpillList.push_back(StackSlotInfo(XFI->getFPSpillSlot(),
                                      Offset,
                                      FramePtr));
  }
  llvm::sort(SpillList, CompareSSIOffset);
}

/// Creates an ordered list of EH info register 'spills'.
/// These slots are only used by the unwinder and calls to llvm.eh.return().
/// Registers are ordered according to their frame offset.
/// As offsets are negative, the largest offsets will be first.
static void GetEHSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
                           MachineFrameInfo &MFI, XCoreFunctionInfo *XFI,
                           const Constant *PersonalityFn,
                           const TargetLowering *TL) {
  assert(XFI->hasEHSpillSlot() && "There are no EH register spill slots");
  const int *EHSlot = XFI->getEHSpillSlot();
  SpillList.push_back(
      StackSlotInfo(EHSlot[0], MFI.getObjectOffset(EHSlot[0]),
                    TL->getExceptionPointerRegister(PersonalityFn)));
  SpillList.push_back(
      StackSlotInfo(EHSlot[1], MFI.getObjectOffset(EHSlot[1]),
                    TL->getExceptionSelectorRegister(PersonalityFn)));
  llvm::sort(SpillList, CompareSSIOffset);
}

static MachineMemOperand *getFrameIndexMMO(MachineBasicBlock &MBB,
                                           int FrameIndex,
                                           MachineMemOperand::Flags flags) {
  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FrameIndex), flags,
      MFI.getObjectSize(FrameIndex), MFI.getObjectAlignment(FrameIndex));
  return MMO;
}

/// Restore clobbered registers with their spill slot value.
/// The SP will be adjusted at the same time, thus the SpillList must be ordered
/// with the largest (negative) offsets first.
static void RestoreSpillList(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MBBI,
                             const DebugLoc &dl, const TargetInstrInfo &TII,
                             int &RemainingAdj,
                             SmallVectorImpl<StackSlotInfo> &SpillList) {
  for (unsigned i = 0, e = SpillList.size(); i != e; ++i) {
    assert(SpillList[i].Offset % 4 == 0 && "Misaligned stack offset");
    assert(SpillList[i].Offset <= 0 && "Unexpected positive stack offset");
    int OffsetFromTop = - SpillList[i].Offset/4;
    IfNeededLDAWSP(MBB, MBBI, dl, TII, OffsetFromTop, RemainingAdj);
    int Offset = RemainingAdj - OffsetFromTop;
    int Opcode = isImmU6(Offset) ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6;
    BuildMI(MBB, MBBI, dl, TII.get(Opcode), SpillList[i].Reg)
        .addImm(Offset)
        .addMemOperand(getFrameIndexMMO(MBB, SpillList[i].FI,
                                        MachineMemOperand::MOLoad));
  }
}

//===----------------------------------------------------------------------===//
// XCoreFrameLowering:
//===----------------------------------------------------------------------===//

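// The XCore stack grows down and is kept 4-byte aligned, with the local area
// starting at offset 0, as encoded in the TargetFrameLowering constructor
// arguments below.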
XCoreFrameLowering::XCoreFrameLowering(const XCoreSubtarget &sti)
  : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 4, 0) {
  // Do nothing
}

bool XCoreFrameLowering::hasFP(const MachineFunction &MF) const {
  return MF.getTarget().Options.DisableFramePointerElim(MF) ||
         MF.getFrameInfo().hasVarSizedObjects();
}

void XCoreFrameLowering::emitPrologue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = &MF.getMMI();
  const MCRegisterInfo *MRI = MMI->getContext().getRegisterInfo();
  const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc dl;

  if (MFI.getMaxAlignment() > getStackAlignment())
    report_fatal_error("emitPrologue unsupported alignment: "
                       + Twine(MFI.getMaxAlignment()));

  const AttributeList &PAL = MF.getFunction().getAttributes();
  if (PAL.hasAttrSomewhere(Attribute::Nest))
    BuildMI(MBB, MBBI, dl, TII.get(XCore::LDWSP_ru6), XCore::R11).addImm(0);
    // FIX: Needs addMemOperand() but can't use getFixedStack() or getStack().

  // Work out frame sizes.
  // We will adjust the SP in stages towards the final FrameSize.
  assert(MFI.getStackSize()%4 == 0 && "Misaligned frame size");
  const int FrameSize = MFI.getStackSize() / 4;
  int Adjusted = 0;
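  // FrameSize and Adjusted are measured in words; the CFI directives emitted
  // below take byte offsets, hence the *4 scaling.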

  bool saveLR = XFI->hasLRSpillSlot();
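  // ENTSP both saves LR and extends the stack, so when the LR spill slot sits
  // at offset 0 and there is a frame to allocate, it takes the place of a
  // separate store of LR.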
  bool UseENTSP = saveLR && FrameSize
                  && (MFI.getObjectOffset(XFI->getLRSpillSlot()) == 0);
  if (UseENTSP)
    saveLR = false;
  bool FP = hasFP(MF);
  bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(MF);

  if (UseENTSP) {
    // Allocate space on the stack at the same time as saving LR.
    Adjusted = (FrameSize > MaxImmU16) ? MaxImmU16 : FrameSize;
    int Opcode = isImmU6(Adjusted) ? XCore::ENTSP_u6 : XCore::ENTSP_lu6;
    MBB.addLiveIn(XCore::LR);
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode));
    MIB.addImm(Adjusted);
    MIB->addRegisterKilled(XCore::LR, MF.getSubtarget().getRegisterInfo(),
                           true);
    if (emitFrameMoves) {
      EmitDefCfaOffset(MBB, MBBI, dl, TII, Adjusted*4);
      unsigned DRegNum = MRI->getDwarfRegNum(XCore::LR, true);
      EmitCfiOffset(MBB, MBBI, dl, TII, DRegNum, 0);
    }
  }

  // If necessary, save LR and FP to the stack, as we EXTSP.
  SmallVector<StackSlotInfo,2> SpillList;
  GetSpillList(SpillList, MFI, XFI, saveLR, FP);
  // We want the nearest (negative) offsets first, so reverse list.
  std::reverse(SpillList.begin(), SpillList.end());
  for (unsigned i = 0, e = SpillList.size(); i != e; ++i) {
    assert(SpillList[i].Offset % 4 == 0 && "Misaligned stack offset");
    assert(SpillList[i].Offset <= 0 && "Unexpected positive stack offset");
    int OffsetFromTop = - SpillList[i].Offset/4;
    IfNeededExtSP(MBB, MBBI, dl, TII, OffsetFromTop, Adjusted, FrameSize,
                  emitFrameMoves);
    int Offset = Adjusted - OffsetFromTop;
    int Opcode = isImmU6(Offset) ? XCore::STWSP_ru6 : XCore::STWSP_lru6;
    MBB.addLiveIn(SpillList[i].Reg);
    BuildMI(MBB, MBBI, dl, TII.get(Opcode))
        .addReg(SpillList[i].Reg, RegState::Kill)
        .addImm(Offset)
        .addMemOperand(getFrameIndexMMO(MBB, SpillList[i].FI,
                                        MachineMemOperand::MOStore));
    if (emitFrameMoves) {
      unsigned DRegNum = MRI->getDwarfRegNum(SpillList[i].Reg, true);
      EmitCfiOffset(MBB, MBBI, dl, TII, DRegNum, SpillList[i].Offset);
    }
  }

  // Complete any remaining stack adjustment.
  IfNeededExtSP(MBB, MBBI, dl, TII, FrameSize, Adjusted, FrameSize,
                emitFrameMoves);
  assert(Adjusted == FrameSize && "IfNeededExtSP has not completed adjustment");

  if (FP) {
    // Set the FP from the SP.
    BuildMI(MBB, MBBI, dl, TII.get(XCore::LDAWSP_ru6), FramePtr).addImm(0);
    if (emitFrameMoves)
      EmitDefCfaRegister(MBB, MBBI, dl, TII, MF,
                         MRI->getDwarfRegNum(FramePtr, true));
  }

  if (emitFrameMoves) {
    // Frame moves for callee saved.
    for (const auto &SpillLabel : XFI->getSpillLabels()) {
      MachineBasicBlock::iterator Pos = SpillLabel.first;
      ++Pos;
      const CalleeSavedInfo &CSI = SpillLabel.second;
      int Offset = MFI.getObjectOffset(CSI.getFrameIdx());
      unsigned DRegNum = MRI->getDwarfRegNum(CSI.getReg(), true);
      EmitCfiOffset(MBB, Pos, dl, TII, DRegNum, Offset);
    }
    if (XFI->hasEHSpillSlot()) {
      // The unwinder requires stack slot & CFI offsets for the exception info.
      // We do not save/spill these registers.
      const Function *Fn = &MF.getFunction();
      const Constant *PersonalityFn =
          Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
      SmallVector<StackSlotInfo, 2> SpillList;
      GetEHSpillList(SpillList, MFI, XFI, PersonalityFn,
                     MF.getSubtarget().getTargetLowering());
      assert(SpillList.size() == 2 && "Unexpected SpillList size");
      EmitCfiOffset(MBB, MBBI, dl, TII,
                    MRI->getDwarfRegNum(SpillList[0].Reg, true),
                    SpillList[0].Offset);
      EmitCfiOffset(MBB, MBBI, dl, TII,
                    MRI->getDwarfRegNum(SpillList[1].Reg, true),
                    SpillList[1].Offset);
    }
  }
}

void XCoreFrameLowering::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  DebugLoc dl = MBBI->getDebugLoc();
  unsigned RetOpcode = MBBI->getOpcode();

  // Work out frame sizes.
  // We will adjust the SP in stages towards the final FrameSize.
  int RemainingAdj = MFI.getStackSize();
  assert(RemainingAdj%4 == 0 && "Misaligned frame size");
  RemainingAdj /= 4;

  if (RetOpcode == XCore::EH_RETURN) {
    // 'Restore' the exception info the unwinder has placed into the stack
    // slots.
    const Function *Fn = &MF.getFunction();
    const Constant *PersonalityFn =
        Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
    SmallVector<StackSlotInfo, 2> SpillList;
    GetEHSpillList(SpillList, MFI, XFI, PersonalityFn,
                   MF.getSubtarget().getTargetLowering());
    RestoreSpillList(MBB, MBBI, dl, TII, RemainingAdj, SpillList);

    // Return to the landing pad.
    Register EhStackReg = MBBI->getOperand(0).getReg();
    Register EhHandlerReg = MBBI->getOperand(1).getReg();
    BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r)).addReg(EhStackReg);
    BuildMI(MBB, MBBI, dl, TII.get(XCore::BAU_1r)).addReg(EhHandlerReg);
    MBB.erase(MBBI); // Erase the previous return instruction.
    return;
  }
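
  // RETSP reloads LR and deallocates the stack as part of the return, so when
  // the LR spill slot is at offset 0 and there is an adjustment to undo, the
  // LR restore is folded into the return instruction below.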
  bool restoreLR = XFI->hasLRSpillSlot();
  bool UseRETSP = restoreLR && RemainingAdj
                  && (MFI.getObjectOffset(XFI->getLRSpillSlot()) == 0);
  if (UseRETSP)
    restoreLR = false;
  bool FP = hasFP(MF);

  if (FP) // Restore the stack pointer.
    BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r)).addReg(FramePtr);

  // If necessary, restore LR and FP from the stack, as we LDAWSP.
  SmallVector<StackSlotInfo,2> SpillList;
  GetSpillList(SpillList, MFI, XFI, restoreLR, FP);
  RestoreSpillList(MBB, MBBI, dl, TII, RemainingAdj, SpillList);

  if (RemainingAdj) {
    // Complete all but one of the remaining stack adjustments.
    IfNeededLDAWSP(MBB, MBBI, dl, TII, 0, RemainingAdj);
    if (UseRETSP) {
      // Fold the epilogue into the return instruction.
      assert(RetOpcode == XCore::RETSP_u6
             || RetOpcode == XCore::RETSP_lu6);
      int Opcode = isImmU6(RemainingAdj) ? XCore::RETSP_u6 : XCore::RETSP_lu6;
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode))
                                    .addImm(RemainingAdj);
      for (unsigned i = 3, e = MBBI->getNumOperands(); i < e; ++i)
        MIB->addOperand(MBBI->getOperand(i)); // copy any variadic operands
      MBB.erase(MBBI); // Erase the previous return instruction.
    } else {
      int Opcode = isImmU6(RemainingAdj) ? XCore::LDAWSP_ru6 :
                                           XCore::LDAWSP_lru6;
      BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(RemainingAdj);
      // Don't erase the return instruction.
    }
  } // else Don't erase the return instruction.
}

bool XCoreFrameLowering::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI,
                          const std::vector<CalleeSavedInfo> &CSI,
                          const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return true;

  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  XCoreFunctionInfo *XFI = MF->getInfo<XCoreFunctionInfo>();
  bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(*MF);

  DebugLoc DL;
  if (MI != MBB.end() && !MI->isDebugInstr())
    DL = MI->getDebugLoc();

  for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
       it != CSI.end(); ++it) {
    unsigned Reg = it->getReg();
    assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
           "LR & FP are always handled in emitPrologue");

    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, true, it->getFrameIdx(), RC, TRI);
    if (emitFrameMoves) {
      auto Store = MI;
      --Store;
      XFI->getSpillLabels().push_back(std::make_pair(Store, *it));
    }
  }
  return true;
}

bool XCoreFrameLowering::
restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            std::vector<CalleeSavedInfo> &CSI,
                            const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  bool AtStart = MI == MBB.begin();
  MachineBasicBlock::iterator BeforeI = MI;
  if (!AtStart)
    --BeforeI;
  for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
       it != CSI.end(); ++it) {
    unsigned Reg = it->getReg();
    assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
           "LR & FP are always handled in emitEpilogue");

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, it->getFrameIdx(), RC, TRI);
    assert(MI != MBB.begin() &&
           "loadRegFromStackSlot didn't insert any code!");
    // Insert in reverse order. loadRegFromStackSlot can insert multiple
    // instructions.
    if (AtStart)
      MI = MBB.begin();
    else {
      MI = BeforeI;
      ++MI;
    }
  }
  return true;
}

// This function eliminates ADJCALLSTACKDOWN,
// ADJCALLSTACKUP pseudo instructions.
MachineBasicBlock::iterator XCoreFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
  if (!hasReservedCallFrame(MF)) {
    // Turn the adjcallstackdown instruction into 'extsp <amt>' and the
    // adjcallstackup instruction into 'ldaw sp, sp[<amt>]'
    MachineInstr &Old = *I;
    uint64_t Amount = Old.getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      assert(Amount%4 == 0);
      Amount /= 4;

      bool isU6 = isImmU6(Amount);
      if (!isU6 && !isImmU16(Amount)) {
        // FIX could emit multiple instructions in this case.
#ifndef NDEBUG
        errs() << "eliminateCallFramePseudoInstr size too big: "
               << Amount << "\n";
#endif
        llvm_unreachable(nullptr);
      }

      MachineInstr *New;
      if (Old.getOpcode() == XCore::ADJCALLSTACKDOWN) {
        int Opcode = isU6 ? XCore::EXTSP_u6 : XCore::EXTSP_lu6;
        New = BuildMI(MF, Old.getDebugLoc(), TII.get(Opcode)).addImm(Amount);
      } else {
        assert(Old.getOpcode() == XCore::ADJCALLSTACKUP);
        int Opcode = isU6 ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
        New = BuildMI(MF, Old.getDebugLoc(), TII.get(Opcode), XCore::SP)
                  .addImm(Amount);
      }

      // Replace the pseudo instruction with a new instruction...
      MBB.insert(I, New);
    }
  }

  return MBB.erase(I);
}

void XCoreFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                              BitVector &SavedRegs,
                                              RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  bool LRUsed = MRI.isPhysRegModified(XCore::LR);

  if (!LRUsed && !MF.getFunction().isVarArg() &&
      MF.getFrameInfo().estimateStackSize(MF))
    // If we need to extend the stack it is more efficient to use entsp / retsp.
    // We force the LR to be saved so these instructions are used.
    LRUsed = true;

  if (MF.callsUnwindInit() || MF.callsEHReturn()) {
    // The unwinder expects to find spill slots for the exception info regs R0
    // & R1. These are used during llvm.eh.return() to 'restore' the exception
    // info. N.B. we do not spill or restore R0, R1 during normal operation.
    XFI->createEHSpillSlot(MF);
    // As we will have a stack, we force the LR to be saved.
    LRUsed = true;
  }

  if (LRUsed) {
    // We will handle the LR in the prologue/epilogue
    // and allocate space on the stack ourselves.
    SavedRegs.reset(XCore::LR);
    XFI->createLRSpillSlot(MF);
  }

  if (hasFP(MF))
    // A callee-save register is used to hold the FP.
    // This needs saving in the prologue and restoring in the epilogue.
    XFI->createFPSpillSlot(MF);
}

void XCoreFrameLowering::
processFunctionBeforeFrameFinalized(MachineFunction &MF,
                                    RegScavenger *RS) const {
  assert(RS && "requiresRegisterScavenging failed");
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterClass &RC = XCore::GRRegsRegClass;
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  // Reserve slots close to SP or frame pointer for Scavenging spills.
  // When using SP for small frames, we don't need any scratch registers.
  // When using SP for large frames, we may need 2 scratch registers.
  // When using FP, for large or small frames, we may need 1 scratch register.
  unsigned Size = TRI.getSpillSize(RC);
  unsigned Align = TRI.getSpillAlignment(RC);
  if (XFI->isLargeFrame(MF) || hasFP(MF))
    RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
  if (XFI->isLargeFrame(MF) && !hasFP(MF))
    RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
}