//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

static cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Unsupported for now; just a quick fallback.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}

/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;
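
  // Note: R8-R15 reuse the same 3-bit field encodings as the legacy
  // registers; the REX prefix bit that selects the extended bank is emitted
  // separately by the instruction encoder.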
  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8:
  case X86::YMM0: case X86::YMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9:
  case X86::YMM1: case X86::YMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10:
  case X86::YMM2: case X86::YMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11:
  case X86::YMM3: case X86::YMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12:
  case X86::YMM4: case X86::YMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13:
  case X86::YMM5: case X86::YMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14:
  case X86::YMM6: case X86::YMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15:
  case X86::YMM7: case X86::YMM15: case X86::MM7:
    return 7;

  case X86::ES: return 0;
  case X86::CS: return 1;
  case X86::SS: return 2;
  case X86::DS: return 3;
  case X86::FS: return 4;
  case X86::GS: return 5;

  case X86::CR0: case X86::CR8 : case X86::DR0: return 0;
  case X86::CR1: case X86::CR9 : case X86::DR1: return 1;
  case X86::CR2: case X86::CR10: case X86::DR2: return 2;
  case X86::CR3: case X86::CR11: case X86::DR3: return 3;
  case X86::CR4: case X86::CR12: case X86::DR4: return 4;
  case X86::CR5: case X86::CR13: case X86::DR5: return 5;
  case X86::CR6: case X86::CR14: case X86::DR6: return 6;
  case X86::CR7: case X86::CR15: case X86::DR7: return 7;

  // Pseudo index registers are equivalent to a "none"
  // scaled index (See Intel Manual 2A, table 2-3).
  case X86::EIZ:
  case X86::RIZ:
    return 4;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case X86::sub_8bit:
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_8bit_hi:
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_16bit:
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    }
    break;
  case X86::sub_32bit:
    if (B == &X86::GR32RegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_NOSPRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOSPRegClass)
        return &X86::GR64_NOSPRegClass;
      if (A->getSize() == 8)
        return getCommonSubClass(A, &X86::GR64_NOSPRegClass);
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  case X86::sub_ss:
    if (B == &X86::FR32RegClass)
      return A;
    break;
  case X86::sub_sd:
    if (B == &X86::FR64RegClass)
      return A;
    break;
  case X86::sub_xmm:
    if (B == &X86::VR128RegClass)
      return A;
    break;
  }
  return 0;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return NULL;
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  // The GHC calling convention pins all registers; nothing is callee-saved.
  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15,
    X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);
  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();

  return (DisableFramePointerElim(MF) ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit());
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                              F->hasFnAttr(Attribute::StackAlignment));

  // FIXME: Currently we don't support stack realignment for functions with
  // variable-sized allocas.
  // FIXME: It's more complicated than this...
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in the presence of dynamic allocas is not supported");

  // If we've requested that we force-align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  if (Reg == FramePtr && hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

int
X86RegisterInfo::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - TFI.getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      Offset += SlotSize;
    } else {
      unsigned Align = MFI->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      Align = 0;
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}
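
// The following helpers pick the sub/add-immediate opcode whose immediate
// field fits Imm; the ri8 forms take a sign-extended 8-bit immediate and give
// a shorter encoding.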
static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into an 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
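      // (E.g., with StackAlign == 16, an Amount of 20 rounds up to 32.)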

      MachineInstr *New = 0;
      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
        New = BuildMI(MF, Old->getDebugLoc(),
                      TII.get(getSUBriOpcode(Is64Bit, Amount)),
                      StackPtr)
          .addReg(StackPtr)
          .addImm(Amount);
      } else {
        assert(Old->getOpcode() == getCallFrameDestroyOpcode());

        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;

        if (Amount) {
          unsigned Opc = getADDriOpcode(Is64Bit, Amount);
          New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
            .addReg(StackPtr)
            .addImm(Amount);
        }
      }

      if (New) {
        // The EFLAGS implicit def is dead.
        New->getOperand(3).setIsDead();

        // Replace the pseudo instruction with a new instruction.
        MBB.insert(I, New);
      }
    }
  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
      MachineInstr *Old = I;
      MachineInstr *New =
        BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
                StackPtr)
        .addReg(StackPtr)
        .addImm(CalleeAmt);

      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register. Add an offset to the offset.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI.getOffsetOfLocalArea();
  } else
    FIOffset = getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = FIOffset + (int)(MI.getOperand(i + 3).getImm());
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}

void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           (-1U*SlotSize)+TailCallReturnAddrDelta, true);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    FrameIdx = 0;
  }
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub ?
    getSUBriOpcode(Is64Bit, Offset) :
    getADDriOpcode(Is64Bit, Offset);
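  // The ri forms take at most a signed 32-bit immediate, so adjustments of
  // 2GiB or more must be split across multiple instructions.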
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    Offset -= ThisVal;
  }
}

/// mergeSPUpdatesUp - If the instruction immediately above the iterator is an
/// ADD or SUB of the stack pointer, fold its adjustment into *NumBytes and
/// erase it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - If the instruction immediately below the iterator is
/// an ADD or SUB of the stack pointer, fold its adjustment into *NumBytes and
/// erase it.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = llvm::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Checks the instruction before/after the passed
/// instruction. If it is an ADD/SUB instruction it is deleted, and the stack
/// adjustment is returned as a positive value for ADD and a negative one
/// for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}

void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                MCSymbol *Label,
                                                unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate the number of bytes used for storing the return address.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  // FIXME: This is a dirty hack. The code itself is a pretty mess right now.
  // It should be rewritten from scratch and generalized someday.

  // Determine maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;
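    // (The register at MaxOffset, the lowest object offset, thus lands at
    // saveAreaOffset from the virtual frame pointer, with the others at
    // successive slots above it.)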

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //     pushl  %ebp
    //     movl   %esp, %ebp
    //     pushl  %ebp
    //     pushl  %esi
    //     ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(Label, CSDst, CSSrc));
  }
}

static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() ||
                         !Fn->doesNotThrow() || UnwindTablesMandatory;
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  DebugLoc DL;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info; we need to know the ABI stack alignment as well in case we have a
  // call out. Otherwise just make sure we have some alignment -- we'll go
  // with the minimum SlotSize.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64, the Red Zone is not disabled, we are a leaf function,
  // we use up to 128 bytes of stack space, and we don't have a frame pointer,
  // calls, or dynamic alloca, then we do not need to adjust the stack pointer
  // (we fit in the Red Zone).
  if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
      !needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
      !MFI->adjustsStack() &&                      // No calls.
      !IsWin64) {                                  // Win64 has no Red Zone
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
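    // E.g., a leaf function with 120 bytes of locals fits entirely in the
    // Red Zone, so StackSize below collapses to just the callee-saved area.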
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  } else if (IsWin64) {
    // We need to always allocate 32 bytes as register spill area.
    // FIXME: We might reuse these 32 bytes for leaf functions.
    StackSize += 32;
    MFI->setStackSize(StackSize);
  }

  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getSUBriOpcode(Is64Bit, -TailCallReturnAddrDelta)),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  uint64_t NumBytes = 0;
  int stackGrowth = -TD->getPointerSize();

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel);

      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);

    if (needsFrameMoves) {
      // Mark effective beginning of when frame pointer becomes valid.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel);

      // Define the current CFA to use the EBP/RBP register.
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign stack
    if (needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
                StackPtr).addReg(StackPtr).addImm(-MaxAlign);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark callee-saved push instruction.
      MCSymbol *Label = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);

      // Define the current CFA rule to use the provided offset.
      unsigned Ptr = StackSize ?
        MachineLocation::VirtualFP : StackPtr;
      MachineLocation SPDst(Ptr);
      MachineLocation SPSrc(Ptr, StackOffset);
      Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      StackOffset += stackGrowth;
    }
  }

  DL = MBB.findDebugLoc(MBBI);

  // If there is a SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

  // If there is an ADD32ri or SUB32ri of ESP immediately after this
  // instruction, merge the two instructions.
  mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

  // Adjust stack pointer: ESP -= numbytes.
  //
  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
  // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go. The 64-bit version of
  // __chkstk is only responsible for probing the stack. The 64-bit prologue is
  // responsible for adjusting the stack pointer. Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
  if (NumBytes >= 4096 &&
      (Subtarget->isTargetCygMing() || Subtarget->isTargetWin32())) {
    // Check whether EAX is livein for this function.
    bool isEAXAlive = isEAXLiveIn(MF);

    const char *StackProbeSymbol =
      Subtarget->isTargetWindows() ? "_chkstk" : "_alloca";
    unsigned CallOp = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    if (!isEAXAlive) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes);
      BuildMI(MBB, MBBI, DL, TII.get(CallOp))
        .addExternalSymbol(StackProbeSymbol)
        .addReg(StackPtr, RegState::Define | RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
    } else {
      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill);

      // Allocate NumBytes-4 bytes on stack. We'll also use 4 already
      // allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes - 4);
      BuildMI(MBB, MBBI, DL, TII.get(CallOp))
        .addExternalSymbol(StackProbeSymbol)
        .addReg(StackPtr, RegState::Define | RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes >= 4096 && Subtarget->isTargetWin64()) {
    // Sanity check that EAX is not livein for this function. It should
    // not be, so throw an assert.
    assert(!isEAXLiveIn(MF) && "EAX is livein in the Win64 case!");

    // Handle the 64-bit Windows ABI case where we need to call __chkstk.
    // Function prologue is responsible for adjusting the stack pointer.
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
      .addImm(NumBytes);
    BuildMI(MBB, MBBI, DL, TII.get(X86::WINCALL64pcrel32))
      .addExternalSymbol("__chkstk")
      .addReg(StackPtr, RegState::Define | RegState::Implicit);
    emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
  } else if (NumBytes)
    emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);

  if ((NumBytes || PushedRegs) && needsFrameMoves) {
    // Mark end of stack pointer adjustment.
    MCSymbol *Label = MMI.getContext().CreateTempSymbol();
    BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize + stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      }
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
  }
}

void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break; // These are ok
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign  = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info; we need to know the ABI stack alignment as well in case we have a
  // call out. Otherwise just make sure we have some alignment -- we'll go
  // with the minimum.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else
      MaxAlign = MaxAlign ? MaxAlign : 4;
  }

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;

    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last
  // callee-saved slot before popping them off! The same applies when the
  // stack was realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here because the stack pointer was realigned; we
    // need to deallocate the local frame explicitly.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI =
        addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                     FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else {
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
  }

  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }

    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                     ? X86::TAILJMPd : X86::TAILJMPd64)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP     // Should have dwarf #16.
                 : X86::EIP;    // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void
X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const {
  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = (Is64Bit ? -8 : -4);

  // Initial state of the frame pointer is esp+stackGrowth.
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add return address to move list.
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

namespace llvm {
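// getX86SubSuperRegister - Return the sub- or super-register of Reg with the
// given size (VT); for i8, High selects the high-byte register (e.g. AH
// rather than AL).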
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"

namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86RegisterInfo *X86RI = TM->getRegisterInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = X86RI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that a vector
      // register will be spilled and thus require dynamic stack realignment.
      for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
           RegNum < RI.getLastVirtReg(); ++RegNum)
        if (RI.getRegClass(RegNum)->getAlignment() > StackAlignment) {
          FuncInfo->setReserveFP(true);
          return true;
        }

      // Nothing to do.
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };
}

char MSAH::ID = 0;

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }