//===- X86RegisterInfo.cpp - X86 Register Information ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool>
ForceStackAlign("force-align-stack",
                 cl::desc("Force align the stack to the minimum alignment"
                          " needed for the function."),
                 cl::init(false), cl::Hidden);
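// Because this is a cl::opt, the flag is also visible on the command line of
// tools that parse LLVM options (e.g. "llc -force-align-stack"); it defaults
// to off.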
X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();
  SlotSize = Is64Bit ? 8 : 4;
  StackPtr = Is64Bit ? X86::RSP : X86::ESP;
  FramePtr = Is64Bit ? X86::RBP : X86::EBP;
}
/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Unsupported by now, just quick fallback
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}
/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
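  // Note: the value returned here is only the low 3 bits of the hardware
  // encoding. For example X86::RAX and X86::R8 both map to N86::EAX (0) below;
  // the extra bit that distinguishes R8-R15 (the REX prefix bit) is emitted
  // separately by the instruction encoder.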
  switch (RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8:
  case X86::YMM0: case X86::YMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9:
  case X86::YMM1: case X86::YMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10:
  case X86::YMM2: case X86::YMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11:
  case X86::YMM3: case X86::YMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12:
  case X86::YMM4: case X86::YMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13:
  case X86::YMM5: case X86::YMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14:
  case X86::YMM6: case X86::YMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15:
  case X86::YMM7: case X86::YMM15: case X86::MM7:
    return 7;

  case X86::ES: return 0;
  case X86::CS: return 1;
  case X86::SS: return 2;
  case X86::DS: return 3;
  case X86::FS: return 4;
  case X86::GS: return 5;

  case X86::CR0: case X86::CR8 : case X86::DR0: return 0;
  case X86::CR1: case X86::CR9 : case X86::DR1: return 1;
  case X86::CR2: case X86::CR10: case X86::DR2: return 2;
  case X86::CR3: case X86::CR11: case X86::DR3: return 3;
  case X86::CR4: case X86::CR12: case X86::DR4: return 4;
  case X86::CR5: case X86::CR13: case X86::DR5: return 5;
  case X86::CR6: case X86::CR14: case X86::DR6: return 6;
  case X86::CR7: case X86::CR15: case X86::DR7: return 7;

  // Pseudo index registers are equivalent to a "none"
  // scaled index (See Intel Manual 2A, table 2-3)
  case X86::EIZ:
  case X86::RIZ:
    return 4;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case X86::sub_8bit:
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_8bit_hi:
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_16bit:
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  case X86::sub_32bit:
    if (B == &X86::GR32RegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_NOSPRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOSPRegClass)
        return &X86::GR64_NOSPRegClass;
      if (A->getSize() == 8)
        return getCommonSubClass(A, &X86::GR64_NOSPRegClass);
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  case X86::sub_ss:
    if (B == &X86::FR32RegClass)
      return A;
    break;
  case X86::sub_sd:
    if (B == &X86::FR64RegClass)
      return A;
    break;
  case X86::sub_xmm:
    if (B == &X86::VR128RegClass)
      return A;
    break;
  }
  return 0;
}
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  }
}
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}
const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP,  0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP,  0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX,   X86::RBP,   X86::RDI,   X86::RSI,
    X86::R12,   X86::R13,   X86::R14,   X86::R15,
    X86::XMM6,  X86::XMM7,  X86::XMM8,  X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);
  return Reserved;
}
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();

  return (DisableFramePointerElim(MF) ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit());
}
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                               F->hasFnAttr(Attribute::StackAlignment));

  // FIXME: Currently we don't support stack realignment for functions with
  //        variable-sized allocas.
  // FIXME: It's more complicated than this...
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  // If we've requested that we force align the stack do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}
bool X86RegisterInfo::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  if (Reg == FramePtr && hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}
int
X86RegisterInfo::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - TFI.getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      Offset += SlotSize;
    } else {
      unsigned Align = MFI->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}
static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}
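// Note: the two helpers above prefer the sign-extended 8-bit immediate forms
// (ADD/SUB ...ri8) whenever the adjustment fits in a signed byte, which gives
// a shorter encoding than the 32-bit immediate forms.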
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'
    // TODO: consider using push / pop instead of sub + store / add
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly.  To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
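      // For example, with a 16-byte StackAlign a 20-byte outgoing argument
      // area is rounded up to 32 bytes.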
      MachineInstr *New = 0;
      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
        New = BuildMI(MF, Old->getDebugLoc(),
                      TII.get(getSUBriOpcode(Is64Bit, Amount)),
                      StackPtr)
          .addReg(StackPtr)
          .addImm(Amount);
      } else {
        assert(Old->getOpcode() == getCallFrameDestroyOpcode());

        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;

        if (Amount) {
          unsigned Opc = getADDriOpcode(Is64Bit, Amount);
          New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
            .addReg(StackPtr)
            .addImm(Amount);
        }
      }

      if (New) {
        // The EFLAGS implicit def is dead.
        New->getOperand(3).setIsDead();

        // Replace the pseudo instruction with a new instruction.
        MBB.insert(I, New);
      }
    }
  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back.  We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
      MachineInstr *Old = I;
      MachineInstr *New =
        BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
          .addReg(StackPtr)
          .addImm(CalleeAmt);

      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}
void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, RegScavenger *RS) const{
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference.  Replace the
  // FrameIndex with base register with EBP.  Add an offset to the offset.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI.getOffsetOfLocalArea();
  } else
    FIOffset = getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = FIOffset + (int)(MI.getOperand(i + 3).getImm());
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}
void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           (-1U*SlotSize)+TailCallReturnAddrDelta, true);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
  }
}
/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub ?
    getSUBriOpcode(Is64Bit, Offset) :
    getADDriOpcode(Is64Bit, Offset);
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    Offset -= ThisVal;
  }
}
/// mergeSPUpdatesUp - Merge two stack-manipulating instructions upper iterator.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}
/// mergeSPUpdatesDown - Merge two stack-manipulating instructions lower
/// iterator.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = llvm::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}
/// mergeSPUpdates - Checks the instruction before/after the passed
/// instruction. If it is an ADD/SUB instruction it is deleted and the
/// stack adjustment is returned as a positive value for ADD and a negative for
/// SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr){
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                MCSymbol *Label,
                                                unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate amount of bytes used for return address storing.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  // FIXME: This is a dirty hack. The code itself is pretty messy right now.
  // It should be rewritten from scratch and generalized sometime.

  // Determine maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    //        another bug. I.e., one where we generate a prolog like this:
    //
    //     pushl  %ebp
    //     movl   %esp, %ebp
    //     pushl  %ebp
    //     pushl  %esi
    //      ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(Label, CSDst, CSSrc));
  }
}
static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}
/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() ||
                          !Fn->doesNotThrow() || UnwindTablesMandatory;
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  DebugLoc DL;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out.  Otherwise just make sure we have some alignment - we'll
  // go with the minimum SlotSize.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, and use up to 128 bytes of stack space, don't have a frame
  // pointer, calls, or dynamic alloca then we do not need to adjust the
  // stack pointer (we fit in the Red Zone).
  if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
      !needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
      !MFI->adjustsStack() &&                      // No calls.
      !IsWin64) {                                  // Win64 has no Red Zone
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  } else if (IsWin64) {
    // We need to always allocate 32 bytes as register spill area.
    // FIXME: We might reuse these 32 bytes for leaf functions.
    StackSize += 32;
    MFI->setStackSize(StackSize);
  }

  // Insert stack pointer adjustment for later moving of return addr.  Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the callers.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getSUBriOpcode(Is64Bit, -TailCallReturnAddrDelta)),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended
  //
  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  uint64_t NumBytes = 0;
  int stackGrowth = -TD->getPointerSize();

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel);

      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);

    if (needsFrameMoves) {
      // Mark effective beginning of when frame pointer becomes valid.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel);

      // Define the current CFA to use the EBP/RBP register.
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign stack.
    if (needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
                StackPtr).addReg(StackPtr).addImm(-MaxAlign);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark callee-saved push instruction.
      MCSymbol *Label = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);

      // Define the current CFA rule to use the provided offset.
      unsigned Ptr = StackSize ?
        MachineLocation::VirtualFP : StackPtr;
      MachineLocation SPDst(Ptr);
      MachineLocation SPSrc(Ptr, StackOffset);
      Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      StackOffset += stackGrowth;
    }
  }

  DL = MBB.findDebugLoc(MBBI);

  // If there is an SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

  // If there is an ADD32ri or SUB32ri of ESP immediately after this
  // instruction, merge the two instructions.
  mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

  // Adjust stack pointer: ESP -= numbytes.
  //
  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack.  Windows uses __chkstk and cygwin/mingw
  // uses __alloca.  __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go.  The 64-bit version of
  // __chkstk is only responsible for probing the stack.  The 64-bit prologue is
  // responsible for adjusting the stack pointer.  Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
  if (NumBytes >= 4096 &&
     (Subtarget->isTargetCygMing() || Subtarget->isTargetWin32())) {
    // Check whether EAX is livein for this function.
    bool isEAXAlive = isEAXLiveIn(MF);

    const char *StackProbeSymbol =
      Subtarget->isTargetWindows() ? "_chkstk" : "_alloca";
    unsigned CallOp = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    if (!isEAXAlive) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes);
      BuildMI(MBB, MBBI, DL, TII.get(CallOp))
        .addExternalSymbol(StackProbeSymbol)
        .addReg(StackPtr,    RegState::Define | RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
    } else {
      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill);

      // Allocate NumBytes-4 bytes on stack. We'll also use 4 already
      // allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes - 4);
      BuildMI(MBB, MBBI, DL, TII.get(CallOp))
        .addExternalSymbol(StackProbeSymbol)
        .addReg(StackPtr,    RegState::Define | RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes >= 4096 && Subtarget->isTargetWin64()) {
    // Sanity check that EAX is not livein for this function.  It should
    // not be, so throw an assert.
    assert(!isEAXLiveIn(MF) && "EAX is livein in the Win64 case!");

    // Handle the 64-bit Windows ABI case where we need to call __chkstk.
    // Function prologue is responsible for adjusting the stack pointer.
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
      .addImm(NumBytes);
    BuildMI(MBB, MBBI, DL, TII.get(X86::WINCALL64pcrel32))
      .addExternalSymbol("__chkstk")
      .addReg(StackPtr, RegState::Define | RegState::Implicit);
    emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
  } else if (NumBytes)
    emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);

  if ((NumBytes || PushedRegs) && needsFrameMoves) {
    // Mark end of stack pointer adjustment.
    MCSymbol *Label = MMI.getContext().CreateTempSymbol();
    BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize + stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      }
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
  }
}
void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break;  // These are ok
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign  = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out.  Otherwise just make sure we have some alignment - we'll
  // go with the minimum.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else
      MaxAlign = MaxAlign ? MaxAlign : 4;
  }

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;

    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset esp to point to the last callee-saved
  // slot before popping them off! Same applies for the case, when stack was
  // realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here, because stack pointer was realigned. We need to
    // deallocate local frame back.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI =
        addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                     FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else {
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
  }

  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj-MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }

    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                     ? X86::TAILJMPd : X86::TAILJMPd64)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}
unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP     // Should have dwarf #16.
                 : X86::EIP;    // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void
X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const {
  // Calculate amount of bytes used for return address storing
  int stackGrowth = (Is64Bit ? -8 : -4);

  // Initial state of the frame pointer is esp+stackGrowth.
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add return address to move list
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}
unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
#include "X86GenRegisterInfo.inc"

namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86RegisterInfo *X86RI = TM->getRegisterInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = X86RI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that vector register
      // will be spilled and thus require dynamic stack realignment.
      for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
           RegNum < RI.getLastVirtReg(); ++RegNum)
        if (RI.getRegClass(RegNum)->getAlignment() > StackAlignment) {
          FuncInfo->setReserveFP(true);
          return true;
        }

      // Nothing to do.
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }