//===----- X86CallFrameOptimization.cpp - Optimize x86 call sequences ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a pass that optimizes call sequences on x86.
// Currently, it converts movs of function parameters onto the stack into
// pushes. This is beneficial for two main reasons:
// 1) The push instruction encoding is much smaller than a stack-ptr-based mov.
// 2) It is possible to push memory arguments directly. So, if the
//    transformation is performed pre-reg-alloc, it can help relieve
//    register pressure.
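//
//    For illustration, a hypothetical 32-bit call site such as
//      movl $42, 4(%esp)
//      movl %eax, (%esp)
//      call foo
//    would become
//      pushl $42
//      pushl %eax
//      call foo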
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86FrameLowering.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "x86-cf-opt"

static cl::opt<bool>
    NoX86CFOpt("no-x86-call-frame-opt",
               cl::desc("Avoid optimizing x86 call frames for size"),
               cl::init(false), cl::Hidden);

namespace llvm {

void initializeX86CallFrameOptimizationPass(PassRegistry &);

} // end namespace llvm

namespace {

class X86CallFrameOptimization : public MachineFunctionPass {
public:
  X86CallFrameOptimization() : MachineFunctionPass(ID) {
    initializeX86CallFrameOptimizationPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;

private:
  // Information we know about a particular call site
  struct CallContext {
    CallContext() : FrameSetup(nullptr), ArgStoreVector(4, nullptr) {}

    // Iterator referring to the frame setup instruction
    MachineBasicBlock::iterator FrameSetup;

    // Actual call instruction
    MachineInstr *Call = nullptr;

    // A copy of the stack pointer
    MachineInstr *SPCopy = nullptr;

    // The total displacement of all passed parameters
    int64_t ExpectedDist = 0;

    // The sequence of storing instructions used to pass the parameters
    SmallVector<MachineInstr *, 4> ArgStoreVector;

    // True if this call site has no stack parameters
    bool NoStackParams = false;

    // True if this call site can use push instructions
    bool UsePush = false;
  };

  typedef SmallVector<CallContext, 8> ContextVector;
  bool isLegal(MachineFunction &MF);

  bool isProfitable(MachineFunction &MF, ContextVector &CallSeqMap);

  void collectCallInfo(MachineFunction &MF, MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator I, CallContext &Context);

  void adjustCallSequence(MachineFunction &MF, const CallContext &Context);

  MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,
                                   unsigned Reg);

  enum InstClassification { Convert, Skip, Exit };

  InstClassification classifyInstruction(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MI,
                                         const X86RegisterInfo &RegInfo,
                                         DenseSet<unsigned int> &UsedRegs);

  StringRef getPassName() const override { return "X86 Optimize Call Frame"; }

  const X86InstrInfo *TII;
  const X86FrameLowering *TFL;
  const X86Subtarget *STI;
  MachineRegisterInfo *MRI;
  unsigned SlotSize;
  unsigned Log2SlotSize;
};

} // end anonymous namespace

char X86CallFrameOptimization::ID = 0;

INITIALIZE_PASS(X86CallFrameOptimization, DEBUG_TYPE,
                "X86 Call Frame Optimization", false, false)

// This checks whether the transformation is legal.
// Also returns false in cases where it's potentially legal, but
// we don't even want to try.
bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
  if (NoX86CFOpt.getValue())
    return false;

  // We can't encode multiple DW_CFA_GNU_args_size or DW_CFA_def_cfa_offset
  // in the compact unwind encoding that Darwin uses. So, bail if there
  // is a danger of that being generated.
  if (STI->isTargetDarwin() &&
      (!MF.getLandingPads().empty() ||
       (MF.getFunction().needsUnwindTableEntry() && !TFL->hasFP(MF))))
    return false;

  // It is not valid to change the stack pointer outside the prolog/epilog
  // on 64-bit Windows.
  if (STI->isTargetWin64())
    return false;

  // You would expect straight-line code between call-frame setup and
  // call-frame destroy. You would be wrong. There are circumstances (e.g.
  // CMOV_GR8 expansion of a select that feeds a function call!) where we can
  // end up with the setup and the destroy in different basic blocks.
  // This is bad, and breaks SP adjustment.
  // So, check that all of the frames in the function are closed inside
  // the same block, and, for good measure, that there are no nested frames.
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  for (MachineBasicBlock &BB : MF) {
    bool InsideFrameSequence = false;
    for (MachineInstr &MI : BB) {
      if (MI.getOpcode() == FrameSetupOpcode) {
        if (InsideFrameSequence)
          return false;
        InsideFrameSequence = true;
      } else if (MI.getOpcode() == FrameDestroyOpcode) {
        if (!InsideFrameSequence)
          return false;
        InsideFrameSequence = false;
      }
    }

    if (InsideFrameSequence)
      return false;
  }

  return true;
}

// Check whether this transformation is profitable for a particular
// function - in terms of code size.
bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
                                            ContextVector &CallSeqVector) {
  // This transformation is always a win when we do not expect to have
  // a reserved call frame. Under other circumstances, it may be either
  // a win or a loss, and requires a heuristic.
  bool CannotReserveFrame = MF.getFrameInfo().hasVarSizedObjects();
  if (CannotReserveFrame)
    return true;

  unsigned StackAlign = TFL->getStackAlignment();

  int64_t Advantage = 0;
  for (auto CC : CallSeqVector) {
    // Call sites where no parameters are passed on the stack
    // do not affect the cost, since there needs to be no
    // stack adjustment.
    if (CC.NoStackParams)
      continue;

    if (!CC.UsePush) {
      // If we don't use pushes for a particular call site,
      // we pay for not having a reserved call frame with an
      // additional sub/add esp pair. The cost is ~3 bytes per instruction,
      // depending on the size of the constant.
      // TODO: Callee-pop functions should have a smaller penalty, because
      // an add is needed even with a reserved call frame.
      Advantage -= 6;
    } else {
      // We can use pushes. First, account for the fixed costs.
      // We'll need an add after the call.
      Advantage -= 3;
      // If we have to realign the stack, we'll also need a sub before.
      if (CC.ExpectedDist % StackAlign)
        Advantage -= 3;
      // Now, for each push, we save ~3 bytes. For small constants, we
      // actually save more (up to 5 bytes), but 3 is a good approximation.
      Advantage += (CC.ExpectedDist >> Log2SlotSize) * 3;
    }
  }
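
  // For example (hypothetical numbers): a call site passing two slot-sized
  // arguments on an already-aligned stack pays 3 bytes for the add after the
  // call and saves ~3 bytes per converted push, for a net Advantage of
  // -3 + 2 * 3 = +3.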
  return Advantage >= 0;
}

bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget<X86Subtarget>();
  TII = STI->getInstrInfo();
  TFL = STI->getFrameLowering();
  MRI = &MF.getRegInfo();

  const X86RegisterInfo &RegInfo =
      *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());
  SlotSize = RegInfo.getSlotSize();
  assert(isPowerOf2_32(SlotSize) && "Expect power of 2 stack slot size");
  Log2SlotSize = Log2_32(SlotSize);

  if (skipFunction(MF.getFunction()) || !isLegal(MF))
    return false;

  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();

  bool Changed = false;

  ContextVector CallSeqVector;

  for (auto &MBB : MF)
    for (auto &MI : MBB)
      if (MI.getOpcode() == FrameSetupOpcode) {
        CallContext Context;
        collectCallInfo(MF, MBB, MI, Context);
        CallSeqVector.push_back(Context);
      }

  if (!isProfitable(MF, CallSeqVector))
    return false;

  for (auto CC : CallSeqVector) {
    if (CC.UsePush) {
      adjustCallSequence(MF, CC);
      Changed = true;
    }
  }

  return Changed;
}

X86CallFrameOptimization::InstClassification
X86CallFrameOptimization::classifyInstruction(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const X86RegisterInfo &RegInfo, DenseSet<unsigned int> &UsedRegs) {
  if (MI == MBB.end())
    return Exit;

  // The instructions we actually care about are movs onto the stack or special
  // cases of constant-stores to stack
  switch (MI->getOpcode()) {
  case X86::AND16mi8:
  case X86::AND32mi8:
  case X86::AND64mi8: {
    MachineOperand ImmOp = MI->getOperand(X86::AddrNumOperands);
    return ImmOp.getImm() == 0 ? Convert : Exit;
  }
  case X86::OR16mi8:
  case X86::OR32mi8:
  case X86::OR64mi8: {
    MachineOperand ImmOp = MI->getOperand(X86::AddrNumOperands);
    return ImmOp.getImm() == -1 ? Convert : Exit;
  }
  case X86::MOV32mi:
  case X86::MOV32mr:
  case X86::MOV64mi32:
  case X86::MOV64mr:
    return Convert;
  }

  // Not all calling conventions have only stack MOVs between the stack
  // adjust and the call.
  //
  // We want to tolerate other instructions, to cover more cases.
  // For example:
  // a) PCrel calls, where we expect an additional COPY of the basereg.
  // b) Passing frame-index addresses.
  // c) Calling conventions that have inreg parameters. These generate
  //    both copies and movs into registers.
  // To avoid creating lots of special cases, allow any instruction
  // that does not write into memory, does not def or use the stack
  // pointer, and does not def any register that was used by a preceding
  // push.
  // (Reading from memory is allowed, even if referenced through a
  // frame index, since these will get adjusted properly in PEI)
  //
  // The reason for the last condition is that the pushes can't replace
  // the movs in place, because the order must be reversed.
  // So if we have a MOV32mr that uses EDX, then an instruction that defs
  // EDX, and then the call, after the transformation the push will use
  // the modified version of EDX, and not the original one.
  // Since we are still in SSA form at this point, we only need to
  // make sure we don't clobber any *physical* registers that were
  // used by an earlier mov that will become a push.
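  //
  // For illustration, a hypothetical sequence that must be rejected:
  //   movl %edx, 4(%esp)  <- will become "pushl %edx"
  //   movl $0, %edx       <- defs EDX, so it is classified as Exit; the
  //   call foo               reordered push would otherwise read the new EDX.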

  if (MI->isCall() || MI->mayStore())
    return Exit;

  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg())
      continue;
    unsigned int Reg = MO.getReg();
    if (!RegInfo.isPhysicalRegister(Reg))
      continue;
    if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
      return Exit;
    if (MO.isDef()) {
      for (unsigned int U : UsedRegs)
        if (RegInfo.regsOverlap(Reg, U))
          return Exit;
    }
  }

  return Skip;
}

void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
                                               MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator I,
                                               CallContext &Context) {
  // Check that this particular call sequence is amenable to the
  // transformation.
  const X86RegisterInfo &RegInfo =
      *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());

  // We expect to enter this at the beginning of a call sequence
  assert(I->getOpcode() == TII->getCallFrameSetupOpcode());
  MachineBasicBlock::iterator FrameSetup = I++;
  Context.FrameSetup = FrameSetup;

  // How much do we adjust the stack? This puts an upper bound on
  // the number of parameters actually passed on it.
  unsigned int MaxAdjust = TII->getFrameSize(*FrameSetup) >> Log2SlotSize;

  // A zero adjustment means no stack parameters
  if (!MaxAdjust) {
    Context.NoStackParams = true;
    return;
  }

  // Skip over DEBUG_VALUE.
  // For globals in PIC mode, we can have some LEAs here. Skip them as well.
  // TODO: Extend this to something that covers more cases.
  while (I->getOpcode() == X86::LEA32r || I->isDebugInstr())
    ++I;

  unsigned StackPtr = RegInfo.getStackRegister();
  auto StackPtrCopyInst = MBB.end();
  // SelectionDAG (but not FastISel) inserts a copy of ESP into a virtual
  // register. If it's there, use that virtual register as stack pointer
  // instead. Also, we need to locate this instruction so that we can later
  // safely ignore it while doing the conservative processing of the call
  // chain. The COPY can be located anywhere between the call-frame setup
  // instruction and its first use. We use the call instruction as a boundary
  // because it is usually cheaper to check if an instruction is a call than
  // checking if an instruction uses a register.
  for (auto J = I; !J->isCall(); ++J)
    if (J->isCopy() && J->getOperand(0).isReg() && J->getOperand(1).isReg() &&
        J->getOperand(1).getReg() == StackPtr) {
      StackPtrCopyInst = J;
      Context.SPCopy = &*J++;
      StackPtr = Context.SPCopy->getOperand(0).getReg();
      break;
    }

  // Scan the call setup sequence for the pattern we're looking for.
  // We only handle a simple case - a sequence of store instructions that
  // push a sequence of stack-slot-aligned values onto the stack, with
  // no gaps between them.
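  // For example, a hypothetical two-argument sequence that matches:
  //   movl $7, (%esp)
  //   movl %eax, 4(%esp)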
  if (MaxAdjust > 4)
    Context.ArgStoreVector.resize(MaxAdjust, nullptr);

  DenseSet<unsigned int> UsedRegs;

  for (InstClassification Classification = Skip; Classification != Exit; ++I) {
    // If this is the COPY of the stack pointer, it's ok to ignore.
    if (I == StackPtrCopyInst)
      continue;

    Classification = classifyInstruction(MBB, I, RegInfo, UsedRegs);
    if (Classification != Convert)
      continue;

    // We know the instruction has a supported store opcode.
    // We only want movs of the form:
    // mov imm/reg, k(%StackPtr)
    // If we run into something else, bail.
    // Note that AddrBaseReg may, counter to its name, not be a register,
    // but rather a frame index.
    // TODO: Support the fi case. This should probably work now that we
    // have the infrastructure to track the stack pointer within a call
    // sequence.
    if (!I->getOperand(X86::AddrBaseReg).isReg() ||
        (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) ||
        !I->getOperand(X86::AddrScaleAmt).isImm() ||
        (I->getOperand(X86::AddrScaleAmt).getImm() != 1) ||
        (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) ||
        (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) ||
        !I->getOperand(X86::AddrDisp).isImm())
      return;

    int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm();
    assert(StackDisp >= 0 &&
           "Negative stack displacement when passing parameters");

    // We really don't want to consider the unaligned case.
    if (StackDisp & (SlotSize - 1))
      return;
    StackDisp >>= Log2SlotSize;

    assert((size_t)StackDisp < Context.ArgStoreVector.size() &&
           "Function call has more parameters than the stack is adjusted for.");

    // If the same stack slot is being filled twice, something's fishy.
    if (Context.ArgStoreVector[StackDisp] != nullptr)
      return;
    Context.ArgStoreVector[StackDisp] = &*I;

    for (const MachineOperand &MO : I->uses()) {
      if (!MO.isReg())
        continue;
      unsigned int Reg = MO.getReg();
      if (RegInfo.isPhysicalRegister(Reg))
        UsedRegs.insert(Reg);
    }
  }

  // We now expect the end of the sequence. If we stopped early,
  // or reached the end of the block without finding a call, bail.
  if (I == MBB.end() || !I->isCall())
    return;

  Context.Call = &*I;
  if ((++I)->getOpcode() != TII->getCallFrameDestroyOpcode())
    return;

  // Now, go through the vector, and see that we don't have any gaps,
  // but only a series of storing instructions.
  auto MMI = Context.ArgStoreVector.begin(), MME = Context.ArgStoreVector.end();
  for (; MMI != MME; ++MMI, Context.ExpectedDist += SlotSize)
    if (*MMI == nullptr)
      break;

  // If the call had no parameters, do nothing
  if (MMI == Context.ArgStoreVector.begin())
    return;

  // We are either at the last parameter, or a gap.
  // Make sure it's not a gap.
  for (; MMI != MME; ++MMI)
    if (*MMI != nullptr)
      return;

  Context.UsePush = true;
}

void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
                                                  const CallContext &Context) {
  // Ok, we can in fact do the transformation for this call.
  // Do not remove the FrameSetup instruction, but adjust the parameters.
  // PEI will end up finalizing the handling of this.
  MachineBasicBlock::iterator FrameSetup = Context.FrameSetup;
  MachineBasicBlock &MBB = *(FrameSetup->getParent());
  TII->setFrameAdjustment(*FrameSetup, Context.ExpectedDist);

  DebugLoc DL = FrameSetup->getDebugLoc();
  bool Is64Bit = STI->is64Bit();
  // Now, iterate through the vector in reverse order, and replace the stores
  // to the stack with pushes. MOVmi/MOVmr doesn't have any defs, so no need
  // to replace uses.
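  // For example, stores to (%esp), 4(%esp) and 8(%esp) are replaced by pushes
  // in the opposite order: 8(%esp) first, (%esp) last (illustrative).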
  for (int Idx = (Context.ExpectedDist >> Log2SlotSize) - 1; Idx >= 0; --Idx) {
    MachineBasicBlock::iterator Store = *Context.ArgStoreVector[Idx];
    MachineOperand PushOp = Store->getOperand(X86::AddrNumOperands);
    MachineBasicBlock::iterator Push = nullptr;
    unsigned PushOpcode;
    switch (Store->getOpcode()) {
    default:
      llvm_unreachable("Unexpected Opcode!");
    case X86::AND16mi8:
    case X86::AND32mi8:
    case X86::AND64mi8:
    case X86::OR16mi8:
    case X86::OR32mi8:
    case X86::OR64mi8:
    case X86::MOV32mi:
    case X86::MOV64mi32:
      PushOpcode = Is64Bit ? X86::PUSH64i32 : X86::PUSHi32;
      // If the operand is a small (8-bit) immediate, we can use a
      // PUSH instruction with a shorter encoding.
      // Note that isImm() may fail even though this is a MOVmi, because
      // the operand can also be a symbol.
      if (PushOp.isImm()) {
        int64_t Val = PushOp.getImm();
        if (isInt<8>(Val))
          PushOpcode = Is64Bit ? X86::PUSH64i8 : X86::PUSH32i8;
      }
      Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode)).add(PushOp);
      break;
    case X86::MOV32mr:
    case X86::MOV64mr: {
      unsigned int Reg = PushOp.getReg();

      // If storing a 32-bit vreg on 64-bit targets, extend to a 64-bit vreg
      // in preparation for the PUSH64. The upper 32 bits can be undef.
      if (Is64Bit && Store->getOpcode() == X86::MOV32mr) {
        unsigned UndefReg = MRI->createVirtualRegister(&X86::GR64RegClass);
        Reg = MRI->createVirtualRegister(&X86::GR64RegClass);
        BuildMI(MBB, Context.Call, DL, TII->get(X86::IMPLICIT_DEF), UndefReg);
        BuildMI(MBB, Context.Call, DL, TII->get(X86::INSERT_SUBREG), Reg)
            .addReg(UndefReg)
            .add(PushOp)
            .addImm(X86::sub_32bit);
      }

      // If PUSHrmm is not slow on this target, try to fold the source of the
      // push into the instruction.
      bool SlowPUSHrmm = STI->isAtom() || STI->isSLM();

      // Check that this is legal to fold. Right now, we're extremely
      // conservative about that.
      MachineInstr *DefMov = nullptr;
      if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {
        PushOpcode = Is64Bit ? X86::PUSH64rmm : X86::PUSH32rmm;
        Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode));

        unsigned NumOps = DefMov->getDesc().getNumOperands();
        for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
          Push->addOperand(DefMov->getOperand(i));

        DefMov->eraseFromParent();
      } else {
        PushOpcode = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
        Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
                   .addReg(Reg);
      }
      break;
    }
    }

    // For debugging, when using SP-based CFA, we need to adjust the CFA
    // offset after each push.
    // TODO: This is needed only if we require precise CFA.
    if (!TFL->hasFP(MF))
      TFL->BuildCFI(
          MBB, std::next(Push), DL,
          MCCFIInstruction::createAdjustCfaOffset(nullptr, SlotSize));

    MBB.erase(Store);
  }

  // The stack-pointer copy is no longer used in the call sequences.
  // There should not be any other users, but we can't commit to that, so:
  if (Context.SPCopy && MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
    Context.SPCopy->eraseFromParent();

  // Once we've done this, we need to make sure PEI doesn't assume a reserved
  // call frame.
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setHasPushSequences(true);
}

MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
    MachineBasicBlock::iterator FrameSetup, unsigned Reg) {
  // Do an extremely restricted form of load folding.
  // ISel will often create patterns like:
  //   movl    4(%edi), %eax
  //   movl    8(%edi), %ecx
  //   movl    12(%edi), %edx
  //   movl    %edx, 8(%esp)
  //   movl    %ecx, 4(%esp)
  //   movl    %eax, (%esp)
  //   call
  // Get rid of those with prejudice.
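  //
  // After folding, the loads feed the pushes directly; the example above
  // would become (illustrative):
  //   pushl   12(%edi)
  //   pushl   8(%edi)
  //   pushl   4(%edi)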
  if (!TargetRegisterInfo::isVirtualRegister(Reg))
    return nullptr;

  // Make sure this is the only use of Reg.
  if (!MRI->hasOneNonDBGUse(Reg))
    return nullptr;

  MachineInstr &DefMI = *MRI->getVRegDef(Reg);

  // Make sure the def is a MOV from memory.
  // If the def is in another block, give up.
  if ((DefMI.getOpcode() != X86::MOV32rm &&
       DefMI.getOpcode() != X86::MOV64rm) ||
      DefMI.getParent() != FrameSetup->getParent())
    return nullptr;

  // Make sure we don't have any instructions between DefMI and the
  // push that make folding the load illegal.
  for (MachineBasicBlock::iterator I = DefMI; I != FrameSetup; ++I)
    if (I->isLoadFoldBarrier())
      return nullptr;

  return &DefMI;
}

*llvm::createX86CallFrameOptimization() {
635 return new X86CallFrameOptimization();