//===- llvm/lib/Target/X86/X86CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//
15 #include "X86CallLowering.h"
16 #include "X86CallingConv.h"
17 #include "X86ISelLowering.h"
18 #include "X86InstrInfo.h"
19 #include "X86RegisterInfo.h"
20 #include "X86Subtarget.h"
21 #include "llvm/ADT/ArrayRef.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/CodeGen/Analysis.h"
24 #include "llvm/CodeGen/CallingConvLower.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/GlobalISel/Utils.h"
27 #include "llvm/CodeGen/LowLevelType.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFrameInfo.h"
30 #include "llvm/CodeGen/MachineFunction.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineMemOperand.h"
33 #include "llvm/CodeGen/MachineOperand.h"
34 #include "llvm/CodeGen/MachineRegisterInfo.h"
35 #include "llvm/CodeGen/TargetInstrInfo.h"
36 #include "llvm/CodeGen/TargetSubtargetInfo.h"
37 #include "llvm/CodeGen/ValueTypes.h"
38 #include "llvm/IR/Attributes.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/Function.h"
41 #include "llvm/IR/Value.h"
42 #include "llvm/MC/MCRegisterInfo.h"
43 #include "llvm/Support/LowLevelTypeImpl.h"
44 #include "llvm/Support/MachineValueType.h"
#include <cstdint>

using namespace llvm;

X86CallLowering::X86CallLowering(const X86TargetLowering &TLI)
    : CallLowering(&TLI) {}
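
// The helpers in the anonymous namespace below implement the CallLowering
// hooks for outgoing values (returns and call arguments): the assigner runs
// the calling-convention function and records the resulting stack size and
// XMM-register count, while the handler emits the copies and stores that
// realize each assignment.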
namespace {

struct X86OutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
private:
  uint64_t StackSize = 0;
  unsigned NumXMMRegs = 0;

public:
  uint64_t getStackSize() { return StackSize; }
  unsigned getNumXmmRegs() { return NumXMMRegs; }

  X86OutgoingValueAssigner(CCAssignFn *AssignFn_)
      : CallLowering::OutgoingValueAssigner(AssignFn_) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
    StackSize = State.getNextStackOffset();

    // Track how many XMM registers the calling convention has handed out so
    // far; lowerCall needs an upper bound on the SSE registers used when
    // forwarding varargs.
    static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2,
                                           X86::XMM3, X86::XMM4, X86::XMM5,
                                           X86::XMM6, X86::XMM7};
    NumXMMRegs = State.getFirstUnallocated(XMMArgRegs);

    return Res;
  }
};

struct X86OutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  X86OutgoingValueHandler(MachineIRBuilder &MIRBuilder,
                          MachineRegisterInfo &MRI, MachineInstrBuilder &MIB)
      : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB),
        DL(MIRBuilder.getMF().getDataLayout()),
        STI(MIRBuilder.getMF().getSubtarget<X86Subtarget>()) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    LLT p0 = LLT::pointer(0, DL.getPointerSizeInBits(0));
    LLT SType = LLT::scalar(DL.getPointerSizeInBits(0));
    auto SPReg =
        MIRBuilder.buildCopy(p0, STI.getRegisterInfo()->getStackRegister());

    auto OffsetReg = MIRBuilder.buildConstant(SType, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    Register ExtReg = extendRegister(ValVReg, VA);

    auto *MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
                                        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }

protected:
  MachineInstrBuilder &MIB;
  const DataLayout &DL;
  const X86Subtarget &STI;
};

} // end anonymous namespace
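
// Lower a return: build a floating RET, split the return value into
// CC-legal pieces, and let RetCC_X86 place each piece in its physical
// register before the RET is inserted.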
bool X86CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                  const Value *Val, ArrayRef<Register> VRegs,
                                  FunctionLoweringInfo &FLI) const {
  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
         "Return value without a vreg");
  auto MIB = MIRBuilder.buildInstrNoInsert(X86::RET).addImm(0);

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    MachineRegisterInfo &MRI = MF.getRegInfo();
    const DataLayout &DL = MF.getDataLayout();

    ArgInfo OrigRetInfo(VRegs, Val->getType(), 0);
    setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);

    SmallVector<ArgInfo, 4> SplitRetInfos;
    splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, F.getCallingConv());

    X86OutgoingValueAssigner Assigner(RetCC_X86);
    X86OutgoingValueHandler Handler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(Handler, Assigner, SplitRetInfos,
                                       MIRBuilder, F.getCallingConv(),
                                       F.isVarArg()))
      return false;
  }

  MIRBuilder.insertInstr(MIB);
  return true;
}
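
// Incoming values are handled symmetrically: X86IncomingValueHandler loads
// stack-passed values from fixed frame objects and copies register-passed
// values out of physical registers; the two subclasses differ only in how a
// used physical register is recorded (block live-in for formal arguments,
// implicit-def on the call instruction for call returns).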
namespace {

struct X86IncomingValueHandler : public CallLowering::IncomingValueHandler {
  X86IncomingValueHandler(MachineIRBuilder &MIRBuilder,
                          MachineRegisterInfo &MRI)
      : IncomingValueHandler(MIRBuilder, MRI),
        DL(MIRBuilder.getMF().getDataLayout()) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack passed arguments
    // are not.
    const bool IsImmutable = !Flags.isByVal();

    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

    return MIRBuilder
        .buildFrameIndex(LLT::pointer(0, DL.getPointerSizeInBits(0)), FI)
        .getReg(0);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto *MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy,
        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in) and a call instruction
  /// (it's an implicit-def of the call).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

protected:
  const DataLayout &DL;
};

struct FormalArgHandler : public X86IncomingValueHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : X86IncomingValueHandler(MIRBuilder, MRI) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct CallReturnHandler : public X86IncomingValueHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : X86IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

protected:
  MachineInstrBuilder &MIB;
};

} // end anonymous namespace
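
// Lower formal arguments: bail out on attributes and multi-register values
// that are not handled yet, split each argument into CC-legal pieces, and
// assign the pieces with CC_X86 while inserting at the start of the current
// block.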
bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                           const Function &F,
                                           ArrayRef<ArrayRef<Register>> VRegs,
                                           FunctionLoweringInfo &FLI) const {
  if (F.arg_empty())
    return true;

  // TODO: handle variadic function
  if (F.isVarArg())
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto DL = MF.getDataLayout();

  SmallVector<ArgInfo, 8> SplitArgs;

  unsigned Idx = 0;
  for (const auto &Arg : F.args()) {
    // TODO: handle not simple cases.
    if (Arg.hasAttribute(Attribute::ByVal) ||
        Arg.hasAttribute(Attribute::InReg) ||
        Arg.hasAttribute(Attribute::StructRet) ||
        Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest) || VRegs[Idx].size() > 1)
      return false;

    ArgInfo OrigArg(VRegs[Idx], Arg.getType(), Idx);
    setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
    ++Idx;
  }

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  X86OutgoingValueAssigner Assigner(CC_X86);
  FormalArgHandler Handler(MIRBuilder, MRI);
  if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder,
                                     F.getCallingConv(), F.isVarArg()))
    return false;

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);

  return true;
}
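
// Lower a call: bracket the sequence with the call-frame setup/destroy
// pseudos, marshal the arguments with CC_X86, emit the call (setting AL for
// vararg SysV calls), and copy any returned value back with RetCC_X86.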
bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  const X86RegisterInfo *TRI = STI.getRegisterInfo();

  // Handle only Linux C, X86_64_SysV calling conventions for now.
  if (!STI.isTargetLinux() || !(Info.CallConv == CallingConv::C ||
                                Info.CallConv == CallingConv::X86_64_SysV))
    return false;

  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto CallSeqStart = MIRBuilder.buildInstr(AdjStackDown);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  bool Is64Bit = STI.is64Bit();
  unsigned CallOpc = Info.Callee.isReg()
                         ? (Is64Bit ? X86::CALL64r : X86::CALL32r)
                         : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);

  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpc)
                 .add(Info.Callee)
                 .addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));

  SmallVector<ArgInfo, 8> SplitArgs;
  for (const auto &OrigArg : Info.OrigArgs) {
    // TODO: handle not simple cases.
    if (OrigArg.Flags[0].isByVal())
      return false;

    if (OrigArg.Regs.size() > 1)
      return false;

    splitToValueTypes(OrigArg, SplitArgs, DL, Info.CallConv);
  }

  // Do the actual argument marshalling.
  X86OutgoingValueAssigner Assigner(CC_X86);
  X86OutgoingValueHandler Handler(MIRBuilder, MRI, MIB);
  if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder,
                                     Info.CallConv, Info.IsVarArg))
    return false;

  bool IsFixed = Info.OrigArgs.empty() ? true : Info.OrigArgs.back().IsFixed;
  if (STI.is64Bit() && !IsFixed && !STI.isCallingConvWin64(Info.CallConv)) {
    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an upper bound on the number of SSE
    // registers used and is in the range 0 - 8 inclusive.

    MIRBuilder.buildInstr(X86::MOV8ri)
        .addDef(X86::AL)
        .addImm(Assigner.getNumXmmRegs());
    MIB.addUse(X86::AL, RegState::Implicit);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Info.Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
        0));

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (!Info.OrigRet.Ty->isVoidTy()) {
    if (Info.OrigRet.Regs.size() > 1)
      return false;

    SplitArgs.clear();
    SmallVector<Register, 8> NewRegs;

    splitToValueTypes(Info.OrigRet, SplitArgs, DL, Info.CallConv);

    X86OutgoingValueAssigner Assigner(RetCC_X86);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder,
                                       Info.CallConv, Info.IsVarArg))
      return false;

    if (!NewRegs.empty())
      MIRBuilder.buildMerge(Info.OrigRet.Regs[0], NewRegs);
  }

  CallSeqStart.addImm(Assigner.getStackSize())
      .addImm(0 /* see getFrameTotalSize */)
      .addImm(0 /* see getFrameAdjustment */);

  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  MIRBuilder.buildInstr(AdjStackUp)
      .addImm(Assigner.getStackSize())
      .addImm(0 /* NumBytesForCalleeToPop */);

  return true;
}