//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();

  // First step is to marshall all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CS.getFunctionType()->getNumParams();
  for (auto &Arg : CS.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }
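
  // As a rough illustration (vreg numbers are made up): for a call like
  //   call i32 @f(i64 %x)
  // the loop above produces one ArgInfo with Regs = {%0(s64)}, Ty = i64 and
  // IsFixed = true, ready for the target's argument assigner.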
  if (const Function *F = CS.getCalledFunction())
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, CS.getType(), ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CS);

  Info.KnownCallees =
      CS.getInstruction()->getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CS.getCallingConv();
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CS.isMustTailCall();
  Info.IsTailCall = CS.isTailCall() &&
                    isInTailCallPosition(CS, MIRBuilder.getMF().getTarget());
  Info.IsVarArg = CS.getFunctionType()->isVarArg();
  return lowerCall(MIRBuilder, Info);
}
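
// Translates the IR attributes of the parameter or return value at \p OpIdx
// into the ISD::ArgFlagsTy consumed by the calling-convention code. As a
// sketch: a parameter declared `zeroext i8` gets its ZExt flag set here, and
// a `byval` pointer additionally records the pointee size and alignment so
// the value can be copied onto the stack.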
template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Flags.setInAlloca();

  if (Flags.isByVal() || Flags.isInAlloca()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, alignment should be passed from FE. BE will guess if
    // this info is not there but there are cases it cannot get right.
    unsigned FrameAlign;
    if (FuncInfo.getParamAlignment(OpIdx - 2))
      FrameAlign = FuncInfo.getParamAlignment(OpIdx - 2);
    else
      FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
    Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Flags.setNest();
  Flags.setOrigAlign(DL.getABITypeAlignment(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallInst &FuncInfo) const;
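
// Packs several vregs holding pieces of an aggregate into a single vreg of
// the packed type using a chain of G_INSERTs. A rough sketch of the emitted
// MIR for two s64 pieces packed into an s128 (vreg names illustrative,
// offsets coming from computeValueLLTs):
//   %p0:_(s128) = G_IMPLICIT_DEF
//   %p1:_(s128) = G_INSERT %p0, %src0(s64), 0
//   %p2:_(s128) = G_INSERT %p1, %src1(s64), 64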
Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}
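
// The inverse of packRegs: splits one vreg into per-piece vregs using
// G_EXTRACT. A rough sketch for an s128 split into two s64 pieces (names
// illustrative):
//   %dst0:_(s64) = G_EXTRACT %src(s128), 0
//   %dst1:_(s64) = G_EXTRACT %src(s128), 64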
void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}
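
// handleAssignments works in two phases: first each ArgInfo is given a
// location by Handler.assignArg (splitting values the calling convention
// passes in several registers), then each resulting CCValAssign is
// materialized as register copies or stack accesses through the Handler.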
bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                          Args[i].Flags[0], CCInfo)) {
      if (!CurVT.isValid())
        return false;
      MVT NewVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), EVT(CurVT));

      // If we need to split the type over multiple regs, check it's a scenario
      // we currently support.
      unsigned NumParts = TLI->getNumRegistersForCallingConv(
          F.getContext(), F.getCallingConv(), CurVT);
      if (NumParts > 1) {
        if (CurVT.isVector())
          return false;
        // For now only handle exact splits.
        if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits())
          return false;
      }

      // For incoming arguments (return values), we could have values in
      // physregs (or memlocs) which we want to extract and copy to vregs.
      // During this, we might have to deal with the LLT being split across
      // multiple regs, so we have to record this information for later.
      //
      // If we have outgoing args, then we have the opposite case. We have a
      // vreg with an LLT which we want to assign to a physical location, and
      // we might have to record that the value has to be split later.
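      //
      // As a concrete sketch: an incoming s128 on AArch64 arrives as two s64
      // pieces (e.g. in X0/X1); each piece gets its own vreg here, and the
      // pieces are recombined with a G_MERGE_VALUES further down.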
      if (Handler.isIncomingArgumentHandler()) {
        if (NumParts == 1) {
          // Try to use the register type if we couldn't assign the VT.
          if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                                Args[i].Flags[0], CCInfo))
            return false;
        } else {
          // We're handling an incoming arg which is split over multiple regs.
          // E.g. returning an s128 on AArch64.
          ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
          Args[i].OrigRegs.push_back(Args[i].Regs[0]);
          Args[i].Regs.clear();
          Args[i].Flags.clear();
          LLT NewLLT = getLLTForMVT(NewVT);
          // For each split register, create and assign a vreg that will store
          // the incoming component of the larger value. These will later be
          // merged to form the final vreg.
          for (unsigned Part = 0; Part < NumParts; ++Part) {
            Register Reg =
                MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT);
            ISD::ArgFlagsTy Flags = OrigFlags;
            if (Part == 0) {
              Flags.setSplit();
            } else {
              Flags.setOrigAlign(1);
              if (Part == NumParts - 1)
                Flags.setSplitEnd();
            }
            Args[i].Regs.push_back(Reg);
            Args[i].Flags.push_back(Flags);
            if (Handler.assignArg(i + Part, NewVT, NewVT, CCValAssign::Full,
                                  Args[i], Args[i].Flags[Part], CCInfo)) {
              // Still couldn't assign this smaller part type for some reason.
              return false;
            }
          }
        }
      } else {
        // Handling an outgoing arg that might need to be split.
        if (NumParts < 2)
          return false; // Don't know how to deal with this type combination.

        // This type is passed via multiple registers in the calling convention.
        // We need to extract the individual parts.
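        // Roughly (names illustrative), an outgoing s128 becomes
        //   %lo:_(s64), %hi:_(s64) = G_UNMERGE_VALUES %val(s128)
        // and each part is then assigned its own location.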
        Register LargeReg = Args[i].Regs[0];
        LLT SmallTy = LLT::scalar(NewVT.getSizeInBits());
        auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg);
        assert(Unmerge->getNumOperands() == NumParts + 1);
        ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
        // We're going to replace the regs and flags with the split ones.
        Args[i].Regs.clear();
        Args[i].Flags.clear();
        for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
          ISD::ArgFlagsTy Flags = OrigFlags;
          if (PartIdx == 0) {
            Flags.setSplit();
          } else {
            Flags.setOrigAlign(1);
            if (PartIdx == NumParts - 1)
              Flags.setSplitEnd();
          }
          Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
          Args[i].Flags.push_back(Flags);
          if (Handler.assignArg(i + PartIdx, NewVT, NewVT, CCValAssign::Full,
                                Args[i], Args[i].Flags[PartIdx], CCInfo))
            return false;
        }
      }
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      continue;
    }

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    MVT OrigVT = MVT::getVT(Args[i].Ty);
    MVT VAVT = VA.getValVT();
    if (VA.isRegLoc()) {
      if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) {
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
          // Expected to be multiple regs for a single incoming arg.
          unsigned NumArgRegs = Args[i].Regs.size();
          if (NumArgRegs < 2)
            return false;

          assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
                 "Too many regs for number of args");
          for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
            // There should be Regs.size() ArgLocs per argument.
            VA = ArgLocs[j + Part];
            Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
          }
          j += NumArgRegs - 1;
          // Merge the split registers into the expected larger result vreg
          // of the original call.
          MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
          continue;
        }
        const LLT VATy(VAVT);
        Register NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
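        // Sketch: a <2 x s32> argument the CC promoted to <4 x s32> is split
        // back with G_UNMERGE_VALUES and only the low half is copied into
        // ArgReg; a widened scalar is simply truncated.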
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          const LLT OrigTy(OrigVT);
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts");
            return false;
          }
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          MIRBuilder.buildTrunc(ArgReg, {NewReg}).getReg(0);
        }
      } else if (!Handler.isIncomingArgumentHandler()) {
        assert((j + (Args[i].Regs.size() - 1)) < ArgLocs.size() &&
               "Too many regs for number of args");
        // This is an outgoing argument that might have been split.
        for (unsigned Part = 0; Part < Args[i].Regs.size(); ++Part) {
          // There should be Regs.size() ArgLocs per argument.
          VA = ArgLocs[j + Part];
          Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
        }
        j += Args[i].Regs.size() - 1;
      } else {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      // Don't currently support loading/storing a type that needs to be split
      // to the stack. Should be easy, just not implemented yet.
      if (Args[i].Regs.size() > 1) {
        LLVM_DEBUG(
            dbgs()
            << "Load/store a split arg to/from the stack not implemented yet");
        return false;
      }
      MVT VT = MVT::getVT(Args[i].Ty);
      unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
                                      : alignTo(VT.getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(ArgReg, StackAddr, Size, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}
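
// Checks that the given CCAssignFn can assign a location to every argument
// type, without emitting any instructions.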
bool CallLowering::analyzeArgInfo(CCState &CCState,
                                  SmallVectorImpl<ArgInfo> &Args,
                                  CCAssignFn &Fn) const {
  for (unsigned i = 0, e = Args.size(); i < e; ++i) {
    MVT VT = MVT::getVT(Args[i].Ty);
    if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) {
      // Bail out on anything we can't handle.
      LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString()
                        << " (arg number = " << i << "\n");
      return false;
    }
  }
  return true;
}
bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     CCAssignFn &CalleeAssignFn,
                                     CCAssignFn &CallerAssignFn) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext());
  if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFn))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext());
  if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFn))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then we are done.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}
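
// Widens ValReg to the assigned location's type according to the
// CCValAssign's LocInfo. As a sketch: a `zeroext i8` value living in an s8
// vreg is extended here with G_ZEXT to the s32 its location expects before
// being copied into the physical register.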
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits())
    return ValReg;
  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB->getOperand(0).getReg();
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}