//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "call-lowering"
27 void CallLowering::anchor() {}
29 bool CallLowering::lowerCall(
30 MachineIRBuilder
&MIRBuilder
, ImmutableCallSite CS
, unsigned ResReg
,
31 ArrayRef
<unsigned> ArgRegs
, std::function
<unsigned()> GetCalleeReg
) const {
32 auto &DL
= CS
.getParent()->getParent()->getParent()->getDataLayout();
34 // First step is to marshall all the function's parameters into the correct
35 // physregs and memory locations. Gather the sequence of argument types that
36 // we'll pass to the assigner function.
37 SmallVector
<ArgInfo
, 8> OrigArgs
;
39 unsigned NumFixedArgs
= CS
.getFunctionType()->getNumParams();
40 for (auto &Arg
: CS
.args()) {
41 ArgInfo OrigArg
{ArgRegs
[i
], Arg
->getType(), ISD::ArgFlagsTy
{},
43 setArgFlags(OrigArg
, i
+ AttributeList::FirstArgIndex
, DL
, CS
);
44 // We don't currently support swifterror or swiftself args.
45 if (OrigArg
.Flags
.isSwiftError() || OrigArg
.Flags
.isSwiftSelf())
47 OrigArgs
.push_back(OrigArg
);
51 MachineOperand Callee
= MachineOperand::CreateImm(0);
52 if (const Function
*F
= CS
.getCalledFunction())
53 Callee
= MachineOperand::CreateGA(F
, 0);
55 Callee
= MachineOperand::CreateReg(GetCalleeReg(), false);
57 ArgInfo OrigRet
{ResReg
, CS
.getType(), ISD::ArgFlagsTy
{}};
58 if (!OrigRet
.Ty
->isVoidTy())
59 setArgFlags(OrigRet
, AttributeList::ReturnIndex
, DL
, CS
);
61 return lowerCall(MIRBuilder
, CS
.getCallingConv(), Callee
, OrigRet
, OrigArgs
);
64 template <typename FuncInfoTy
>
65 void CallLowering::setArgFlags(CallLowering::ArgInfo
&Arg
, unsigned OpIdx
,
67 const FuncInfoTy
&FuncInfo
) const {
68 const AttributeList
&Attrs
= FuncInfo
.getAttributes();
69 if (Attrs
.hasAttribute(OpIdx
, Attribute::ZExt
))
71 if (Attrs
.hasAttribute(OpIdx
, Attribute::SExt
))
73 if (Attrs
.hasAttribute(OpIdx
, Attribute::InReg
))
75 if (Attrs
.hasAttribute(OpIdx
, Attribute::StructRet
))
77 if (Attrs
.hasAttribute(OpIdx
, Attribute::SwiftSelf
))
78 Arg
.Flags
.setSwiftSelf();
79 if (Attrs
.hasAttribute(OpIdx
, Attribute::SwiftError
))
80 Arg
.Flags
.setSwiftError();
81 if (Attrs
.hasAttribute(OpIdx
, Attribute::ByVal
))
83 if (Attrs
.hasAttribute(OpIdx
, Attribute::InAlloca
))
84 Arg
.Flags
.setInAlloca();
86 if (Arg
.Flags
.isByVal() || Arg
.Flags
.isInAlloca()) {
87 Type
*ElementTy
= cast
<PointerType
>(Arg
.Ty
)->getElementType();
88 Arg
.Flags
.setByValSize(DL
.getTypeAllocSize(ElementTy
));
89 // For ByVal, alignment should be passed from FE. BE will guess if
90 // this info is not there but there are cases it cannot get right.
92 if (FuncInfo
.getParamAlignment(OpIdx
- 2))
93 FrameAlign
= FuncInfo
.getParamAlignment(OpIdx
- 2);
95 FrameAlign
= getTLI()->getByValTypeAlignment(ElementTy
, DL
);
96 Arg
.Flags
.setByValAlign(FrameAlign
);
98 if (Attrs
.hasAttribute(OpIdx
, Attribute::Nest
))
100 Arg
.Flags
.setOrigAlign(DL
.getABITypeAlignment(Arg
.Ty
));
104 CallLowering::setArgFlags
<Function
>(CallLowering::ArgInfo
&Arg
, unsigned OpIdx
,
105 const DataLayout
&DL
,
106 const Function
&FuncInfo
) const;
109 CallLowering::setArgFlags
<CallInst
>(CallLowering::ArgInfo
&Arg
, unsigned OpIdx
,
110 const DataLayout
&DL
,
111 const CallInst
&FuncInfo
) const;
113 bool CallLowering::handleAssignments(MachineIRBuilder
&MIRBuilder
,
114 ArrayRef
<ArgInfo
> Args
,
115 ValueHandler
&Handler
) const {
116 MachineFunction
&MF
= MIRBuilder
.getMF();
117 const Function
&F
= MF
.getFunction();
118 const DataLayout
&DL
= F
.getParent()->getDataLayout();
120 SmallVector
<CCValAssign
, 16> ArgLocs
;
121 CCState
CCInfo(F
.getCallingConv(), F
.isVarArg(), MF
, ArgLocs
, F
.getContext());
123 unsigned NumArgs
= Args
.size();
124 for (unsigned i
= 0; i
!= NumArgs
; ++i
) {
125 MVT CurVT
= MVT::getVT(Args
[i
].Ty
);
126 if (Handler
.assignArg(i
, CurVT
, CurVT
, CCValAssign::Full
, Args
[i
], CCInfo
)) {
127 // Try to use the register type if we couldn't assign the VT.
128 if (!Handler
.isArgumentHandler() || !CurVT
.isValid())
130 CurVT
= TLI
->getRegisterTypeForCallingConv(
131 F
.getContext(), F
.getCallingConv(), EVT(CurVT
));
132 if (Handler
.assignArg(i
, CurVT
, CurVT
, CCValAssign::Full
, Args
[i
], CCInfo
))
137 for (unsigned i
= 0, e
= Args
.size(), j
= 0; i
!= e
; ++i
, ++j
) {
138 assert(j
< ArgLocs
.size() && "Skipped too many arg locs");
140 CCValAssign
&VA
= ArgLocs
[j
];
141 assert(VA
.getValNo() == i
&& "Location doesn't correspond to current arg");
143 if (VA
.needsCustom()) {
144 j
+= Handler
.assignCustomValue(Args
[i
], makeArrayRef(ArgLocs
).slice(j
));
149 MVT OrigVT
= MVT::getVT(Args
[i
].Ty
);
150 MVT VAVT
= VA
.getValVT();
151 if (Handler
.isArgumentHandler() && VAVT
!= OrigVT
) {
152 if (VAVT
.getSizeInBits() < OrigVT
.getSizeInBits())
153 return false; // Can't handle this type of arg yet.
154 const LLT
VATy(VAVT
);
156 MIRBuilder
.getMRI()->createGenericVirtualRegister(VATy
);
157 Handler
.assignValueToReg(NewReg
, VA
.getLocReg(), VA
);
158 // If it's a vector type, we either need to truncate the elements
159 // or do an unmerge to get the lower block of elements.
160 if (VATy
.isVector() &&
161 VATy
.getNumElements() > OrigVT
.getVectorNumElements()) {
162 const LLT
OrigTy(OrigVT
);
163 // Just handle the case where the VA type is 2 * original type.
164 if (VATy
.getNumElements() != OrigVT
.getVectorNumElements() * 2) {
166 << "Incoming promoted vector arg has too many elts");
169 auto Unmerge
= MIRBuilder
.buildUnmerge({OrigTy
, OrigTy
}, {NewReg
});
170 MIRBuilder
.buildCopy(Args
[i
].Reg
, Unmerge
.getReg(0));
172 MIRBuilder
.buildTrunc(Args
[i
].Reg
, {NewReg
}).getReg(0);
175 Handler
.assignValueToReg(Args
[i
].Reg
, VA
.getLocReg(), VA
);
177 } else if (VA
.isMemLoc()) {
178 MVT VT
= MVT::getVT(Args
[i
].Ty
);
179 unsigned Size
= VT
== MVT::iPTR
? DL
.getPointerSize()
180 : alignTo(VT
.getSizeInBits(), 8) / 8;
181 unsigned Offset
= VA
.getLocMemOffset();
182 MachinePointerInfo MPO
;
183 unsigned StackAddr
= Handler
.getStackAddress(Size
, Offset
, MPO
);
184 Handler
.assignValueToAddress(Args
[i
].Reg
, StackAddr
, Size
, MPO
, VA
);
186 // FIXME: Support byvals and other weirdness
193 unsigned CallLowering::ValueHandler::extendRegister(unsigned ValReg
,
195 LLT LocTy
{VA
.getLocVT()};
196 if (LocTy
.getSizeInBits() == MRI
.getType(ValReg
).getSizeInBits())
198 switch (VA
.getLocInfo()) {
200 case CCValAssign::Full
:
201 case CCValAssign::BCvt
:
202 // FIXME: bitconverting between vector types may or may not be a
203 // nop in big-endian situations.
205 case CCValAssign::AExt
: {
206 auto MIB
= MIRBuilder
.buildAnyExt(LocTy
, ValReg
);
207 return MIB
->getOperand(0).getReg();
209 case CCValAssign::SExt
: {
210 unsigned NewReg
= MRI
.createGenericVirtualRegister(LocTy
);
211 MIRBuilder
.buildSExt(NewReg
, ValReg
);
214 case CCValAssign::ZExt
: {
215 unsigned NewReg
= MRI
.createGenericVirtualRegister(LocTy
);
216 MIRBuilder
.buildZExt(NewReg
, ValReg
);
220 llvm_unreachable("unable to extend register");
223 void CallLowering::ValueHandler::anchor() {}