//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//
14 #include "llvm/CodeGen/Analysis.h"
15 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
16 #include "llvm/CodeGen/GlobalISel/Utils.h"
17 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
18 #include "llvm/CodeGen/MachineOperand.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/TargetLowering.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/LLVMContext.h"
24 #include "llvm/IR/Module.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();

  // First step is to marshall all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
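  // For example (an illustrative sketch, not tied to any particular target),
  // a call such as
  //   %r = call i32 @f(i32 %a, i64 %b)
  // produces one ArgInfo per operand carrying its vregs, IR type, and flags,
  // plus an ArgInfo for the i32 return value built further below.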
  unsigned i = 0;
  unsigned NumFixedArgs = CS.getFunctionType()->getNumParams();
  for (auto &Arg : CS.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  if (const Function *F = CS.getCalledFunction())
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, CS.getType(), ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CS);

  Info.KnownCallees =
      CS.getInstruction()->getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CS.getCallingConv();
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CS.isMustTailCall();
  Info.IsTailCall = CS.isTailCall() &&
                    isInTailCallPosition(CS, MIRBuilder.getMF().getTarget());
  Info.IsVarArg = CS.getFunctionType()->isVarArg();
  return lowerCall(MIRBuilder, Info);
}

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Flags.setInAlloca();
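  // For instance (illustrative), a parameter declared as "i32 signext %x" in
  // IR carries Attribute::SExt, so its SExt flag is set by the chain above.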

  if (Flags.isByVal() || Flags.isInAlloca()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, the alignment should be passed from the frontend. The
    // backend will guess if this info is missing, but there are cases it
    // cannot get right.
    unsigned FrameAlign;
    if (FuncInfo.getParamAlignment(OpIdx - 2))
      FrameAlign = FuncInfo.getParamAlignment(OpIdx - 2);
    else
      FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
    Flags.setByValAlign(FrameAlign);
  }
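  // Illustrative example: for "void @f(%struct.S* byval align 8 %s)" the
  // frontend's "align 8" wins; absent an explicit alignment, the target's
  // getByValTypeAlignment() supplies a best-effort guess.
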
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Flags.setNest();
  Flags.setOrigAlign(DL.getABITypeAlignment(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallInst &FuncInfo) const;

Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}
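
// Sketch of the MIR this produces when packing two s64 values into an s128
// (virtual register names invented for illustration):
//   %0:_(s128) = G_IMPLICIT_DEF
//   %1:_(s128) = G_INSERT %0, %lo(s64), 0
//   %2:_(s128) = G_INSERT %1, %hi(s64), 64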

void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}
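
// The inverse sketch: unpacking an s128 into two s64 pieces (illustrative
// register names):
//   %lo:_(s64) = G_EXTRACT %src(s128), 0
//   %hi:_(s64) = G_EXTRACT %src(s128), 64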

bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                          Args[i].Flags[0], CCInfo)) {
      if (!CurVT.isValid())
        return false;
      MVT NewVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), EVT(CurVT));

      // If we need to split the type over multiple regs, check that it's a
      // scenario we currently support.
      unsigned NumParts = TLI->getNumRegistersForCallingConv(
          F.getContext(), F.getCallingConv(), CurVT);
      if (NumParts > 1) {
        if (CurVT.isVector())
          return false;
        // For now only handle exact splits.
        if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits())
          return false;
      }
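
      // Illustrative numbers: CurVT = i128 with NewVT = i64 gives
      // NumParts == 2, an exact split; something like i96 over i64 registers
      // would fail the exact-split check above.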

      // For incoming arguments (return values), we could have values in
      // physregs (or memlocs) which we want to extract and copy to vregs.
      // During this, we might have to deal with the LLT being split across
      // multiple regs, so we have to record this information for later.
      //
      // If we have outgoing args, then we have the opposite case. We have a
      // vreg with an LLT which we want to assign to a physical location, and
      // we might have to record that the value has to be split later.
      if (Handler.isIncomingArgumentHandler()) {
        if (NumParts == 1) {
          // Try to use the register type if we couldn't assign the VT.
          if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                                Args[i].Flags[0], CCInfo))
            return false;
        } else {
          // We're handling an incoming arg which is split over multiple regs.
          // E.g. returning an s128 on AArch64.
          ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
          Args[i].OrigRegs.push_back(Args[i].Regs[0]);
          Args[i].Regs.clear();
          Args[i].Flags.clear();
          LLT NewLLT = getLLTForMVT(NewVT);
          // For each split register, create and assign a vreg that will store
          // the incoming component of the larger value. These will later be
          // merged to form the final vreg.
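          // Illustrative end state (assuming AArch64's two-register s128
          // return in $x0/$x1):
          //   %lo:_(s64) = COPY $x0
          //   %hi:_(s64) = COPY $x1
          //   %ret:_(s128) = G_MERGE_VALUES %lo(s64), %hi(s64)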
          for (unsigned Part = 0; Part < NumParts; ++Part) {
            Register Reg =
                MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT);
            ISD::ArgFlagsTy Flags = OrigFlags;
            if (Part == 0) {
              Flags.setSplit();
            } else {
              Flags.setOrigAlign(1);
              if (Part == NumParts - 1)
                Flags.setSplitEnd();
            }
            Args[i].Regs.push_back(Reg);
            Args[i].Flags.push_back(Flags);
            if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                                  Args[i].Flags[Part], CCInfo)) {
              // Still couldn't assign this smaller part type for some reason.
              return false;
            }
          }
        }
      } else {
        // Handling an outgoing arg that might need to be split.
        if (NumParts < 2)
          return false; // Don't know how to deal with this type combination.

        // This type is passed via multiple registers in the calling
        // convention. We need to extract the individual parts.
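        // E.g. (illustrative) an outgoing s128 argument is first split with:
        //   %lo:_(s64), %hi:_(s64) = G_UNMERGE_VALUES %val(s128)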
        Register LargeReg = Args[i].Regs[0];
        LLT SmallTy = LLT::scalar(NewVT.getSizeInBits());
        auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg);
        assert(Unmerge->getNumOperands() == NumParts + 1);
        ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
        // We're going to replace the regs and flags with the split ones.
        Args[i].Regs.clear();
        Args[i].Flags.clear();
        for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
          ISD::ArgFlagsTy Flags = OrigFlags;
          if (PartIdx == 0) {
            Flags.setSplit();
          } else {
            Flags.setOrigAlign(1);
            if (PartIdx == NumParts - 1)
              Flags.setSplitEnd();
          }
          Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
          Args[i].Flags.push_back(Flags);
          if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                                Args[i].Flags[PartIdx], CCInfo))
            return false;
        }
      }
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      continue;
    }

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    if (VA.isRegLoc()) {
      MVT OrigVT = MVT::getVT(Args[i].Ty);
      MVT VAVT = VA.getValVT();
      if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) {
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
          // Expected to be multiple regs for a single incoming arg.
          unsigned NumArgRegs = Args[i].Regs.size();
          if (NumArgRegs < 2)
            return false;

          assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
                 "Too many regs for number of args");
          for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
            // There should be Regs.size() ArgLocs per argument.
            VA = ArgLocs[j + Part];
            Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
          }
          j += NumArgRegs - 1;
          // Merge the split registers into the expected larger result vreg
          // of the original call.
          MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
          continue;
        }

        const LLT VATy(VAVT);
        Register NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
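        // Illustrative sketch: a <2 x s32> argument promoted to <4 x s32> by
        // the CC is recovered by taking the low half of the wider register:
        //   %lo:_(<2 x s32>), %hi:_(<2 x s32>) = G_UNMERGE_VALUES %new(<4 x s32>)
        // (the %lo piece is then copied into the original arg vreg).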
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          const LLT OrigTy(OrigVT);
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts");
            return false;
          }
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          MIRBuilder.buildTrunc(ArgReg, {NewReg});
        }
      } else if (!Handler.isIncomingArgumentHandler()) {
        assert((j + (Args[i].Regs.size() - 1)) < ArgLocs.size() &&
               "Too many regs for number of args");
        // This is an outgoing argument that might have been split.
        for (unsigned Part = 0; Part < Args[i].Regs.size(); ++Part) {
          // There should be Regs.size() ArgLocs per argument.
          VA = ArgLocs[j + Part];
          Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
        }
        j += Args[i].Regs.size() - 1;
      } else {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      MVT VT = MVT::getVT(Args[i].Ty);
      unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
                                      : alignTo(VT.getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(ArgReg, StackAddr, Size, MPO, VA);
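      // For instance (illustrative), an i64 assigned to stack offset 8 asks
      // the handler for an 8-byte slot at that offset and then emits the
      // corresponding store (outgoing) or load (incoming) through
      // assignValueToAddress().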
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}

Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits())
    return ValReg;
  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB->getOperand(0).getReg();
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}
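
// Sketch of the effect (assuming a 32-bit GPR calling convention): an s8
// value whose location says CCValAssign::ZExt yields
//   %ext:_(s32) = G_ZEXT %val(s8)
// which the handler then copies into the physical register.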

void CallLowering::ValueHandler::anchor() {}