//===- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//
#include "ARMCallLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMISelLowering.h"
#include "ARMSubtarget.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
    : CallLowering(&TLI) {}

static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
                            Type *T) {
  if (T->isArrayTy())
    return true;

  if (T->isStructTy()) {
    // For now we only allow homogeneous structs that we can manipulate with
    // G_MERGE_VALUES and G_UNMERGE_VALUES.
    auto StructT = cast<StructType>(T);
    for (unsigned i = 1, e = StructT->getNumElements(); i != e; ++i)
      if (StructT->getElementType(i) != StructT->getElementType(0))
        return false;
    return true;
  }

  EVT VT = TLI.getValueType(DL, T, true);
  if (!VT.isSimple() || VT.isVector() ||
      !(VT.isInteger() || VT.isFloatingPoint()))
    return false;

  unsigned VTSize = VT.getSimpleVT().getSizeInBits();

  if (VTSize == 64)
    // FIXME: Support i64 too
    return VT.isFloatingPoint();

  return VTSize == 1 || VTSize == 8 || VTSize == 16 || VTSize == 32;
}
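
// Illustrative summary (editorial note, not from the original source): with
// the checks above, i1/i8/i16/i32, float, and double are supported, as are
// arrays and homogeneous structs such as {i32, i32}; i64, vectors, and mixed
// structs such as {i32, float} are rejected, which makes GlobalISel fall back
// to SelectionDAG for the whole function.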

namespace {

/// Helper class for values going out through an ABI boundary (used for
/// handling function return values and call parameters).
struct OutgoingValueHandler : public CallLowering::ValueHandler {
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  unsigned getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    LLT p0 = LLT::pointer(0, 32);
    LLT s32 = LLT::scalar(32);
    unsigned SPReg = MRI.createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(SPReg, ARM::SP);

    unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
    MIRBuilder.buildConstant(OffsetReg, Offset);

    unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
    MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
    return AddrReg;
  }
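
  // For illustration (assumed MIR, not from the original source), a 4-byte
  // outgoing stack slot at offset 8 is addressed roughly as:
  //   %sp:_(p0) = COPY $sp
  //   %off:_(s32) = G_CONSTANT i32 8
  //   %addr:_(p0) = G_GEP %sp, %off(s32)
  // assignValueToAddress below then stores the argument through %addr.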

  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
                        CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value should be in reg");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
    assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");

    unsigned ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    unsigned ExtReg = extendRegister(ValVReg, VA);
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
        /* Alignment */ 1);
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }

  unsigned assignCustomValue(const CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    CCValAssign VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValVT() == MVT::f64 && "Unsupported type");

    CCValAssign NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

    unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
    MIRBuilder.buildUnmerge(NewRegs, Arg.Reg);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    return 1;
  }
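
  // Sketch of the custom f64 handling above (illustrative MIR, assuming a
  // little-endian target): under AAPCS an f64 passed in core registers is
  // split into two s32 halves, e.g.
  //   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %val(s64)
  //   $r0 = COPY %lo(s32)
  //   $r1 = COPY %hi(s32)
  // On big-endian targets the two halves are swapped before assignment.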

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, CCState &State) override {
    if (AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State))
      return true;

    StackSize =
        std::max(StackSize, static_cast<uint64_t>(State.getNextStackOffset()));
    return false;
  }

  MachineInstrBuilder &MIB;
  uint64_t StackSize = 0;
};

} // end anonymous namespace

void ARMCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
    MachineFunction &MF, const SplitArgTy &PerformArgSplit) const {
  const ARMTargetLowering &TLI = *getTLI<ARMTargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  if (SplitVTs.size() == 1) {
    // Even if there is no splitting to do, we still want to replace the
    // original type (e.g. pointer type -> integer).
    auto Flags = OrigArg.Flags;
    unsigned OriginalAlignment = DL.getABITypeAlignment(OrigArg.Ty);
    Flags.setOrigAlign(OriginalAlignment);
    SplitArgs.emplace_back(OrigArg.Reg, SplitVTs[0].getTypeForEVT(Ctx), Flags,
                           OrigArg.IsFixed);
    return;
  }

  unsigned FirstRegIdx = SplitArgs.size();
  for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) {
    EVT SplitVT = SplitVTs[i];
    Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
    auto Flags = OrigArg.Flags;

    unsigned OriginalAlignment = DL.getABITypeAlignment(SplitTy);
    Flags.setOrigAlign(OriginalAlignment);

    bool NeedsConsecutiveRegisters =
        TLI.functionArgumentNeedsConsecutiveRegisters(
            SplitTy, F.getCallingConv(), F.isVarArg());
    if (NeedsConsecutiveRegisters) {
      Flags.setInConsecutiveRegs();
      if (i == e - 1)
        Flags.setInConsecutiveRegsLast();
    }

    SplitArgs.push_back(
        ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL)),
                SplitTy, Flags, OrigArg.IsFixed});
  }

  for (unsigned i = 0; i < Offsets.size(); ++i)
    PerformArgSplit(SplitArgs[FirstRegIdx + i].Reg, Offsets[i] * 8);
}
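
// Worked example (illustrative, not from the original source): for an
// argument of type {i32, i32}, ComputeValueVTs yields two i32 parts at byte
// offsets 0 and 4, so two fresh s32 virtual registers are created and
// PerformArgSplit is invoked with bit offsets 0 and 32; the caller then
// connects the pieces to the original value with G_UNMERGE_VALUES (outgoing)
// or G_MERGE_VALUES (incoming).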

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p MIRBuilder's insertion point is correct.
bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                     const Value *Val, ArrayRef<unsigned> VRegs,
                                     MachineInstrBuilder &Ret) const {
  if (!Val)
    // Nothing to do here.
    return true;

  auto &MF = MIRBuilder.getMF();
  const auto &F = MF.getFunction();

  auto DL = MF.getDataLayout();
  auto &TLI = *getTLI<ARMTargetLowering>();
  if (!isSupportedType(DL, TLI, Val->getType()))
    return false;

  SmallVector<EVT, 4> SplitEVTs;
  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
  assert(VRegs.size() == SplitEVTs.size() &&
         "For each split Type there should be exactly one VReg.");

  SmallVector<ArgInfo, 4> SplitVTs;
  LLVMContext &Ctx = Val->getType()->getContext();
  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    ArgInfo CurArgInfo(VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx));
    setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);

    SmallVector<unsigned, 4> Regs;
    splitToValueTypes(
        CurArgInfo, SplitVTs, MF,
        [&](unsigned Reg, uint64_t Offset) { Regs.push_back(Reg); });
    if (Regs.size() > 1)
      MIRBuilder.buildUnmerge(Regs, VRegs[i]);
  }

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg());

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret, AssignFn);
  return handleAssignments(MIRBuilder, SplitVTs, RetHandler);
}

bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                  const Value *Val,
                                  ArrayRef<unsigned> VRegs) const {
  assert(!Val == VRegs.empty() && "Return value without a vreg");

  auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
  unsigned Opcode = ST.getReturnOpcode();
  auto Ret = MIRBuilder.buildInstrNoInsert(Opcode).add(predOps(ARMCC::AL));

  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
    return false;

  MIRBuilder.insertInstr(Ret);
  return true;
}

namespace {

/// Helper class for values coming in through an ABI boundary (used for
/// handling formal arguments and call return values).
struct IncomingValueHandler : public CallLowering::ValueHandler {
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       CCAssignFn AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn) {}

  unsigned getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

    unsigned AddrReg =
        MRI.createGenericVirtualRegister(LLT::pointer(MPO.getAddrSpace(), 32));
    MIRBuilder.buildFrameIndex(AddrReg, FI);

    return AddrReg;
  }

  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    if (VA.getLocInfo() == CCValAssign::SExt ||
        VA.getLocInfo() == CCValAssign::ZExt) {
      // If the value is zero- or sign-extended, its size becomes 4 bytes, so
      // that's what we should load.
      Size = 4;
      assert(MRI.getType(ValVReg).isScalar() && "Only scalars supported atm");

      auto LoadVReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
      buildLoad(LoadVReg, Addr, Size, /* Alignment */ 1, MPO);
      MIRBuilder.buildTrunc(ValVReg, LoadVReg);
    } else {
      // If the value is not extended, a simple load will suffice.
      buildLoad(ValVReg, Addr, Size, /* Alignment */ 1, MPO);
    }
  }
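
  // For illustration (assumed MIR): an i8 parameter that the caller widened
  // to a 32-bit stack word is loaded at full width and then truncated:
  //   %tmp:_(s32) = G_LOAD %addr(p0) :: (load 4 ...)
  //   %val:_(s8) = G_TRUNC %tmp(s32)
  // An unextended value is loaded directly at its own size.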

  void buildLoad(unsigned Val, unsigned Addr, uint64_t Size, unsigned Alignment,
                 MachinePointerInfo &MPO) {
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad, Size, Alignment);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }

  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
                        CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value should be in reg");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    auto ValSize = VA.getValVT().getSizeInBits();
    auto LocSize = VA.getLocVT().getSizeInBits();

    assert(ValSize <= 64 && "Unsupported value size");
    assert(LocSize <= 64 && "Unsupported location size");

    markPhysRegUsed(PhysReg);
    if (ValSize == LocSize) {
      MIRBuilder.buildCopy(ValVReg, PhysReg);
    } else {
      assert(ValSize < LocSize && "Extensions not supported");

      // We cannot create a truncating copy, nor a trunc of a physical
      // register. Therefore, we need to copy the content of the physical
      // register into a virtual one and then truncate that.
      auto PhysRegToVReg =
          MRI.createGenericVirtualRegister(LLT::scalar(LocSize));
      MIRBuilder.buildCopy(PhysRegToVReg, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, PhysRegToVReg);
    }
  }
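
  // For illustration (assumed MIR): an i16 value arriving in the 32-bit $r0
  // cannot be narrowed with a plain COPY, so the sequence above becomes:
  //   %full:_(s32) = COPY $r0
  //   %val:_(s16) = G_TRUNC %full(s32)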

  unsigned assignCustomValue(const ARMCallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    CCValAssign VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValVT() == MVT::f64 && "Unsupported type");

    CCValAssign NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

    unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    MIRBuilder.buildMerge(Arg.Reg, NewRegs);

    return 1;
  }

  /// Marking a physical register as used is different between formal
  /// parameters, where it's a basic block live-in, and call returns, where
  /// it's an implicit-def of the call instruction.
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

struct FormalArgHandler : public IncomingValueHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn AssignFn)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace

bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                           const Function &F,
                                           ArrayRef<unsigned> VRegs) const {
  auto &TLI = *getTLI<ARMTargetLowering>();
  auto Subtarget = TLI.getSubtarget();

  if (Subtarget->isThumb1Only())
    return false;

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  if (F.isVarArg())
    return false;

  auto &MF = MIRBuilder.getMF();
  auto &MBB = MIRBuilder.getMBB();
  auto DL = MF.getDataLayout();

  for (auto &Arg : F.args()) {
    if (!isSupportedType(DL, TLI, Arg.getType()))
      return false;
    if (Arg.hasByValOrInAllocaAttr())
      return false;
  }

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), F.isVarArg());

  FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo(),
                              AssignFn);

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 4> SplitRegs;
  unsigned Idx = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[Idx], Arg.getType());
    setArgFlags(AInfo, Idx + AttributeList::FirstArgIndex, DL, F);

    SplitRegs.clear();

    splitToValueTypes(AInfo, ArgInfos, MF, [&](unsigned Reg, uint64_t Offset) {
      SplitRegs.push_back(Reg);
    });

    if (!SplitRegs.empty())
      MIRBuilder.buildMerge(VRegs[Idx], SplitRegs);

    Idx++;
  }

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler))
    return false;

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);

  return true;
}

namespace {

struct CallReturnHandler : public IncomingValueHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

// FIXME: This should move to the ARMSubtarget when it supports all the
// opcodes.
unsigned getCallOpcode(const ARMSubtarget &STI, bool isDirect) {
  if (isDirect)
    return STI.isThumb() ? ARM::tBL : ARM::BL;

  if (STI.isThumb())
    return ARM::tBLXr;

  if (STI.hasV5TOps())
    return ARM::BLX;

  if (STI.hasV4TOps())
    return ARM::BX_CALL;

  return ARM::BMOVPCRX_CALL;
}

} // end anonymous namespace

bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallingConv::ID CallConv,
                                const MachineOperand &Callee,
                                const ArgInfo &OrigRet,
                                ArrayRef<ArgInfo> OrigArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const auto &TLI = *getTLI<ARMTargetLowering>();
  const auto &DL = MF.getDataLayout();
  const auto &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (STI.genLongCalls())
    return false;

  if (STI.isThumb1Only())
    return false;

  auto CallSeqStart = MIRBuilder.buildInstr(ARM::ADJCALLSTACKDOWN);

  // Create the call instruction so we can add the implicit uses of arg
  // registers, but don't insert it yet.
  bool IsDirect = !Callee.isReg();
  auto CallOpcode = getCallOpcode(STI, IsDirect);
  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpcode);

  bool IsThumb = STI.isThumb();
  if (IsThumb)
    MIB.add(predOps(ARMCC::AL));

  MIB.add(Callee);
  if (!IsDirect) {
    auto CalleeReg = Callee.getReg();
    if (CalleeReg && !TRI->isPhysicalRegister(CalleeReg)) {
      unsigned CalleeIdx = IsThumb ? 2 : 0;
      MIB->getOperand(CalleeIdx).setReg(constrainOperandRegClass(
          MF, *TRI, MRI, *STI.getInstrInfo(), *STI.getRegBankInfo(),
          *MIB.getInstr(), MIB->getDesc(), Callee, CalleeIdx));
    }
  }

  MIB.addRegMask(TRI->getCallPreservedMask(MF, CallConv));

  bool IsVarArg = false;
  SmallVector<ArgInfo, 8> ArgInfos;
  for (auto Arg : OrigArgs) {
    if (!isSupportedType(DL, TLI, Arg.Ty))
      return false;

    if (!Arg.IsFixed)
      IsVarArg = true;

    if (Arg.Flags.isByVal())
      return false;

    SmallVector<unsigned, 8> Regs;
    splitToValueTypes(Arg, ArgInfos, MF, [&](unsigned Reg, uint64_t Offset) {
      Regs.push_back(Reg);
    });

    if (Regs.size() > 1)
      MIRBuilder.buildUnmerge(Regs, Arg.Reg);
  }

  auto ArgAssignFn = TLI.CCAssignFnForCall(CallConv, IsVarArg);
  OutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB, ArgAssignFn);
  if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler))
    return false;

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  if (!OrigRet.Ty->isVoidTy()) {
    if (!isSupportedType(DL, TLI, OrigRet.Ty))
      return false;

    ArgInfos.clear();
    SmallVector<unsigned, 8> SplitRegs;
    splitToValueTypes(OrigRet, ArgInfos, MF,
                      [&](unsigned Reg, uint64_t Offset) {
                        SplitRegs.push_back(Reg);
                      });

    auto RetAssignFn = TLI.CCAssignFnForReturn(CallConv, IsVarArg);
    CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn);
    if (!handleAssignments(MIRBuilder, ArgInfos, RetHandler))
      return false;

    if (!SplitRegs.empty()) {
      // We have split the value and allocated each individual piece, now build
      // it up again.
      MIRBuilder.buildMerge(OrigRet.Reg, SplitRegs);
    }
  }

  // We now know the size of the stack - update the ADJCALLSTACKDOWN
  // accordingly.
  CallSeqStart.addImm(ArgHandler.StackSize).addImm(0).add(predOps(ARMCC::AL));

  MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
      .addImm(ArgHandler.StackSize)
      .addImm(0)
      .add(predOps(ARMCC::AL));

  return true;
}
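
// For reference (illustrative sketch, assuming little-endian ARM mode, not
// from the original source): a simple direct call such as
// 'call i32 @f(i32 %x)' lowers to a sequence along the lines of:
//   ADJCALLSTACKDOWN 0, 0, 14, $noreg
//   $r0 = COPY %x(s32)
//   BL @f, csr_aapcs, implicit $r0, implicit-def $r0
//   ADJCALLSTACKUP 0, 0, 14, $noreg
//   %ret:_(s32) = COPY $r0
// with the stack adjustment operands filled in from ArgHandler.StackSize.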