//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;
MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}
bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA,
                                           const EVT &VT) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA, VT);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}
bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<Register> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex,
                                                const EVT &VT) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT))
      return false;
  return true;
}
void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<Register> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}
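
// handle() walks the already-split arguments together with their CCValAssign
// locations. When the calling convention needs more than one register for a
// value (SplitLength > 1), fresh virtual registers of the per-register type
// are created and handleSplit() performs the merge/unmerge; otherwise the
// single virtual register is assigned directly.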
bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<Register, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    assert(Args[ArgsIndex].Regs.size() == 1 && "Can't handle multiple regs yet");

    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Regs[0],
                       VT))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Regs[0], ArgLocs[ArgLocsIndex], VT))
        return false;
    }
  }
  return true;
}
namespace {
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(Register Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    Register Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace
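
// Incoming f64/f32 values that arrive in the integer argument registers
// A0..A3 are reassembled with BuildPairF64/MTC1; anything else is a plain
// copy, with a truncate when the location type was extended. markPhysRegUsed
// records the physical register as a live-in for formal arguments, or as an
// implicit def of the call instruction for call results.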
void IncomingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
  Register PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::BuildPairF64_64
                                    : Mips::BuildPairF64)
        .addDef(ValVReg)
        .addUse(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(PhysReg + (STI.isLittle() ? 1 : 0))
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
    markPhysRegUsed(PhysReg + 1);
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MTC1)
        .addDef(ValVReg)
        .addUse(PhysReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
  } else {
    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    markPhysRegUsed(PhysReg);
  }
}
Register IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

  Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}
void IncomingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    Register LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}
bool IncomingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}
namespace {
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  Register extendRegister(Register ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace
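
// The outgoing path mirrors the incoming one: f64/f32 values destined for the
// integer argument registers A0..A3 are moved with ExtractElementF64/MFC1;
// other values are extended to their location type, copied into the physical
// register, and the register is added as an implicit use of the call.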
void OutgoingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  Register PhysReg = VA.getLocReg();
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 1 : 0))
        .addUse(ValVReg)
        .addImm(1)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(ValVReg)
        .addImm(0)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MFC1)
        .addDef(PhysReg)
        .addUse(ValVReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else {
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
}
Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  Register SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Register(Mips::SP));

  Register OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  Register AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg;
}
void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  Register Addr = getStackAddress(VA, MMO);
  Register ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}
Register OutgoingValueHandler::extendRegister(Register ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO: handle upper extends.
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}
bool OutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  return true;
}
static bool isSupportedType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  return false;
}
static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // VT being at least as wide as RegisterVT does not mean loss of information:
  // it means that type VT is split into multiple registers of type RegisterVT.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}
template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}
bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<Register> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices);
    }

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}
bool MipsCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  if (F.isVarArg()) {
    return false;
  }

  for (auto &Arg : F.args()) {
    if (!isSupportedType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  return true;
}
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallLoweringInfo &Info) const {

  if (Info.CallConv != CallingConv::C)
    return false;

  for (auto &Arg : Info.OrigArgs) {
    if (!isSupportedType(Arg.Ty))
      return false;
    if (Arg.Flags[0].isByVal())
      return false;
    if (Arg.Flags[0].isSRet() && !Arg.Ty->isPointerTy())
      return false;
  }

  if (!Info.OrigRet.Ty->isVoidTy() && !isSupportedType(Info.OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  const bool IsCalleeGlobalPIC =
      Info.Callee.isGlobal() && TM.isPositionIndependent();

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
      Info.Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  if (IsCalleeGlobalPIC) {
    Register CalleeReg =
        MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
    MachineInstr *CalleeGlobalValue =
        MIRBuilder.buildGlobalValue(CalleeReg, Info.Callee.getGlobal());
    if (!Info.Callee.getGlobal()->hasLocalLinkage())
      CalleeGlobalValue->getOperand(1).setTargetFlags(MipsII::MO_GOT_CALL);
    MIB.addUse(CalleeReg);
  } else
    MIB.add(Info.Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(Info.OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : Info.OrigArgs) {
    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv), 1);
  const char *Call =
      Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  if (IsCalleeGlobalPIC) {
    MIRBuilder.buildCopy(
        Register(Mips::GP),
        MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
    MIB.addDef(Mips::GP, RegState::Implicit);
  }
  MIRBuilder.insertInstr(MIB);
  if (MIB->getOpcode() == Mips::JALRPseudo) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    MIB.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                         *STI.getRegBankInfo());
  }

  if (!Info.OrigRet.Ty->isVoidTy()) {
    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(Info.OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), Info.OrigRet.Ty,
                             Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}
template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {
    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags[0];

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(1);

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}
void MipsCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  // TODO: perform structure and array splitting. For now we only deal with
  // types that pass the isSupportedType check.
  SplitArgs.push_back(OrigArg);
  SplitArgsOrigIndices.push_back(OriginalIndex);
}