//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

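// Assign a single value to the register or the stack slot that the calling
// convention analysis chose for it.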
bool MipsCallLowering::MipsHandler::assign(unsigned VReg, const CCValAssign &VA,
                                           const EVT &VT) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA, VT);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

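// Assign the registers of one split value to consecutive locations in
// ArgLocs, starting at ArgLocsStartIndex.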
bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<unsigned> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex,
                                                const EVT &VT) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT))
      return false;
  return true;
}

void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<unsigned> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}

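// Assign a list of analyzed values. When the calling convention needs more
// than one register for a value (e.g. on O32 a 64-bit value occupies two
// 32-bit registers), one virtual register is created per part and handed to
// handleSplit; otherwise the value is assigned directly.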
bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<unsigned, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Reg, VT))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Reg, ArgLocs[ArgLocsIndex], VT))
        return false;
    }
  }
  return true;
}

namespace {
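// Handler for incoming values: formal arguments, and call results via the
// CallReturnHandler subclass, which records used registers as implicit defs
// of the call instruction instead of block live-ins.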
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg, const EVT &VT) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(unsigned Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    unsigned Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

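// Read an incoming value out of its assigned register. An f64 passed in a
// GPR pair (A0/A1 or A2/A3) is reassembled with BuildPairF64, an f32 passed
// in a GPR is moved into an FPR with MTC1, and narrower integers are copied
// at location width and then truncated.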
void IncomingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
  unsigned PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::BuildPairF64_64
                                    : Mips::BuildPairF64)
        .addDef(ValVReg)
        .addUse(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(PhysReg + (STI.isLittle() ? 1 : 0))
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
    markPhysRegUsed(PhysReg + 1);
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MTC1)
        .addDef(ValVReg)
        .addUse(PhysReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
  } else {
    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    markPhysRegUsed(PhysReg);
  }
}

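// Create a fixed frame object for an incoming stack argument and return a
// pointer to it, along with the memory operand for the load.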
unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

  unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}

void IncomingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}

bool IncomingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

namespace {
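// Handler for outgoing values: call arguments and return values. MIB is the
// call or return instruction under construction, so assigned registers can
// be attached to it as implicit operands.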
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg, const EVT &VT) override;

  unsigned extendRegister(unsigned ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

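// Place an outgoing value into its assigned register. An f64 bound for a GPR
// pair is split into halves with ExtractElementF64, an f32 bound for a GPR
// is moved out of its FPR with MFC1, and anything else is extended as the
// location requires and copied, with the register recorded as implicitly
// used by the call or return instruction.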
void OutgoingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  unsigned PhysReg = VA.getLocReg();
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 1 : 0))
        .addUse(ValVReg)
        .addImm(1)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(ValVReg)
        .addImm(0)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MFC1)
        .addDef(PhysReg)
        .addUse(ValVReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else {
    unsigned ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
}

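// Compute the address of an outgoing stack argument as SP plus the assigned
// offset, and create the memory operand for the store.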
unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  unsigned SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Mips::SP);

  unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg;
}

void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  unsigned Addr = getStackAddress(VA, MMO);
  unsigned ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

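// Extend ValReg to its assigned location's type according to the location's
// extension kind; Full locations are passed through unchanged.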
unsigned OutgoingValueHandler::extendRegister(unsigned ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO: handle upper extends.
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

bool OutgoingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;

  return true;
}

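// Only plain integer, pointer and floating point types are lowered for now;
// aggregates are rejected until splitToValueTypes learns to split them (see
// the TODO there).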
static bool isSupportedType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  return false;
}

static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // VT being wider than RegisterVT does not mean loss of information: it
  // means that VT is split into multiple registers of type RegisterVT.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

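// Rewrite the LocInfo of each location to the value determineLocInfo derives
// from the corresponding ISD argument; CCValAssign offers no setter, so each
// entry is rebuilt.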
template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

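// Lower a return. The return value, if any, is analyzed with the return
// calling convention and emitted through an OutgoingValueHandler attached to
// a RetRA pseudo, which is inserted last.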
bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<unsigned> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices);
    }

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

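// Lower incoming formal arguments. Vararg functions and unsupported argument
// types are rejected; otherwise each argument is fetched from its assigned
// register or stack slot by an IncomingValueHandler.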
bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                            const Function &F,
                                            ArrayRef<unsigned> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  if (F.isVarArg()) {
    return false;
  }

  for (auto &Arg : F.args()) {
    if (!isSupportedType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  return true;
}

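// Lower an outgoing call. Only the C calling convention and supported types
// are handled; arguments are placed by an OutgoingValueHandler between the
// ADJCALLSTACKDOWN/ADJCALLSTACKUP bracket, and results are read back through
// a CallReturnHandler attached to the JAL/JALRPseudo instruction.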
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallingConv::ID CallConv,
                                 const MachineOperand &Callee,
                                 const ArgInfo &OrigRet,
                                 ArrayRef<ArgInfo> OrigArgs) const {

  if (CallConv != CallingConv::C)
    return false;

  for (auto &Arg : OrigArgs) {
    if (!isSupportedType(Arg.Ty))
      return false;
    if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
      return false;
  }
  if (OrigRet.Reg && !isSupportedType(OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
      Callee.isReg() ? Mips::JALRPseudo : Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  MIB.add(Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : OrigArgs) {
    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
  const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  MIRBuilder.insertInstr(MIB);
  if (MIB->getOpcode() == Mips::JALRPseudo) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    MIB.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                         *STI.getRegBankInfo());
  }

  if (OrigRet.Reg) {
    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

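// Expand each ArgInfo into one ISD::InputArg/OutputArg per register that the
// calling convention uses for it, remembering the original argument index so
// locations can later be traced back to IR arguments.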
template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {
    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags;

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(1);

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

void MipsCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  // TODO: Perform structure and array splits. For now we only deal with
  // types that pass the isSupportedType check.
  SplitArgs.push_back(OrigArg);
  SplitArgsOrigIndices.push_back(OriginalIndex);
}