//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

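// Route a single virtual register to wherever the calling convention placed
// its value: a physical register or a stack location.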
bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA,
                                           const EVT &VT) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA, VT);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<Register> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex,
                                                const EVT &VT) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT))
      return false;
  return true;
}

void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<Register> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}

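// Assign every argument to its locations. A value that the calling convention
// splits across several registers gets one temporary virtual register per
// piece and is reassembled or dismantled in handleSplit().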
bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<Register, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    assert(Args[ArgsIndex].Regs.size() == 1 && "Can't handle multiple regs yet");

    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Regs[0],
                       VT))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Regs[0], ArgLocs[ArgLocsIndex], VT))
        return false;
    }
  }
  return true;
}

namespace {
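// Handles values entering the function: formal arguments, and (via
// CallReturnHandler) results coming back from calls.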
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(Register Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    Register Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

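// Identical to IncomingValueHandler except that physical registers defined by
// the callee are recorded as implicit defs on the call instruction instead of
// as basic-block live-ins.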
class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

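// Copy an incoming physical register into ValVReg. f64 and f32 values that
// arrive in the integer argument registers A0-A3 are moved into FPRs with
// BuildPairF64/MTC1; extended integers are copied at the location type and
// truncated back to the value type.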
void IncomingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
  Register PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::BuildPairF64_64
                                    : Mips::BuildPairF64)
        .addDef(ValVReg)
        .addUse(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(PhysReg + (STI.isLittle() ? 1 : 0))
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
    markPhysRegUsed(PhysReg + 1);
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MTC1)
        .addDef(ValVReg)
        .addUse(PhysReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
  } else {
    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    markPhysRegUsed(PhysReg);
  }
}

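// Incoming stack arguments live in the caller's frame: materialize a fixed
// frame index at the assigned offset and return its address.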
Register IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

  Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}

void IncomingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    Register LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}

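// Reassemble a value that arrived split across several locations: assign each
// piece to its own virtual register, order the pieces least significant
// first, and merge them into ArgsReg.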
bool IncomingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

namespace {
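// Handles values leaving the function: return values, and the arguments of
// calls it makes.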
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  Register extendRegister(Register ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

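// Move an outgoing value into its assigned physical register. f64 values
// bound for A0-A3 are split with ExtractElementF64, f32 values are moved with
// MFC1, and integers are extended to the location type before the copy.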
void OutgoingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  Register PhysReg = VA.getLocReg();
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 1 : 0))
        .addUse(ValVReg)
        .addImm(1)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(ValVReg)
        .addImm(0)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MFC1)
        .addDef(PhysReg)
        .addUse(ValVReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else {
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
}

Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  Register SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Register(Mips::SP));

  Register OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  Register AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg;
}

void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  Register Addr = getStackAddress(VA, MMO);
  Register ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

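// Widen ValReg to the location type according to the assignment's LocInfo
// (sign, zero or any extend); Full assignments are returned unchanged.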
Register OutgoingValueHandler::extendRegister(Register ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO : handle upper extends
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

bool OutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;

  return true;
}

static bool isSupportedType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  return false;
}

static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // VT being larger than RegisterVT does not mean information is lost because
  // RegisterVT cannot hold VT; it means VT is split into multiple registers
  // of type RegisterVT.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

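// Rewrite each CCValAssign in ArgLocs with the LocInfo recomputed by
// determineLocInfo for the corresponding ISD argument.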
template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

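// Lower a return: split the returned value, run the pieces through the return
// calling convention, hand them to OutgoingValueHandler and emit RetRA.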
bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<Register> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices);
    }

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

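// Lower incoming formal arguments: run them through the calling convention
// and let IncomingValueHandler copy or load each piece into the virtual
// registers assigned to the IR arguments.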
bool MipsCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  if (F.isVarArg()) {
    return false;
  }

  for (auto &Arg : F.args()) {
    if (!isSupportedType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  return true;
}

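// Lower a call: bracket it with ADJCALLSTACKDOWN/ADJCALLSTACKUP, build the
// JAL/JALRPseudo instruction, pass arguments through OutgoingValueHandler and
// read results back through CallReturnHandler.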
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallingConv::ID CallConv,
                                 const MachineOperand &Callee,
                                 const ArgInfo &OrigRet,
                                 ArrayRef<ArgInfo> OrigArgs) const {

  if (CallConv != CallingConv::C)
    return false;

  for (auto &Arg : OrigArgs) {
    if (!isSupportedType(Arg.Ty))
      return false;
    if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
      return false;
  }

  if (!OrigRet.Ty->isVoidTy() && !isSupportedType(OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  const bool IsCalleeGlobalPIC =
      Callee.isGlobal() && TM.isPositionIndependent();

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
      Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  if (IsCalleeGlobalPIC) {
    Register CalleeReg =
        MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
    MachineInstr *CalleeGlobalValue =
        MIRBuilder.buildGlobalValue(CalleeReg, Callee.getGlobal());
    if (!Callee.getGlobal()->hasLocalLinkage())
      CalleeGlobalValue->getOperand(1).setTargetFlags(MipsII::MO_GOT_CALL);
    MIB.addUse(CalleeReg);
  } else
    MIB.add(Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
  const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

= CCInfo
.getNextStackOffset();
583 const TargetFrameLowering
*TFL
= MF
.getSubtarget().getFrameLowering();
584 unsigned StackAlignment
= TFL
->getStackAlignment();
585 NextStackOffset
= alignTo(NextStackOffset
, StackAlignment
);
586 CallSeqStart
.addImm(NextStackOffset
).addImm(0);
588 if (IsCalleeGlobalPIC
) {
589 MIRBuilder
.buildCopy(
591 MF
.getInfo
<MipsFunctionInfo
>()->getGlobalBaseRegForGlobalISel());
592 MIB
.addDef(Mips::GP
, RegState::Implicit
);
594 MIRBuilder
.insertInstr(MIB
);
595 if (MIB
->getOpcode() == Mips::JALRPseudo
) {
596 const MipsSubtarget
&STI
=
597 static_cast<const MipsSubtarget
&>(MIRBuilder
.getMF().getSubtarget());
598 MIB
.constrainAllUses(MIRBuilder
.getTII(), *STI
.getRegisterInfo(),
599 *STI
.getRegBankInfo());
  if (!OrigRet.Ty->isVoidTy()) {
    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

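// Expand each ArgInfo into the ISD::InputArg/OutputArg entries expected by
// the calling convention analysis: one entry per register the value occupies,
// carrying the original alignment only on the first piece.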
template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags;

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(1);

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

void MipsCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  // TODO : perform structure and array split. For now we only deal with
  // types that pass isSupportedType check.
  SplitArgs.push_back(OrigArg);
  SplitArgsOrigIndices.push_back(OriginalIndex);
}