//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA,
                                           const EVT &VT) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA, VT);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<Register> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex,
                                                const EVT &VT) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT))
      return false;
  return true;
}

void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<Register> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}

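// Assign a sequence of arguments or return values to the locations chosen by
// the calling convention. If a value needs more registers than its IR type
// provides (e.g. an i64 lowered onto two 32-bit registers), fresh virtual
// registers are created for the pieces and handleSplit() merges or unmerges
// them around the per-piece assignment.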
bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<Register, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    assert(Args[ArgsIndex].Regs.size() == 1 && "Can't handle multiple regs yet");

    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Regs[0],
                       VT))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Regs[0], ArgLocs[ArgLocsIndex], VT))
        return false;
    }
  }
  return true;
}

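// Handlers for values flowing into the current function: formal arguments
// arriving in registers or in fixed stack slots, and (via CallReturnHandler)
// values returned to us by a callee.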
namespace {
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(Register Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    Register Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

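// Copy an incoming physical register into its virtual register. Floating
// point values that the calling convention placed in integer argument
// registers (A0-A3, as O32 does for some signatures) are moved back into FPRs
// with BuildPairF64/MTC1. Everything else is a plain copy, truncated first
// when the location type is wider than the value type.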
void IncomingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
  Register PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::BuildPairF64_64
                                    : Mips::BuildPairF64)
        .addDef(ValVReg)
        .addUse(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(PhysReg + (STI.isLittle() ? 1 : 0))
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
    markPhysRegUsed(PhysReg + 1);
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MTC1)
        .addDef(ValVReg)
        .addUse(PhysReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
  } else {
    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    markPhysRegUsed(PhysReg);
  }
}

Register IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

  Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}

void IncomingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    Register LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}

bool IncomingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

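// Handler for values leaving the current function: call arguments and return
// values are copied into physical registers or stored to the outgoing
// argument area on the stack.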
namespace {
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  Register extendRegister(Register ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

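// Move an outgoing value into its assigned physical register: f64 values
// heading for integer registers are split with ExtractElementF64, f32 values
// are moved with MFC1, and anything else is extended to the location type (if
// the convention asks for it), copied, and added as an implicit use of the
// call or return instruction.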
void OutgoingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  Register PhysReg = VA.getLocReg();
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 1 : 0))
        .addUse(ValVReg)
        .addImm(1)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(ValVReg)
        .addImm(0)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MFC1)
        .addDef(PhysReg)
        .addUse(ValVReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else {
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
}

Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  Register SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Register(Mips::SP));

  Register OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  Register AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg;
}

void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  Register Addr = getStackAddress(VA, MMO);
  Register ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

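// Extend ValReg to the location type requested by the calling convention.
// Full means the value already fills the whole location and is returned
// unchanged.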
Register OutgoingValueHandler::extendRegister(Register ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO: handle upper extends
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

bool OutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  return true;
}

static bool isSupportedArgumentType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  return false;
}

static bool isSupportedReturnType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  if (T->isAggregateType())
    return true;
  return false;
}

static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // If VT is at least as wide as RegisterVT there is no loss of information:
  // the value is simply split into multiple registers of type RegisterVT.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

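// Rewrite the LocInfo of each assigned location using the extension computed
// by determineLocInfo(), keeping the register or memory assignment itself.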
template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

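// Lower a return: split the returned value into legal pieces, run the return
// calling convention over them, and attach the assigned physical registers as
// implicit operands of the RetRA pseudo.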
bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<Register> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedReturnType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    ArgInfo ArgRetInfo(VRegs, Val->getType());
    setArgFlags(ArgRetInfo, AttributeList::ReturnIndex, DL, F);
    splitToValueTypes(DL, ArgRetInfo, 0, RetInfos, OrigArgIndices);

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

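// Lower formal arguments: run the incoming calling convention, copy argument
// registers into virtual registers or load arguments from fixed stack
// objects, and for vararg functions spill the remaining variadic argument
// registers to the va_list save area.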
bool MipsCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  for (auto &Arg : F.args()) {
    if (!isSupportedArgumentType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    ArgInfos.push_back(AInfo);
    OrigArgIndices.push_back(i);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  if (F.isVarArg()) {
    ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);

    int VaArgOffset;
    unsigned RegSize = 4;
    if (ArgRegs.size() == Idx)
      VaArgOffset = alignTo(CCInfo.getNextStackOffset(), RegSize);
    else
      VaArgOffset =
          (int)ABI.GetCalleeAllocdArgSizeInBytes(CCInfo.getCallingConv()) -
          (int)(RegSize * (ArgRegs.size() - Idx));

    MachineFrameInfo &MFI = MF.getFrameInfo();
    int FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true);
    MF.getInfo<MipsFunctionInfo>()->setVarArgsFrameIndex(FI);

    for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += RegSize) {
      MIRBuilder.getMBB().addLiveIn(ArgRegs[I]);

      MachineInstrBuilder Copy =
          MIRBuilder.buildCopy(LLT::scalar(RegSize * 8), Register(ArgRegs[I]));
      FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true);
      MachinePointerInfo MPO = MachinePointerInfo::getFixedStack(MF, FI);
      MachineInstrBuilder FrameIndex =
          MIRBuilder.buildFrameIndex(LLT::pointer(MPO.getAddrSpace(), 32), FI);
      MachineMemOperand *MMO =
          MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, RegSize,
                                  /* Alignment */ RegSize);
      MIRBuilder.buildStore(Copy, FrameIndex, *MMO);
    }
  }

  return true;
}

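// Lower a call: emit JAL (or JALRPseudo for indirect and PIC calls), pass
// arguments through OutgoingValueHandler, bracket everything with
// ADJCALLSTACKDOWN/ADJCALLSTACKUP, and read any return value back with
// CallReturnHandler. For PIC calls to global symbols the callee address is
// loaded via a MO_GOT_CALL-flagged global and $gp is set up from the global
// base register first.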
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallLoweringInfo &Info) const {

  if (Info.CallConv != CallingConv::C)
    return false;

  for (auto &Arg : Info.OrigArgs) {
    if (!isSupportedArgumentType(Arg.Ty))
      return false;
    if (Arg.Flags[0].isByVal())
      return false;
    if (Arg.Flags[0].isSRet() && !Arg.Ty->isPointerTy())
      return false;
  }

  if (!Info.OrigRet.Ty->isVoidTy() && !isSupportedReturnType(Info.OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  const bool IsCalleeGlobalPIC =
      Info.Callee.isGlobal() && TM.isPositionIndependent();

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
      Info.Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  if (IsCalleeGlobalPIC) {
    Register CalleeReg =
        MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
    MachineInstr *CalleeGlobalValue =
        MIRBuilder.buildGlobalValue(CalleeReg, Info.Callee.getGlobal());
    if (!Info.Callee.getGlobal()->hasLocalLinkage())
      CalleeGlobalValue->getOperand(1).setTargetFlags(MipsII::MO_GOT_CALL);
    MIB.addUse(CalleeReg);
  } else
    MIB.add(Info.Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(Info.OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : Info.OrigArgs) {
    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    ArgInfos.push_back(Arg);
    OrigArgIndices.push_back(i);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  bool IsCalleeVarArg = false;
  if (Info.Callee.isGlobal()) {
    const Function *CF = static_cast<const Function *>(Info.Callee.getGlobal());
    IsCalleeVarArg = CF->isVarArg();
  }
  MipsCCState CCInfo(F.getCallingConv(), IsCalleeVarArg, MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv), 1);
  const char *Call =
      Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  if (IsCalleeGlobalPIC) {
    MIRBuilder.buildCopy(
        Register(Mips::GP),
        MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
    MIB.addDef(Mips::GP, RegState::Implicit);
  }
  MIRBuilder.insertInstr(MIB);
  if (MIB->getOpcode() == Mips::JALRPseudo) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    MIB.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                         *STI.getRegBankInfo());
  }

  if (!Info.OrigRet.Ty->isVoidTy()) {
    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(DL, Info.OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), Info.OrigRet.Ty,
                             Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

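// For every argument, emit one ISD::InputArg/OutputArg per register the
// calling convention will use for it, mirroring the splitting that
// SelectionDAG performs, so that CCState analyzes the same set of locations.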
template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {
    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags[0];

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(Align::None());

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

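// Split OrigArg into one ArgInfo per value type reported by ComputeValueVTs,
// remembering which original argument each piece belongs to.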
void MipsCallLowering::splitToValueTypes(
    const DataLayout &DL, const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  SmallVector<EVT, 4> SplitEVTs;
  SmallVector<Register, 4> SplitVRegs;
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitEVTs);

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    ArgInfo Info = ArgInfo{OrigArg.Regs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
    Info.Flags = OrigArg.Flags;
    SplitArgs.push_back(Info);
    SplitArgsOrigIndices.push_back(OriginalIndex);
  }
}