//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//
16 #include "MipsCallLowering.h"
17 #include "MipsCCState.h"
18 #include "MipsTargetMachine.h"
19 #include "llvm/CodeGen/Analysis.h"
20 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
24 MipsCallLowering::MipsCallLowering(const MipsTargetLowering
&TLI
)
25 : CallLowering(&TLI
) {}
27 bool MipsCallLowering::MipsHandler::assign(unsigned VReg
,
28 const CCValAssign
&VA
) {
30 assignValueToReg(VReg
, VA
);
31 } else if (VA
.isMemLoc()) {
32 assignValueToAddress(VReg
, VA
);
40 class IncomingValueHandler
: public MipsCallLowering::MipsHandler
{
42 IncomingValueHandler(MachineIRBuilder
&MIRBuilder
, MachineRegisterInfo
&MRI
)
43 : MipsHandler(MIRBuilder
, MRI
) {}
45 bool handle(ArrayRef
<CCValAssign
> ArgLocs
,
46 ArrayRef
<CallLowering::ArgInfo
> Args
);
49 void assignValueToReg(unsigned ValVReg
, const CCValAssign
&VA
) override
;
51 unsigned getStackAddress(const CCValAssign
&VA
,
52 MachineMemOperand
*&MMO
) override
;
54 void assignValueToAddress(unsigned ValVReg
, const CCValAssign
&VA
) override
;
56 virtual void markPhysRegUsed(unsigned PhysReg
) {
57 MIRBuilder
.getMBB().addLiveIn(PhysReg
);
60 void buildLoad(unsigned Val
, const CCValAssign
&VA
) {
61 MachineMemOperand
*MMO
;
62 unsigned Addr
= getStackAddress(VA
, MMO
);
63 MIRBuilder
.buildLoad(Val
, Addr
, *MMO
);
67 class CallReturnHandler
: public IncomingValueHandler
{
69 CallReturnHandler(MachineIRBuilder
&MIRBuilder
, MachineRegisterInfo
&MRI
,
70 MachineInstrBuilder
&MIB
)
71 : IncomingValueHandler(MIRBuilder
, MRI
), MIB(MIB
) {}
74 void markPhysRegUsed(unsigned PhysReg
) override
{
75 MIB
.addDef(PhysReg
, RegState::Implicit
);
78 MachineInstrBuilder
&MIB
;
81 } // end anonymous namespace
83 void IncomingValueHandler::assignValueToReg(unsigned ValVReg
,
84 const CCValAssign
&VA
) {
85 unsigned PhysReg
= VA
.getLocReg();
86 switch (VA
.getLocInfo()) {
87 case CCValAssign::LocInfo::SExt
:
88 case CCValAssign::LocInfo::ZExt
:
89 case CCValAssign::LocInfo::AExt
: {
90 auto Copy
= MIRBuilder
.buildCopy(LLT
{VA
.getLocVT()}, PhysReg
);
91 MIRBuilder
.buildTrunc(ValVReg
, Copy
);
95 MIRBuilder
.buildCopy(ValVReg
, PhysReg
);
98 markPhysRegUsed(PhysReg
);
101 unsigned IncomingValueHandler::getStackAddress(const CCValAssign
&VA
,
102 MachineMemOperand
*&MMO
) {
103 unsigned Size
= alignTo(VA
.getValVT().getSizeInBits(), 8) / 8;
104 unsigned Offset
= VA
.getLocMemOffset();
105 MachineFrameInfo
&MFI
= MIRBuilder
.getMF().getFrameInfo();
107 int FI
= MFI
.CreateFixedObject(Size
, Offset
, true);
108 MachinePointerInfo MPO
=
109 MachinePointerInfo::getFixedStack(MIRBuilder
.getMF(), FI
);
110 MMO
= MIRBuilder
.getMF().getMachineMemOperand(MPO
, MachineMemOperand::MOLoad
,
111 Size
, /* Alignment */ 0);
113 unsigned AddrReg
= MRI
.createGenericVirtualRegister(LLT::pointer(0, 32));
114 MIRBuilder
.buildFrameIndex(AddrReg
, FI
);
119 void IncomingValueHandler::assignValueToAddress(unsigned ValVReg
,
120 const CCValAssign
&VA
) {
121 if (VA
.getLocInfo() == CCValAssign::SExt
||
122 VA
.getLocInfo() == CCValAssign::ZExt
||
123 VA
.getLocInfo() == CCValAssign::AExt
) {
124 unsigned LoadReg
= MRI
.createGenericVirtualRegister(LLT::scalar(32));
125 buildLoad(LoadReg
, VA
);
126 MIRBuilder
.buildTrunc(ValVReg
, LoadReg
);
128 buildLoad(ValVReg
, VA
);
131 bool IncomingValueHandler::handle(ArrayRef
<CCValAssign
> ArgLocs
,
132 ArrayRef
<CallLowering::ArgInfo
> Args
) {
133 for (unsigned i
= 0, ArgsSize
= Args
.size(); i
< ArgsSize
; ++i
) {
134 if (!assign(Args
[i
].Reg
, ArgLocs
[i
]))
141 class OutgoingValueHandler
: public MipsCallLowering::MipsHandler
{
143 OutgoingValueHandler(MachineIRBuilder
&MIRBuilder
, MachineRegisterInfo
&MRI
,
144 MachineInstrBuilder
&MIB
)
145 : MipsHandler(MIRBuilder
, MRI
), MIB(MIB
) {}
147 bool handle(ArrayRef
<CCValAssign
> ArgLocs
,
148 ArrayRef
<CallLowering::ArgInfo
> Args
);
151 void assignValueToReg(unsigned ValVReg
, const CCValAssign
&VA
) override
;
153 unsigned getStackAddress(const CCValAssign
&VA
,
154 MachineMemOperand
*&MMO
) override
;
156 void assignValueToAddress(unsigned ValVReg
, const CCValAssign
&VA
) override
;
158 unsigned extendRegister(unsigned ValReg
, const CCValAssign
&VA
);
160 MachineInstrBuilder
&MIB
;
162 } // end anonymous namespace
164 void OutgoingValueHandler::assignValueToReg(unsigned ValVReg
,
165 const CCValAssign
&VA
) {
166 unsigned PhysReg
= VA
.getLocReg();
167 unsigned ExtReg
= extendRegister(ValVReg
, VA
);
168 MIRBuilder
.buildCopy(PhysReg
, ExtReg
);
169 MIB
.addUse(PhysReg
, RegState::Implicit
);
172 unsigned OutgoingValueHandler::getStackAddress(const CCValAssign
&VA
,
173 MachineMemOperand
*&MMO
) {
174 LLT p0
= LLT::pointer(0, 32);
175 LLT s32
= LLT::scalar(32);
176 unsigned SPReg
= MRI
.createGenericVirtualRegister(p0
);
177 MIRBuilder
.buildCopy(SPReg
, Mips::SP
);
179 unsigned OffsetReg
= MRI
.createGenericVirtualRegister(s32
);
180 unsigned Offset
= VA
.getLocMemOffset();
181 MIRBuilder
.buildConstant(OffsetReg
, Offset
);
183 unsigned AddrReg
= MRI
.createGenericVirtualRegister(p0
);
184 MIRBuilder
.buildGEP(AddrReg
, SPReg
, OffsetReg
);
186 MachinePointerInfo MPO
=
187 MachinePointerInfo::getStack(MIRBuilder
.getMF(), Offset
);
188 unsigned Size
= alignTo(VA
.getValVT().getSizeInBits(), 8) / 8;
189 MMO
= MIRBuilder
.getMF().getMachineMemOperand(MPO
, MachineMemOperand::MOStore
,
190 Size
, /* Alignment */ 0);
195 void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg
,
196 const CCValAssign
&VA
) {
197 MachineMemOperand
*MMO
;
198 unsigned Addr
= getStackAddress(VA
, MMO
);
199 unsigned ExtReg
= extendRegister(ValVReg
, VA
);
200 MIRBuilder
.buildStore(ExtReg
, Addr
, *MMO
);
203 unsigned OutgoingValueHandler::extendRegister(unsigned ValReg
,
204 const CCValAssign
&VA
) {
205 LLT LocTy
{VA
.getLocVT()};
206 switch (VA
.getLocInfo()) {
207 case CCValAssign::SExt
: {
208 unsigned ExtReg
= MRI
.createGenericVirtualRegister(LocTy
);
209 MIRBuilder
.buildSExt(ExtReg
, ValReg
);
212 case CCValAssign::ZExt
: {
213 unsigned ExtReg
= MRI
.createGenericVirtualRegister(LocTy
);
214 MIRBuilder
.buildZExt(ExtReg
, ValReg
);
217 case CCValAssign::AExt
: {
218 unsigned ExtReg
= MRI
.createGenericVirtualRegister(LocTy
);
219 MIRBuilder
.buildAnyExt(ExtReg
, ValReg
);
222 // TODO : handle upper extends
223 case CCValAssign::Full
:
228 llvm_unreachable("unable to extend register");
231 bool OutgoingValueHandler::handle(ArrayRef
<CCValAssign
> ArgLocs
,
232 ArrayRef
<CallLowering::ArgInfo
> Args
) {
233 for (unsigned i
= 0; i
< Args
.size(); ++i
) {
234 if (!assign(Args
[i
].Reg
, ArgLocs
[i
]))
240 static bool isSupportedType(Type
*T
) {
241 if (T
->isIntegerTy() && T
->getScalarSizeInBits() <= 32)
243 if (T
->isPointerTy())
248 CCValAssign::LocInfo
determineLocInfo(const MVT RegisterVT
, const EVT VT
,
249 const ISD::ArgFlagsTy
&Flags
) {
250 if (VT
.getSizeInBits() == RegisterVT
.getSizeInBits())
251 return CCValAssign::LocInfo::Full
;
253 return CCValAssign::LocInfo::SExt
;
255 return CCValAssign::LocInfo::ZExt
;
256 return CCValAssign::LocInfo::AExt
;
259 template <typename T
>
260 void setLocInfo(SmallVectorImpl
<CCValAssign
> &ArgLocs
,
261 const SmallVectorImpl
<T
> &Arguments
) {
262 for (unsigned i
= 0; i
< ArgLocs
.size(); ++i
) {
263 const CCValAssign
&VA
= ArgLocs
[i
];
264 CCValAssign::LocInfo LocInfo
= determineLocInfo(
265 Arguments
[i
].VT
, Arguments
[i
].ArgVT
, Arguments
[i
].Flags
);
268 CCValAssign::getMem(VA
.getValNo(), VA
.getValVT(),
269 VA
.getLocMemOffset(), VA
.getLocVT(), LocInfo
);
271 ArgLocs
[i
] = CCValAssign::getReg(VA
.getValNo(), VA
.getValVT(),
272 VA
.getLocReg(), VA
.getLocVT(), LocInfo
);
276 bool MipsCallLowering::lowerReturn(MachineIRBuilder
&MIRBuilder
,
278 ArrayRef
<unsigned> VRegs
) const {
280 MachineInstrBuilder Ret
= MIRBuilder
.buildInstrNoInsert(Mips::RetRA
);
282 if (Val
!= nullptr && !isSupportedType(Val
->getType()))
285 if (!VRegs
.empty()) {
286 MachineFunction
&MF
= MIRBuilder
.getMF();
287 const Function
&F
= MF
.getFunction();
288 const DataLayout
&DL
= MF
.getDataLayout();
289 const MipsTargetLowering
&TLI
= *getTLI
<MipsTargetLowering
>();
290 LLVMContext
&Ctx
= Val
->getType()->getContext();
292 SmallVector
<EVT
, 4> SplitEVTs
;
293 ComputeValueVTs(TLI
, DL
, Val
->getType(), SplitEVTs
);
294 assert(VRegs
.size() == SplitEVTs
.size() &&
295 "For each split Type there should be exactly one VReg.");
297 SmallVector
<ArgInfo
, 8> RetInfos
;
298 SmallVector
<unsigned, 8> OrigArgIndices
;
300 for (unsigned i
= 0; i
< SplitEVTs
.size(); ++i
) {
301 ArgInfo CurArgInfo
= ArgInfo
{VRegs
[i
], SplitEVTs
[i
].getTypeForEVT(Ctx
)};
302 setArgFlags(CurArgInfo
, AttributeList::ReturnIndex
, DL
, F
);
303 splitToValueTypes(CurArgInfo
, 0, RetInfos
, OrigArgIndices
);
306 SmallVector
<ISD::OutputArg
, 8> Outs
;
307 subTargetRegTypeForCallingConv(
308 MIRBuilder
, RetInfos
, OrigArgIndices
,
309 [&](ISD::ArgFlagsTy flags
, EVT vt
, EVT argvt
, bool used
,
310 unsigned origIdx
, unsigned partOffs
) {
311 Outs
.emplace_back(flags
, vt
, argvt
, used
, origIdx
, partOffs
);
314 SmallVector
<CCValAssign
, 16> ArgLocs
;
315 MipsCCState
CCInfo(F
.getCallingConv(), F
.isVarArg(), MF
, ArgLocs
,
317 CCInfo
.AnalyzeReturn(Outs
, TLI
.CCAssignFnForReturn());
318 setLocInfo(ArgLocs
, Outs
);
320 OutgoingValueHandler
RetHandler(MIRBuilder
, MF
.getRegInfo(), Ret
);
321 if (!RetHandler
.handle(ArgLocs
, RetInfos
)) {
325 MIRBuilder
.insertInstr(Ret
);
329 bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder
&MIRBuilder
,
331 ArrayRef
<unsigned> VRegs
) const {
333 // Quick exit if there aren't any args.
341 for (auto &Arg
: F
.args()) {
342 if (!isSupportedType(Arg
.getType()))
346 MachineFunction
&MF
= MIRBuilder
.getMF();
347 const DataLayout
&DL
= MF
.getDataLayout();
348 const MipsTargetLowering
&TLI
= *getTLI
<MipsTargetLowering
>();
350 SmallVector
<ArgInfo
, 8> ArgInfos
;
351 SmallVector
<unsigned, 8> OrigArgIndices
;
353 for (auto &Arg
: F
.args()) {
354 ArgInfo
AInfo(VRegs
[i
], Arg
.getType());
355 setArgFlags(AInfo
, i
+ AttributeList::FirstArgIndex
, DL
, F
);
356 splitToValueTypes(AInfo
, i
, ArgInfos
, OrigArgIndices
);
360 SmallVector
<ISD::InputArg
, 8> Ins
;
361 subTargetRegTypeForCallingConv(
362 MIRBuilder
, ArgInfos
, OrigArgIndices
,
363 [&](ISD::ArgFlagsTy flags
, EVT vt
, EVT argvt
, bool used
, unsigned origIdx
,
365 Ins
.emplace_back(flags
, vt
, argvt
, used
, origIdx
, partOffs
);
368 SmallVector
<CCValAssign
, 16> ArgLocs
;
369 MipsCCState
CCInfo(F
.getCallingConv(), F
.isVarArg(), MF
, ArgLocs
,
372 const MipsTargetMachine
&TM
=
373 static_cast<const MipsTargetMachine
&>(MF
.getTarget());
374 const MipsABIInfo
&ABI
= TM
.getABI();
375 CCInfo
.AllocateStack(ABI
.GetCalleeAllocdArgSizeInBytes(F
.getCallingConv()),
377 CCInfo
.AnalyzeFormalArguments(Ins
, TLI
.CCAssignFnForCall());
378 setLocInfo(ArgLocs
, Ins
);
380 IncomingValueHandler
Handler(MIRBuilder
, MF
.getRegInfo());
381 if (!Handler
.handle(ArgLocs
, ArgInfos
))
387 bool MipsCallLowering::lowerCall(MachineIRBuilder
&MIRBuilder
,
388 CallingConv::ID CallConv
,
389 const MachineOperand
&Callee
,
390 const ArgInfo
&OrigRet
,
391 ArrayRef
<ArgInfo
> OrigArgs
) const {
393 if (CallConv
!= CallingConv::C
)
396 for (auto &Arg
: OrigArgs
) {
397 if (!isSupportedType(Arg
.Ty
))
399 if (Arg
.Flags
.isByVal() || Arg
.Flags
.isSRet())
402 if (OrigRet
.Reg
&& !isSupportedType(OrigRet
.Ty
))
405 MachineFunction
&MF
= MIRBuilder
.getMF();
406 const Function
&F
= MF
.getFunction();
407 const MipsTargetLowering
&TLI
= *getTLI
<MipsTargetLowering
>();
408 const MipsTargetMachine
&TM
=
409 static_cast<const MipsTargetMachine
&>(MF
.getTarget());
410 const MipsABIInfo
&ABI
= TM
.getABI();
412 MachineInstrBuilder CallSeqStart
=
413 MIRBuilder
.buildInstr(Mips::ADJCALLSTACKDOWN
);
415 // FIXME: Add support for pic calling sequences, long call sequences for O32,
416 // N32 and N64. First handle the case when Callee.isReg().
420 MachineInstrBuilder MIB
= MIRBuilder
.buildInstrNoInsert(Mips::JAL
);
421 MIB
.addDef(Mips::SP
, RegState::Implicit
);
423 const TargetRegisterInfo
*TRI
= MF
.getSubtarget().getRegisterInfo();
424 MIB
.addRegMask(TRI
->getCallPreservedMask(MF
, F
.getCallingConv()));
426 TargetLowering::ArgListTy FuncOrigArgs
;
427 FuncOrigArgs
.reserve(OrigArgs
.size());
429 SmallVector
<ArgInfo
, 8> ArgInfos
;
430 SmallVector
<unsigned, 8> OrigArgIndices
;
432 for (auto &Arg
: OrigArgs
) {
434 TargetLowering::ArgListEntry Entry
;
436 FuncOrigArgs
.push_back(Entry
);
438 splitToValueTypes(Arg
, i
, ArgInfos
, OrigArgIndices
);
442 SmallVector
<ISD::OutputArg
, 8> Outs
;
443 subTargetRegTypeForCallingConv(
444 MIRBuilder
, ArgInfos
, OrigArgIndices
,
445 [&](ISD::ArgFlagsTy flags
, EVT vt
, EVT argvt
, bool used
, unsigned origIdx
,
447 Outs
.emplace_back(flags
, vt
, argvt
, used
, origIdx
, partOffs
);
450 SmallVector
<CCValAssign
, 8> ArgLocs
;
451 MipsCCState
CCInfo(F
.getCallingConv(), F
.isVarArg(), MF
, ArgLocs
,
454 CCInfo
.AllocateStack(ABI
.GetCalleeAllocdArgSizeInBytes(CallConv
), 1);
455 const char *Call
= Callee
.isSymbol() ? Callee
.getSymbolName() : nullptr;
456 CCInfo
.AnalyzeCallOperands(Outs
, TLI
.CCAssignFnForCall(), FuncOrigArgs
, Call
);
457 setLocInfo(ArgLocs
, Outs
);
459 OutgoingValueHandler
RetHandler(MIRBuilder
, MF
.getRegInfo(), MIB
);
460 if (!RetHandler
.handle(ArgLocs
, ArgInfos
)) {
464 unsigned NextStackOffset
= CCInfo
.getNextStackOffset();
465 const TargetFrameLowering
*TFL
= MF
.getSubtarget().getFrameLowering();
466 unsigned StackAlignment
= TFL
->getStackAlignment();
467 NextStackOffset
= alignTo(NextStackOffset
, StackAlignment
);
468 CallSeqStart
.addImm(NextStackOffset
).addImm(0);
470 MIRBuilder
.insertInstr(MIB
);
475 SmallVector
<unsigned, 8> OrigRetIndices
;
477 splitToValueTypes(OrigRet
, 0, ArgInfos
, OrigRetIndices
);
479 SmallVector
<ISD::InputArg
, 8> Ins
;
480 subTargetRegTypeForCallingConv(
481 MIRBuilder
, ArgInfos
, OrigRetIndices
,
482 [&](ISD::ArgFlagsTy flags
, EVT vt
, EVT argvt
, bool used
,
483 unsigned origIdx
, unsigned partOffs
) {
484 Ins
.emplace_back(flags
, vt
, argvt
, used
, origIdx
, partOffs
);
487 SmallVector
<CCValAssign
, 8> ArgLocs
;
488 MipsCCState
CCInfo(F
.getCallingConv(), F
.isVarArg(), MF
, ArgLocs
,
491 CCInfo
.AnalyzeCallResult(Ins
, TLI
.CCAssignFnForReturn(), OrigRet
.Ty
, Call
);
492 setLocInfo(ArgLocs
, Ins
);
494 CallReturnHandler
Handler(MIRBuilder
, MF
.getRegInfo(), MIB
);
495 if (!Handler
.handle(ArgLocs
, ArgInfos
))
499 MIRBuilder
.buildInstr(Mips::ADJCALLSTACKUP
).addImm(NextStackOffset
).addImm(0);
504 void MipsCallLowering::subTargetRegTypeForCallingConv(
505 MachineIRBuilder
&MIRBuilder
, ArrayRef
<ArgInfo
> Args
,
506 ArrayRef
<unsigned> OrigArgIndices
, const FunTy
&PushBack
) const {
507 MachineFunction
&MF
= MIRBuilder
.getMF();
508 const Function
&F
= MF
.getFunction();
509 const DataLayout
&DL
= F
.getParent()->getDataLayout();
510 const MipsTargetLowering
&TLI
= *getTLI
<MipsTargetLowering
>();
513 for (auto &Arg
: Args
) {
515 EVT VT
= TLI
.getValueType(DL
, Arg
.Ty
);
516 MVT RegisterVT
= TLI
.getRegisterTypeForCallingConv(F
.getContext(),
517 F
.getCallingConv(), VT
);
519 ISD::ArgFlagsTy Flags
= Arg
.Flags
;
520 Flags
.setOrigAlign(TLI
.getABIAlignmentForCallingConv(Arg
.Ty
, DL
));
522 PushBack(Flags
, RegisterVT
, VT
, true, OrigArgIndices
[ArgNo
], 0);
528 void MipsCallLowering::splitToValueTypes(
529 const ArgInfo
&OrigArg
, unsigned OriginalIndex
,
530 SmallVectorImpl
<ArgInfo
> &SplitArgs
,
531 SmallVectorImpl
<unsigned> &SplitArgsOrigIndices
) const {
533 // TODO : perform structure and array split. For now we only deal with
534 // types that pass isSupportedType check.
535 SplitArgs
.push_back(OrigArg
);
536 SplitArgsOrigIndices
.push_back(OriginalIndex
);