Fix uninitialized variable
[llvm-core.git] / lib/Target/Mips/MipsCallLowering.cpp
//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//
#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;
MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}
bool MipsCallLowering::MipsHandler::assign(unsigned VReg,
                                           const CCValAssign &VA) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}
bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<unsigned> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i]))
      return false;
  return true;
}
void MipsCallLowering::MipsHandler::setMostSignificantFirst(
    SmallVectorImpl<unsigned> &VRegs) {
  if (MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}
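// Assign a list of argument values to their CCValAssign locations. A value
// that the calling convention splits across several registers gets one fresh
// virtual register per piece, which the concrete handler merges or unmerges
// in handleSplit(); single-register values are assigned directly. Note that
// SplitLength is assigned in the loop body before the
// `ArgLocsIndex += SplitLength` increment first runs, so it is never read
// uninitialized.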
bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<unsigned, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Reg))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Reg, ArgLocs[ArgLocsIndex]))
        return false;
    }
  }
  return true;
}
namespace {
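// Handler for values flowing into the current function: physical argument
// registers are marked as live-ins of the block being built (or, in
// CallReturnHandler below, become implicit defs of the call instruction),
// and stack-passed values are loaded from fixed frame objects.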
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(unsigned Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    unsigned Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};
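// Handler for values returned by a callee: identical to IncomingValueHandler
// except that the returned physical registers become implicit defs of the
// call instruction instead of block live-ins.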
class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace
void IncomingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA) {
  unsigned PhysReg = VA.getLocReg();
  switch (VA.getLocInfo()) {
  case CCValAssign::LocInfo::SExt:
  case CCValAssign::LocInfo::ZExt:
  case CCValAssign::LocInfo::AExt: {
    // The caller extended the value to the location type; copy the full
    // location register, then truncate back down to the value type.
    auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
    MIRBuilder.buildTrunc(ValVReg, Copy);
    break;
  }
  default:
    MIRBuilder.buildCopy(ValVReg, PhysReg);
    break;
  }
  markPhysRegUsed(PhysReg);
}
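// Create a fixed frame object for an argument passed on the stack and return
// a pointer virtual register addressing it; the MachineMemOperand for the
// load is returned through MMO.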
unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
  MMO = MIRBuilder.getMF().getMachineMemOperand(MPO, MachineMemOperand::MOLoad,
                                                Size, /* Alignment */ 0);

  unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}
void IncomingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}
bool IncomingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex))
    return false;
  setMostSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}
namespace {
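// Handler for values leaving the current function, either as call arguments
// or as return values: each value is extended to its location type as the
// calling convention requires, then copied into a physical register (added
// as an implicit use of MIB) or stored relative to the stack pointer.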
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg) override;

  unsigned extendRegister(unsigned ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace
void OutgoingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA) {
  unsigned PhysReg = VA.getLocReg();
  unsigned ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildCopy(PhysReg, ExtReg);
  MIB.addUse(PhysReg, RegState::Implicit);
}
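// Compute the address of an outgoing stack argument as SP plus the location's
// byte offset and return it in a pointer virtual register; the
// MachineMemOperand for the store is returned through MMO.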
unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  unsigned SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Mips::SP);

  unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  MMO = MIRBuilder.getMF().getMachineMemOperand(MPO, MachineMemOperand::MOStore,
                                                Size, /* Alignment */ 0);

  return AddrReg;
}
void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  unsigned Addr = getStackAddress(VA, MMO);
  unsigned ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}
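// Extend ValReg to its location type according to the assignment's LocInfo,
// returning the (possibly new) virtual register holding the extended value.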
unsigned OutgoingValueHandler::extendRegister(unsigned ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO: handle upper extends.
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}
bool OutgoingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setMostSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex))
    return false;

  return true;
}
static bool isSupportedType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  return false;
}
static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // VT being at least as wide as RegisterVT does not mean information is lost
  // because RegisterVT cannot hold VT; it means VT is split into multiple
  // registers of type RegisterVT, so no extension is involved.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}
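// Rebuild each CCValAssign in ArgLocs with the LocInfo recomputed from the
// corresponding ISD argument's types and flags, keeping the assigned register
// or memory location itself unchanged.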
template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}
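// Lower a return: build a RetRA pseudo without inserting it, run the
// outgoing-value handler over the (possibly split) return values, and insert
// the instruction only once all copies to return registers have been emitted.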
bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<unsigned> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices);
    }

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}
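// Lower incoming formal arguments: bail out on vararg functions and
// unsupported argument types, run the calling convention over the
// register-sized pieces, and let IncomingValueHandler materialize each piece
// from its assigned register or stack slot.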
bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                            const Function &F,
                                            ArrayRef<unsigned> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  if (F.isVarArg()) {
    return false;
  }

  for (auto &Arg : F.args()) {
    if (!isSupportedType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  return true;
}
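// Lower an outgoing call: wrap the sequence in ADJCALLSTACKDOWN/UP, emit a
// JAL to the non-register callee, pass arguments through
// OutgoingValueHandler, and read any results back with CallReturnHandler.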
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallingConv::ID CallConv,
                                 const MachineOperand &Callee,
                                 const ArgInfo &OrigRet,
                                 ArrayRef<ArgInfo> OrigArgs) const {

  if (CallConv != CallingConv::C)
    return false;

  for (auto &Arg : OrigArgs) {
    if (!isSupportedType(Arg.Ty))
      return false;
    if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
      return false;
  }

  if (OrigRet.Reg && !isSupportedType(OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  // FIXME: Add support for pic calling sequences, long call sequences for O32,
  // N32 and N64. First handle the case when Callee.isReg().
  if (Callee.isReg())
    return false;

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  MIB.add(Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
  const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  MIRBuilder.insertInstr(MIB);

  if (OrigRet.Reg) {

    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}
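// Expand each ArgInfo into the ISD input/output arguments the calling
// convention actually sees: one entry of type RegisterVT per register needed
// for the original value, with the original alignment recorded only on the
// first piece.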
template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags;

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(1);

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}
void MipsCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  // TODO: perform structure and array split. For now we only deal with
  // types that pass the isSupportedType check.
  SplitArgs.push_back(OrigArg);
  SplitArgsOrigIndices.push_back(OriginalIndex);
}