llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
//===-- RISCVCallLowering.cpp - Call lowering -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "RISCVCallLowering.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"

using namespace llvm;

namespace {

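/// Assigner for outgoing arguments and return values. Rather than using the
/// generic AssignFn mechanism, it defers to the RISC-V calling-convention
/// function supplied at construction (CC_RISCV or CC_RISCV_FastCC).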
struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
private:
  // The function used internally to assign args - we ignore the AssignFn stored
  // by OutgoingValueAssigner since RISC-V implements its CC using a custom
  // function with a different signature.
  RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn;

  // Whether this is assigning args for a return.
  bool IsRet;

public:
  RISCVOutgoingValueAssigner(
      RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
      : CallLowering::OutgoingValueAssigner(nullptr),
        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    MachineFunction &MF = State.getMachineFunction();
    const DataLayout &DL = MF.getDataLayout();
    const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();

    if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
                      LocInfo, Flags, State, Info.IsFixed, IsRet, Info.Ty,
                      *Subtarget.getTargetLowering(),
                      /*FirstMaskArgument=*/std::nullopt))
      return true;

    StackSize = State.getStackSize();
    return false;
  }
};

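/// Handler that materializes outgoing argument and return values into the
/// physical registers and stack slots chosen by the assigner, recording each
/// used physical register as an implicit use on the call or return
/// instruction held in MIB.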
struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  RISCVOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                            MachineInstrBuilder MIB)
      : OutgoingValueHandler(B, MRI), MIB(MIB),
        Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}
  Register getStackAddress(uint64_t MemSize, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, Subtarget.getXLen());
    LLT sXLen = LLT::scalar(Subtarget.getXLen());

    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(p0, Register(RISCV::X2)).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(sXLen, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    uint64_t LocMemOffset = VA.getLocMemOffset();

    // TODO: Move StackAlignment to subtarget and share with FrameLowering.
    auto MMO =
        MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
                                commonAlignment(Align(16), LocMemOffset));

    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    // If we're passing an f32 value into an i64, anyextend before copying.
    if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
      ValVReg = MIRBuilder.buildAnyExt(LLT::scalar(64), ValVReg).getReg(0);

    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

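  // Custom handling for an f64 that the calling convention has split into two
  // i32 locations (an RV32 ABI where f64 is not passed in an FPR): unmerge the
  // value into two s32 halves, send the low half to VALo's register and the
  // high half to VAHi's register or stack slot.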
  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs,
                             std::function<void()> *Thunk) override {
    assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
    const CCValAssign &VALo = VAs[0];
    const CCValAssign &VAHi = VAs[1];

    assert(VAHi.needsCustom() && "Value doesn't need custom handling");
    assert(VALo.getValNo() == VAHi.getValNo() &&
           "Values belong to different arguments");

    assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
           VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
           "unexpected custom value");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
    MIRBuilder.buildUnmerge(NewRegs, Arg.Regs[0]);

    if (VAHi.isMemLoc()) {
      LLT MemTy(VAHi.getLocVT());

      MachinePointerInfo MPO;
      Register StackAddr = getStackAddress(
          MemTy.getSizeInBytes(), VAHi.getLocMemOffset(), MPO, Arg.Flags[0]);

      assignValueToAddress(NewRegs[1], StackAddr, MemTy, MPO,
                           const_cast<CCValAssign &>(VAHi));
    }

    auto assignFunc = [=]() {
      assignValueToReg(NewRegs[0], VALo.getLocReg(), VALo);
      if (VAHi.isRegLoc())
        assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);
    };

    if (Thunk) {
      *Thunk = assignFunc;
      return 2;
    }

    assignFunc();
    return 2;
  }

private:
  MachineInstrBuilder MIB;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  const RISCVSubtarget &Subtarget;
};

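/// Assigner for incoming formal arguments and call return values; like the
/// outgoing assigner, it drives the custom RISC-V calling-convention function
/// supplied at construction.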
struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
private:
  // The function used internally to assign args - we ignore the AssignFn stored
  // by IncomingValueAssigner since RISC-V implements its CC using a custom
  // function with a different signature.
  RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn;

  // Whether this is assigning args from a return.
  bool IsRet;

public:
  RISCVIncomingValueAssigner(
      RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
      : CallLowering::IncomingValueAssigner(nullptr),
        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    MachineFunction &MF = State.getMachineFunction();
    const DataLayout &DL = MF.getDataLayout();
    const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();

    if (LocVT.isScalableVector())
      MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();

    if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
                      LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
                      *Subtarget.getTargetLowering(),
                      /*FirstMaskArgument=*/std::nullopt))
      return true;

    StackSize = State.getStackSize();
    return false;
  }
};

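/// Handler that copies incoming values out of the physical registers or fixed
/// stack objects assigned by the calling convention into the requested virtual
/// registers.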
struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
  RISCVIncomingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : IncomingValueHandler(B, MRI),
        Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}

  Register getStackAddress(uint64_t MemSize, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo();

    int FI = MFI.CreateFixedObject(MemSize, Offset, /*Immutable=*/true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    return MIRBuilder.buildFrameIndex(LLT::pointer(0, Subtarget.getXLen()), FI)
        .getReg(0);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, MemTy,
                                       inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

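  // Mirror of the outgoing f64 split handling: receive the two i32 halves from
  // their assigned register/stack locations and merge them back into the
  // original f64 virtual register.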
  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs,
                             std::function<void()> *Thunk) override {
    assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
    const CCValAssign &VALo = VAs[0];
    const CCValAssign &VAHi = VAs[1];

    assert(VAHi.needsCustom() && "Value doesn't need custom handling");
    assert(VALo.getValNo() == VAHi.getValNo() &&
           "Values belong to different arguments");

    assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
           VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
           "unexpected custom value");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};

    if (VAHi.isMemLoc()) {
      LLT MemTy(VAHi.getLocVT());

      MachinePointerInfo MPO;
      Register StackAddr = getStackAddress(
          MemTy.getSizeInBytes(), VAHi.getLocMemOffset(), MPO, Arg.Flags[0]);

      assignValueToAddress(NewRegs[1], StackAddr, MemTy, MPO,
                           const_cast<CCValAssign &>(VAHi));
    }

    assignValueToReg(NewRegs[0], VALo.getLocReg(), VALo);
    if (VAHi.isRegLoc())
      assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);

    MIRBuilder.buildMergeLikeInstr(Arg.Regs[0], NewRegs);

    return 2;
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(MCRegister PhysReg) = 0;

private:
  const RISCVSubtarget &Subtarget;
};

struct RISCVFormalArgHandler : public RISCVIncomingValueHandler {
  RISCVFormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : RISCVIncomingValueHandler(B, MRI) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
  RISCVCallReturnHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                         MachineInstrBuilder &MIB)
      : RISCVIncomingValueHandler(B, MRI), MIB(MIB) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

} // namespace

RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
    : CallLowering(&TLI) {}

/// Return true if scalable vector with ScalarTy is legal for lowering.
static bool isLegalElementTypeForRVV(Type *EltTy,
                                     const RISCVSubtarget &Subtarget) {
  if (EltTy->isPointerTy())
    return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
  if (EltTy->isIntegerTy(1) || EltTy->isIntegerTy(8) ||
      EltTy->isIntegerTy(16) || EltTy->isIntegerTy(32))
    return true;
  if (EltTy->isIntegerTy(64))
    return Subtarget.hasVInstructionsI64();
  if (EltTy->isHalfTy())
    return Subtarget.hasVInstructionsF16();
  if (EltTy->isBFloatTy())
    return Subtarget.hasVInstructionsBF16();
  if (EltTy->isFloatTy())
    return Subtarget.hasVInstructionsF32();
  if (EltTy->isDoubleTy())
    return Subtarget.hasVInstructionsF64();
  return false;
}

// TODO: Support all argument types.
// TODO: Remove IsLowerArgs argument by adding support for vectors in lowerCall.
static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget,
                                    bool IsLowerArgs = false) {
  // TODO: Integers larger than 2*XLen are passed indirectly which is not
  // supported yet.
  if (T->isIntegerTy())
    return T->getIntegerBitWidth() <= Subtarget.getXLen() * 2;
  if (T->isFloatTy() || T->isDoubleTy())
    return true;
  if (T->isPointerTy())
    return true;
  // TODO: Support fixed vector types.
  if (IsLowerArgs && T->isVectorTy() && Subtarget.hasVInstructions() &&
      T->isScalableTy() &&
      isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
    return true;
  return false;
}

// TODO: Only integer, pointer and aggregate types are supported now.
// TODO: Remove IsLowerRetVal argument by adding support for vectors in
// lowerCall.
static bool isSupportedReturnType(Type *T, const RISCVSubtarget &Subtarget,
                                  bool IsLowerRetVal = false) {
  // TODO: Integers larger than 2*XLen are passed indirectly which is not
  // supported yet.
  if (T->isIntegerTy())
    return T->getIntegerBitWidth() <= Subtarget.getXLen() * 2;
  if (T->isFloatTy() || T->isDoubleTy())
    return true;
  if (T->isPointerTy())
    return true;

  if (T->isArrayTy())
    return isSupportedReturnType(T->getArrayElementType(), Subtarget);

  if (T->isStructTy()) {
    auto StructT = cast<StructType>(T);
    for (unsigned i = 0, e = StructT->getNumElements(); i != e; ++i)
      if (!isSupportedReturnType(StructT->getElementType(i), Subtarget))
        return false;
    return true;
  }

  if (IsLowerRetVal && T->isVectorTy() && Subtarget.hasVInstructions() &&
      T->isScalableTy() &&
      isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
    return true;

  return false;
}

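/// Assign the return value in \p VRegs to the locations chosen by the return
/// calling convention and record the corresponding implicit uses on the
/// PseudoRET instruction \p Ret.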
bool RISCVCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                       const Value *Val,
                                       ArrayRef<Register> VRegs,
                                       MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  const RISCVSubtarget &Subtarget =
      MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
  if (!isSupportedReturnType(Val->getType(), Subtarget, /*IsLowerRetVal=*/true))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const Function &F = MF.getFunction();
  CallingConv::ID CC = F.getCallingConv();

  ArgInfo OrigRetInfo(VRegs, Val->getType(), 0);
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);

  SmallVector<ArgInfo, 4> SplitRetInfos;
  splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, CC);

  RISCVOutgoingValueAssigner Assigner(
      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
      /*IsRet=*/true);
  RISCVOutgoingValueHandler Handler(MIRBuilder, MF.getRegInfo(), Ret);
  return determineAndHandleAssignments(Handler, Assigner, SplitRetInfos,
                                        MIRBuilder, CC, F.isVarArg());
}

bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                    const Value *Val, ArrayRef<Register> VRegs,
                                    FunctionLoweringInfo &FLI) const {
  assert(!Val == VRegs.empty() && "Return value without a vreg");
  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(RISCV::PseudoRET);

  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
    return false;

  MIRBuilder.insertInstr(Ret);
  return true;
}

/// If there are varargs that were passed in a0-a7, the data in those registers
/// must be copied to the varargs save area on the stack.
void RISCVCallLowering::saveVarArgRegisters(
    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
    IncomingValueAssigner &Assigner, CCState &CCInfo) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  ArrayRef<MCPhysReg> ArgRegs = RISCV::getArgGPRs(Subtarget.getTargetABI());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Size of the vararg save area. For now, the varargs save area is either
  // zero or large enough to hold a0-a7.
  int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
  int FI;

  // If all registers are allocated, then all varargs must be passed on the
  // stack and we don't need to save any argregs.
  if (VarArgsSaveSize == 0) {
    int VaArgOffset = Assigner.StackSize;
    FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
  } else {
    int VaArgOffset = -VarArgsSaveSize;
    FI = MFI.CreateFixedObject(VarArgsSaveSize, VaArgOffset, true);

    // If saving an odd number of registers then create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
    if (Idx % 2) {
      MFI.CreateFixedObject(XLenInBytes,
                            VaArgOffset - static_cast<int>(XLenInBytes), true);
      VarArgsSaveSize += XLenInBytes;
    }
  }

  const LLT p0 = LLT::pointer(MF.getDataLayout().getAllocaAddrSpace(),
                              Subtarget.getXLen());
  const LLT sXLen = LLT::scalar(Subtarget.getXLen());

  auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
  auto Offset = MIRBuilder.buildConstant(
      MRI.createGenericVirtualRegister(sXLen), XLenInBytes);

  // Copy the integer registers that may have been used for passing varargs
  // to the vararg save area.
  const MVT XLenVT = Subtarget.getXLenVT();
  for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
    const Register VReg = MRI.createGenericVirtualRegister(sXLen);
    Handler.assignValueToReg(
        VReg, ArgRegs[I],
        CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenVT,
                            ArgRegs[I], XLenVT, CCValAssign::Full));
    auto MPO =
        MachinePointerInfo::getFixedStack(MF, FI, (I - Idx) * XLenInBytes);
    MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
    FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                 FIN.getReg(0), Offset);
  }

  // Record the frame index of the first variable argument which is a value
  // necessary to G_VASTART.
  RVFI->setVarArgsFrameIndex(FI);
  RVFI->setVarArgsSaveSize(VarArgsSaveSize);
}

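/// Lower the incoming formal arguments: each IR argument is split into the
/// value types the calling convention can handle, assigned to registers or
/// fixed stack objects, and, for variadic functions, the remaining argument
/// registers are spilled to the varargs save area.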
bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                             const Function &F,
                                             ArrayRef<ArrayRef<Register>> VRegs,
                                             FunctionLoweringInfo &FLI) const {
  // Early exit if there are no arguments. varargs are not part of F.args() but
  // must be lowered.
  if (F.arg_empty() && !F.isVarArg())
    return true;

  const RISCVSubtarget &Subtarget =
      MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
  for (auto &Arg : F.args()) {
    if (!isSupportedArgumentType(Arg.getType(), Subtarget,
                                 /*IsLowerArgs=*/true))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  CallingConv::ID CC = F.getCallingConv();

  SmallVector<ArgInfo, 32> SplitArgInfos;
  unsigned Index = 0;
  for (auto &Arg : F.args()) {
    // Construct the ArgInfo object from destination register and argument type.
    ArgInfo AInfo(VRegs[Index], Arg.getType(), Index);
    setArgFlags(AInfo, Index + AttributeList::FirstArgIndex, DL, F);

    // Handle any required merging from split value types from physical
    // registers into the desired VReg. ArgInfo objects are constructed
    // correspondingly and appended to SplitArgInfos.
    splitToValueTypes(AInfo, SplitArgInfos, DL, CC);

    ++Index;
  }

  RISCVIncomingValueAssigner Assigner(
      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
      /*IsRet=*/false);
  RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MIRBuilder.getMF(), ArgLocs, F.getContext());
  if (!determineAssignments(Assigner, SplitArgInfos, CCInfo) ||
      !handleAssignments(Handler, SplitArgInfos, CCInfo, ArgLocs, MIRBuilder))
    return false;

  if (F.isVarArg())
    saveVarArgRegisters(MIRBuilder, Handler, Assigner, CCInfo);

  return true;
}

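/// Lower an outgoing call: split the arguments, bracket the call with
/// ADJCALLSTACKDOWN/ADJCALLSTACKUP, emit PseudoCALL or PseudoCALLIndirect, and
/// lower any returned value via the return calling convention.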
bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                  CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const Function &F = MF.getFunction();
  CallingConv::ID CC = F.getCallingConv();

  const RISCVSubtarget &Subtarget =
      MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
  for (auto &AInfo : Info.OrigArgs) {
    if (!isSupportedArgumentType(AInfo.Ty, Subtarget))
      return false;
  }

  if (!Info.OrigRet.Ty->isVoidTy() &&
      !isSupportedReturnType(Info.OrigRet.Ty, Subtarget))
    return false;

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(RISCV::ADJCALLSTACKDOWN);

  SmallVector<ArgInfo, 32> SplitArgInfos;
  SmallVector<ISD::OutputArg, 8> Outs;
  for (auto &AInfo : Info.OrigArgs) {
    // Handle any required unmerging of split value types from a given VReg into
    // physical registers. ArgInfo objects are constructed correspondingly and
    // appended to SplitArgInfos.
    splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
  }

  // TODO: Support tail calls.
  Info.IsTailCall = false;

  // Select the recommended relocation type R_RISCV_CALL_PLT.
  if (!Info.Callee.isReg())
    Info.Callee.setTargetFlags(RISCVII::MO_CALL);

  MachineInstrBuilder Call =
      MIRBuilder
          .buildInstrNoInsert(Info.Callee.isReg() ? RISCV::PseudoCALLIndirect
                                                  : RISCV::PseudoCALL)
          .add(Info.Callee);
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  Call.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));

  RISCVOutgoingValueAssigner ArgAssigner(
      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
      /*IsRet=*/false);
  RISCVOutgoingValueHandler ArgHandler(MIRBuilder, MF.getRegInfo(), Call);
  if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, SplitArgInfos,
                                     MIRBuilder, CC, Info.IsVarArg))
    return false;

  MIRBuilder.insertInstr(Call);

  CallSeqStart.addImm(ArgAssigner.StackSize).addImm(0);
  MIRBuilder.buildInstr(RISCV::ADJCALLSTACKUP)
      .addImm(ArgAssigner.StackSize)
      .addImm(0);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Call->getOperand(0).isReg())
    constrainOperandRegClass(MF, *TRI, MF.getRegInfo(),
                             *Subtarget.getInstrInfo(),
                             *Subtarget.getRegBankInfo(), *Call,
                             Call->getDesc(), Call->getOperand(0), 0);

  if (Info.OrigRet.Ty->isVoidTy())
    return true;

  SmallVector<ArgInfo, 4> SplitRetInfos;
  splitToValueTypes(Info.OrigRet, SplitRetInfos, DL, CC);

  RISCVIncomingValueAssigner RetAssigner(
      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
      /*IsRet=*/true);
  RISCVCallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), Call);
  if (!determineAndHandleAssignments(RetHandler, RetAssigner, SplitRetInfos,
                                     MIRBuilder, CC, Info.IsVarArg))
    return false;

  return true;
}