//===- ARCISelLowering.cpp - ARC DAG Lowering Impl --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARCTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "ARCISelLowering.h"
#include "ARCMachineFunctionInfo.h"
#include "ARCSubtarget.h"
#include "ARCTargetMachine.h"
#include "MCTargetDesc/ARCInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "arc-lower"

using namespace llvm;

static SDValue lowerCallResult(SDValue Chain, SDValue InFlag,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               SDLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals);
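
/// Map an ISD integer condition code to the equivalent ARC condition code
/// consumed by the CMP/CMOV/BRcc target nodes below.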
static ARCCC::CondCode ISDCCtoARCCC(ISD::CondCode isdCC) {
  switch (isdCC) {
  // Map each supported ISD::CondCode to the corresponding ARCCC::CondCode.
  default:
    llvm_unreachable("Unhandled ISDCC code.");
  }
}

ARCTargetLowering::ARCTargetLowering(const TargetMachine &TM,
                                     const ARCSubtarget &Subtarget)
    : TargetLowering(TM), Subtarget(Subtarget) {
  // Set up the register classes.
  addRegisterClass(MVT::i32, &ARC::GPR32RegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(ARC::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, MVT::i32, Expand);

  // Operations to get us off of the ground.
  setOperationAction(ISD::ADD, MVT::i32, Legal);
  setOperationAction(ISD::SUB, MVT::i32, Legal);
  setOperationAction(ISD::AND, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::SMIN, MVT::i32, Legal);

  // Need barrel shifter.
  setOperationAction(ISD::SHL, MVT::i32, Legal);
  setOperationAction(ISD::SRA, MVT::i32, Legal);
  setOperationAction(ISD::SRL, MVT::i32, Legal);
  setOperationAction(ISD::ROTR, MVT::i32, Legal);

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::UNDEF, MVT::i32, Legal);

  setOperationAction(ISD::MUL, MVT::i32, Legal);
  setOperationAction(ISD::MULHS, MVT::i32, Legal);
  setOperationAction(ISD::MULHU, MVT::i32, Legal);
  setOperationAction(ISD::LOAD, MVT::i32, Legal);
  setOperationAction(ISD::STORE, MVT::i32, Legal);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);

  // Have pseudo instruction for frame addresses.
  setOperationAction(ISD::FRAMEADDR, MVT::i32, Legal);
  // Custom lower global addresses.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);

  // Expand var-args ops.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);
}

const char *ARCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case ARCISD::CMOV:
    return "ARCISD::CMOV";
  case ARCISD::CMP:
    return "ARCISD::CMP";
  case ARCISD::BRcc:
    return "ARCISD::BRcc";
  case ARCISD::RET:
    return "ARCISD::RET";
  case ARCISD::GAWRAPPER:
    return "ARCISD::GAWRAPPER";
  }
  return nullptr;
}

//===----------------------------------------------------------------------===//
// Misc Lower Operation implementation
//===----------------------------------------------------------------------===//
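
/// Lower SELECT_CC by emitting an ARCISD::CMP of the compared operands
/// (producing glue) and an ARCISD::CMOV that picks between the true and false
/// values under the translated ARC condition code.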
SDValue ARCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TVal = Op.getOperand(2);
  SDValue FVal = Op.getOperand(3);
  SDLoc dl(Op);
  ARCCC::CondCode ArcCC = ISDCCtoARCCC(CC);
  assert(LHS.getValueType() == MVT::i32 && "Only know how to SELECT_CC i32");
  SDValue Cmp = DAG.getNode(ARCISD::CMP, dl, MVT::Glue, LHS, RHS);
  return DAG.getNode(ARCISD::CMOV, dl, TVal.getValueType(), TVal, FVal,
                     DAG.getConstant(ArcCC, dl, MVT::i32), Cmp);
}

SDValue ARCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDLoc dl(Op);
  assert(Op.getValueType() == MVT::i32 &&
         "Unhandled target sign_extend_inreg.");

  unsigned Width = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
  if (Width == 16 || Width == 8)
    return Op;

  // For other widths, shift the value up and arithmetic-shift it back down to
  // replicate the sign bit.
  SDValue LS = DAG.getNode(ISD::SHL, dl, MVT::i32, Op0,
                           DAG.getConstant(32 - Width, dl, MVT::i32));
  SDValue SR = DAG.getNode(ISD::SRA, dl, MVT::i32, LS,
                           DAG.getConstant(32 - Width, dl, MVT::i32));
  return SR;
}
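
/// Lower BR_CC to a single ARCISD::BRcc node carrying the chain, branch
/// destination, both compared operands, and the translated ARC condition code.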
SDValue ARCTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  ARCCC::CondCode arcCC = ISDCCtoARCCC(CC);
  assert(LHS.getValueType() == MVT::i32 && "Only know how to BR_CC i32");
  return DAG.getNode(ARCISD::BRcc, dl, MVT::Other, Chain, Dest, LHS, RHS,
                     DAG.getConstant(arcCC, dl, MVT::i32));
}
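
/// Lower a jump table to its target jump-table index wrapped in
/// ARCISD::GAWRAPPER, the same wrapper node used for global addresses.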
SDValue ARCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  auto *N = cast<JumpTableSDNode>(Op);
  SDValue GA = DAG.getTargetJumpTable(N->getIndex(), MVT::i32);
  return DAG.getNode(ARCISD::GAWRAPPER, SDLoc(N), MVT::i32, GA);
}

#include "ARCGenCallingConv.inc"
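
// The TableGen-generated file above provides the CC_ARC and RetCC_ARC
// assignment functions used when analyzing call operands, formal arguments,
// and return values.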

//===----------------------------------------------------------------------===//
// Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// ARC call implementation
SDValue ARCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  bool &IsTailCall = CLI.IsTailCall;

  IsTailCall = false; // Do not support tail calls yet.

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, CC_ARC);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_ARC);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;
  SDValue StackPtr;
262 for (unsigned i
= 0, e
= ArgLocs
.size(); i
!= e
; ++i
) {
263 CCValAssign
&VA
= ArgLocs
[i
];
264 SDValue Arg
= OutVals
[i
];
266 // Promote the value if needed.
267 switch (VA
.getLocInfo()) {
269 llvm_unreachable("Unknown loc info!");
270 case CCValAssign::Full
:
272 case CCValAssign::SExt
:
273 Arg
= DAG
.getNode(ISD::SIGN_EXTEND
, dl
, VA
.getLocVT(), Arg
);
275 case CCValAssign::ZExt
:
276 Arg
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, VA
.getLocVT(), Arg
);
278 case CCValAssign::AExt
:
279 Arg
= DAG
.getNode(ISD::ANY_EXTEND
, dl
, VA
.getLocVT(), Arg
);
283 // Arguments that can be passed on register must be kept at
286 RegsToPass
.push_back(std::make_pair(VA
.getLocReg(), Arg
));
288 assert(VA
.isMemLoc() && "Must be register or memory argument.");
289 if (!StackPtr
.getNode())
290 StackPtr
= DAG
.getCopyFromReg(Chain
, dl
, ARC::SP
,
291 getPointerTy(DAG
.getDataLayout()));
292 // Calculate the stack position.
293 SDValue SOffset
= DAG
.getIntPtrConstant(VA
.getLocMemOffset(), dl
);
294 SDValue PtrOff
= DAG
.getNode(
295 ISD::ADD
, dl
, getPointerTy(DAG
.getDataLayout()), StackPtr
, SOffset
);
298 DAG
.getStore(Chain
, dl
, Arg
, PtrOff
, MachinePointerInfo());
299 MemOpChains
.push_back(Store
);
304 // Transform all store nodes into one single node because
305 // all store nodes are independent of each other.
306 if (!MemOpChains
.empty())
307 Chain
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, MemOpChains
);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The glue is necessary since all emitted instructions must be
  // stuck together.
  SDValue Glue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, Glue);
    Glue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  bool IsDirect = true;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
  else
    IsDirect = false;

  // Branch + Link = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call: BL for direct calls, JL (jump-and-link) otherwise.
  Chain = DAG.getNode(IsDirect ? ARCISD::BL : ARCISD::JL, dl, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
                             DAG.getConstant(0, dl, PtrVT, true), Glue, dl);
  Glue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return lowerCallResult(Chain, Glue, RVLocs, dl, DAG, InVals);
}

/// Lower the result values of a call into the appropriate copies out of
/// physical registers / memory locations.
static SDValue lowerCallResult(SDValue Chain, SDValue Glue,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               SDLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    const CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc()) {
      SDValue RetValue =
          DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(), Glue);
      Chain = RetValue.getValue(1);
      Glue = RetValue.getValue(2);
      InVals.push_back(RetValue);
    } else {
      assert(VA.isMemLoc() && "Must be memory location.");
      ResultMemLocs.push_back(
          std::make_pair(VA.getLocMemOffset(), InVals.size()));

      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int Offset = ResultMemLocs[i].first;
    unsigned Index = ResultMemLocs[i].second;
    SDValue StackPtr = DAG.getRegister(ARC::SP, MVT::i32);
    SDValue SpLoc = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr,
                                DAG.getConstant(Offset, dl, MVT::i32));
    SDValue Load =
        DAG.getLoad(MVT::i32, dl, Chain, SpLoc, MachinePointerInfo());
    InVals[Index] = Load;
    MemOpChains.push_back(Load.getValue(1));
  }

  // Transform all load nodes into one single node because
  // all load nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}

//===----------------------------------------------------------------------===//
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {

/// Pairs an incoming argument's value with its argument flags, used below for
/// byval handling.
struct ArgDataPair {
  SDValue SDV;
  ISD::ArgFlagsTy Flags;
};

} // end anonymous namespace

/// ARC formal arguments implementation
SDValue ARCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    return LowerCallArguments(Chain, CallConv, IsVarArg, Ins, dl, DAG, InVals);
  }
}

/// Transform physical registers into virtual registers, and generate load
/// operations for argument places on the stack.
SDValue ARCTargetLowering::LowerCallArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  auto *AFI = MF.getInfo<ARCFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_ARC);

  unsigned StackSlotSize = 4;

  AFI->setReturnStackOffset(CCInfo.getNextStackOffset());

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        LLVM_DEBUG(errs() << "LowerFormalArguments Unhandled argument type: "
                          << (unsigned)RegVT.getSimpleVT().SimpleTy << "\n");
        llvm_unreachable("Unhandled LowerFormalArguments type.");
      case MVT::i32: {
        unsigned VReg = RegInfo.createVirtualRegister(&ARC::GPR32RegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
        break;
      }
      }
    } else {
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getStoreSize();
      assert((ObjSize <= StackSlotSize) && "Unhandled argument");

      // Create the frame index object for this incoming parameter...
      int FI = MFI.CreateFixedObject(ObjSize, VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI));
    }
    const ArgDataPair ADP = {ArgIn, Ins[i].Flags};
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
  if (IsVarArg) {
    // Argument registers
    static const MCPhysReg ArgRegs[] = {ARC::R0, ARC::R1, ARC::R2, ARC::R3,
                                        ARC::R4, ARC::R5, ARC::R6, ARC::R7};
    auto *AFI = MF.getInfo<ARCFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      unsigned Offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address.
      // There are (array_lengthof(ArgRegs) - FirstVAReg) registers which
      // need to be saved.
      int VarFI =
          MFI.CreateFixedObject((array_lengthof(ArgRegs) - FirstVAReg) * 4,
                                CCInfo.getNextStackOffset(), true);
      AFI->setVarArgsFrameIndex(VarFI);
      SDValue FIN = DAG.getFrameIndex(VarFI, MVT::i32);
      for (unsigned i = FirstVAReg; i < array_lengthof(ArgRegs); i++) {
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(&ARC::GPR32RegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        SDValue VAObj = DAG.getNode(ISD::ADD, dl, MVT::i32, FIN,
                                    DAG.getConstant(Offset, dl, MVT::i32));
        // Move argument from virt reg -> stack
        SDValue Store =
            DAG.getStore(Val.getValue(1), dl, Val, VAObj, MachinePointerInfo());
        MemOps.push_back(Store);
        Offset += 4;
      }
    } else {
      llvm_unreachable("Too many var args parameters.");
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (const auto &ArgDI : ArgData) {
    if (ArgDI.Flags.isByVal() && ArgDI.Flags.getByValSize()) {
      unsigned Size = ArgDI.Flags.getByValSize();
      unsigned Align = std::max(StackSlotSize, ArgDI.Flags.getByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI.CreateStackObject(Size, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(
          Chain, dl, FIN, ArgDI.SDV, DAG.getConstant(Size, dl, MVT::i32), Align,
          false, false, false, MachinePointerInfo(), MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI.SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool ARCTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_ARC))
    return false;
  if (CCInfo.getNextStackOffset() != 0 && IsVarArg)
    return false;
  return true;
}

SDValue
ARCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  auto *AFI = DAG.getMachineFunction().getInfo<ARCFunctionInfo>();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AllocateStack(AFI->getReturnStackOffset(), 4);

  CCInfo.AnalyzeReturn(Outs, RetCC_ARC);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (IsVarArg)
      report_fatal_error("Can't return value from vararg function in memory");

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getStoreSize();
    // Create the frame index object for the memory location.
    int FI = MFI.CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }

  // Transform all store nodes into one single node because
  // all stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are
    // stuck together, avoiding something bad.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(ARCISD::RET, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue ARCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  // Returning an empty SDValue indicates no combine was performed.
  return SDValue();
}

//===----------------------------------------------------------------------===//
// Addressing mode description hooks
//===----------------------------------------------------------------------===//

/// Return true if the addressing mode represented by AM is legal for this
/// target, for a load/store of the specified type.
bool ARCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  return AM.Scale == 0;
}

// Don't emit tail calls for the time being.
bool ARCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return false;
}
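
/// Lower FRAMEADDR by copying the frame register out of the entry node. Only
/// depth 0 (the current frame) is supported, as enforced by the assert below.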
SDValue ARCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const ARCRegisterInfo &ARI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  assert(cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0 &&
         "Only support lowering frame addr of current frame.");
  Register FrameReg = ARI.getFrameRegister(MF);
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
}
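
/// Lower a GlobalAddress to a TargetGlobalAddress (with any constant offset
/// folded in) wrapped in an ARCISD::GAWRAPPER node.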
SDValue ARCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc dl(GN);
  int64_t Offset = GN->getOffset();
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, Offset);
  return DAG.getNode(ARCISD::GAWRAPPER, dl, MVT::i32, GA);
}
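
/// Custom VASTART lowering; the VarArgsFrameIndex it stores is created in
/// LowerCallArguments when the function takes variadic arguments.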
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  auto *FuncInfo = MF.getInfo<ARCFunctionInfo>();

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc dl(Op);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
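
/// Dispatch the node kinds marked Custom in the constructor to their
/// individual lowering routines.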
SDValue ARCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}