1 //===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the XCoreTargetLowering class.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "xcore-lower"
16 #include "XCoreISelLowering.h"
17 #include "XCoreMachineFunctionInfo.h"
19 #include "XCoreTargetMachine.h"
20 #include "XCoreSubtarget.h"
21 #include "llvm/DerivedTypes.h"
22 #include "llvm/Function.h"
23 #include "llvm/Intrinsics.h"
24 #include "llvm/CallingConv.h"
25 #include "llvm/GlobalVariable.h"
26 #include "llvm/GlobalAlias.h"
27 #include "llvm/CodeGen/CallingConvLower.h"
28 #include "llvm/CodeGen/MachineFrameInfo.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstrBuilder.h"
31 #include "llvm/CodeGen/MachineRegisterInfo.h"
32 #include "llvm/CodeGen/SelectionDAGISel.h"
33 #include "llvm/CodeGen/ValueTypes.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/ADT/VectorExtras.h"
40 const char *XCoreTargetLowering::
41 getTargetNodeName(unsigned Opcode
) const
45 case XCoreISD::BL
: return "XCoreISD::BL";
46 case XCoreISD::PCRelativeWrapper
: return "XCoreISD::PCRelativeWrapper";
47 case XCoreISD::DPRelativeWrapper
: return "XCoreISD::DPRelativeWrapper";
48 case XCoreISD::CPRelativeWrapper
: return "XCoreISD::CPRelativeWrapper";
49 case XCoreISD::STWSP
: return "XCoreISD::STWSP";
50 case XCoreISD::RETSP
: return "XCoreISD::RETSP";
51 default : return NULL
;
55 XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine
&XTM
)
56 : TargetLowering(XTM
),
58 Subtarget(*XTM
.getSubtargetImpl()) {
60 // Set up the register classes.
61 addRegisterClass(MVT::i32
, XCore::GRRegsRegisterClass
);
63 // Compute derived properties from the register classes
64 computeRegisterProperties();
66 // Division is expensive
67 setIntDivIsCheap(false);
69 setShiftAmountType(MVT::i32
);
71 setShiftAmountFlavor(Extend
);
72 setStackPointerRegisterToSaveRestore(XCore::SP
);
74 setSchedulingPreference(SchedulingForRegPressure
);
76 // Use i32 for setcc operations results (slt, sgt, ...).
77 setBooleanContents(ZeroOrOneBooleanContent
);
79 // XCore does not have the NodeTypes below.
80 setOperationAction(ISD::BR_CC
, MVT::Other
, Expand
);
81 setOperationAction(ISD::SELECT_CC
, MVT::i32
, Custom
);
82 setOperationAction(ISD::ADDC
, MVT::i32
, Expand
);
83 setOperationAction(ISD::ADDE
, MVT::i32
, Expand
);
84 setOperationAction(ISD::SUBC
, MVT::i32
, Expand
);
85 setOperationAction(ISD::SUBE
, MVT::i32
, Expand
);
87 // Stop the combiner recombining select and set_cc
88 setOperationAction(ISD::SELECT_CC
, MVT::Other
, Expand
);
91 if (!Subtarget
.isXS1A()) {
92 setOperationAction(ISD::ADD
, MVT::i64
, Custom
);
93 setOperationAction(ISD::SUB
, MVT::i64
, Custom
);
95 if (Subtarget
.isXS1A()) {
96 setOperationAction(ISD::SMUL_LOHI
, MVT::i32
, Expand
);
98 setOperationAction(ISD::MULHS
, MVT::i32
, Expand
);
99 setOperationAction(ISD::MULHU
, MVT::i32
, Expand
);
100 setOperationAction(ISD::SHL_PARTS
, MVT::i32
, Expand
);
101 setOperationAction(ISD::SRA_PARTS
, MVT::i32
, Expand
);
102 setOperationAction(ISD::SRL_PARTS
, MVT::i32
, Expand
);
105 setOperationAction(ISD::CTPOP
, MVT::i32
, Expand
);
106 setOperationAction(ISD::ROTL
, MVT::i32
, Expand
);
107 setOperationAction(ISD::ROTR
, MVT::i32
, Expand
);
109 setOperationAction(ISD::TRAP
, MVT::Other
, Legal
);
111 // Expand jump tables for now
112 setOperationAction(ISD::BR_JT
, MVT::Other
, Expand
);
113 setOperationAction(ISD::JumpTable
, MVT::i32
, Custom
);
115 // RET must be custom lowered, to meet ABI requirements
116 setOperationAction(ISD::RET
, MVT::Other
, Custom
);
118 setOperationAction(ISD::GlobalAddress
, MVT::i32
, Custom
);
120 // Thread Local Storage
121 setOperationAction(ISD::GlobalTLSAddress
, MVT::i32
, Custom
);
123 // Conversion of i64 -> double produces constantpool nodes
124 setOperationAction(ISD::ConstantPool
, MVT::i32
, Custom
);
127 setLoadExtAction(ISD::EXTLOAD
, MVT::i1
, Promote
);
128 setLoadExtAction(ISD::ZEXTLOAD
, MVT::i1
, Promote
);
129 setLoadExtAction(ISD::SEXTLOAD
, MVT::i1
, Promote
);
131 setLoadExtAction(ISD::SEXTLOAD
, MVT::i8
, Expand
);
132 setLoadExtAction(ISD::ZEXTLOAD
, MVT::i16
, Expand
);
135 setOperationAction(ISD::VAEND
, MVT::Other
, Expand
);
136 setOperationAction(ISD::VACOPY
, MVT::Other
, Expand
);
137 setOperationAction(ISD::VAARG
, MVT::Other
, Custom
);
138 setOperationAction(ISD::VASTART
, MVT::Other
, Custom
);
141 setOperationAction(ISD::STACKSAVE
, MVT::Other
, Expand
);
142 setOperationAction(ISD::STACKRESTORE
, MVT::Other
, Expand
);
143 setOperationAction(ISD::DYNAMIC_STACKALLOC
, MVT::i32
, Expand
);
146 setOperationAction(ISD::DBG_STOPPOINT
, MVT::Other
, Expand
);
147 setOperationAction(ISD::DEBUG_LOC
, MVT::Other
, Expand
);
150 SDValue
XCoreTargetLowering::
151 LowerOperation(SDValue Op
, SelectionDAG
&DAG
) {
152 switch (Op
.getOpcode())
154 case ISD::CALL
: return LowerCALL(Op
, DAG
);
155 case ISD::FORMAL_ARGUMENTS
: return LowerFORMAL_ARGUMENTS(Op
, DAG
);
156 case ISD::RET
: return LowerRET(Op
, DAG
);
157 case ISD::GlobalAddress
: return LowerGlobalAddress(Op
, DAG
);
158 case ISD::GlobalTLSAddress
: return LowerGlobalTLSAddress(Op
, DAG
);
159 case ISD::ConstantPool
: return LowerConstantPool(Op
, DAG
);
160 case ISD::JumpTable
: return LowerJumpTable(Op
, DAG
);
161 case ISD::SELECT_CC
: return LowerSELECT_CC(Op
, DAG
);
162 case ISD::VAARG
: return LowerVAARG(Op
, DAG
);
163 case ISD::VASTART
: return LowerVASTART(Op
, DAG
);
164 // FIXME: Remove these when LegalizeDAGTypes lands.
166 case ISD::SUB
: return ExpandADDSUB(Op
.getNode(), DAG
);
167 case ISD::FRAMEADDR
: return LowerFRAMEADDR(Op
, DAG
);
169 assert(0 && "unimplemented operand");
174 /// ReplaceNodeResults - Replace the results of node with an illegal result
175 /// type with new values built out of custom code.
176 void XCoreTargetLowering::ReplaceNodeResults(SDNode
*N
,
177 SmallVectorImpl
<SDValue
>&Results
,
179 switch (N
->getOpcode()) {
181 assert(0 && "Don't know how to custom expand this!");
185 Results
.push_back(ExpandADDSUB(N
, DAG
));
190 //===----------------------------------------------------------------------===//
191 // Misc Lower Operation implementation
192 //===----------------------------------------------------------------------===//
194 SDValue
XCoreTargetLowering::
195 LowerSELECT_CC(SDValue Op
, SelectionDAG
&DAG
)
197 DebugLoc dl
= Op
.getDebugLoc();
198 SDValue Cond
= DAG
.getNode(ISD::SETCC
, dl
, MVT::i32
, Op
.getOperand(2),
199 Op
.getOperand(3), Op
.getOperand(4));
200 return DAG
.getNode(ISD::SELECT
, dl
, MVT::i32
, Cond
, Op
.getOperand(0),
204 SDValue
XCoreTargetLowering::
205 getGlobalAddressWrapper(SDValue GA
, GlobalValue
*GV
, SelectionDAG
&DAG
)
207 // FIXME there is no actual debug info here
208 DebugLoc dl
= GA
.getDebugLoc();
209 if (isa
<Function
>(GV
)) {
210 return DAG
.getNode(XCoreISD::PCRelativeWrapper
, dl
, MVT::i32
, GA
);
211 } else if (!Subtarget
.isXS1A()) {
212 const GlobalVariable
*GVar
= dyn_cast
<GlobalVariable
>(GV
);
214 // If GV is an alias then use the aliasee to determine constness
215 if (const GlobalAlias
*GA
= dyn_cast
<GlobalAlias
>(GV
))
216 GVar
= dyn_cast_or_null
<GlobalVariable
>(GA
->resolveAliasedGlobal());
218 bool isConst
= GVar
&& GVar
->isConstant();
220 return DAG
.getNode(XCoreISD::CPRelativeWrapper
, dl
, MVT::i32
, GA
);
223 return DAG
.getNode(XCoreISD::DPRelativeWrapper
, dl
, MVT::i32
, GA
);
226 SDValue
XCoreTargetLowering::
227 LowerGlobalAddress(SDValue Op
, SelectionDAG
&DAG
)
229 GlobalValue
*GV
= cast
<GlobalAddressSDNode
>(Op
)->getGlobal();
230 SDValue GA
= DAG
.getTargetGlobalAddress(GV
, MVT::i32
);
231 // If it's a debug information descriptor, don't mess with it.
232 if (DAG
.isVerifiedDebugInfoDesc(Op
))
234 return getGlobalAddressWrapper(GA
, GV
, DAG
);
237 static inline SDValue
BuildGetId(SelectionDAG
&DAG
, DebugLoc dl
) {
238 return DAG
.getNode(ISD::INTRINSIC_WO_CHAIN
, dl
, MVT::i32
,
239 DAG
.getConstant(Intrinsic::xcore_getid
, MVT::i32
));
242 static inline bool isZeroLengthArray(const Type
*Ty
) {
243 const ArrayType
*AT
= dyn_cast_or_null
<ArrayType
>(Ty
);
244 return AT
&& (AT
->getNumElements() == 0);
247 SDValue
XCoreTargetLowering::
248 LowerGlobalTLSAddress(SDValue Op
, SelectionDAG
&DAG
)
250 // FIXME there isn't really debug info here
251 DebugLoc dl
= Op
.getDebugLoc();
252 // transform to label + getid() * size
253 GlobalValue
*GV
= cast
<GlobalAddressSDNode
>(Op
)->getGlobal();
254 SDValue GA
= DAG
.getTargetGlobalAddress(GV
, MVT::i32
);
255 const GlobalVariable
*GVar
= dyn_cast
<GlobalVariable
>(GV
);
257 // If GV is an alias then use the aliasee to determine size
258 if (const GlobalAlias
*GA
= dyn_cast
<GlobalAlias
>(GV
))
259 GVar
= dyn_cast_or_null
<GlobalVariable
>(GA
->resolveAliasedGlobal());
262 assert(0 && "Thread local object not a GlobalVariable?");
265 const Type
*Ty
= cast
<PointerType
>(GV
->getType())->getElementType();
266 if (!Ty
->isSized() || isZeroLengthArray(Ty
)) {
267 cerr
<< "Size of thread local object " << GVar
->getName()
271 SDValue base
= getGlobalAddressWrapper(GA
, GV
, DAG
);
272 const TargetData
*TD
= TM
.getTargetData();
273 unsigned Size
= TD
->getTypePaddedSize(Ty
);
274 SDValue offset
= DAG
.getNode(ISD::MUL
, dl
, MVT::i32
, BuildGetId(DAG
, dl
),
275 DAG
.getConstant(Size
, MVT::i32
));
276 return DAG
.getNode(ISD::ADD
, dl
, MVT::i32
, base
, offset
);
279 SDValue
XCoreTargetLowering::
280 LowerConstantPool(SDValue Op
, SelectionDAG
&DAG
)
282 ConstantPoolSDNode
*CP
= cast
<ConstantPoolSDNode
>(Op
);
283 // FIXME there isn't really debug info here
284 DebugLoc dl
= CP
->getDebugLoc();
285 if (Subtarget
.isXS1A()) {
286 assert(0 && "Lowering of constant pool unimplemented");
289 MVT PtrVT
= Op
.getValueType();
291 if (CP
->isMachineConstantPoolEntry()) {
292 Res
= DAG
.getTargetConstantPool(CP
->getMachineCPVal(), PtrVT
,
295 Res
= DAG
.getTargetConstantPool(CP
->getConstVal(), PtrVT
,
298 return DAG
.getNode(XCoreISD::CPRelativeWrapper
, dl
, MVT::i32
, Res
);
302 SDValue
XCoreTargetLowering::
303 LowerJumpTable(SDValue Op
, SelectionDAG
&DAG
)
305 // FIXME there isn't really debug info here
306 DebugLoc dl
= Op
.getDebugLoc();
307 MVT PtrVT
= Op
.getValueType();
308 JumpTableSDNode
*JT
= cast
<JumpTableSDNode
>(Op
);
309 SDValue JTI
= DAG
.getTargetJumpTable(JT
->getIndex(), PtrVT
);
310 return DAG
.getNode(XCoreISD::DPRelativeWrapper
, dl
, MVT::i32
, JTI
);
313 SDValue
XCoreTargetLowering::
314 ExpandADDSUB(SDNode
*N
, SelectionDAG
&DAG
)
316 assert(N
->getValueType(0) == MVT::i64
&&
317 (N
->getOpcode() == ISD::ADD
|| N
->getOpcode() == ISD::SUB
) &&
318 "Unknown operand to lower!");
319 assert(!Subtarget
.isXS1A() && "Cannot custom lower ADD/SUB on xs1a");
320 DebugLoc dl
= N
->getDebugLoc();
322 // Extract components
323 SDValue LHSL
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
324 N
->getOperand(0), DAG
.getConstant(0, MVT::i32
));
325 SDValue LHSH
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
326 N
->getOperand(0), DAG
.getConstant(1, MVT::i32
));
327 SDValue RHSL
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
328 N
->getOperand(1), DAG
.getConstant(0, MVT::i32
));
329 SDValue RHSH
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
330 N
->getOperand(1), DAG
.getConstant(1, MVT::i32
));
333 unsigned Opcode
= (N
->getOpcode() == ISD::ADD
) ? XCoreISD::LADD
:
335 SDValue Zero
= DAG
.getConstant(0, MVT::i32
);
336 SDValue Carry
= DAG
.getNode(Opcode
, dl
, DAG
.getVTList(MVT::i32
, MVT::i32
),
338 SDValue
Lo(Carry
.getNode(), 1);
340 SDValue Ignored
= DAG
.getNode(Opcode
, dl
, DAG
.getVTList(MVT::i32
, MVT::i32
),
342 SDValue
Hi(Ignored
.getNode(), 1);
344 return DAG
.getNode(ISD::BUILD_PAIR
, dl
, MVT::i64
, Lo
, Hi
);
347 SDValue
XCoreTargetLowering::
348 LowerVAARG(SDValue Op
, SelectionDAG
&DAG
)
350 assert(0 && "unimplemented");
351 // FIX Arguments passed by reference need a extra dereference.
352 SDNode
*Node
= Op
.getNode();
353 DebugLoc dl
= Node
->getDebugLoc();
354 const Value
*V
= cast
<SrcValueSDNode
>(Node
->getOperand(2))->getValue();
355 MVT VT
= Node
->getValueType(0);
356 SDValue VAList
= DAG
.getLoad(getPointerTy(), dl
, Node
->getOperand(0),
357 Node
->getOperand(1), V
, 0);
358 // Increment the pointer, VAList, to the next vararg
359 SDValue Tmp3
= DAG
.getNode(ISD::ADD
, dl
, getPointerTy(), VAList
,
360 DAG
.getConstant(VT
.getSizeInBits(),
362 // Store the incremented VAList to the legalized pointer
363 Tmp3
= DAG
.getStore(VAList
.getValue(1), dl
, Tmp3
, Node
->getOperand(1), V
, 0);
364 // Load the actual argument out of the pointer VAList
365 return DAG
.getLoad(VT
, dl
, Tmp3
, VAList
, NULL
, 0);
368 SDValue
XCoreTargetLowering::
369 LowerVASTART(SDValue Op
, SelectionDAG
&DAG
)
371 DebugLoc dl
= Op
.getDebugLoc();
372 // vastart stores the address of the VarArgsFrameIndex slot into the
373 // memory location argument
374 MachineFunction
&MF
= DAG
.getMachineFunction();
375 XCoreFunctionInfo
*XFI
= MF
.getInfo
<XCoreFunctionInfo
>();
376 SDValue Addr
= DAG
.getFrameIndex(XFI
->getVarArgsFrameIndex(), MVT::i32
);
377 const Value
*SV
= cast
<SrcValueSDNode
>(Op
.getOperand(2))->getValue();
378 return DAG
.getStore(Op
.getOperand(0), dl
, Addr
, Op
.getOperand(1), SV
, 0);
381 SDValue
XCoreTargetLowering::LowerFRAMEADDR(SDValue Op
, SelectionDAG
&DAG
) {
382 DebugLoc dl
= Op
.getDebugLoc();
383 // Depths > 0 not supported yet!
384 if (cast
<ConstantSDNode
>(Op
.getOperand(0))->getZExtValue() > 0)
387 MachineFunction
&MF
= DAG
.getMachineFunction();
388 const TargetRegisterInfo
*RegInfo
= getTargetMachine().getRegisterInfo();
389 return DAG
.getCopyFromReg(DAG
.getEntryNode(), dl
,
390 RegInfo
->getFrameRegister(MF
), MVT::i32
);
393 //===----------------------------------------------------------------------===//
394 // Calling Convention Implementation
396 // The lower operations present on calling convention works on this order:
397 // LowerCALL (virt regs --> phys regs, virt regs --> stack)
398 // LowerFORMAL_ARGUMENTS (phys --> virt regs, stack --> virt regs)
399 // LowerRET (virt regs --> phys regs)
400 // LowerCALL (phys regs --> virt regs)
402 //===----------------------------------------------------------------------===//
404 #include "XCoreGenCallingConv.inc"
406 //===----------------------------------------------------------------------===//
407 // CALL Calling Convention Implementation
408 //===----------------------------------------------------------------------===//
410 /// XCore custom CALL implementation
411 SDValue
XCoreTargetLowering::
412 LowerCALL(SDValue Op
, SelectionDAG
&DAG
)
414 CallSDNode
*TheCall
= cast
<CallSDNode
>(Op
.getNode());
415 unsigned CallingConv
= TheCall
->getCallingConv();
416 // For now, only CallingConv::C implemented
420 assert(0 && "Unsupported calling convention");
421 case CallingConv::Fast
:
423 return LowerCCCCallTo(Op
, DAG
, CallingConv
);
427 /// LowerCCCCallTo - functions arguments are copied from virtual
428 /// regs to (physical regs)/(stack frame), CALLSEQ_START and
429 /// CALLSEQ_END are emitted.
430 /// TODO: isTailCall, sret.
431 SDValue
XCoreTargetLowering::
432 LowerCCCCallTo(SDValue Op
, SelectionDAG
&DAG
, unsigned CC
)
434 CallSDNode
*TheCall
= cast
<CallSDNode
>(Op
.getNode());
435 SDValue Chain
= TheCall
->getChain();
436 SDValue Callee
= TheCall
->getCallee();
437 bool isVarArg
= TheCall
->isVarArg();
438 DebugLoc dl
= Op
.getDebugLoc();
440 // Analyze operands of the call, assigning locations to each operand.
441 SmallVector
<CCValAssign
, 16> ArgLocs
;
442 CCState
CCInfo(CC
, isVarArg
, getTargetMachine(), ArgLocs
);
444 // The ABI dictates there should be one stack slot available to the callee
445 // on function entry (for saving lr).
446 CCInfo
.AllocateStack(4, 4);
448 CCInfo
.AnalyzeCallOperands(TheCall
, CC_XCore
);
450 // Get a count of how many bytes are to be pushed on the stack.
451 unsigned NumBytes
= CCInfo
.getNextStackOffset();
453 Chain
= DAG
.getCALLSEQ_START(Chain
,DAG
.getConstant(NumBytes
,
454 getPointerTy(), true));
456 SmallVector
<std::pair
<unsigned, SDValue
>, 4> RegsToPass
;
457 SmallVector
<SDValue
, 12> MemOpChains
;
459 // Walk the register/memloc assignments, inserting copies/loads.
460 for (unsigned i
= 0, e
= ArgLocs
.size(); i
!= e
; ++i
) {
461 CCValAssign
&VA
= ArgLocs
[i
];
463 // Arguments start after the 5 first operands of ISD::CALL
464 SDValue Arg
= TheCall
->getArg(i
);
466 // Promote the value if needed.
467 switch (VA
.getLocInfo()) {
468 default: assert(0 && "Unknown loc info!");
469 case CCValAssign::Full
: break;
470 case CCValAssign::SExt
:
471 Arg
= DAG
.getNode(ISD::SIGN_EXTEND
, dl
, VA
.getLocVT(), Arg
);
473 case CCValAssign::ZExt
:
474 Arg
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, VA
.getLocVT(), Arg
);
476 case CCValAssign::AExt
:
477 Arg
= DAG
.getNode(ISD::ANY_EXTEND
, dl
, VA
.getLocVT(), Arg
);
481 // Arguments that can be passed on register must be kept at
484 RegsToPass
.push_back(std::make_pair(VA
.getLocReg(), Arg
));
486 assert(VA
.isMemLoc());
488 int Offset
= VA
.getLocMemOffset();
490 MemOpChains
.push_back(DAG
.getNode(XCoreISD::STWSP
, dl
, MVT::Other
,
492 DAG
.getConstant(Offset
/4, MVT::i32
)));
496 // Transform all store nodes into one single node because
497 // all store nodes are independent of each other.
498 if (!MemOpChains
.empty())
499 Chain
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
,
500 &MemOpChains
[0], MemOpChains
.size());
502 // Build a sequence of copy-to-reg nodes chained together with token
503 // chain and flag operands which copy the outgoing args into registers.
504 // The InFlag in necessary since all emited instructions must be
507 for (unsigned i
= 0, e
= RegsToPass
.size(); i
!= e
; ++i
) {
508 Chain
= DAG
.getCopyToReg(Chain
, dl
, RegsToPass
[i
].first
,
509 RegsToPass
[i
].second
, InFlag
);
510 InFlag
= Chain
.getValue(1);
513 // If the callee is a GlobalAddress node (quite common, every direct call is)
514 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
515 // Likewise ExternalSymbol -> TargetExternalSymbol.
516 if (GlobalAddressSDNode
*G
= dyn_cast
<GlobalAddressSDNode
>(Callee
))
517 Callee
= DAG
.getTargetGlobalAddress(G
->getGlobal(), MVT::i32
);
518 else if (ExternalSymbolSDNode
*E
= dyn_cast
<ExternalSymbolSDNode
>(Callee
))
519 Callee
= DAG
.getTargetExternalSymbol(E
->getSymbol(), MVT::i32
);
521 // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
522 // = Chain, Callee, Reg#1, Reg#2, ...
524 // Returns a chain & a flag for retval copy to use.
525 SDVTList NodeTys
= DAG
.getVTList(MVT::Other
, MVT::Flag
);
526 SmallVector
<SDValue
, 8> Ops
;
527 Ops
.push_back(Chain
);
528 Ops
.push_back(Callee
);
530 // Add argument registers to the end of the list so that they are
531 // known live into the call.
532 for (unsigned i
= 0, e
= RegsToPass
.size(); i
!= e
; ++i
)
533 Ops
.push_back(DAG
.getRegister(RegsToPass
[i
].first
,
534 RegsToPass
[i
].second
.getValueType()));
536 if (InFlag
.getNode())
537 Ops
.push_back(InFlag
);
539 Chain
= DAG
.getNode(XCoreISD::BL
, dl
, NodeTys
, &Ops
[0], Ops
.size());
540 InFlag
= Chain
.getValue(1);
542 // Create the CALLSEQ_END node.
543 Chain
= DAG
.getCALLSEQ_END(Chain
,
544 DAG
.getConstant(NumBytes
, getPointerTy(), true),
545 DAG
.getConstant(0, getPointerTy(), true),
547 InFlag
= Chain
.getValue(1);
549 // Handle result values, copying them out of physregs into vregs that we
551 return SDValue(LowerCallResult(Chain
, InFlag
, TheCall
, CC
, DAG
),
555 /// LowerCallResult - Lower the result values of an ISD::CALL into the
556 /// appropriate copies out of appropriate physical registers. This assumes that
557 /// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
558 /// being lowered. Returns a SDNode with the same number of values as the
560 SDNode
*XCoreTargetLowering::
561 LowerCallResult(SDValue Chain
, SDValue InFlag
, CallSDNode
*TheCall
,
562 unsigned CallingConv
, SelectionDAG
&DAG
) {
563 bool isVarArg
= TheCall
->isVarArg();
564 DebugLoc dl
= TheCall
->getDebugLoc();
566 // Assign locations to each value returned by this call.
567 SmallVector
<CCValAssign
, 16> RVLocs
;
568 CCState
CCInfo(CallingConv
, isVarArg
, getTargetMachine(), RVLocs
);
570 CCInfo
.AnalyzeCallResult(TheCall
, RetCC_XCore
);
571 SmallVector
<SDValue
, 8> ResultVals
;
573 // Copy all of the result registers out of their specified physreg.
574 for (unsigned i
= 0; i
!= RVLocs
.size(); ++i
) {
575 Chain
= DAG
.getCopyFromReg(Chain
, dl
, RVLocs
[i
].getLocReg(),
576 RVLocs
[i
].getValVT(), InFlag
).getValue(1);
577 InFlag
= Chain
.getValue(2);
578 ResultVals
.push_back(Chain
.getValue(0));
581 ResultVals
.push_back(Chain
);
583 // Merge everything together with a MERGE_VALUES node.
584 return DAG
.getNode(ISD::MERGE_VALUES
, dl
, TheCall
->getVTList(),
585 &ResultVals
[0], ResultVals
.size()).getNode();
588 //===----------------------------------------------------------------------===//
589 // FORMAL_ARGUMENTS Calling Convention Implementation
590 //===----------------------------------------------------------------------===//
592 /// XCore custom FORMAL_ARGUMENTS implementation
593 SDValue
XCoreTargetLowering::
594 LowerFORMAL_ARGUMENTS(SDValue Op
, SelectionDAG
&DAG
)
596 unsigned CC
= cast
<ConstantSDNode
>(Op
.getOperand(1))->getZExtValue();
600 assert(0 && "Unsupported calling convention");
602 case CallingConv::Fast
:
603 return LowerCCCArguments(Op
, DAG
);
607 /// LowerCCCArguments - transform physical registers into
608 /// virtual registers and generate load operations for
609 /// arguments places on the stack.
611 SDValue
XCoreTargetLowering::
612 LowerCCCArguments(SDValue Op
, SelectionDAG
&DAG
)
614 MachineFunction
&MF
= DAG
.getMachineFunction();
615 MachineFrameInfo
*MFI
= MF
.getFrameInfo();
616 MachineRegisterInfo
&RegInfo
= MF
.getRegInfo();
617 SDValue Root
= Op
.getOperand(0);
618 bool isVarArg
= cast
<ConstantSDNode
>(Op
.getOperand(2))->getZExtValue() != 0;
619 unsigned CC
= MF
.getFunction()->getCallingConv();
620 DebugLoc dl
= Op
.getDebugLoc();
622 // Assign locations to all of the incoming arguments.
623 SmallVector
<CCValAssign
, 16> ArgLocs
;
624 CCState
CCInfo(CC
, isVarArg
, getTargetMachine(), ArgLocs
);
626 CCInfo
.AnalyzeFormalArguments(Op
.getNode(), CC_XCore
);
628 unsigned StackSlotSize
= XCoreFrameInfo::stackSlotSize();
630 SmallVector
<SDValue
, 16> ArgValues
;
632 unsigned LRSaveSize
= StackSlotSize
;
634 for (unsigned i
= 0, e
= ArgLocs
.size(); i
!= e
; ++i
) {
636 CCValAssign
&VA
= ArgLocs
[i
];
639 // Arguments passed in registers
640 MVT RegVT
= VA
.getLocVT();
641 switch (RegVT
.getSimpleVT()) {
643 cerr
<< "LowerFORMAL_ARGUMENTS Unhandled argument type: "
644 << RegVT
.getSimpleVT()
648 unsigned VReg
= RegInfo
.createVirtualRegister(
649 XCore::GRRegsRegisterClass
);
650 RegInfo
.addLiveIn(VA
.getLocReg(), VReg
);
651 ArgValues
.push_back(DAG
.getCopyFromReg(Root
, dl
, VReg
, RegVT
));
655 assert(VA
.isMemLoc());
656 // Load the argument to a virtual register
657 unsigned ObjSize
= VA
.getLocVT().getSizeInBits()/8;
658 if (ObjSize
> StackSlotSize
) {
659 cerr
<< "LowerFORMAL_ARGUMENTS Unhandled argument type: "
660 << VA
.getLocVT().getSimpleVT()
663 // Create the frame index object for this incoming parameter...
664 int FI
= MFI
->CreateFixedObject(ObjSize
,
665 LRSaveSize
+ VA
.getLocMemOffset());
667 // Create the SelectionDAG nodes corresponding to a load
668 //from this parameter
669 SDValue FIN
= DAG
.getFrameIndex(FI
, MVT::i32
);
670 ArgValues
.push_back(DAG
.getLoad(VA
.getLocVT(), dl
, Root
, FIN
, NULL
, 0));
675 /* Argument registers */
676 static const unsigned ArgRegs
[] = {
677 XCore::R0
, XCore::R1
, XCore::R2
, XCore::R3
679 XCoreFunctionInfo
*XFI
= MF
.getInfo
<XCoreFunctionInfo
>();
680 unsigned FirstVAReg
= CCInfo
.getFirstUnallocated(ArgRegs
,
681 array_lengthof(ArgRegs
));
682 if (FirstVAReg
< array_lengthof(ArgRegs
)) {
683 SmallVector
<SDValue
, 4> MemOps
;
685 // Save remaining registers, storing higher register numbers at a higher
687 for (unsigned i
= array_lengthof(ArgRegs
) - 1; i
>= FirstVAReg
; --i
) {
688 // Create a stack slot
689 int FI
= MFI
->CreateFixedObject(4, offset
);
690 if (i
== FirstVAReg
) {
691 XFI
->setVarArgsFrameIndex(FI
);
693 offset
-= StackSlotSize
;
694 SDValue FIN
= DAG
.getFrameIndex(FI
, MVT::i32
);
695 // Move argument from phys reg -> virt reg
696 unsigned VReg
= RegInfo
.createVirtualRegister(
697 XCore::GRRegsRegisterClass
);
698 RegInfo
.addLiveIn(ArgRegs
[i
], VReg
);
699 SDValue Val
= DAG
.getCopyFromReg(Root
, dl
, VReg
, MVT::i32
);
700 // Move argument from virt reg -> stack
701 SDValue Store
= DAG
.getStore(Val
.getValue(1), dl
, Val
, FIN
, NULL
, 0);
702 MemOps
.push_back(Store
);
705 Root
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
,
706 &MemOps
[0], MemOps
.size());
708 // This will point to the next argument passed via stack.
709 XFI
->setVarArgsFrameIndex(
710 MFI
->CreateFixedObject(4, LRSaveSize
+ CCInfo
.getNextStackOffset()));
714 ArgValues
.push_back(Root
);
716 // Return the new list of results.
717 std::vector
<MVT
> RetVT(Op
.getNode()->value_begin(),
718 Op
.getNode()->value_end());
719 return DAG
.getNode(ISD::MERGE_VALUES
, dl
, RetVT
,
720 &ArgValues
[0], ArgValues
.size());
723 //===----------------------------------------------------------------------===//
724 // Return Value Calling Convention Implementation
725 //===----------------------------------------------------------------------===//
727 SDValue
XCoreTargetLowering::
728 LowerRET(SDValue Op
, SelectionDAG
&DAG
)
730 // CCValAssign - represent the assignment of
731 // the return value to a location
732 SmallVector
<CCValAssign
, 16> RVLocs
;
733 unsigned CC
= DAG
.getMachineFunction().getFunction()->getCallingConv();
734 bool isVarArg
= DAG
.getMachineFunction().getFunction()->isVarArg();
735 DebugLoc dl
= Op
.getDebugLoc();
737 // CCState - Info about the registers and stack slot.
738 CCState
CCInfo(CC
, isVarArg
, getTargetMachine(), RVLocs
);
740 // Analize return values of ISD::RET
741 CCInfo
.AnalyzeReturn(Op
.getNode(), RetCC_XCore
);
743 // If this is the first return lowered for this function, add
744 // the regs to the liveout set for the function.
745 if (DAG
.getMachineFunction().getRegInfo().liveout_empty()) {
746 for (unsigned i
= 0; i
!= RVLocs
.size(); ++i
)
747 if (RVLocs
[i
].isRegLoc())
748 DAG
.getMachineFunction().getRegInfo().addLiveOut(RVLocs
[i
].getLocReg());
751 // The chain is always operand #0
752 SDValue Chain
= Op
.getOperand(0);
755 // Copy the result values into the output registers.
756 for (unsigned i
= 0; i
!= RVLocs
.size(); ++i
) {
757 CCValAssign
&VA
= RVLocs
[i
];
758 assert(VA
.isRegLoc() && "Can only return in registers!");
760 // ISD::RET => ret chain, (regnum1,val1), ...
761 // So i*2+1 index only the regnums
762 Chain
= DAG
.getCopyToReg(Chain
, dl
, VA
.getLocReg(),
763 Op
.getOperand(i
*2+1), Flag
);
765 // guarantee that all emitted copies are
766 // stuck together, avoiding something bad
767 Flag
= Chain
.getValue(1);
770 // Return on XCore is always a "retsp 0"
772 return DAG
.getNode(XCoreISD::RETSP
, dl
, MVT::Other
,
773 Chain
, DAG
.getConstant(0, MVT::i32
), Flag
);
775 return DAG
.getNode(XCoreISD::RETSP
, dl
, MVT::Other
,
776 Chain
, DAG
.getConstant(0, MVT::i32
));
779 //===----------------------------------------------------------------------===//
780 // Other Lowering Code
781 //===----------------------------------------------------------------------===//
784 XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr
*MI
,
785 MachineBasicBlock
*BB
) const {
786 const TargetInstrInfo
&TII
= *getTargetMachine().getInstrInfo();
787 DebugLoc dl
= MI
->getDebugLoc();
788 assert((MI
->getOpcode() == XCore::SELECT_CC
) &&
789 "Unexpected instr type to insert");
791 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
792 // control-flow pattern. The incoming instruction knows the destination vreg
793 // to set, the condition code register to branch on, the true/false values to
794 // select between, and a branch opcode to use.
795 const BasicBlock
*LLVM_BB
= BB
->getBasicBlock();
796 MachineFunction::iterator It
= BB
;
804 // fallthrough --> copy0MBB
805 MachineBasicBlock
*thisMBB
= BB
;
806 MachineFunction
*F
= BB
->getParent();
807 MachineBasicBlock
*copy0MBB
= F
->CreateMachineBasicBlock(LLVM_BB
);
808 MachineBasicBlock
*sinkMBB
= F
->CreateMachineBasicBlock(LLVM_BB
);
809 BuildMI(BB
, dl
, TII
.get(XCore::BRFT_lru6
))
810 .addReg(MI
->getOperand(1).getReg()).addMBB(sinkMBB
);
811 F
->insert(It
, copy0MBB
);
812 F
->insert(It
, sinkMBB
);
813 // Update machine-CFG edges by transferring all successors of the current
814 // block to the new block which will contain the Phi node for the select.
815 sinkMBB
->transferSuccessors(BB
);
816 // Next, add the true and fallthrough blocks as its successors.
817 BB
->addSuccessor(copy0MBB
);
818 BB
->addSuccessor(sinkMBB
);
822 // # fallthrough to sinkMBB
825 // Update machine-CFG edges
826 BB
->addSuccessor(sinkMBB
);
829 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
832 BuildMI(BB
, dl
, TII
.get(XCore::PHI
), MI
->getOperand(0).getReg())
833 .addReg(MI
->getOperand(3).getReg()).addMBB(copy0MBB
)
834 .addReg(MI
->getOperand(2).getReg()).addMBB(thisMBB
);
836 F
->DeleteMachineInstr(MI
); // The pseudo instruction is gone now.
840 //===----------------------------------------------------------------------===//
841 // Addressing mode description hooks
842 //===----------------------------------------------------------------------===//
/// isImmUs - True iff val fits the unsigned-short immediate range [0, 11].
static inline bool isImmUs(int64_t val)
{
  if (val < 0)
    return false;
  return val <= 11;
}

/// isImmUs2 - True iff val is an even value whose half fits [0, 11],
/// i.e. a scaled-by-2 unsigned-short immediate.
static inline bool isImmUs2(int64_t val)
{
  if (val % 2 != 0)
    return false;
  return isImmUs(val / 2);
}

/// isImmUs4 - True iff val is a multiple of 4 whose quarter fits [0, 11],
/// i.e. a scaled-by-4 unsigned-short immediate.
static inline bool isImmUs4(int64_t val)
{
  if (val % 4 != 0)
    return false;
  return isImmUs(val / 4);
}
859 /// isLegalAddressingMode - Return true if the addressing mode represented
860 /// by AM is legal for this target, for a load/store of the specified type.
862 XCoreTargetLowering::isLegalAddressingMode(const AddrMode
&AM
,
863 const Type
*Ty
) const {
864 MVT VT
= getValueType(Ty
, true);
865 // Get expected value type after legalization
866 switch (VT
.getSimpleVT()) {
867 // Legal load / stores
876 // Everything else is lowered to words
882 return VT
== MVT::i32
&& !AM
.HasBaseReg
&& AM
.Scale
== 0 &&
886 switch (VT
.getSimpleVT()) {
892 return isImmUs(AM
.BaseOffs
);
894 return AM
.Scale
== 1 && AM
.BaseOffs
== 0;
898 return isImmUs2(AM
.BaseOffs
);
900 return AM
.Scale
== 2 && AM
.BaseOffs
== 0;
904 return isImmUs4(AM
.BaseOffs
);
907 return AM
.Scale
== 4 && AM
.BaseOffs
== 0;
913 //===----------------------------------------------------------------------===//
914 // XCore Inline Assembly Support
915 //===----------------------------------------------------------------------===//
917 std::vector
<unsigned> XCoreTargetLowering::
918 getRegClassForInlineAsmConstraint(const std::string
&Constraint
,
921 if (Constraint
.size() != 1)
922 return std::vector
<unsigned>();
924 switch (Constraint
[0]) {
927 return make_vector
<unsigned>(XCore::R0
, XCore::R1
, XCore::R2
,
928 XCore::R3
, XCore::R4
, XCore::R5
,
929 XCore::R6
, XCore::R7
, XCore::R8
,
930 XCore::R9
, XCore::R10
, XCore::R11
, 0);
933 return std::vector
<unsigned>();