1 //===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the XCoreTargetLowering class.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "xcore-lower"
16 #include "XCoreISelLowering.h"
17 #include "XCoreMachineFunctionInfo.h"
19 #include "XCoreTargetObjectFile.h"
20 #include "XCoreTargetMachine.h"
21 #include "XCoreSubtarget.h"
22 #include "llvm/DerivedTypes.h"
23 #include "llvm/Function.h"
24 #include "llvm/Intrinsics.h"
25 #include "llvm/CallingConv.h"
26 #include "llvm/GlobalVariable.h"
27 #include "llvm/GlobalAlias.h"
28 #include "llvm/CodeGen/CallingConvLower.h"
29 #include "llvm/CodeGen/MachineFrameInfo.h"
30 #include "llvm/CodeGen/MachineFunction.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineJumpTableInfo.h"
33 #include "llvm/CodeGen/MachineRegisterInfo.h"
34 #include "llvm/CodeGen/SelectionDAGISel.h"
35 #include "llvm/CodeGen/ValueTypes.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/ADT/VectorExtras.h"
42 const char *XCoreTargetLowering::
43 getTargetNodeName(unsigned Opcode
) const
47 case XCoreISD::BL
: return "XCoreISD::BL";
48 case XCoreISD::PCRelativeWrapper
: return "XCoreISD::PCRelativeWrapper";
49 case XCoreISD::DPRelativeWrapper
: return "XCoreISD::DPRelativeWrapper";
50 case XCoreISD::CPRelativeWrapper
: return "XCoreISD::CPRelativeWrapper";
51 case XCoreISD::STWSP
: return "XCoreISD::STWSP";
52 case XCoreISD::RETSP
: return "XCoreISD::RETSP";
53 case XCoreISD::LADD
: return "XCoreISD::LADD";
54 case XCoreISD::LSUB
: return "XCoreISD::LSUB";
55 case XCoreISD::LMUL
: return "XCoreISD::LMUL";
56 case XCoreISD::MACCU
: return "XCoreISD::MACCU";
57 case XCoreISD::MACCS
: return "XCoreISD::MACCS";
58 case XCoreISD::BR_JT
: return "XCoreISD::BR_JT";
59 case XCoreISD::BR_JT32
: return "XCoreISD::BR_JT32";
60 default : return NULL
;
64 XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine
&XTM
)
65 : TargetLowering(XTM
, new XCoreTargetObjectFile()),
67 Subtarget(*XTM
.getSubtargetImpl()) {
69 // Set up the register classes.
70 addRegisterClass(MVT::i32
, XCore::GRRegsRegisterClass
);
72 // Compute derived properties from the register classes
73 computeRegisterProperties();
75 // Division is expensive
76 setIntDivIsCheap(false);
78 setStackPointerRegisterToSaveRestore(XCore::SP
);
80 setSchedulingPreference(Sched::RegPressure
);
82 // Use i32 for setcc operations results (slt, sgt, ...).
83 setBooleanContents(ZeroOrOneBooleanContent
);
85 // XCore does not have the NodeTypes below.
86 setOperationAction(ISD::BR_CC
, MVT::Other
, Expand
);
87 setOperationAction(ISD::SELECT_CC
, MVT::i32
, Custom
);
88 setOperationAction(ISD::ADDC
, MVT::i32
, Expand
);
89 setOperationAction(ISD::ADDE
, MVT::i32
, Expand
);
90 setOperationAction(ISD::SUBC
, MVT::i32
, Expand
);
91 setOperationAction(ISD::SUBE
, MVT::i32
, Expand
);
93 // Stop the combiner recombining select and set_cc
94 setOperationAction(ISD::SELECT_CC
, MVT::Other
, Expand
);
97 setOperationAction(ISD::ADD
, MVT::i64
, Custom
);
98 setOperationAction(ISD::SUB
, MVT::i64
, Custom
);
99 setOperationAction(ISD::SMUL_LOHI
, MVT::i32
, Custom
);
100 setOperationAction(ISD::UMUL_LOHI
, MVT::i32
, Custom
);
101 setOperationAction(ISD::MULHS
, MVT::i32
, Expand
);
102 setOperationAction(ISD::MULHU
, MVT::i32
, Expand
);
103 setOperationAction(ISD::SHL_PARTS
, MVT::i32
, Expand
);
104 setOperationAction(ISD::SRA_PARTS
, MVT::i32
, Expand
);
105 setOperationAction(ISD::SRL_PARTS
, MVT::i32
, Expand
);
108 setOperationAction(ISD::CTPOP
, MVT::i32
, Expand
);
109 setOperationAction(ISD::ROTL
, MVT::i32
, Expand
);
110 setOperationAction(ISD::ROTR
, MVT::i32
, Expand
);
112 setOperationAction(ISD::TRAP
, MVT::Other
, Legal
);
115 setOperationAction(ISD::BR_JT
, MVT::Other
, Custom
);
117 setOperationAction(ISD::GlobalAddress
, MVT::i32
, Custom
);
118 setOperationAction(ISD::BlockAddress
, MVT::i32
, Custom
);
120 // Thread Local Storage
121 setOperationAction(ISD::GlobalTLSAddress
, MVT::i32
, Custom
);
123 // Conversion of i64 -> double produces constantpool nodes
124 setOperationAction(ISD::ConstantPool
, MVT::i32
, Custom
);
127 setLoadExtAction(ISD::EXTLOAD
, MVT::i1
, Promote
);
128 setLoadExtAction(ISD::ZEXTLOAD
, MVT::i1
, Promote
);
129 setLoadExtAction(ISD::SEXTLOAD
, MVT::i1
, Promote
);
131 setLoadExtAction(ISD::SEXTLOAD
, MVT::i8
, Expand
);
132 setLoadExtAction(ISD::ZEXTLOAD
, MVT::i16
, Expand
);
134 // Custom expand misaligned loads / stores.
135 setOperationAction(ISD::LOAD
, MVT::i32
, Custom
);
136 setOperationAction(ISD::STORE
, MVT::i32
, Custom
);
139 setOperationAction(ISD::VAEND
, MVT::Other
, Expand
);
140 setOperationAction(ISD::VACOPY
, MVT::Other
, Expand
);
141 setOperationAction(ISD::VAARG
, MVT::Other
, Custom
);
142 setOperationAction(ISD::VASTART
, MVT::Other
, Custom
);
145 setOperationAction(ISD::STACKSAVE
, MVT::Other
, Expand
);
146 setOperationAction(ISD::STACKRESTORE
, MVT::Other
, Expand
);
147 setOperationAction(ISD::DYNAMIC_STACKALLOC
, MVT::i32
, Expand
);
149 // TRAMPOLINE is custom lowered.
150 setOperationAction(ISD::TRAMPOLINE
, MVT::Other
, Custom
);
152 maxStoresPerMemset
= maxStoresPerMemsetOptSize
= 4;
153 maxStoresPerMemmove
= maxStoresPerMemmoveOptSize
154 = maxStoresPerMemcpy
= maxStoresPerMemcpyOptSize
= 2;
156 // We have target-specific dag combine patterns for the following nodes:
157 setTargetDAGCombine(ISD::STORE
);
158 setTargetDAGCombine(ISD::ADD
);
160 setMinFunctionAlignment(1);
163 SDValue
XCoreTargetLowering::
164 LowerOperation(SDValue Op
, SelectionDAG
&DAG
) const {
165 switch (Op
.getOpcode())
167 case ISD::GlobalAddress
: return LowerGlobalAddress(Op
, DAG
);
168 case ISD::GlobalTLSAddress
: return LowerGlobalTLSAddress(Op
, DAG
);
169 case ISD::BlockAddress
: return LowerBlockAddress(Op
, DAG
);
170 case ISD::ConstantPool
: return LowerConstantPool(Op
, DAG
);
171 case ISD::BR_JT
: return LowerBR_JT(Op
, DAG
);
172 case ISD::LOAD
: return LowerLOAD(Op
, DAG
);
173 case ISD::STORE
: return LowerSTORE(Op
, DAG
);
174 case ISD::SELECT_CC
: return LowerSELECT_CC(Op
, DAG
);
175 case ISD::VAARG
: return LowerVAARG(Op
, DAG
);
176 case ISD::VASTART
: return LowerVASTART(Op
, DAG
);
177 case ISD::SMUL_LOHI
: return LowerSMUL_LOHI(Op
, DAG
);
178 case ISD::UMUL_LOHI
: return LowerUMUL_LOHI(Op
, DAG
);
179 // FIXME: Remove these when LegalizeDAGTypes lands.
181 case ISD::SUB
: return ExpandADDSUB(Op
.getNode(), DAG
);
182 case ISD::FRAMEADDR
: return LowerFRAMEADDR(Op
, DAG
);
183 case ISD::TRAMPOLINE
: return LowerTRAMPOLINE(Op
, DAG
);
185 llvm_unreachable("unimplemented operand");
190 /// ReplaceNodeResults - Replace the results of node with an illegal result
191 /// type with new values built out of custom code.
192 void XCoreTargetLowering::ReplaceNodeResults(SDNode
*N
,
193 SmallVectorImpl
<SDValue
>&Results
,
194 SelectionDAG
&DAG
) const {
195 switch (N
->getOpcode()) {
197 llvm_unreachable("Don't know how to custom expand this!");
201 Results
.push_back(ExpandADDSUB(N
, DAG
));
206 //===----------------------------------------------------------------------===//
207 // Misc Lower Operation implementation
208 //===----------------------------------------------------------------------===//
210 SDValue
XCoreTargetLowering::
211 LowerSELECT_CC(SDValue Op
, SelectionDAG
&DAG
) const
213 DebugLoc dl
= Op
.getDebugLoc();
214 SDValue Cond
= DAG
.getNode(ISD::SETCC
, dl
, MVT::i32
, Op
.getOperand(2),
215 Op
.getOperand(3), Op
.getOperand(4));
216 return DAG
.getNode(ISD::SELECT
, dl
, MVT::i32
, Cond
, Op
.getOperand(0),
220 SDValue
XCoreTargetLowering::
221 getGlobalAddressWrapper(SDValue GA
, const GlobalValue
*GV
,
222 SelectionDAG
&DAG
) const
224 // FIXME there is no actual debug info here
225 DebugLoc dl
= GA
.getDebugLoc();
226 if (isa
<Function
>(GV
)) {
227 return DAG
.getNode(XCoreISD::PCRelativeWrapper
, dl
, MVT::i32
, GA
);
229 const GlobalVariable
*GVar
= dyn_cast
<GlobalVariable
>(GV
);
231 // If GV is an alias then use the aliasee to determine constness
232 if (const GlobalAlias
*GA
= dyn_cast
<GlobalAlias
>(GV
))
233 GVar
= dyn_cast_or_null
<GlobalVariable
>(GA
->resolveAliasedGlobal());
235 bool isConst
= GVar
&& GVar
->isConstant();
237 return DAG
.getNode(XCoreISD::CPRelativeWrapper
, dl
, MVT::i32
, GA
);
239 return DAG
.getNode(XCoreISD::DPRelativeWrapper
, dl
, MVT::i32
, GA
);
242 SDValue
XCoreTargetLowering::
243 LowerGlobalAddress(SDValue Op
, SelectionDAG
&DAG
) const
245 const GlobalValue
*GV
= cast
<GlobalAddressSDNode
>(Op
)->getGlobal();
246 SDValue GA
= DAG
.getTargetGlobalAddress(GV
, Op
.getDebugLoc(), MVT::i32
);
247 return getGlobalAddressWrapper(GA
, GV
, DAG
);
250 static inline SDValue
BuildGetId(SelectionDAG
&DAG
, DebugLoc dl
) {
251 return DAG
.getNode(ISD::INTRINSIC_WO_CHAIN
, dl
, MVT::i32
,
252 DAG
.getConstant(Intrinsic::xcore_getid
, MVT::i32
));
255 static inline bool isZeroLengthArray(const Type
*Ty
) {
256 const ArrayType
*AT
= dyn_cast_or_null
<ArrayType
>(Ty
);
257 return AT
&& (AT
->getNumElements() == 0);
260 SDValue
XCoreTargetLowering::
261 LowerGlobalTLSAddress(SDValue Op
, SelectionDAG
&DAG
) const
263 // FIXME there isn't really debug info here
264 DebugLoc dl
= Op
.getDebugLoc();
265 // transform to label + getid() * size
266 const GlobalValue
*GV
= cast
<GlobalAddressSDNode
>(Op
)->getGlobal();
267 SDValue GA
= DAG
.getTargetGlobalAddress(GV
, dl
, MVT::i32
);
268 const GlobalVariable
*GVar
= dyn_cast
<GlobalVariable
>(GV
);
270 // If GV is an alias then use the aliasee to determine size
271 if (const GlobalAlias
*GA
= dyn_cast
<GlobalAlias
>(GV
))
272 GVar
= dyn_cast_or_null
<GlobalVariable
>(GA
->resolveAliasedGlobal());
275 llvm_unreachable("Thread local object not a GlobalVariable?");
278 const Type
*Ty
= cast
<PointerType
>(GV
->getType())->getElementType();
279 if (!Ty
->isSized() || isZeroLengthArray(Ty
)) {
281 errs() << "Size of thread local object " << GVar
->getName()
286 SDValue base
= getGlobalAddressWrapper(GA
, GV
, DAG
);
287 const TargetData
*TD
= TM
.getTargetData();
288 unsigned Size
= TD
->getTypeAllocSize(Ty
);
289 SDValue offset
= DAG
.getNode(ISD::MUL
, dl
, MVT::i32
, BuildGetId(DAG
, dl
),
290 DAG
.getConstant(Size
, MVT::i32
));
291 return DAG
.getNode(ISD::ADD
, dl
, MVT::i32
, base
, offset
);
294 SDValue
XCoreTargetLowering::
295 LowerBlockAddress(SDValue Op
, SelectionDAG
&DAG
) const
297 DebugLoc DL
= Op
.getDebugLoc();
299 const BlockAddress
*BA
= cast
<BlockAddressSDNode
>(Op
)->getBlockAddress();
300 SDValue Result
= DAG
.getBlockAddress(BA
, getPointerTy(), /*isTarget=*/true);
302 return DAG
.getNode(XCoreISD::PCRelativeWrapper
, DL
, getPointerTy(), Result
);
305 SDValue
XCoreTargetLowering::
306 LowerConstantPool(SDValue Op
, SelectionDAG
&DAG
) const
308 ConstantPoolSDNode
*CP
= cast
<ConstantPoolSDNode
>(Op
);
309 // FIXME there isn't really debug info here
310 DebugLoc dl
= CP
->getDebugLoc();
311 EVT PtrVT
= Op
.getValueType();
313 if (CP
->isMachineConstantPoolEntry()) {
314 Res
= DAG
.getTargetConstantPool(CP
->getMachineCPVal(), PtrVT
,
317 Res
= DAG
.getTargetConstantPool(CP
->getConstVal(), PtrVT
,
320 return DAG
.getNode(XCoreISD::CPRelativeWrapper
, dl
, MVT::i32
, Res
);
323 unsigned XCoreTargetLowering::getJumpTableEncoding() const {
324 return MachineJumpTableInfo::EK_Inline
;
327 SDValue
XCoreTargetLowering::
328 LowerBR_JT(SDValue Op
, SelectionDAG
&DAG
) const
330 SDValue Chain
= Op
.getOperand(0);
331 SDValue Table
= Op
.getOperand(1);
332 SDValue Index
= Op
.getOperand(2);
333 DebugLoc dl
= Op
.getDebugLoc();
334 JumpTableSDNode
*JT
= cast
<JumpTableSDNode
>(Table
);
335 unsigned JTI
= JT
->getIndex();
336 MachineFunction
&MF
= DAG
.getMachineFunction();
337 const MachineJumpTableInfo
*MJTI
= MF
.getJumpTableInfo();
338 SDValue TargetJT
= DAG
.getTargetJumpTable(JT
->getIndex(), MVT::i32
);
340 unsigned NumEntries
= MJTI
->getJumpTables()[JTI
].MBBs
.size();
341 if (NumEntries
<= 32) {
342 return DAG
.getNode(XCoreISD::BR_JT
, dl
, MVT::Other
, Chain
, TargetJT
, Index
);
344 assert((NumEntries
>> 31) == 0);
345 SDValue ScaledIndex
= DAG
.getNode(ISD::SHL
, dl
, MVT::i32
, Index
,
346 DAG
.getConstant(1, MVT::i32
));
347 return DAG
.getNode(XCoreISD::BR_JT32
, dl
, MVT::Other
, Chain
, TargetJT
,
352 IsWordAlignedBasePlusConstantOffset(SDValue Addr
, SDValue
&AlignedBase
,
355 if (Addr
.getOpcode() != ISD::ADD
) {
358 ConstantSDNode
*CN
= 0;
359 if (!(CN
= dyn_cast
<ConstantSDNode
>(Addr
.getOperand(1)))) {
362 int64_t off
= CN
->getSExtValue();
363 const SDValue
&Base
= Addr
.getOperand(0);
364 const SDValue
*Root
= &Base
;
365 if (Base
.getOpcode() == ISD::ADD
&&
366 Base
.getOperand(1).getOpcode() == ISD::SHL
) {
367 ConstantSDNode
*CN
= dyn_cast
<ConstantSDNode
>(Base
.getOperand(1)
369 if (CN
&& (CN
->getSExtValue() >= 2)) {
370 Root
= &Base
.getOperand(0);
373 if (isa
<FrameIndexSDNode
>(*Root
)) {
374 // All frame indicies are word aligned
379 if (Root
->getOpcode() == XCoreISD::DPRelativeWrapper
||
380 Root
->getOpcode() == XCoreISD::CPRelativeWrapper
) {
381 // All dp / cp relative addresses are word aligned
389 SDValue
XCoreTargetLowering::
390 LowerLOAD(SDValue Op
, SelectionDAG
&DAG
) const {
391 LoadSDNode
*LD
= cast
<LoadSDNode
>(Op
);
392 assert(LD
->getExtensionType() == ISD::NON_EXTLOAD
&&
393 "Unexpected extension type");
394 assert(LD
->getMemoryVT() == MVT::i32
&& "Unexpected load EVT");
395 if (allowsUnalignedMemoryAccesses(LD
->getMemoryVT()))
398 unsigned ABIAlignment
= getTargetData()->
399 getABITypeAlignment(LD
->getMemoryVT().getTypeForEVT(*DAG
.getContext()));
400 // Leave aligned load alone.
401 if (LD
->getAlignment() >= ABIAlignment
)
404 SDValue Chain
= LD
->getChain();
405 SDValue BasePtr
= LD
->getBasePtr();
406 DebugLoc DL
= Op
.getDebugLoc();
410 if (!LD
->isVolatile() &&
411 IsWordAlignedBasePlusConstantOffset(BasePtr
, Base
, Offset
)) {
412 if (Offset
% 4 == 0) {
413 // We've managed to infer better alignment information than the load
414 // already has. Use an aligned load.
416 return DAG
.getLoad(getPointerTy(), DL
, Chain
, BasePtr
,
417 MachinePointerInfo(),
421 // ldw low, base[offset >> 2]
422 // ldw high, base[(offset >> 2) + 1]
423 // shr low_shifted, low, (offset & 0x3) * 8
424 // shl high_shifted, high, 32 - (offset & 0x3) * 8
425 // or result, low_shifted, high_shifted
426 SDValue LowOffset
= DAG
.getConstant(Offset
& ~0x3, MVT::i32
);
427 SDValue HighOffset
= DAG
.getConstant((Offset
& ~0x3) + 4, MVT::i32
);
428 SDValue LowShift
= DAG
.getConstant((Offset
& 0x3) * 8, MVT::i32
);
429 SDValue HighShift
= DAG
.getConstant(32 - (Offset
& 0x3) * 8, MVT::i32
);
431 SDValue LowAddr
= DAG
.getNode(ISD::ADD
, DL
, MVT::i32
, Base
, LowOffset
);
432 SDValue HighAddr
= DAG
.getNode(ISD::ADD
, DL
, MVT::i32
, Base
, HighOffset
);
434 SDValue Low
= DAG
.getLoad(getPointerTy(), DL
, Chain
,
435 LowAddr
, MachinePointerInfo(), false, false, 0);
436 SDValue High
= DAG
.getLoad(getPointerTy(), DL
, Chain
,
437 HighAddr
, MachinePointerInfo(), false, false, 0);
438 SDValue LowShifted
= DAG
.getNode(ISD::SRL
, DL
, MVT::i32
, Low
, LowShift
);
439 SDValue HighShifted
= DAG
.getNode(ISD::SHL
, DL
, MVT::i32
, High
, HighShift
);
440 SDValue Result
= DAG
.getNode(ISD::OR
, DL
, MVT::i32
, LowShifted
, HighShifted
);
441 Chain
= DAG
.getNode(ISD::TokenFactor
, DL
, MVT::Other
, Low
.getValue(1),
443 SDValue Ops
[] = { Result
, Chain
};
444 return DAG
.getMergeValues(Ops
, 2, DL
);
447 if (LD
->getAlignment() == 2) {
448 SDValue Low
= DAG
.getExtLoad(ISD::ZEXTLOAD
, DL
, MVT::i32
, Chain
,
449 BasePtr
, LD
->getPointerInfo(), MVT::i16
,
450 LD
->isVolatile(), LD
->isNonTemporal(), 2);
451 SDValue HighAddr
= DAG
.getNode(ISD::ADD
, DL
, MVT::i32
, BasePtr
,
452 DAG
.getConstant(2, MVT::i32
));
453 SDValue High
= DAG
.getExtLoad(ISD::EXTLOAD
, DL
, MVT::i32
, Chain
,
455 LD
->getPointerInfo().getWithOffset(2),
456 MVT::i16
, LD
->isVolatile(),
457 LD
->isNonTemporal(), 2);
458 SDValue HighShifted
= DAG
.getNode(ISD::SHL
, DL
, MVT::i32
, High
,
459 DAG
.getConstant(16, MVT::i32
));
460 SDValue Result
= DAG
.getNode(ISD::OR
, DL
, MVT::i32
, Low
, HighShifted
);
461 Chain
= DAG
.getNode(ISD::TokenFactor
, DL
, MVT::Other
, Low
.getValue(1),
463 SDValue Ops
[] = { Result
, Chain
};
464 return DAG
.getMergeValues(Ops
, 2, DL
);
467 // Lower to a call to __misaligned_load(BasePtr).
468 const Type
*IntPtrTy
= getTargetData()->getIntPtrType(*DAG
.getContext());
469 TargetLowering::ArgListTy Args
;
470 TargetLowering::ArgListEntry Entry
;
473 Entry
.Node
= BasePtr
;
474 Args
.push_back(Entry
);
476 std::pair
<SDValue
, SDValue
> CallResult
=
477 LowerCallTo(Chain
, IntPtrTy
, false, false,
478 false, false, 0, CallingConv::C
, false,
479 /*isReturnValueUsed=*/true,
480 DAG
.getExternalSymbol("__misaligned_load", getPointerTy()),
484 { CallResult
.first
, CallResult
.second
};
486 return DAG
.getMergeValues(Ops
, 2, DL
);
489 SDValue
XCoreTargetLowering::
490 LowerSTORE(SDValue Op
, SelectionDAG
&DAG
) const
492 StoreSDNode
*ST
= cast
<StoreSDNode
>(Op
);
493 assert(!ST
->isTruncatingStore() && "Unexpected store type");
494 assert(ST
->getMemoryVT() == MVT::i32
&& "Unexpected store EVT");
495 if (allowsUnalignedMemoryAccesses(ST
->getMemoryVT())) {
498 unsigned ABIAlignment
= getTargetData()->
499 getABITypeAlignment(ST
->getMemoryVT().getTypeForEVT(*DAG
.getContext()));
500 // Leave aligned store alone.
501 if (ST
->getAlignment() >= ABIAlignment
) {
504 SDValue Chain
= ST
->getChain();
505 SDValue BasePtr
= ST
->getBasePtr();
506 SDValue Value
= ST
->getValue();
507 DebugLoc dl
= Op
.getDebugLoc();
509 if (ST
->getAlignment() == 2) {
511 SDValue High
= DAG
.getNode(ISD::SRL
, dl
, MVT::i32
, Value
,
512 DAG
.getConstant(16, MVT::i32
));
513 SDValue StoreLow
= DAG
.getTruncStore(Chain
, dl
, Low
, BasePtr
,
514 ST
->getPointerInfo(), MVT::i16
,
515 ST
->isVolatile(), ST
->isNonTemporal(),
517 SDValue HighAddr
= DAG
.getNode(ISD::ADD
, dl
, MVT::i32
, BasePtr
,
518 DAG
.getConstant(2, MVT::i32
));
519 SDValue StoreHigh
= DAG
.getTruncStore(Chain
, dl
, High
, HighAddr
,
520 ST
->getPointerInfo().getWithOffset(2),
521 MVT::i16
, ST
->isVolatile(),
522 ST
->isNonTemporal(), 2);
523 return DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, StoreLow
, StoreHigh
);
526 // Lower to a call to __misaligned_store(BasePtr, Value).
527 const Type
*IntPtrTy
= getTargetData()->getIntPtrType(*DAG
.getContext());
528 TargetLowering::ArgListTy Args
;
529 TargetLowering::ArgListEntry Entry
;
532 Entry
.Node
= BasePtr
;
533 Args
.push_back(Entry
);
536 Args
.push_back(Entry
);
538 std::pair
<SDValue
, SDValue
> CallResult
=
539 LowerCallTo(Chain
, Type::getVoidTy(*DAG
.getContext()), false, false,
540 false, false, 0, CallingConv::C
, false,
541 /*isReturnValueUsed=*/true,
542 DAG
.getExternalSymbol("__misaligned_store", getPointerTy()),
545 return CallResult
.second
;
548 SDValue
XCoreTargetLowering::
549 LowerSMUL_LOHI(SDValue Op
, SelectionDAG
&DAG
) const
551 assert(Op
.getValueType() == MVT::i32
&& Op
.getOpcode() == ISD::SMUL_LOHI
&&
552 "Unexpected operand to lower!");
553 DebugLoc dl
= Op
.getDebugLoc();
554 SDValue LHS
= Op
.getOperand(0);
555 SDValue RHS
= Op
.getOperand(1);
556 SDValue Zero
= DAG
.getConstant(0, MVT::i32
);
557 SDValue Hi
= DAG
.getNode(XCoreISD::MACCS
, dl
,
558 DAG
.getVTList(MVT::i32
, MVT::i32
), Zero
, Zero
,
560 SDValue
Lo(Hi
.getNode(), 1);
561 SDValue Ops
[] = { Lo
, Hi
};
562 return DAG
.getMergeValues(Ops
, 2, dl
);
565 SDValue
XCoreTargetLowering::
566 LowerUMUL_LOHI(SDValue Op
, SelectionDAG
&DAG
) const
568 assert(Op
.getValueType() == MVT::i32
&& Op
.getOpcode() == ISD::UMUL_LOHI
&&
569 "Unexpected operand to lower!");
570 DebugLoc dl
= Op
.getDebugLoc();
571 SDValue LHS
= Op
.getOperand(0);
572 SDValue RHS
= Op
.getOperand(1);
573 SDValue Zero
= DAG
.getConstant(0, MVT::i32
);
574 SDValue Hi
= DAG
.getNode(XCoreISD::LMUL
, dl
,
575 DAG
.getVTList(MVT::i32
, MVT::i32
), LHS
, RHS
,
577 SDValue
Lo(Hi
.getNode(), 1);
578 SDValue Ops
[] = { Lo
, Hi
};
579 return DAG
.getMergeValues(Ops
, 2, dl
);
582 /// isADDADDMUL - Return whether Op is in a form that is equivalent to
583 /// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
584 /// each intermediate result in the calculation must also have a single use.
585 /// If the Op is in the correct form the constituent parts are written to Mul0,
586 /// Mul1, Addend0 and Addend1.
588 isADDADDMUL(SDValue Op
, SDValue
&Mul0
, SDValue
&Mul1
, SDValue
&Addend0
,
589 SDValue
&Addend1
, bool requireIntermediatesHaveOneUse
)
591 if (Op
.getOpcode() != ISD::ADD
)
593 SDValue N0
= Op
.getOperand(0);
594 SDValue N1
= Op
.getOperand(1);
597 if (N0
.getOpcode() == ISD::ADD
) {
600 } else if (N1
.getOpcode() == ISD::ADD
) {
606 if (requireIntermediatesHaveOneUse
&& !AddOp
.hasOneUse())
608 if (OtherOp
.getOpcode() == ISD::MUL
) {
609 // add(add(a,b),mul(x,y))
610 if (requireIntermediatesHaveOneUse
&& !OtherOp
.hasOneUse())
612 Mul0
= OtherOp
.getOperand(0);
613 Mul1
= OtherOp
.getOperand(1);
614 Addend0
= AddOp
.getOperand(0);
615 Addend1
= AddOp
.getOperand(1);
618 if (AddOp
.getOperand(0).getOpcode() == ISD::MUL
) {
619 // add(add(mul(x,y),a),b)
620 if (requireIntermediatesHaveOneUse
&& !AddOp
.getOperand(0).hasOneUse())
622 Mul0
= AddOp
.getOperand(0).getOperand(0);
623 Mul1
= AddOp
.getOperand(0).getOperand(1);
624 Addend0
= AddOp
.getOperand(1);
628 if (AddOp
.getOperand(1).getOpcode() == ISD::MUL
) {
629 // add(add(a,mul(x,y)),b)
630 if (requireIntermediatesHaveOneUse
&& !AddOp
.getOperand(1).hasOneUse())
632 Mul0
= AddOp
.getOperand(1).getOperand(0);
633 Mul1
= AddOp
.getOperand(1).getOperand(1);
634 Addend0
= AddOp
.getOperand(0);
641 SDValue
XCoreTargetLowering::
642 TryExpandADDWithMul(SDNode
*N
, SelectionDAG
&DAG
) const
646 if (N
->getOperand(0).getOpcode() == ISD::MUL
) {
647 Mul
= N
->getOperand(0);
648 Other
= N
->getOperand(1);
649 } else if (N
->getOperand(1).getOpcode() == ISD::MUL
) {
650 Mul
= N
->getOperand(1);
651 Other
= N
->getOperand(0);
655 DebugLoc dl
= N
->getDebugLoc();
656 SDValue LL
, RL
, AddendL
, AddendH
;
657 LL
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
658 Mul
.getOperand(0), DAG
.getConstant(0, MVT::i32
));
659 RL
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
660 Mul
.getOperand(1), DAG
.getConstant(0, MVT::i32
));
661 AddendL
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
662 Other
, DAG
.getConstant(0, MVT::i32
));
663 AddendH
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
664 Other
, DAG
.getConstant(1, MVT::i32
));
665 APInt HighMask
= APInt::getHighBitsSet(64, 32);
666 unsigned LHSSB
= DAG
.ComputeNumSignBits(Mul
.getOperand(0));
667 unsigned RHSSB
= DAG
.ComputeNumSignBits(Mul
.getOperand(1));
668 if (DAG
.MaskedValueIsZero(Mul
.getOperand(0), HighMask
) &&
669 DAG
.MaskedValueIsZero(Mul
.getOperand(1), HighMask
)) {
670 // The inputs are both zero-extended.
671 SDValue Hi
= DAG
.getNode(XCoreISD::MACCU
, dl
,
672 DAG
.getVTList(MVT::i32
, MVT::i32
), AddendH
,
674 SDValue
Lo(Hi
.getNode(), 1);
675 return DAG
.getNode(ISD::BUILD_PAIR
, dl
, MVT::i64
, Lo
, Hi
);
677 if (LHSSB
> 32 && RHSSB
> 32) {
678 // The inputs are both sign-extended.
679 SDValue Hi
= DAG
.getNode(XCoreISD::MACCS
, dl
,
680 DAG
.getVTList(MVT::i32
, MVT::i32
), AddendH
,
682 SDValue
Lo(Hi
.getNode(), 1);
683 return DAG
.getNode(ISD::BUILD_PAIR
, dl
, MVT::i64
, Lo
, Hi
);
686 LH
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
687 Mul
.getOperand(0), DAG
.getConstant(1, MVT::i32
));
688 RH
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
689 Mul
.getOperand(1), DAG
.getConstant(1, MVT::i32
));
690 SDValue Hi
= DAG
.getNode(XCoreISD::MACCU
, dl
,
691 DAG
.getVTList(MVT::i32
, MVT::i32
), AddendH
,
693 SDValue
Lo(Hi
.getNode(), 1);
694 RH
= DAG
.getNode(ISD::MUL
, dl
, MVT::i32
, LL
, RH
);
695 LH
= DAG
.getNode(ISD::MUL
, dl
, MVT::i32
, LH
, RL
);
696 Hi
= DAG
.getNode(ISD::ADD
, dl
, MVT::i32
, Hi
, RH
);
697 Hi
= DAG
.getNode(ISD::ADD
, dl
, MVT::i32
, Hi
, LH
);
698 return DAG
.getNode(ISD::BUILD_PAIR
, dl
, MVT::i64
, Lo
, Hi
);
701 SDValue
XCoreTargetLowering::
702 ExpandADDSUB(SDNode
*N
, SelectionDAG
&DAG
) const
704 assert(N
->getValueType(0) == MVT::i64
&&
705 (N
->getOpcode() == ISD::ADD
|| N
->getOpcode() == ISD::SUB
) &&
706 "Unknown operand to lower!");
708 if (N
->getOpcode() == ISD::ADD
) {
709 SDValue Result
= TryExpandADDWithMul(N
, DAG
);
710 if (Result
.getNode() != 0)
714 DebugLoc dl
= N
->getDebugLoc();
716 // Extract components
717 SDValue LHSL
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
718 N
->getOperand(0), DAG
.getConstant(0, MVT::i32
));
719 SDValue LHSH
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
720 N
->getOperand(0), DAG
.getConstant(1, MVT::i32
));
721 SDValue RHSL
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
722 N
->getOperand(1), DAG
.getConstant(0, MVT::i32
));
723 SDValue RHSH
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
,
724 N
->getOperand(1), DAG
.getConstant(1, MVT::i32
));
727 unsigned Opcode
= (N
->getOpcode() == ISD::ADD
) ? XCoreISD::LADD
:
729 SDValue Zero
= DAG
.getConstant(0, MVT::i32
);
730 SDValue Carry
= DAG
.getNode(Opcode
, dl
, DAG
.getVTList(MVT::i32
, MVT::i32
),
732 SDValue
Lo(Carry
.getNode(), 1);
734 SDValue Ignored
= DAG
.getNode(Opcode
, dl
, DAG
.getVTList(MVT::i32
, MVT::i32
),
736 SDValue
Hi(Ignored
.getNode(), 1);
738 return DAG
.getNode(ISD::BUILD_PAIR
, dl
, MVT::i64
, Lo
, Hi
);
741 SDValue
XCoreTargetLowering::
742 LowerVAARG(SDValue Op
, SelectionDAG
&DAG
) const
744 llvm_unreachable("unimplemented");
745 // FIX Arguments passed by reference need a extra dereference.
746 SDNode
*Node
= Op
.getNode();
747 DebugLoc dl
= Node
->getDebugLoc();
748 const Value
*V
= cast
<SrcValueSDNode
>(Node
->getOperand(2))->getValue();
749 EVT VT
= Node
->getValueType(0);
750 SDValue VAList
= DAG
.getLoad(getPointerTy(), dl
, Node
->getOperand(0),
751 Node
->getOperand(1), MachinePointerInfo(V
),
753 // Increment the pointer, VAList, to the next vararg
754 SDValue Tmp3
= DAG
.getNode(ISD::ADD
, dl
, getPointerTy(), VAList
,
755 DAG
.getConstant(VT
.getSizeInBits(),
757 // Store the incremented VAList to the legalized pointer
758 Tmp3
= DAG
.getStore(VAList
.getValue(1), dl
, Tmp3
, Node
->getOperand(1),
759 MachinePointerInfo(V
), false, false, 0);
760 // Load the actual argument out of the pointer VAList
761 return DAG
.getLoad(VT
, dl
, Tmp3
, VAList
, MachinePointerInfo(),
765 SDValue
XCoreTargetLowering::
766 LowerVASTART(SDValue Op
, SelectionDAG
&DAG
) const
768 DebugLoc dl
= Op
.getDebugLoc();
769 // vastart stores the address of the VarArgsFrameIndex slot into the
770 // memory location argument
771 MachineFunction
&MF
= DAG
.getMachineFunction();
772 XCoreFunctionInfo
*XFI
= MF
.getInfo
<XCoreFunctionInfo
>();
773 SDValue Addr
= DAG
.getFrameIndex(XFI
->getVarArgsFrameIndex(), MVT::i32
);
774 return DAG
.getStore(Op
.getOperand(0), dl
, Addr
, Op
.getOperand(1),
775 MachinePointerInfo(), false, false, 0);
778 SDValue
XCoreTargetLowering::LowerFRAMEADDR(SDValue Op
,
779 SelectionDAG
&DAG
) const {
780 DebugLoc dl
= Op
.getDebugLoc();
781 // Depths > 0 not supported yet!
782 if (cast
<ConstantSDNode
>(Op
.getOperand(0))->getZExtValue() > 0)
785 MachineFunction
&MF
= DAG
.getMachineFunction();
786 const TargetRegisterInfo
*RegInfo
= getTargetMachine().getRegisterInfo();
787 return DAG
.getCopyFromReg(DAG
.getEntryNode(), dl
,
788 RegInfo
->getFrameRegister(MF
), MVT::i32
);
791 SDValue
XCoreTargetLowering::
792 LowerTRAMPOLINE(SDValue Op
, SelectionDAG
&DAG
) const {
793 SDValue Chain
= Op
.getOperand(0);
794 SDValue Trmp
= Op
.getOperand(1); // trampoline
795 SDValue FPtr
= Op
.getOperand(2); // nested function
796 SDValue Nest
= Op
.getOperand(3); // 'nest' parameter value
798 const Value
*TrmpAddr
= cast
<SrcValueSDNode
>(Op
.getOperand(4))->getValue();
801 // LDAPF_u10 r11, nest
802 // LDW_2rus r11, r11[0]
803 // STWSP_ru6 r11, sp[0]
804 // LDAPF_u10 r11, fptr
805 // LDW_2rus r11, r11[0]
811 SDValue OutChains
[5];
815 DebugLoc dl
= Op
.getDebugLoc();
816 OutChains
[0] = DAG
.getStore(Chain
, dl
, DAG
.getConstant(0x0a3cd805, MVT::i32
),
817 Addr
, MachinePointerInfo(TrmpAddr
), false, false,
820 Addr
= DAG
.getNode(ISD::ADD
, dl
, MVT::i32
, Trmp
,
821 DAG
.getConstant(4, MVT::i32
));
822 OutChains
[1] = DAG
.getStore(Chain
, dl
, DAG
.getConstant(0xd80456c0, MVT::i32
),
823 Addr
, MachinePointerInfo(TrmpAddr
, 4), false,
826 Addr
= DAG
.getNode(ISD::ADD
, dl
, MVT::i32
, Trmp
,
827 DAG
.getConstant(8, MVT::i32
));
828 OutChains
[2] = DAG
.getStore(Chain
, dl
, DAG
.getConstant(0x27fb0a3c, MVT::i32
),
829 Addr
, MachinePointerInfo(TrmpAddr
, 8), false,
832 Addr
= DAG
.getNode(ISD::ADD
, dl
, MVT::i32
, Trmp
,
833 DAG
.getConstant(12, MVT::i32
));
834 OutChains
[3] = DAG
.getStore(Chain
, dl
, Nest
, Addr
,
835 MachinePointerInfo(TrmpAddr
, 12), false, false,
838 Addr
= DAG
.getNode(ISD::ADD
, dl
, MVT::i32
, Trmp
,
839 DAG
.getConstant(16, MVT::i32
));
840 OutChains
[4] = DAG
.getStore(Chain
, dl
, FPtr
, Addr
,
841 MachinePointerInfo(TrmpAddr
, 16), false, false,
845 { Trmp
, DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, OutChains
, 5) };
846 return DAG
.getMergeValues(Ops
, 2, dl
);
849 //===----------------------------------------------------------------------===//
850 // Calling Convention Implementation
851 //===----------------------------------------------------------------------===//
853 #include "XCoreGenCallingConv.inc"
855 //===----------------------------------------------------------------------===//
856 // Call Calling Convention Implementation
857 //===----------------------------------------------------------------------===//
859 /// XCore call implementation
861 XCoreTargetLowering::LowerCall(SDValue Chain
, SDValue Callee
,
862 CallingConv::ID CallConv
, bool isVarArg
,
864 const SmallVectorImpl
<ISD::OutputArg
> &Outs
,
865 const SmallVectorImpl
<SDValue
> &OutVals
,
866 const SmallVectorImpl
<ISD::InputArg
> &Ins
,
867 DebugLoc dl
, SelectionDAG
&DAG
,
868 SmallVectorImpl
<SDValue
> &InVals
) const {
869 // XCore target does not yet support tail call optimization.
872 // For now, only CallingConv::C implemented
876 llvm_unreachable("Unsupported calling convention");
877 case CallingConv::Fast
:
879 return LowerCCCCallTo(Chain
, Callee
, CallConv
, isVarArg
, isTailCall
,
880 Outs
, OutVals
, Ins
, dl
, DAG
, InVals
);
/// LowerCCCCallTo - functions arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
///
/// NOTE(review): this extract has interior lines elided — the SDValue
/// return type, the `bool isTailCall` parameter, the `SDValue InFlag`
/// declaration, the `if (VA.isRegLoc()) { ... } else { ... }` dispatch,
/// the per-case `break` statements in the promotion switch, the Chain/Arg
/// operands of the STWSP node, and several closing braces are not visible
/// here; confirm against the complete file.
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {
  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // Mark the start of the call sequence (adjusts the stack pointer region).
  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes,
                                                     getPointerTy(), true));

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);

    // Arguments that can be passed on register must be kept at
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      assert(VA.isMemLoc());
      int Offset = VA.getLocMemOffset();
      // Stack-passed arguments become STWSP (store word to stack pointer)
      // nodes; the offset operand is in words, hence Offset/4.
      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        DAG.getConstant(Offset/4, MVT::i32)));

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  // Emit the branch-and-link (call) node.
  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy(), true),
                             DAG.getConstant(0, getPointerTy(), true),
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
/// NOTE(review): the SDValue return type line, the closing braces and the
/// final `return Chain;` are elided in this extract; confirm against the
/// complete file.
XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                     CallingConv::ID CallConv, bool isVarArg,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     DebugLoc dl, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &InVals) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    // CopyFromReg produces (value, chain, glue); take the chain here...
    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    // ...the glue (result #2 of the same node) to order the next copy...
    InFlag = Chain.getValue(2);
    // ...and the value (result #0) as the call result seen by callers.
    InVals.push_back(Chain.getValue(0));
1039 //===----------------------------------------------------------------------===//
1040 // Formal Arguments Calling Convention Implementation
1041 //===----------------------------------------------------------------------===//
/// XCore formal arguments implementation.
/// Dispatches on the calling convention and forwards the CCC-style
/// conventions to LowerCCCArguments.
///
/// NOTE(review): this extract elides the SDValue return type, the
/// `bool isVarArg`, `DebugLoc dl` and `SelectionDAG &DAG` parameters
/// (referenced below), and the `switch (CallConv)` header; confirm
/// against the complete file.
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          SmallVectorImpl<SDValue> &InVals)
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    return LowerCCCArguments(Chain, CallConv, isVarArg,
                             Ins, dl, DAG, InVals);
/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments places on the stack.
///
/// NOTE(review): this extract elides several interior lines — the
/// `bool isVarArg`, `DebugLoc dl` and `SelectionDAG &DAG` parameters
/// (referenced below), the register-type switch's `default:`/`case`
/// labels, the `} else {` between the register and memory paths, the
/// varargs `offset` initialization, and various closing braces and the
/// final `return Chain;`. Confirm against the complete file.
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       const SmallVectorImpl<ISD::InputArg>
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  // One slot is reserved at the bottom of the incoming area for saving lr.
  unsigned LRSaveSize = StackSlotSize;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << RegVT.getSimpleVT().SimpleTy << "\n";
        llvm_unreachable(0);
      // Copy the incoming physical register into a fresh virtual register.
      unsigned VReg = RegInfo.createVirtualRegister(
                        XCore::GRRegsRegisterClass);
      RegInfo.addLiveIn(VA.getLocReg(), VReg);
      InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
      // Create the SelectionDAG nodes corresponding to a load
      //from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                                   MachinePointerInfo::getFixedStack(FI),

  /* Argument registers */
  static const unsigned ArgRegs[] = {
    XCore::R0, XCore::R1, XCore::R2, XCore::R3
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                   array_lengthof(ArgRegs));
  if (FirstVAReg < array_lengthof(ArgRegs)) {
    // Some argument registers were not used by named arguments: spill the
    // remaining ones to the stack so va_arg can walk them linearly.
    SmallVector<SDValue, 4> MemOps;
    // Save remaining registers, storing higher register numbers at a higher
    for (unsigned i = array_lengthof(ArgRegs) - 1; i >= FirstVAReg; --i) {
      // Create a stack slot
      int FI = MFI->CreateFixedObject(4, offset, true);
      if (i == FirstVAReg) {
        // The lowest spilled register's slot is where va_start begins.
        XFI->setVarArgsFrameIndex(FI);
      offset -= StackSlotSize;
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      // Move argument from phys reg -> virt reg
      unsigned VReg = RegInfo.createVirtualRegister(
                        XCore::GRRegsRegisterClass);
      RegInfo.addLiveIn(ArgRegs[i], VReg);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      // Move argument from virt reg -> stack
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &MemOps[0], MemOps.size());

    // This will point to the next argument passed via stack.
    XFI->setVarArgsFrameIndex(
      MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
1183 //===----------------------------------------------------------------------===//
1184 // Return Value Calling Convention Implementation
1185 //===----------------------------------------------------------------------===//
/// CanLowerReturn - Return true if the return values described by Outs can
/// all be lowered into registers under RetCC_XCore (i.e. none of them would
/// have to be demoted to an sret parameter).
///
/// NOTE(review): the `bool isVarArg` parameter line (referenced below) and
/// the closing brace are elided in this extract.
bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_XCore);
/// LowerReturn - Lower a function return: copy each return value into the
/// register assigned by RetCC_XCore, then emit a RETSP node ("retsp 0").
///
/// NOTE(review): this extract elides the SDValue return type, the
/// `SDValue Flag` declaration, the second operand of the getCopyToReg
/// call, the `if (Flag.getNode())` guard separating the two RETSP forms,
/// and several closing braces. Confirm against the complete file.
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 DebugLoc dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
    // guarantee that all emitted copies are
    // stuck together, avoiding something bad
    Flag = Chain.getValue(1);

  // Return on XCore is always a "retsp 0"
    // Glued form: orders the return after the result-register copies.
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32), Flag);
  // No result registers were written, so no glue operand is needed.
  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                     Chain, DAG.getConstant(0, MVT::i32));
1247 //===----------------------------------------------------------------------===//
1248 // Other Lowering Code
1249 //===----------------------------------------------------------------------===//
/// EmitInstrWithCustomInserter - Expand the SELECT_CC pseudo-instruction
/// into an explicit diamond control-flow pattern (conditional branch,
/// false-value fallthrough block, and a PHI in the join block).
///
/// NOTE(review): this extract elides the MachineBasicBlock* return type,
/// the `++It;` advance, the `BB->end()` splice argument, the `BB = copy0MBB;`
/// and `BB = sinkMBB;` reassignments between the BuildMI calls, and the
/// final `return BB;`. Confirm against the complete file.
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;

  //  cmpTY ccX, r1, r2
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // Branch to sinkMBB when the condition register is true; otherwise fall
  // through to copy0MBB (the false-value block).
  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

  //  %FalseValue = ...
  //  # fallthrough to sinkMBB

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  BuildMI(*BB, BB->begin(), dl,
          TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
1314 //===----------------------------------------------------------------------===//
1315 // Target Optimization Hooks
1316 //===----------------------------------------------------------------------===//
/// PerformDAGCombine - Target-specific DAG combines for the XCore long
/// arithmetic nodes (LADD / LSUB / LMUL) and for stores: constant
/// canonicalization, folds that exploit known-zero bits of the carry/borrow
/// input, ADD+ADD+MUL -> LMUL strength reduction, and replacement of an
/// unaligned store of an unaligned load with a memmove.
///
/// NOTE(review): this extract elides several interior lines — `break`s,
/// closing braces, some `if` headers (e.g. the constant-canonicalization
/// condition for LADD, the `case ISD::STORE:` label, the `return Result;`
/// of the 32-bit LMUL fold, the load base-pointer operand of getMemmove,
/// and the final `return SDValue();`). Confirm against the complete file.
SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, VT));
      SDValue Ops [] = { Carry, Result };
      return DAG.getMergeValues(Ops, 2, dl);

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
      APInt KnownZero, KnownOne;
      // Mask covers every bit except bit 0: if all of those are known zero,
      // the carry-in is 0 or 1 and a plain ADD cannot produce a carry-out.
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
      if (KnownZero == Mask) {
        SDValue Carry = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops [] = { Carry, Result };
        return DAG.getMergeValues(Ops, 2, dl);
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
      if (KnownZero == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, VT), N2);
        SDValue Ops [] = { Borrow, Result };
        return DAG.getMergeValues(Ops, 2, dl);

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
      if (KnownZero == Mask) {
        SDValue Borrow = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops [] = { Borrow, Result };
        return DAG.getMergeValues(Ops, 2, dl);
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT), N1, N0, N2, N3);

    // lmul(x, 0, a, b) multiplies by zero, leaving just the addends.
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b)
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops [] = { Lo, Lo };
        return DAG.getMergeValues(Ops, 2, dl);
      // Otherwise fold to ladd(a, b, 0)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);

    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      // Result #1 of LMUL is the low 32 bits, i.e. the value we want.
      SDValue Result(Ignored.getNode(), 1);
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      // All four operands fit in 32 bits, so extract the low halves and
      // perform a single 32x32 -> 64 lmul.
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);

  // Replace unaligned store of unaligned load with memmove.
  StoreSDNode *ST = cast<StoreSDNode>(N);
  if (!DCI.isBeforeLegalize() ||
      allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
      ST->isVolatile() || ST->isIndexed()) {
  SDValue Chain = ST->getChain();

  unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
  if (StoreBits % 8) {
  unsigned ABIAlignment = getTargetData()->getABITypeAlignment(
      ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
  unsigned Alignment = ST->getAlignment();
  if (Alignment >= ABIAlignment) {

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
    if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
        LD->getAlignment() == Alignment &&
        !LD->isVolatile() && !LD->isIndexed() &&
        // Ensure no intervening side effects between the load and the store.
        Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
      return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                            DAG.getConstant(StoreBits/8, MVT::i32),
                            Alignment, false, ST->getPointerInfo(),
                            LD->getPointerInfo());
/// computeMaskedBitsForTargetNode - Tell the DAG combiner which bits of the
/// XCore target nodes are known zero/one. For LADD/LSUB result #0 (the
/// carry/borrow), every bit except bit 0 is known zero.
///
/// NOTE(review): this extract elides the `const APInt &Mask`,
/// `APInt &KnownZero` and `APInt &KnownOne` parameters (referenced below)
/// and the closing braces. Confirm against the complete file.
void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                         const SelectionDAG &DAG,
                                                         unsigned Depth) const {
  // By default nothing is known about Op's bits.
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 0) {
      // Top bits of carry / borrow are clear.
      KnownZero = APInt::getHighBitsSet(Mask.getBitWidth(),
                                        Mask.getBitWidth() - 1);
1528 //===----------------------------------------------------------------------===//
1529 // Addressing mode description hooks
1530 //===----------------------------------------------------------------------===//
/// isImmUs - True if \p val fits the XCore "us" (unsigned short) immediate
/// operand range, i.e. the inclusive interval [0, 11].
static inline bool isImmUs(int64_t val)
{
  return !(val < 0 || val > 11);
}
1537 static inline bool isImmUs2(int64_t val
)
1539 return (val
%2 == 0 && isImmUs(val
/2));
1542 static inline bool isImmUs4(int64_t val
)
1544 return (val
%4 == 0 && isImmUs(val
/4));
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
///
/// NOTE(review): this extract elides the bool return type, the
/// global-variable special case, the `switch (Size)` / case structure that
/// separates the byte/halfword/word branches below, the `else` arms, and
/// the closing braces. Confirm against the complete file.
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           const Type *Ty) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    // No element type to scale by: only base + small unsigned offset forms.
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  const TargetData *TD = TM.getTargetData();
  unsigned Size = TD->getTypeAllocSize(Ty);
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&

  // Byte-sized access: immediate offset or reg+reg (scale 1).
  if (AM.Scale == 0) {
    return isImmUs(AM.BaseOffs);
  return AM.Scale == 1 && AM.BaseOffs == 0;

  // Halfword access: even immediate offset or reg+reg scaled by 2.
  if (AM.Scale == 0) {
    return isImmUs2(AM.BaseOffs);
  return AM.Scale == 2 && AM.BaseOffs == 0;

  // Word (and larger) access: word-aligned offset or reg+reg scaled by 4.
  if (AM.Scale == 0) {
    return isImmUs4(AM.BaseOffs);
  return AM.Scale == 4 && AM.BaseOffs == 0;
1590 //===----------------------------------------------------------------------===//
1591 // XCore Inline Assembly Support
1592 //===----------------------------------------------------------------------===//
/// getRegForInlineAsmConstraint - Map a single-letter inline-asm register
/// constraint to an XCore register class; anything this target does not
/// recognize is delegated to the generic TargetLowering implementation.
///
/// NOTE(review): this extract elides the `EVT VT` parameter (referenced
/// below), the switch's `default:`/`case 'r':` labels, and closing braces.
/// Confirm against the complete file.
std::pair<unsigned, const TargetRegisterClass*>
XCoreTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      // 'r': any general-purpose register.
      return std::make_pair(0U, XCore::GRRegsRegisterClass);
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);