//===- BlackfinISelLowering.cpp - Blackfin DAG Lowering Implementation ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Blackfin uses to lower LLVM code
// into a selection DAG.
//
//===----------------------------------------------------------------------===//

#include "BlackfinISelLowering.h"
#include "BlackfinTargetMachine.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "BlackfinGenCallingConv.inc"

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

BlackfinTargetLowering::BlackfinTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
  setShiftAmountType(MVT::i16);
  setBooleanContents(ZeroOrOneBooleanContent);
  setStackPointerRegisterToSaveRestore(BF::SP);
  setIntDivIsCheap(false);

  // Set up the legal register classes.
  addRegisterClass(MVT::i32, BF::DRegisterClass);
  addRegisterClass(MVT::i16, BF::D16RegisterClass);

  computeRegisterProperties();
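
  // Only i32 (in D registers) and i16 (in D16 registers) are legal integer
  // types; the operation actions below promote, expand or custom-lower
  // everything else.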

  // Blackfin doesn't have i1 loads or stores
  setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
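  // (Promote here tells the legalizer to use a wider memory type, e.g. i8,
  // for these extending loads instead of an unsupported i1 access; this is
  // the generic promotion behaviour, nothing Blackfin-specific.)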

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
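  // (Both are custom-lowered by LowerGlobalAddress/LowerJumpTable below,
  // which wrap the target address in a BFISD::Wrapper node.)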

  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT,     MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,     MVT::Other, Expand);

  // i16 registers don't do much
  setOperationAction(ISD::AND,   MVT::i16, Promote);
  setOperationAction(ISD::OR,    MVT::i16, Promote);
  setOperationAction(ISD::XOR,   MVT::i16, Promote);
  setOperationAction(ISD::CTPOP, MVT::i16, Promote);
  // The expansion of CTLZ/CTTZ uses AND/OR, so we might as well promote
  // these to i32 immediately.
  setOperationAction(ISD::CTLZ,  MVT::i16, Promote);
  setOperationAction(ISD::CTTZ,  MVT::i16, Promote);
  setOperationAction(ISD::SETCC, MVT::i16, Promote);
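  // (Promotion rewrites the i16 operation in terms of i32; roughly,
  //   (and i16 a, b) -> (trunc (and i32 (anyext a), (anyext b)))
  // as an illustrative sketch of the generic legalizer behaviour.)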

  // Blackfin has no division
  setOperationAction(ISD::SDIV,    MVT::i16, Expand);
  setOperationAction(ISD::SDIV,    MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM,    MVT::i16, Expand);
  setOperationAction(ISD::SREM,    MVT::i32, Expand);
  setOperationAction(ISD::UDIV,    MVT::i16, Expand);
  setOperationAction(ISD::UDIV,    MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM,    MVT::i16, Expand);
  setOperationAction(ISD::UREM,    MVT::i32, Expand);
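  // (Expanded integer division normally ends up as runtime library calls such
  // as __divsi3/__udivsi3; nothing here overrides the default expansion.)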

  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::MULHU,     MVT::i32, Expand);
  setOperationAction(ISD::MULHS,     MVT::i32, Expand);
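  // (With no high-half multiply available, wider multiplies such as i64 are
  // typically type-legalized into a __muldi3 libcall.)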

  // No carry-in operations.
  setOperationAction(ISD::ADDE, MVT::i32, Custom);
  setOperationAction(ISD::SUBE, MVT::i32, Custom);
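  // (Custom-lowered by LowerADDE below: Blackfin has no add-with-carry
  // instruction, so the carry is threaded through the AC0 flag explicitly.)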

  // Blackfin has no intrinsics for these particular operations.
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
  setOperationAction(ISD::BSWAP,      MVT::i32,   Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // i32 has native CTPOP, but not CTLZ/CTTZ
  setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);

  // READCYCLECOUNTER needs special type legalization.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
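  // (Handled by ReplaceNodeResults below, which reads CYCLES and CYCLES2 and
  // pairs them into an i64.)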

  // We don't have line number support yet.
  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC,     MVT::Other, Expand);
  setOperationAction(ISD::DBG_LABEL,     MVT::Other, Expand);
  setOperationAction(ISD::EH_LABEL,      MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY,       MVT::Other, Expand);
  setOperationAction(ISD::VAEND,        MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
}

const char *BlackfinTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case BFISD::CALL:     return "BFISD::CALL";
  case BFISD::RET_FLAG: return "BFISD::RET_FLAG";
  case BFISD::Wrapper:  return "BFISD::Wrapper";
  }
}

MVT::SimpleValueType
BlackfinTargetLowering::getSetCCResultType(EVT VT) const {
  // SETCC always sets the CC register. Technically that is an i1 register, but
  // that type is not legal, so we treat it as an i32 register.
  return MVT::i32;
}

SDValue BlackfinTargetLowering::LowerGlobalAddress(SDValue Op,
                                                   SelectionDAG &DAG) {
  DebugLoc DL = Op.getDebugLoc();
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();

  Op = DAG.getTargetGlobalAddress(GV, MVT::i32);
  return DAG.getNode(BFISD::Wrapper, DL, MVT::i32, Op);
}

SDValue BlackfinTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
  DebugLoc DL = Op.getDebugLoc();
  int JTI = cast<JumpTableSDNode>(Op)->getIndex();

  Op = DAG.getTargetJumpTable(JTI, MVT::i32);
  return DAG.getNode(BFISD::Wrapper, DL, MVT::i32, Op);
}

SDValue
BlackfinTargetLowering::LowerFormalArguments(SDValue Chain,
                                             CallingConv::ID CallConv, bool isVarArg,
                                             const SmallVectorImpl<ISD::InputArg>
                                               &Ins,
                                             DebugLoc dl, SelectionDAG &DAG,
                                             SmallVectorImpl<SDValue> &InVals) {

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 ArgLocs, *DAG.getContext());
  CCInfo.AllocateStack(12, 4);  // ABI requires 12 bytes stack space
  CCInfo.AnalyzeFormalArguments(Ins, CC_Blackfin);
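
  // Per the calling convention, each argument is now either assigned a
  // register (a D register, or P0 as checked below) or a stack location; the
  // loop below materializes a virtual-register copy or a frame-index load for
  // each incoming value.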

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      TargetRegisterClass *RC = VA.getLocReg() == BF::P0 ?
        BF::PRegisterClass : BF::DRegisterClass;
      assert(RC->contains(VA.getLocReg()) && "Unexpected regclass in CCState");
      assert(RC->hasType(RegVT) && "Unexpected regclass in CCState");

      unsigned Reg = MF.getRegInfo().createVirtualRegister(RC);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), Reg);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);

      InVals.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc() && "CCValAssign must be RegLoc or MemLoc");
      unsigned ObjSize = VA.getLocVT().getStoreSizeInBits()/8;
      int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset());
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0));
    }
  }

  return Chain;
}

SDValue
BlackfinTargetLowering::LowerReturn(SDValue Chain,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    DebugLoc dl, SelectionDAG &DAG) {

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getTarget(),
                 RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Blackfin);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue Opi = Outs[i].Val;

    // Expand to i32 if necessary
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Opi = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Opi);
      break;
    case CCValAssign::ZExt:
      Opi = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Opi);
      break;
    case CCValAssign::AExt:
      Opi = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Opi);
      break;
    }
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Opi, SDValue());
    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
  }

  if (Flag.getNode()) {
    return DAG.getNode(BFISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
  } else {
    return DAG.getNode(BFISD::RET_FLAG, dl, MVT::Other, Chain);
  }
}

SDValue
BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                                  CallingConv::ID CallConv, bool isVarArg,
                                  bool isTailCall,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  const SmallVectorImpl<ISD::InputArg> &Ins,
                                  DebugLoc dl, SelectionDAG &DAG,
                                  SmallVectorImpl<SDValue> &InVals) {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getTarget(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AllocateStack(12, 4);  // ABI requires 12 bytes stack space
  CCInfo.AnalyzeCallOperands(Outs, CC_Blackfin);

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true));
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = Outs[i].Val;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    // Arguments that can be passed in registers must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc() && "CCValAssign must be RegLoc or MemLoc");
      int Offset = VA.getLocMemOffset();
      assert(Offset%4 == 0 && "Unaligned LocMemOffset");
      assert(VA.getLocVT()==MVT::i32 && "Illegal CCValAssign type");
      SDValue SPN = DAG.getCopyFromReg(Chain, dl, BF::SP, MVT::i32);
      SDValue OffsetN = DAG.getIntPtrConstant(Offset);
      OffsetN = DAG.getNode(ISD::ADD, dl, MVT::i32, SPN, OffsetN);
      MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, OffsetN,
                                         PseudoSourceValue::getStack(),
                                         Offset));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  std::vector<EVT> NodeTys;
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  SDValue Ops[] = { Chain, Callee, InFlag };
  Chain = DAG.getNode(BFISD::CALL, dl, NodeTys, Ops,
                      InFlag.getNode() ? 3 : 2);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  InFlag = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getTarget(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Blackfin);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &RV = RVLocs[i];
    unsigned Reg = RV.getLocReg();

    Chain = DAG.getCopyFromReg(Chain, dl, Reg,
                               RVLocs[i].getLocVT(), InFlag);
    SDValue Val = Chain.getValue(0);
    InFlag = Chain.getValue(2);
    Chain = Chain.getValue(1);

    // Callee is responsible for extending any i16 return values.
    switch (RV.getLocInfo()) {
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, dl, RV.getLocVT(), Val,
                        DAG.getValueType(RV.getValVT()));
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, dl, RV.getLocVT(), Val,
                        DAG.getValueType(RV.getValVT()));
      break;
    default:
      break;
    }

    // Truncate to valtype
    if (RV.getLocInfo() != CCValAssign::Full)
      Val = DAG.getNode(ISD::TRUNCATE, dl, RV.getValVT(), Val);
    InVals.push_back(Val);
  }

  return Chain;
}

// Expansion of ADDE / SUBE. This is a bit involved since Blackfin doesn't have
// add-with-carry instructions.
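// The scheme below computes the result in two steps (lhs op rhs, then adding
// or subtracting the incoming carry) and ORs the two intermediate carries back
// into AC0, so the carry-out is set if either step overflowed.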
SDValue BlackfinTargetLowering::LowerADDE(SDValue Op, SelectionDAG &DAG) {
  // Operands: lhs, rhs, carry-in (AC0 flag)
  // Results: sum, carry-out (AC0 flag)
  DebugLoc dl = Op.getDebugLoc();

  unsigned Opcode = Op.getOpcode()==ISD::ADDE ? BF::ADD : BF::SUB;

  // zext incoming carry flag in AC0 to 32 bits
  SDNode* CarryIn = DAG.getTargetNode(BF::MOVE_cc_ac0, dl, MVT::i32,
                                      /* flag= */ Op.getOperand(2));
  CarryIn = DAG.getTargetNode(BF::MOVECC_zext, dl, MVT::i32,
                              SDValue(CarryIn, 0));

  // Add operands, produce sum and carry flag
  SDNode *Sum = DAG.getTargetNode(Opcode, dl, MVT::i32, MVT::Flag,
                                  Op.getOperand(0), Op.getOperand(1));

  // Store intermediate carry from Sum
  SDNode* Carry1 = DAG.getTargetNode(BF::MOVE_cc_ac0, dl, MVT::i32,
                                     /* flag= */ SDValue(Sum, 1));

  // Add incoming carry, again producing an output flag
  Sum = DAG.getTargetNode(Opcode, dl, MVT::i32, MVT::Flag,
                          SDValue(Sum, 0), SDValue(CarryIn, 0));

  // Update AC0 with the intermediate carry, producing a flag.
  SDNode *CarryOut = DAG.getTargetNode(BF::OR_ac0_cc, dl, MVT::Flag,
                                       SDValue(Carry1, 0));

  // Compose (i32, flag) pair
  SDValue ops[2] = { SDValue(Sum, 0), SDValue(CarryOut, 0) };
  return DAG.getMergeValues(ops, 2, dl);
}

SDValue BlackfinTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Should not custom lower this!");
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    llvm_unreachable("TLS not implemented for Blackfin.");
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  // Frame & Return address. Currently unimplemented.
  case ISD::FRAMEADDR:          return SDValue();
  case ISD::RETURNADDR:         return SDValue();
  case ISD::ADDE:
  case ISD::SUBE:               return LowerADDE(Op, DAG);
  }
}

void
BlackfinTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) {
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
    return;
  case ISD::READCYCLECOUNTER: {
    // The low part of the cycle counter is in CYCLES, the high part in
    // CYCLES2. Reading CYCLES will latch the value of CYCLES2, so we must read
    // CYCLES first.
    SDValue TheChain = N->getOperand(0);
    SDValue lo = DAG.getCopyFromReg(TheChain, dl, BF::CYCLES, MVT::i32);
    SDValue hi = DAG.getCopyFromReg(lo.getValue(1), dl, BF::CYCLES2, MVT::i32);
    // Use a buildpair to merge the two 32-bit values into a 64-bit one.
    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, lo, hi));
    // Outgoing chain. If we were to use the chain from lo instead, it would be
    // possible to entirely eliminate the CYCLES2 read in (i32 (trunc
    // readcyclecounter)). Unfortunately this could possibly delay the CYCLES2
    // read beyond the next CYCLES read, leading to invalid results.
    Results.push_back(hi.getValue(1));
    return;
  }
  }
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned BlackfinTargetLowering::getFunctionAlignment(const Function *F) const {
  return 2;
}

//===----------------------------------------------------------------------===//
// Blackfin Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
BlackfinTargetLowering::ConstraintType
BlackfinTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() != 1)
    return TargetLowering::getConstraintType(Constraint);

  switch (Constraint[0]) {
    // Standard constraints
  case 'r':
    return C_RegisterClass;

    // Blackfin-specific constraints
  case 'a':
  case 'd':
  case 'z':
  case 'D':
  case 'W':
  case 'e':
  case 'b':
  case 'v':
  case 'f':
  case 'c':
  case 't':
  case 'u':
  case 'k':
  case 'x':
  case 'y':
  case 'w':
    return C_RegisterClass;

  case 'A':
  case 'B':
  case 'C':
  case 'Z':
  case 'Y':
    return C_Register;
  }

  // Not implemented: q0-q7, qA. Use {R2} etc instead

  return TargetLowering::getConstraintType(Constraint);
}
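
// (Illustrative only: with a hypothetical use like
//    asm("%0 = %1 + %2;" : "=d"(r) : "d"(x), "d"(y));
// the 'd' constraint is classified as C_RegisterClass here and mapped to the
// D registers by getRegForInlineAsmConstraint below.)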

/// getRegForInlineAsmConstraint - Return register number and class for a
/// C_Register constraint.
std::pair<unsigned, const TargetRegisterClass*> BlackfinTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
  typedef std::pair<unsigned, const TargetRegisterClass*> Pair;
  using namespace BF;

  if (Constraint.size() != 1)
    return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  switch (Constraint[0]) {
    // Standard constraints
  case 'r':
    return Pair(0U, VT == MVT::i16 ? D16RegisterClass : DPRegisterClass);

    // Blackfin-specific constraints
  case 'a': return Pair(0U, PRegisterClass);
  case 'd': return Pair(0U, DRegisterClass);
  case 'e': return Pair(0U, AccuRegisterClass);
  case 'A': return Pair(A0, AccuRegisterClass);
  case 'B': return Pair(A1, AccuRegisterClass);
  case 'b': return Pair(0U, IRegisterClass);
  case 'v': return Pair(0U, BRegisterClass);
  case 'f': return Pair(0U, MRegisterClass);
  case 'C': return Pair(CC, JustCCRegisterClass);
  case 'x': return Pair(0U, GRRegisterClass);
  case 'w': return Pair(0U, ALLRegisterClass);
  case 'Z': return Pair(P3, PRegisterClass);
  case 'Y': return Pair(P1, PRegisterClass);
  }

  // Not implemented: q0-q7, qA. Use {R2} etc instead.
  // Constraints z, D, W, c, t, u, k, and y use non-existing classes, defer to
  // getRegClassForInlineAsmConstraint()
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

std::vector<unsigned> BlackfinTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
  using namespace BF;

  if (Constraint.size() != 1)
    return std::vector<unsigned>();

  switch (Constraint[0]) {
  case 'z': return make_vector<unsigned>(P0, P1, P2, 0);
  case 'D': return make_vector<unsigned>(R0, R2, R4, R6, 0);
  case 'W': return make_vector<unsigned>(R1, R3, R5, R7, 0);
  case 'c': return make_vector<unsigned>(I0, I1, I2, I3,
                                         B0, B1, B2, B3,
                                         L0, L1, L2, L3, 0);
  case 't': return make_vector<unsigned>(LT0, LT1, 0);
  case 'u': return make_vector<unsigned>(LB0, LB1, 0);
  case 'k': return make_vector<unsigned>(LC0, LC1, 0);
  case 'y': return make_vector<unsigned>(RETS, RETN, RETI, RETX, RETE,
                                         ASTAT, SEQSTAT, USP, 0);
  }

  return std::vector<unsigned>();
}

bool BlackfinTargetLowering::
isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Blackfin target isn't yet aware of offsets.
  return false;
}