remove a dead bool.
[llvm/avr.git] / lib / Target / Sparc / SparcISelLowering.cpp
blobaf4288057e4772a94888ddae7f9905b7255194a4
1 //===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the interfaces that Sparc uses to lower LLVM code into a
11 // selection DAG.
13 //===----------------------------------------------------------------------===//
15 #include "SparcISelLowering.h"
16 #include "SparcTargetMachine.h"
17 #include "llvm/Function.h"
18 #include "llvm/CodeGen/CallingConvLower.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/SelectionDAG.h"
24 #include "llvm/Target/TargetLoweringObjectFile.h"
25 #include "llvm/ADT/VectorExtras.h"
26 #include "llvm/Support/ErrorHandling.h"
27 using namespace llvm;
30 //===----------------------------------------------------------------------===//
31 // Calling Convention Implementation
32 //===----------------------------------------------------------------------===//
34 #include "SparcGenCallingConv.inc"
36 SDValue
37 SparcTargetLowering::LowerReturn(SDValue Chain,
38 CallingConv::ID CallConv, bool isVarArg,
39 const SmallVectorImpl<ISD::OutputArg> &Outs,
40 DebugLoc dl, SelectionDAG &DAG) {
42 // CCValAssign - represent the assignment of the return value to locations.
43 SmallVector<CCValAssign, 16> RVLocs;
45 // CCState - Info about the registers and stack slot.
46 CCState CCInfo(CallConv, isVarArg, DAG.getTarget(),
47 RVLocs, *DAG.getContext());
49 // Analize return values.
50 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
52 // If this is the first return lowered for this function, add the regs to the
53 // liveout set for the function.
54 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
55 for (unsigned i = 0; i != RVLocs.size(); ++i)
56 if (RVLocs[i].isRegLoc())
57 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
60 SDValue Flag;
62 // Copy the result values into the output registers.
63 for (unsigned i = 0; i != RVLocs.size(); ++i) {
64 CCValAssign &VA = RVLocs[i];
65 assert(VA.isRegLoc() && "Can only return in registers!");
67 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
68 Outs[i].Val, Flag);
70 // Guarantee that all emitted copies are stuck together with flags.
71 Flag = Chain.getValue(1);
74 if (Flag.getNode())
75 return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
76 return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain);
79 /// LowerFormalArguments - V8 uses a very simple ABI, where all values are
80 /// passed in either one or two GPRs, including FP values. TODO: we should
81 /// pass FP values in FP registers for fastcc functions.
82 SDValue
83 SparcTargetLowering::LowerFormalArguments(SDValue Chain,
84 CallingConv::ID CallConv, bool isVarArg,
85 const SmallVectorImpl<ISD::InputArg>
86 &Ins,
87 DebugLoc dl, SelectionDAG &DAG,
88 SmallVectorImpl<SDValue> &InVals) {
90 MachineFunction &MF = DAG.getMachineFunction();
91 MachineRegisterInfo &RegInfo = MF.getRegInfo();
93 // Assign locations to all of the incoming arguments.
94 SmallVector<CCValAssign, 16> ArgLocs;
95 CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
96 ArgLocs, *DAG.getContext());
97 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
99 static const unsigned ArgRegs[] = {
100 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
102 const unsigned *CurArgReg = ArgRegs, *ArgRegEnd = ArgRegs+6;
103 unsigned ArgOffset = 68;
105 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
106 SDValue ArgValue;
107 CCValAssign &VA = ArgLocs[i];
108 // FIXME: We ignore the register assignments of AnalyzeFormalArguments
109 // because it doesn't know how to split a double into two i32 registers.
110 EVT ObjectVT = VA.getValVT();
111 switch (ObjectVT.getSimpleVT().SimpleTy) {
112 default: llvm_unreachable("Unhandled argument type!");
113 case MVT::i1:
114 case MVT::i8:
115 case MVT::i16:
116 case MVT::i32:
117 if (!Ins[i].Used) { // Argument is dead.
118 if (CurArgReg < ArgRegEnd) ++CurArgReg;
119 InVals.push_back(DAG.getUNDEF(ObjectVT));
120 } else if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
121 unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
122 MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
123 SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
124 if (ObjectVT != MVT::i32) {
125 unsigned AssertOp = ISD::AssertSext;
126 Arg = DAG.getNode(AssertOp, dl, MVT::i32, Arg,
127 DAG.getValueType(ObjectVT));
128 Arg = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Arg);
130 InVals.push_back(Arg);
131 } else {
132 int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
133 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
134 SDValue Load;
135 if (ObjectVT == MVT::i32) {
136 Load = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0);
137 } else {
138 ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
140 // Sparc is big endian, so add an offset based on the ObjectVT.
141 unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8);
142 FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
143 DAG.getConstant(Offset, MVT::i32));
144 Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
145 NULL, 0, ObjectVT);
146 Load = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Load);
148 InVals.push_back(Load);
151 ArgOffset += 4;
152 break;
153 case MVT::f32:
154 if (!Ins[i].Used) { // Argument is dead.
155 if (CurArgReg < ArgRegEnd) ++CurArgReg;
156 InVals.push_back(DAG.getUNDEF(ObjectVT));
157 } else if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
158 // FP value is passed in an integer register.
159 unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
160 MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
161 SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
163 Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Arg);
164 InVals.push_back(Arg);
165 } else {
166 int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
167 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
168 SDValue Load = DAG.getLoad(MVT::f32, dl, Chain, FIPtr, NULL, 0);
169 InVals.push_back(Load);
171 ArgOffset += 4;
172 break;
174 case MVT::i64:
175 case MVT::f64:
176 if (!Ins[i].Used) { // Argument is dead.
177 if (CurArgReg < ArgRegEnd) ++CurArgReg;
178 if (CurArgReg < ArgRegEnd) ++CurArgReg;
179 InVals.push_back(DAG.getUNDEF(ObjectVT));
180 } else {
181 SDValue HiVal;
182 if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
183 unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
184 MF.getRegInfo().addLiveIn(*CurArgReg++, VRegHi);
185 HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
186 } else {
187 int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
188 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
189 HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0);
192 SDValue LoVal;
193 if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
194 unsigned VRegLo = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
195 MF.getRegInfo().addLiveIn(*CurArgReg++, VRegLo);
196 LoVal = DAG.getCopyFromReg(Chain, dl, VRegLo, MVT::i32);
197 } else {
198 int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset+4);
199 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
200 LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0);
203 // Compose the two halves together into an i64 unit.
204 SDValue WholeValue =
205 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
207 // If we want a double, do a bit convert.
208 if (ObjectVT == MVT::f64)
209 WholeValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, WholeValue);
211 InVals.push_back(WholeValue);
213 ArgOffset += 8;
214 break;
218 // Store remaining ArgRegs to the stack if this is a varargs function.
219 if (isVarArg) {
220 // Remember the vararg offset for the va_start implementation.
221 VarArgsFrameOffset = ArgOffset;
223 std::vector<SDValue> OutChains;
225 for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
226 unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
227 MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
228 SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
230 int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
231 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
233 OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, NULL, 0));
234 ArgOffset += 4;
237 if (!OutChains.empty()) {
238 OutChains.push_back(Chain);
239 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
240 &OutChains[0], OutChains.size());
244 return Chain;
247 SDValue
248 SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
249 CallingConv::ID CallConv, bool isVarArg,
250 bool isTailCall,
251 const SmallVectorImpl<ISD::OutputArg> &Outs,
252 const SmallVectorImpl<ISD::InputArg> &Ins,
253 DebugLoc dl, SelectionDAG &DAG,
254 SmallVectorImpl<SDValue> &InVals) {
256 #if 0
257 // Analyze operands of the call, assigning locations to each operand.
258 SmallVector<CCValAssign, 16> ArgLocs;
259 CCState CCInfo(CallConv, isVarArg, DAG.getTarget(), ArgLocs);
260 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
262 // Get the size of the outgoing arguments stack space requirement.
263 unsigned ArgsSize = CCInfo.getNextStackOffset();
264 // FIXME: We can't use this until f64 is known to take two GPRs.
265 #else
266 (void)CC_Sparc32;
268 // Count the size of the outgoing arguments.
269 unsigned ArgsSize = 0;
270 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
271 switch (Outs[i].Val.getValueType().getSimpleVT().SimpleTy) {
272 default: llvm_unreachable("Unknown value type!");
273 case MVT::i1:
274 case MVT::i8:
275 case MVT::i16:
276 case MVT::i32:
277 case MVT::f32:
278 ArgsSize += 4;
279 break;
280 case MVT::i64:
281 case MVT::f64:
282 ArgsSize += 8;
283 break;
286 if (ArgsSize > 4*6)
287 ArgsSize -= 4*6; // Space for first 6 arguments is prereserved.
288 else
289 ArgsSize = 0;
290 #endif
292 // Keep stack frames 8-byte aligned.
293 ArgsSize = (ArgsSize+7) & ~7;
295 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true));
297 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
298 SmallVector<SDValue, 8> MemOpChains;
300 #if 0
301 // Walk the register/memloc assignments, inserting copies/loads.
302 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
303 CCValAssign &VA = ArgLocs[i];
304 SDValue Arg = Outs[i].Val;
306 // Promote the value if needed.
307 switch (VA.getLocInfo()) {
308 default: llvm_unreachable("Unknown loc info!");
309 case CCValAssign::Full: break;
310 case CCValAssign::SExt:
311 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
312 break;
313 case CCValAssign::ZExt:
314 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
315 break;
316 case CCValAssign::AExt:
317 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
318 break;
321 // Arguments that can be passed on register must be kept at
322 // RegsToPass vector
323 if (VA.isRegLoc()) {
324 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
325 continue;
328 assert(VA.isMemLoc());
330 // Create a store off the stack pointer for this argument.
331 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
332 // FIXME: VERIFY THAT 68 IS RIGHT.
333 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()+68);
334 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
335 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
338 #else
339 static const unsigned ArgRegs[] = {
340 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
342 unsigned ArgOffset = 68;
344 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
345 SDValue Val = Outs[i].Val;
346 EVT ObjectVT = Val.getValueType();
347 SDValue ValToStore(0, 0);
348 unsigned ObjSize;
349 switch (ObjectVT.getSimpleVT().SimpleTy) {
350 default: llvm_unreachable("Unhandled argument type!");
351 case MVT::i32:
352 ObjSize = 4;
354 if (RegsToPass.size() >= 6) {
355 ValToStore = Val;
356 } else {
357 RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Val));
359 break;
360 case MVT::f32:
361 ObjSize = 4;
362 if (RegsToPass.size() >= 6) {
363 ValToStore = Val;
364 } else {
365 // Convert this to a FP value in an int reg.
366 Val = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Val);
367 RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Val));
369 break;
370 case MVT::f64: {
371 ObjSize = 8;
372 if (RegsToPass.size() >= 6) {
373 ValToStore = Val; // Whole thing is passed in memory.
374 break;
377 // Break into top and bottom parts by storing to the stack and loading
378 // out the parts as integers. Top part goes in a reg.
379 SDValue StackPtr = DAG.CreateStackTemporary(MVT::f64, MVT::i32);
380 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
381 Val, StackPtr, NULL, 0);
382 // Sparc is big-endian, so the high part comes first.
383 SDValue Hi = DAG.getLoad(MVT::i32, dl, Store, StackPtr, NULL, 0, 0);
384 // Increment the pointer to the other half.
385 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
386 DAG.getIntPtrConstant(4));
387 // Load the low part.
388 SDValue Lo = DAG.getLoad(MVT::i32, dl, Store, StackPtr, NULL, 0, 0);
390 RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Hi));
392 if (RegsToPass.size() >= 6) {
393 ValToStore = Lo;
394 ArgOffset += 4;
395 ObjSize = 4;
396 } else {
397 RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Lo));
399 break;
401 case MVT::i64: {
402 ObjSize = 8;
403 if (RegsToPass.size() >= 6) {
404 ValToStore = Val; // Whole thing is passed in memory.
405 break;
408 // Split the value into top and bottom part. Top part goes in a reg.
409 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Val,
410 DAG.getConstant(1, MVT::i32));
411 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Val,
412 DAG.getConstant(0, MVT::i32));
413 RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Hi));
415 if (RegsToPass.size() >= 6) {
416 ValToStore = Lo;
417 ArgOffset += 4;
418 ObjSize = 4;
419 } else {
420 RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Lo));
422 break;
426 if (ValToStore.getNode()) {
427 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
428 SDValue PtrOff = DAG.getConstant(ArgOffset, MVT::i32);
429 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
430 MemOpChains.push_back(DAG.getStore(Chain, dl, ValToStore,
431 PtrOff, NULL, 0));
433 ArgOffset += ObjSize;
435 #endif
437 // Emit all stores, make sure the occur before any copies into physregs.
438 if (!MemOpChains.empty())
439 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
440 &MemOpChains[0], MemOpChains.size());
442 // Build a sequence of copy-to-reg nodes chained together with token
443 // chain and flag operands which copy the outgoing args into registers.
444 // The InFlag in necessary since all emited instructions must be
445 // stuck together.
446 SDValue InFlag;
447 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
448 unsigned Reg = RegsToPass[i].first;
449 // Remap I0->I7 -> O0->O7.
450 if (Reg >= SP::I0 && Reg <= SP::I7)
451 Reg = Reg-SP::I0+SP::O0;
453 Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
454 InFlag = Chain.getValue(1);
457 // If the callee is a GlobalAddress node (quite common, every direct call is)
458 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
459 // Likewise ExternalSymbol -> TargetExternalSymbol.
460 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
461 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
462 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
463 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
465 std::vector<EVT> NodeTys;
466 NodeTys.push_back(MVT::Other); // Returns a chain
467 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
468 SDValue Ops[] = { Chain, Callee, InFlag };
469 Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops, InFlag.getNode() ? 3 : 2);
470 InFlag = Chain.getValue(1);
472 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
473 DAG.getIntPtrConstant(0, true), InFlag);
474 InFlag = Chain.getValue(1);
476 // Assign locations to each value returned by this call.
477 SmallVector<CCValAssign, 16> RVLocs;
478 CCState RVInfo(CallConv, isVarArg, DAG.getTarget(),
479 RVLocs, *DAG.getContext());
481 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
483 // Copy all of the result registers out of their specified physreg.
484 for (unsigned i = 0; i != RVLocs.size(); ++i) {
485 unsigned Reg = RVLocs[i].getLocReg();
487 // Remap I0->I7 -> O0->O7.
488 if (Reg >= SP::I0 && Reg <= SP::I7)
489 Reg = Reg-SP::I0+SP::O0;
491 Chain = DAG.getCopyFromReg(Chain, dl, Reg,
492 RVLocs[i].getValVT(), InFlag).getValue(1);
493 InFlag = Chain.getValue(2);
494 InVals.push_back(Chain.getValue(0));
497 return Chain;
502 //===----------------------------------------------------------------------===//
503 // TargetLowering Implementation
504 //===----------------------------------------------------------------------===//
506 /// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
507 /// condition.
508 static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
509 switch (CC) {
510 default: llvm_unreachable("Unknown integer condition code!");
511 case ISD::SETEQ: return SPCC::ICC_E;
512 case ISD::SETNE: return SPCC::ICC_NE;
513 case ISD::SETLT: return SPCC::ICC_L;
514 case ISD::SETGT: return SPCC::ICC_G;
515 case ISD::SETLE: return SPCC::ICC_LE;
516 case ISD::SETGE: return SPCC::ICC_GE;
517 case ISD::SETULT: return SPCC::ICC_CS;
518 case ISD::SETULE: return SPCC::ICC_LEU;
519 case ISD::SETUGT: return SPCC::ICC_GU;
520 case ISD::SETUGE: return SPCC::ICC_CC;
524 /// FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC
525 /// FCC condition.
526 static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
527 switch (CC) {
528 default: llvm_unreachable("Unknown fp condition code!");
529 case ISD::SETEQ:
530 case ISD::SETOEQ: return SPCC::FCC_E;
531 case ISD::SETNE:
532 case ISD::SETUNE: return SPCC::FCC_NE;
533 case ISD::SETLT:
534 case ISD::SETOLT: return SPCC::FCC_L;
535 case ISD::SETGT:
536 case ISD::SETOGT: return SPCC::FCC_G;
537 case ISD::SETLE:
538 case ISD::SETOLE: return SPCC::FCC_LE;
539 case ISD::SETGE:
540 case ISD::SETOGE: return SPCC::FCC_GE;
541 case ISD::SETULT: return SPCC::FCC_UL;
542 case ISD::SETULE: return SPCC::FCC_ULE;
543 case ISD::SETUGT: return SPCC::FCC_UG;
544 case ISD::SETUGE: return SPCC::FCC_UGE;
545 case ISD::SETUO: return SPCC::FCC_U;
546 case ISD::SETO: return SPCC::FCC_O;
547 case ISD::SETONE: return SPCC::FCC_LG;
548 case ISD::SETUEQ: return SPCC::FCC_UE;
552 SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
553 : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
555 // Set up the register classes.
556 addRegisterClass(MVT::i32, SP::IntRegsRegisterClass);
557 addRegisterClass(MVT::f32, SP::FPRegsRegisterClass);
558 addRegisterClass(MVT::f64, SP::DFPRegsRegisterClass);
560 // Turn FP extload into load/fextend
561 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
562 // Sparc doesn't have i1 sign extending load
563 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
564 // Turn FP truncstore into trunc + store.
565 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
567 // Custom legalize GlobalAddress nodes into LO/HI parts.
568 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
569 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
570 setOperationAction(ISD::ConstantPool , MVT::i32, Custom);
572 // Sparc doesn't have sext_inreg, replace them with shl/sra
573 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
574 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
575 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
577 // Sparc has no REM or DIVREM operations.
578 setOperationAction(ISD::UREM, MVT::i32, Expand);
579 setOperationAction(ISD::SREM, MVT::i32, Expand);
580 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
581 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
583 // Custom expand fp<->sint
584 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
585 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
587 // Expand fp<->uint
588 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
589 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
591 setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
592 setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
594 // Sparc has no select or setcc: expand to SELECT_CC.
595 setOperationAction(ISD::SELECT, MVT::i32, Expand);
596 setOperationAction(ISD::SELECT, MVT::f32, Expand);
597 setOperationAction(ISD::SELECT, MVT::f64, Expand);
598 setOperationAction(ISD::SETCC, MVT::i32, Expand);
599 setOperationAction(ISD::SETCC, MVT::f32, Expand);
600 setOperationAction(ISD::SETCC, MVT::f64, Expand);
602 // Sparc doesn't have BRCOND either, it has BR_CC.
603 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
604 setOperationAction(ISD::BRIND, MVT::Other, Expand);
605 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
606 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
607 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
608 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
610 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
611 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
612 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
614 // SPARC has no intrinsics for these particular operations.
615 setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
617 setOperationAction(ISD::FSIN , MVT::f64, Expand);
618 setOperationAction(ISD::FCOS , MVT::f64, Expand);
619 setOperationAction(ISD::FREM , MVT::f64, Expand);
620 setOperationAction(ISD::FSIN , MVT::f32, Expand);
621 setOperationAction(ISD::FCOS , MVT::f32, Expand);
622 setOperationAction(ISD::FREM , MVT::f32, Expand);
623 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
624 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
625 setOperationAction(ISD::CTLZ , MVT::i32, Expand);
626 setOperationAction(ISD::ROTL , MVT::i32, Expand);
627 setOperationAction(ISD::ROTR , MVT::i32, Expand);
628 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
629 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
630 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
631 setOperationAction(ISD::FPOW , MVT::f64, Expand);
632 setOperationAction(ISD::FPOW , MVT::f32, Expand);
634 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
635 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
636 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
638 // FIXME: Sparc provides these multiplies, but we don't have them yet.
639 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
640 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
642 // We don't have line number support yet.
643 setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
644 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
645 setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
646 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
648 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
649 setOperationAction(ISD::VASTART , MVT::Other, Custom);
650 // VAARG needs to be lowered to not do unaligned accesses for doubles.
651 setOperationAction(ISD::VAARG , MVT::Other, Custom);
653 // Use the default implementation.
654 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
655 setOperationAction(ISD::VAEND , MVT::Other, Expand);
656 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
657 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
658 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
660 // No debug info support yet.
661 setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
662 setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
663 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
665 setStackPointerRegisterToSaveRestore(SP::O6);
667 if (TM.getSubtarget<SparcSubtarget>().isV9())
668 setOperationAction(ISD::CTPOP, MVT::i32, Legal);
670 computeRegisterProperties();
673 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
674 switch (Opcode) {
675 default: return 0;
676 case SPISD::CMPICC: return "SPISD::CMPICC";
677 case SPISD::CMPFCC: return "SPISD::CMPFCC";
678 case SPISD::BRICC: return "SPISD::BRICC";
679 case SPISD::BRFCC: return "SPISD::BRFCC";
680 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
681 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
682 case SPISD::Hi: return "SPISD::Hi";
683 case SPISD::Lo: return "SPISD::Lo";
684 case SPISD::FTOI: return "SPISD::FTOI";
685 case SPISD::ITOF: return "SPISD::ITOF";
686 case SPISD::CALL: return "SPISD::CALL";
687 case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
691 /// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
692 /// be zero. Op is expected to be a target specific node. Used by DAG
693 /// combiner.
694 void SparcTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
695 const APInt &Mask,
696 APInt &KnownZero,
697 APInt &KnownOne,
698 const SelectionDAG &DAG,
699 unsigned Depth) const {
700 APInt KnownZero2, KnownOne2;
701 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything.
703 switch (Op.getOpcode()) {
704 default: break;
705 case SPISD::SELECT_ICC:
706 case SPISD::SELECT_FCC:
707 DAG.ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne,
708 Depth+1);
709 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2,
710 Depth+1);
711 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
712 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
714 // Only known if known in both the LHS and RHS.
715 KnownOne &= KnownOne2;
716 KnownZero &= KnownZero2;
717 break;
721 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
722 // set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
723 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
724 ISD::CondCode CC, unsigned &SPCC) {
725 if (isa<ConstantSDNode>(RHS) &&
726 cast<ConstantSDNode>(RHS)->getZExtValue() == 0 &&
727 CC == ISD::SETNE &&
728 ((LHS.getOpcode() == SPISD::SELECT_ICC &&
729 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
730 (LHS.getOpcode() == SPISD::SELECT_FCC &&
731 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
732 isa<ConstantSDNode>(LHS.getOperand(0)) &&
733 isa<ConstantSDNode>(LHS.getOperand(1)) &&
734 cast<ConstantSDNode>(LHS.getOperand(0))->getZExtValue() == 1 &&
735 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 0) {
736 SDValue CMPCC = LHS.getOperand(3);
737 SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
738 LHS = CMPCC.getOperand(0);
739 RHS = CMPCC.getOperand(1);
743 SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
744 SelectionDAG &DAG) {
745 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
746 // FIXME there isn't really any debug info here
747 DebugLoc dl = Op.getDebugLoc();
748 SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
749 SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, GA);
750 SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, GA);
752 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
753 return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
755 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl,
756 getPointerTy());
757 SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
758 SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
759 GlobalBase, RelAddr);
760 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
761 AbsAddr, NULL, 0);
764 SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
765 SelectionDAG &DAG) {
766 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
767 // FIXME there isn't really any debug info here
768 DebugLoc dl = Op.getDebugLoc();
769 Constant *C = N->getConstVal();
770 SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment());
771 SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, CP);
772 SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, CP);
773 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
774 return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
776 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl,
777 getPointerTy());
778 SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
779 SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
780 GlobalBase, RelAddr);
781 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
782 AbsAddr, NULL, 0);
785 static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
786 DebugLoc dl = Op.getDebugLoc();
787 // Convert the fp value to integer in an FP register.
788 assert(Op.getValueType() == MVT::i32);
789 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
790 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
793 static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
794 DebugLoc dl = Op.getDebugLoc();
795 assert(Op.getOperand(0).getValueType() == MVT::i32);
796 SDValue Tmp = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
797 // Convert the int value to FP in an FP register.
798 return DAG.getNode(SPISD::ITOF, dl, Op.getValueType(), Tmp);
801 static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) {
802 SDValue Chain = Op.getOperand(0);
803 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
804 SDValue LHS = Op.getOperand(2);
805 SDValue RHS = Op.getOperand(3);
806 SDValue Dest = Op.getOperand(4);
807 DebugLoc dl = Op.getDebugLoc();
808 unsigned Opc, SPCC = ~0U;
810 // If this is a br_cc of a "setcc", and if the setcc got lowered into
811 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
812 LookThroughSetCC(LHS, RHS, CC, SPCC);
814 // Get the condition flag.
815 SDValue CompareFlag;
816 if (LHS.getValueType() == MVT::i32) {
817 std::vector<EVT> VTs;
818 VTs.push_back(MVT::i32);
819 VTs.push_back(MVT::Flag);
820 SDValue Ops[2] = { LHS, RHS };
821 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1);
822 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
823 Opc = SPISD::BRICC;
824 } else {
825 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Flag, LHS, RHS);
826 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
827 Opc = SPISD::BRFCC;
829 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
830 DAG.getConstant(SPCC, MVT::i32), CompareFlag);
833 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
834 SDValue LHS = Op.getOperand(0);
835 SDValue RHS = Op.getOperand(1);
836 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
837 SDValue TrueVal = Op.getOperand(2);
838 SDValue FalseVal = Op.getOperand(3);
839 DebugLoc dl = Op.getDebugLoc();
840 unsigned Opc, SPCC = ~0U;
842 // If this is a select_cc of a "setcc", and if the setcc got lowered into
843 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
844 LookThroughSetCC(LHS, RHS, CC, SPCC);
846 SDValue CompareFlag;
847 if (LHS.getValueType() == MVT::i32) {
848 std::vector<EVT> VTs;
849 VTs.push_back(LHS.getValueType()); // subcc returns a value
850 VTs.push_back(MVT::Flag);
851 SDValue Ops[2] = { LHS, RHS };
852 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1);
853 Opc = SPISD::SELECT_ICC;
854 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
855 } else {
856 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Flag, LHS, RHS);
857 Opc = SPISD::SELECT_FCC;
858 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
860 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
861 DAG.getConstant(SPCC, MVT::i32), CompareFlag);
864 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
865 SparcTargetLowering &TLI) {
866 // vastart just stores the address of the VarArgsFrameIndex slot into the
867 // memory location argument.
868 DebugLoc dl = Op.getDebugLoc();
869 SDValue Offset = DAG.getNode(ISD::ADD, dl, MVT::i32,
870 DAG.getRegister(SP::I6, MVT::i32),
871 DAG.getConstant(TLI.getVarArgsFrameOffset(),
872 MVT::i32));
873 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
874 return DAG.getStore(Op.getOperand(0), dl, Offset, Op.getOperand(1), SV, 0);
877 static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
878 SDNode *Node = Op.getNode();
879 EVT VT = Node->getValueType(0);
880 SDValue InChain = Node->getOperand(0);
881 SDValue VAListPtr = Node->getOperand(1);
882 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
883 DebugLoc dl = Node->getDebugLoc();
884 SDValue VAList = DAG.getLoad(MVT::i32, dl, InChain, VAListPtr, SV, 0);
885 // Increment the pointer, VAList, to the next vaarg
886 SDValue NextPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, VAList,
887 DAG.getConstant(VT.getSizeInBits()/8,
888 MVT::i32));
889 // Store the incremented VAList to the legalized pointer
890 InChain = DAG.getStore(VAList.getValue(1), dl, NextPtr,
891 VAListPtr, SV, 0);
892 // Load the actual argument out of the pointer VAList, unless this is an
893 // f64 load.
894 if (VT != MVT::f64)
895 return DAG.getLoad(VT, dl, InChain, VAList, NULL, 0);
897 // Otherwise, load it as i64, then do a bitconvert.
898 SDValue V = DAG.getLoad(MVT::i64, dl, InChain, VAList, NULL, 0);
900 // Bit-Convert the value to f64.
901 SDValue Ops[2] = {
902 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, V),
903 V.getValue(1)
905 return DAG.getMergeValues(Ops, 2, dl);
908 static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
909 SDValue Chain = Op.getOperand(0); // Legalize the chain.
910 SDValue Size = Op.getOperand(1); // Legalize the size.
911 DebugLoc dl = Op.getDebugLoc();
913 unsigned SPReg = SP::O6;
914 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
915 SDValue NewSP = DAG.getNode(ISD::SUB, dl, MVT::i32, SP, Size); // Value
916 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
918 // The resultant pointer is actually 16 words from the bottom of the stack,
919 // to provide a register spill area.
920 SDValue NewVal = DAG.getNode(ISD::ADD, dl, MVT::i32, NewSP,
921 DAG.getConstant(96, MVT::i32));
922 SDValue Ops[2] = { NewVal, Chain };
923 return DAG.getMergeValues(Ops, 2, dl);
927 SDValue SparcTargetLowering::
928 LowerOperation(SDValue Op, SelectionDAG &DAG) {
929 switch (Op.getOpcode()) {
930 default: llvm_unreachable("Should not custom lower this!");
931 // Frame & Return address. Currently unimplemented
932 case ISD::RETURNADDR: return SDValue();
933 case ISD::FRAMEADDR: return SDValue();
934 case ISD::GlobalTLSAddress:
935 llvm_unreachable("TLS not implemented for Sparc.");
936 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
937 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
938 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
939 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
940 case ISD::BR_CC: return LowerBR_CC(Op, DAG);
941 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
942 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
943 case ISD::VAARG: return LowerVAARG(Op, DAG);
944 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
// EmitInstrWithCustomInserter - Expand a SELECT_CC pseudo into the usual
// branch diamond: thisMBB conditionally branches to sinkMBB, falls through
// to copy0MBB, and sinkMBB merges the two values with a PHI.
MachineBasicBlock *
SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  unsigned BROpcode;
  unsigned CC;
  DebugLoc dl = MI->getDebugLoc();
  // Figure out the conditional branch opcode to use for this select_cc.
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unknown SELECT_CC!");
  case SP::SELECT_CC_Int_ICC:
  case SP::SELECT_CC_FP_ICC:
  case SP::SELECT_CC_DFP_ICC:
    // Selects keyed on the integer condition codes branch with BCOND.
    BROpcode = SP::BCOND;
    break;
  case SP::SELECT_CC_Int_FCC:
  case SP::SELECT_CC_FP_FCC:
  case SP::SELECT_CC_DFP_FCC:
    // Selects keyed on the FP condition codes branch with FBCOND.
    BROpcode = SP::FBCOND;
    break;
  }

  // Operand 3 of the pseudo carries the SPCC::CondCodes value to branch on.
  CC = (SPCC::CondCodes)MI->getOperand(3).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;  // New blocks are inserted immediately after the current one.

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   [f]bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  // Conditional branch straight to the sink block: the "true" path.
  BuildMI(BB, dl, TII.get(BROpcode)).addMBB(sinkMBB).addImm(CC);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  sinkMBB->transferSuccessors(BB);
  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  // Per the diamond above: MI operand 2 is the value reaching from copy0MBB
  // (FalseValue) and operand 1 the value reaching from thisMBB (TrueValue).
  BuildMI(BB, dl, TII.get(SP::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);

  F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
  return BB;
}
1019 //===----------------------------------------------------------------------===//
1020 // Sparc Inline Assembly Support
1021 //===----------------------------------------------------------------------===//
1023 /// getConstraintType - Given a constraint letter, return the type of
1024 /// constraint it is for this target.
1025 SparcTargetLowering::ConstraintType
1026 SparcTargetLowering::getConstraintType(const std::string &Constraint) const {
1027 if (Constraint.size() == 1) {
1028 switch (Constraint[0]) {
1029 default: break;
1030 case 'r': return C_RegisterClass;
1034 return TargetLowering::getConstraintType(Constraint);
1037 std::pair<unsigned, const TargetRegisterClass*>
1038 SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
1039 EVT VT) const {
1040 if (Constraint.size() == 1) {
1041 switch (Constraint[0]) {
1042 case 'r':
1043 return std::make_pair(0U, SP::IntRegsRegisterClass);
1047 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
1050 std::vector<unsigned> SparcTargetLowering::
1051 getRegClassForInlineAsmConstraint(const std::string &Constraint,
1052 EVT VT) const {
1053 if (Constraint.size() != 1)
1054 return std::vector<unsigned>();
1056 switch (Constraint[0]) {
1057 default: break;
1058 case 'r':
1059 return make_vector<unsigned>(SP::L0, SP::L1, SP::L2, SP::L3,
1060 SP::L4, SP::L5, SP::L6, SP::L7,
1061 SP::I0, SP::I1, SP::I2, SP::I3,
1062 SP::I4, SP::I5,
1063 SP::O0, SP::O1, SP::O2, SP::O3,
1064 SP::O4, SP::O5, SP::O7, 0);
1067 return std::vector<unsigned>();
1070 bool
1071 SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
1072 // The Sparc target isn't yet aware of offsets.
1073 return false;
1076 /// getFunctionAlignment - Return the Log2 alignment of this function.
1077 unsigned SparcTargetLowering::getFunctionAlignment(const Function *) const {
1078 return 2;