//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
PPCTargetLowering::PPCTargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {

  // Fold away setcc operations if possible.
  setSetCCIsExpensive();
  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmpLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);

  // PowerPC has no intrinsics for these particular operations
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  setOperationAction(ISD::MEMSET, MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY, MVT::Other, Expand);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
  setOperationAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setOperationAction(ISD::SEXTLOAD, MVT::i8, Expand);

  // PowerPC has no SREM/UREM instructions
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);

  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  // If the subtarget lacks a hardware fsqrt instruction, expand FSQRT;
  // otherwise the hardware square root is used directly.
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ
  setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
  setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32 , Expand);

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32 , Expand);

  // PowerPC does not have Select
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v16i8, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);

  // PowerPC does not have truncstore for i1.
  setOperationAction(ISD::TRUNCSTORE, MVT::i1, Promote);

  // Support label based line numbers.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // RET must be custom lowered, to meet ABI requirements
  setOperationAction(ISD::RET, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (TM.getSubtarget<PPCSubtarget>().has64BitRegs()) {
    // 64 bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  } else {
    // 32 bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL, MVT::i64, Custom);
    setOperationAction(ISD::SRL, MVT::i64, Custom);
    setOperationAction(ISD::SRA, MVT::i64, Custom);
  }

  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
      // add/sub/and/or/xor are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::AND , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::OR  , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::XOR , (MVT::ValueType)VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);

      setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);

      setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  setSetCCResultContents(ZeroOrOneSetCCResult);
  setStackPointerRegisterToSaveRestore(PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);

  computeRegisterProperties();
}
const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL:          return "PPCISD::CALL";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  }
}
/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  else if (Op.getOpcode() == ISD::EXTLOAD || Op.getOpcode() == ISD::LOAD) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->get()))
        return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  }
  return false;
}
/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
  return Op.getOpcode() == ISD::UNDEF ||
         cast<ConstantSDNode>(Op)->getValue() == Val;
}
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getOperand(i),   i*2+1) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+1))
        return false;
  }
  return true;
}
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+9), i*2+3))
        return false;
  }
  return true;
}
/// isVMerge - Common function, used to match vmrg* shuffles.
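/// A vmrgh/vmrgl merge interleaves UnitSize-byte elements from the "high"
/// (bytes 0..7) or "low" (bytes 8..15) half of each input vector.  LHSStart
/// and RHSStart give the byte index where each input's elements begin in the
/// concatenated <LHS, RHS> byte numbering: the callers below pass (0, 16) for
/// vmrgh and (8, 24) for vmrgl, and repeat the same start (e.g. (8, 8)) for
/// the unary, single-input forms.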
static bool isVMerge(SDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}
/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
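/// vsldoi concatenates the two input vectors and takes the 16 bytes starting
/// at byte N, so a matching mask is simply <N, N+1, ..., N+15>.  For example,
/// the mask <3,4,5,...,18> is a vsldoi with a shift amount of 3; in the unary
/// (single-input) form the byte indices wrap around modulo 16 instead.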
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }

  return ShiftAmt;
}
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
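/// EltSize is the width of the splatted element in bytes, so a match consists
/// of one block of EltSize consecutive byte indices (all taken from the first
/// input vector, i.e. indices 0..15) repeated across all 16 mask bytes.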
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that they are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }

  return true;
}
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDOperand OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDOperand UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();

      if (UniquedVals[i&(Multiple-1)].Val == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDOperand();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].Val == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(0, MVT::i32);   // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
      if (Val >= -16)                                // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDOperand();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDOperand();
  }

  if (OpVal.Val == 0) return SDOperand();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValue());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();

  // Finally, if this value fits in a 5 bit sext field, return it
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Wasn't expecting to be able to lower this!");
  case ISD::FP_TO_SINT: {
    assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
    SDOperand Src = Op.getOperand(0);
    if (Src.getValueType() == MVT::f32)
      Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);

    SDOperand Tmp;
    switch (Op.getValueType()) {
    default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
    case MVT::i32:
      Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
      break;
    case MVT::i64:
      Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
      break;
    }

    // Convert the FP value to an int value through memory.
    SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp);
    if (Op.getValueType() == MVT::i32)
      Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits);
    return Bits;
  }
  case ISD::SINT_TO_FP:
    if (Op.getOperand(0).getValueType() == MVT::i64) {
      SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
      SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
      if (Op.getValueType() == MVT::f32)
        FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
      return FP;
    } else {
      assert(Op.getOperand(0).getValueType() == MVT::i32 &&
             "Unhandled SINT_TO_FP type in custom expander!");
      // Since we only generate this in 64-bit mode, we can take advantage of
      // 64-bit registers.  In particular, sign extend the input value into the
      // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
      // then lfd it and fcfid it.
      MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
      int FrameIdx = FrameInfo->CreateStackObject(8, 8);
      SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32);

      SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
                                    Op.getOperand(0));

      // STD the extended value into the stack slot.
      SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other,
                                    DAG.getEntryNode(), Ext64, FIdx,
                                    DAG.getSrcValue(NULL));
      // Load the value as a double.
      SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, DAG.getSrcValue(NULL));

      // FCFID it and return it.
      SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
      if (Op.getValueType() == MVT::f32)
        FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
      return FP;
    }
    break;
  case ISD::SELECT_CC: {
    // Turn FP only select_cc's into fsel instructions.
    if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
        !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
      break;

    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

    // Cannot handle SETEQ/SETNE.
    if (CC == ISD::SETEQ || CC == ISD::SETNE) break;

    MVT::ValueType ResVT = Op.getValueType();
    MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3);

    // If the RHS of the comparison is a 0.0, we don't need to do the
    // subtraction at all.
    if (isFloatingPointZero(RHS))
      switch (CC) {
      default: break;       // SETUO etc aren't handled by fsel.
      case ISD::SETULT:
      case ISD::SETLT:
        std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
      case ISD::SETUGE:
      case ISD::SETGE:
        if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
          LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
        return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
      case ISD::SETUGT:
      case ISD::SETGT:
        std::swap(TV, FV);  // fsel of -LHS is natively setle, swap for setgt
      case ISD::SETULE:
      case ISD::SETLE:
        if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
          LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
        return DAG.getNode(PPCISD::FSEL, ResVT,
                           DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
      }
    SDOperand Cmp;
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
    case ISD::SETULT:
    case ISD::SETLT:
      Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
      if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
        Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
      return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
    case ISD::SETUGE:
    case ISD::SETGE:
      Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
      if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
        Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
      return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
      if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
        Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
      return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
    case ISD::SETULE:
    case ISD::SETLE:
      Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
      if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
        Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
      return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
    }
    break;
  }
  case ISD::SHL: {
    assert(Op.getValueType() == MVT::i64 &&
           Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!");
    // The generic code does a fine job expanding shift by a constant.
    if (isa<ConstantSDNode>(Op.getOperand(1))) break;

    // Otherwise, expand into a bunch of logical ops.  Note that these ops
    // depend on the PPC behavior for oversized shift amounts.
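    // Hi' = (Hi << Amt) | (Lo >> (32-Amt)) | (Lo << (Amt-32)); Lo' = Lo << Amt.
    // Exactly one of the two Lo terms is live for any given Amt, because PPC's
    // 32-bit shifts produce 0 for (unsigned) shift amounts of 32..63, so the
    // dead term contributes nothing to the OR.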
    SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                               DAG.getConstant(0, MVT::i32));
    SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                               DAG.getConstant(1, MVT::i32));
    SDOperand Amt = Op.getOperand(1);

    SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                                 DAG.getConstant(32, MVT::i32), Amt);
    SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt);
    SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1);
    SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
    SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                                 DAG.getConstant(-32U, MVT::i32));
    SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5);
    SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
    SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt);
    return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
  }
  case ISD::SRL: {
    assert(Op.getValueType() == MVT::i64 &&
           Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!");
    // The generic code does a fine job expanding shift by a constant.
    if (isa<ConstantSDNode>(Op.getOperand(1))) break;

    // Otherwise, expand into a bunch of logical ops.  Note that these ops
    // depend on the PPC behavior for oversized shift amounts.
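    // Mirror image of the SHL expansion above:
    // Lo' = (Lo >> Amt) | (Hi << (32-Amt)) | (Hi >> (Amt-32)); Hi' = Hi >> Amt,
    // again relying on PPC's 32-bit shifts yielding 0 for amounts of 32..63.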
    SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                               DAG.getConstant(0, MVT::i32));
    SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                               DAG.getConstant(1, MVT::i32));
    SDOperand Amt = Op.getOperand(1);

    SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                                 DAG.getConstant(32, MVT::i32), Amt);
    SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
    SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
    SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
    SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                                 DAG.getConstant(-32U, MVT::i32));
    SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5);
    SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
    SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt);
    return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
  }
  case ISD::SRA: {
    assert(Op.getValueType() == MVT::i64 &&
           Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!");
    // The generic code does a fine job expanding shift by a constant.
    if (isa<ConstantSDNode>(Op.getOperand(1))) break;

    // Otherwise, expand into a bunch of logical ops, followed by a select_cc.
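    // Like SRL, except the bits shifted into the high word must be sign bits,
    // and the low word cannot just OR in the Amt-32 term: for Amt <= 32 the
    // combined SRL/SHL result (Tmp4) is already correct, while for Amt > 32
    // the low word is Hi >> (Amt-32) with sign fill (Tmp6), so a select on
    // Amt-32 <= 0 picks between the two.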
    SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                               DAG.getConstant(0, MVT::i32));
    SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                               DAG.getConstant(1, MVT::i32));
    SDOperand Amt = Op.getOperand(1);

    SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                                 DAG.getConstant(32, MVT::i32), Amt);
    SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
    SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
    SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
    SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                                 DAG.getConstant(-32U, MVT::i32));
    SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5);
    SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt);
    SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32),
                                      Tmp4, Tmp6, ISD::SETLE);
    return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
  }
  case ISD::ConstantPool: {
    ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
    Constant *C = CP->get();
    SDOperand CPI = DAG.getTargetConstantPool(C, MVT::i32, CP->getAlignment());
    SDOperand Zero = DAG.getConstant(0, MVT::i32);

    if (getTargetMachine().getRelocationModel() == Reloc::Static) {
      // Generate non-pic code that has direct accesses to the constant pool.
      // The address of the global is just (hi(&g)+lo(&g)).
      SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
      SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
      return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
    }

    // Only lower ConstantPool on Darwin.
    if (!getTargetMachine().getSubtarget<PPCSubtarget>().isDarwin()) break;
    SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
    if (getTargetMachine().getRelocationModel() == Reloc::PIC) {
      // With PIC, the first instruction is actually "GR+hi(&G)".
      Hi = DAG.getNode(ISD::ADD, MVT::i32,
                       DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
    }

    SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
    Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
    return Lo;
  }
  case ISD::GlobalAddress: {
    GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
    GlobalValue *GV = GSDN->getGlobal();
    SDOperand GA = DAG.getTargetGlobalAddress(GV, MVT::i32, GSDN->getOffset());
    SDOperand Zero = DAG.getConstant(0, MVT::i32);

    if (getTargetMachine().getRelocationModel() == Reloc::Static) {
      // Generate non-pic code that has direct accesses to globals.
      // The address of the global is just (hi(&g)+lo(&g)).
      SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
      SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
      return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
    }

    // Only lower GlobalAddress on Darwin.
    if (!getTargetMachine().getSubtarget<PPCSubtarget>().isDarwin()) break;

    SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
    if (getTargetMachine().getRelocationModel() == Reloc::PIC) {
      // With PIC, the first instruction is actually "GR+hi(&G)".
      Hi = DAG.getNode(ISD::ADD, MVT::i32,
                       DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
    }

    SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
    Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);

    if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() &&
        (!GV->isExternal() || GV->hasNotBeenReadFromBytecode()))
      return Lo;

    // If the global is weak or external, we have to go through the lazy
    // resolution stub.
    return DAG.getLoad(MVT::i32, DAG.getEntryNode(), Lo, DAG.getSrcValue(0));
  }
  case ISD::SETCC: {
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

    // If we're comparing for equality to zero, expose the fact that this is
    // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
    // fold the new nodes.
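    // The identity used here: (x == 0) iff (ctlz(x) >> log2(bitwidth)) == 1,
    // since ctlz returns the full bit width (e.g. 32) only when x == 0, and
    // that is the only count whose log2(bitwidth) bit is set.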
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      if (C->isNullValue() && CC == ISD::SETEQ) {
        MVT::ValueType VT = Op.getOperand(0).getValueType();
        SDOperand Zext = Op.getOperand(0);
        if (VT < MVT::i32) {
          VT = MVT::i32;
          Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
        }
        unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
        SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
        SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
                                    DAG.getConstant(Log2b, getShiftAmountTy()));
        return DAG.getNode(ISD::TRUNCATE, getSetCCResultTy(), Scc);
      }
      // Leave comparisons against 0 and -1 alone for now, since they're usually
      // optimized.  FIXME: revisit this when we can custom lower all setcc
      // optimizations.
      if (C->isAllOnesValue() || C->isNullValue())
        break;
    }

    // If we have an integer seteq/setne, turn it into a compare against zero
    // by subtracting the rhs from the lhs, which is faster than setting a
    // condition register, reading it back out, and masking the correct bit.
    MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
    if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
      MVT::ValueType VT = Op.getValueType();
      SDOperand Sub = DAG.getNode(ISD::SUB, LHSVT, Op.getOperand(0),
                                  Op.getOperand(1));
      return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
    }
    break;
  }
  case ISD::VASTART: {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    // FIXME: Replace MVT::i32 with PointerTy
    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
    return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
                       Op.getOperand(1), Op.getOperand(2));
  }
  case ISD::RET: {
    SDOperand Copy;

    switch (Op.getNumOperands()) {
    default:
      assert(0 && "Do not know how to return this many arguments!");
      abort();
    case 1:
      return SDOperand(); // ret void is legal
    case 2: {
      MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
      unsigned ArgReg;
      if (MVT::isVector(ArgVT))
        ArgReg = PPC::V2;
      else if (MVT::isInteger(ArgVT))
        ArgReg = PPC::R3;
      else {
        assert(MVT::isFloatingPoint(ArgVT));
        ArgReg = PPC::F1;
      }

      Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
                              SDOperand());

      // If we haven't noted that R3/F1 is live out, do so now.
      if (DAG.getMachineFunction().liveout_empty())
        DAG.getMachineFunction().addLiveOut(ArgReg);
      break;
    }
    case 3:
      Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(2),
                              SDOperand());
      Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1), Copy.getValue(1));
      // If we haven't noted that R3+R4 are live out, do so now.
      if (DAG.getMachineFunction().liveout_empty()) {
        DAG.getMachineFunction().addLiveOut(PPC::R3);
        DAG.getMachineFunction().addLiveOut(PPC::R4);
      }
      break;
    }
    return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
  }
  case ISD::SCALAR_TO_VECTOR: {
    // Create a stack slot that is 16-byte aligned.
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    int FrameIdx = FrameInfo->CreateStackObject(16, 16);
    SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32);

    // Store the input value into Value#0 of the stack slot.
    SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(),
                                  Op.getOperand(0), FIdx, DAG.getSrcValue(NULL));
    // Load it out.
    return DAG.getLoad(Op.getValueType(), Store, FIdx, DAG.getSrcValue(NULL));
  }
  case ISD::BUILD_VECTOR: {
    // If this is a case we can't handle, return null and let the default
    // expansion code take care of it.  If we CAN select this case, return Op.

    // FIXME: We should handle splat(-0.0), and other cases here.

    // See if this is all zeros.
    if (ISD::isBuildVectorAllZeros(Op.Val)) {
      // Canonicalize all zero vectors to be v4i32.
      if (Op.getValueType() != MVT::v4i32) {
        SDOperand Z = DAG.getConstant(0, MVT::i32);
        Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
        Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
      }
      return Op;
    }

    // Check to see if this is something we can use VSPLTI* to form.
    MVT::ValueType CanonicalVT = MVT::Other;
    SDNode *CST = 0;

    if ((CST = PPC::get_VSPLTI_elt(Op.Val, 4, DAG).Val))       // vspltisw
      CanonicalVT = MVT::v4i32;
    else if ((CST = PPC::get_VSPLTI_elt(Op.Val, 2, DAG).Val))  // vspltish
      CanonicalVT = MVT::v8i16;
    else if ((CST = PPC::get_VSPLTI_elt(Op.Val, 1, DAG).Val))  // vspltisb
      CanonicalVT = MVT::v16i8;

    // If this matches one of the vsplti* patterns, force it to the canonical
    // type for the pattern.
    if (CST) {
      if (Op.getValueType() != CanonicalVT) {
        // Convert the splatted element to the right element type.
        SDOperand Elt = DAG.getNode(ISD::TRUNCATE,
                                    MVT::getVectorBaseType(CanonicalVT),
                                    SDOperand(CST, 0));
        std::vector<SDOperand> Ops(MVT::getVectorNumElements(CanonicalVT), Elt);
        SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, Ops);
        Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
      }
      return Op;
    }

    return SDOperand();
  }
  case ISD::VECTOR_SHUFFLE: {
    SDOperand V1 = Op.getOperand(0);
    SDOperand V2 = Op.getOperand(1);
    SDOperand PermMask = Op.getOperand(2);

    // Cases that are handled by instructions that take permute immediates
    // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
    // selected by the instruction selector.
    if (V2.getOpcode() == ISD::UNDEF) {
      if (PPC::isSplatShuffleMask(PermMask.Val, 1) ||
          PPC::isSplatShuffleMask(PermMask.Val, 2) ||
          PPC::isSplatShuffleMask(PermMask.Val, 4) ||
          PPC::isVPKUWUMShuffleMask(PermMask.Val, true) ||
          PPC::isVPKUHUMShuffleMask(PermMask.Val, true) ||
          PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 ||
          PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) ||
          PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) ||
          PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) ||
          PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) ||
          PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) ||
          PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) {
        return Op;
      }
    }

    // Altivec has a variety of "shuffle immediates" that take two vector inputs
    // and produce a fixed permutation.  If any of these match, do not lower to
    // VPERM.
    if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) ||
        PPC::isVPKUHUMShuffleMask(PermMask.Val, false) ||
        PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 4, false))
      return Op;

    // TODO: Handle more cases, and also handle cases that are cheaper to do as
    // multiple such instructions than as a constant pool load/vperm pair.

    // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
    // vector that will get spilled to the constant pool.
    if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
    // The VECTOR_SHUFFLE mask is almost exactly what we want for vperm, except
    // that it is in input element units, not in bytes.  Convert now.
    MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType());
    unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;

    std::vector<SDOperand> ResultMask;
    for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
      unsigned SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();

      for (unsigned j = 0; j != BytesPerElement; ++j)
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
                                             MVT::i8));
    }

    SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, ResultMask);
    return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();

    // If this is a lowered altivec predicate compare, CompareOpc is set to the
    // opcode number of the comparison.
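    // Note: the CompareOpc values below (6, 70, 134, ...) appear to be the
    // extended-opcode fields of the corresponding AltiVec VC-form vcmp*
    // instructions; the "dot" predicate forms additionally set CR6.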
    int CompareOpc = -1;
    bool isDot = false;
    switch (IntNo) {
    default: return SDOperand();    // Don't custom lower most intrinsics.
    // Comparison predicates.
    case Intrinsic::ppc_altivec_vcmpbfp_p:  CompareOpc = 966; isDot = 1; break;
    case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
    case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc =   6; isDot = 1; break;
    case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc =  70; isDot = 1; break;
    case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
    case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
    case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
    case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
    case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
    case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
    case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
    case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
    case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;

    // Normal Comparisons.
    case Intrinsic::ppc_altivec_vcmpbfp:    CompareOpc = 966; isDot = 0; break;
    case Intrinsic::ppc_altivec_vcmpeqfp:   CompareOpc = 198; isDot = 0; break;
    case Intrinsic::ppc_altivec_vcmpequb:   CompareOpc =   6; isDot = 0; break;
    case Intrinsic::ppc_altivec_vcmpequh:   CompareOpc =  70; isDot = 0; break;
    case Intrinsic::ppc_altivec_vcmpequw:   CompareOpc = 134; isDot = 0; break;
    case Intrinsic::ppc_altivec_vcmpgefp:   CompareOpc = 454; isDot = 0; break;
    case Intrinsic::ppc_altivec_vcmpgtfp:   CompareOpc = 710; isDot = 0; break;
    case Intrinsic::ppc_altivec_vcmpgtsb:   CompareOpc = 774; isDot = 0; break;
    case Intrinsic::ppc_altivec_vcmpgtsh:   CompareOpc = 838; isDot = 0; break;
    case Intrinsic::ppc_altivec_vcmpgtsw:   CompareOpc = 902; isDot = 0; break;
    case Intrinsic::ppc_altivec_vcmpgtub:   CompareOpc = 518; isDot = 0; break;
    case Intrinsic::ppc_altivec_vcmpgtuh:   CompareOpc = 582; isDot = 0; break;
    case Intrinsic::ppc_altivec_vcmpgtuw:   CompareOpc = 646; isDot = 0; break;
    }

    assert(CompareOpc > 0 && "We only lower altivec predicate compares so far!");

    // If this is a non-dot comparison, make the VCMP node.
    if (!isDot) {
      SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(),
                                  Op.getOperand(1), Op.getOperand(2),
                                  DAG.getConstant(CompareOpc, MVT::i32));
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp);
    }

    // Create the PPCISD altivec 'dot' comparison node.
    std::vector<SDOperand> Ops;
    std::vector<MVT::ValueType> VTs;
    Ops.push_back(Op.getOperand(2));  // LHS
    Ops.push_back(Op.getOperand(3));  // RHS
    Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32));
    VTs.push_back(Op.getOperand(2).getValueType());
    VTs.push_back(MVT::Flag);
    SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops);

    // Now that we have the comparison, emit a copy from the CR to a GPR.
    // This is flagged to the above dot comparison.
    SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32,
                                  DAG.getRegister(PPC::CR6, MVT::i32),
                                  CompNode.getValue(1));

    // Unpack the result based on how the target uses it.
    unsigned BitNo;   // Bit # of CR6.
    bool InvertBit;   // Invert result?
    switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
    default:  // Can't happen, don't crash on invalid number though.
    case 0:   // Return the value of the EQ bit of CR6.
      BitNo = 0; InvertBit = false;
      break;
    case 1:   // Return the inverted value of the EQ bit of CR6.
      BitNo = 0; InvertBit = true;
      break;
    case 2:   // Return the value of the LT bit of CR6.
      BitNo = 2; InvertBit = false;
      break;
    case 3:   // Return the inverted value of the LT bit of CR6.
      BitNo = 2; InvertBit = true;
      break;
    }

    // Shift the bit into the low position.
    Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags,
                        DAG.getConstant(8-(3-BitNo), MVT::i32));
    // Isolate the bit.
    Flags = DAG.getNode(ISD::AND, MVT::i32, Flags,
                        DAG.getConstant(1, MVT::i32));

    // If we are supposed to, toggle the bit.
    if (InvertBit)
      Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags,
                          DAG.getConstant(1, MVT::i32));
    return Flags;
  }
  }
  return SDOperand();
}
std::vector<SDOperand>
PPCTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  // add beautiful description of PPC stack frame format, or at least some docs
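  //
  // Briefly, for the 32-bit Darwin/AIX-style ABI this code targets: the
  // caller's frame begins with a 24-byte linkage area (saved SP, saved CR,
  // saved LR, two reserved words, and the saved TOC pointer), so the first
  // parameter lives at stack offset 24 even when it is also passed in a
  // register.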
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineBasicBlock& BB = MF.front();
  SSARegMap *RegMap = MF.getSSARegMap();
  std::vector<SDOperand> ArgValues;

  unsigned ArgOffset = 24;
  unsigned GPR_remaining = 8;
  unsigned FPR_remaining = 13;
  unsigned GPR_idx = 0, FPR_idx = 0;
  static const unsigned GPR[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const unsigned FPR[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
  };

  // Add DAG nodes to load the arguments...  On entry to a function on PPC,
  // the arguments start at offset 24, although they are likely to be passed
  // in registers.
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    SDOperand newroot, argt;
    unsigned ObjSize;
    bool needsLoad = false;
    bool ArgLive = !I->use_empty();
    MVT::ValueType ObjectVT = getValueType(I->getType());

    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      ObjSize = 4;
      if (!ArgLive) break;
      if (GPR_remaining > 0) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
        MF.addLiveIn(GPR[GPR_idx], VReg);
        argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
        if (ObjectVT != MVT::i32) {
          unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
                                                       : ISD::AssertZext;
          argt = DAG.getNode(AssertOp, MVT::i32, argt,
                             DAG.getValueType(ObjectVT));
          argt = DAG.getNode(ISD::TRUNCATE, ObjectVT, argt);
        }
      } else {
        needsLoad = true;
      }
      break;
    case MVT::i64:
      ObjSize = 8;
      if (!ArgLive) break;
      if (GPR_remaining > 0) {
        SDOperand argHi, argLo;
        unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
        MF.addLiveIn(GPR[GPR_idx], VReg);
        argHi = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
        // If we have two or more remaining argument registers, then both halves
        // of the i64 can be sourced from there.  Otherwise, the lower half will
        // have to come off the stack.  This can happen when an i64 is preceded
        // by 28 bytes of arguments.
        if (GPR_remaining > 1) {
          unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
          MF.addLiveIn(GPR[GPR_idx+1], VReg);
          argLo = DAG.getCopyFromReg(argHi, VReg, MVT::i32);
        } else {
          int FI = MFI->CreateFixedObject(4, ArgOffset+4);
          SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
          argLo = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
                              DAG.getSrcValue(NULL));
        }
        // Build the outgoing arg thingy
        argt = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, argLo, argHi);
        newroot = argLo;
      } else {
        needsLoad = true;
      }
      break;
    case MVT::f32:
    case MVT::f64:
      ObjSize = (ObjectVT == MVT::f64) ? 8 : 4;
      if (!ArgLive) {
        if (FPR_remaining > 0) {
          --FPR_remaining;
          ++FPR_idx;
        }
        break;
      }
      if (FPR_remaining > 0) {
        unsigned VReg;
        if (ObjectVT == MVT::f32)
          VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
        else
          VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
        MF.addLiveIn(FPR[FPR_idx], VReg);
        argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, ObjectVT);
        --FPR_remaining;
        ++FPR_idx;
      } else {
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined above
    // that we ran out of physical registers of the appropriate type
    if (needsLoad) {
      unsigned SubregOffset = 0;
      if (ObjectVT == MVT::i8 || ObjectVT == MVT::i1) SubregOffset = 3;
      if (ObjectVT == MVT::i16) SubregOffset = 2;
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
      FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN,
                        DAG.getConstant(SubregOffset, MVT::i32));
      argt = newroot = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
                                   DAG.getSrcValue(NULL));
    }

    // Every 4 bytes of argument space consumes one of the GPRs available for
    // argument passing.
    if (GPR_remaining > 0) {
      unsigned delta = (GPR_remaining > 1 && ObjSize == 8) ? 2 : 1;
      GPR_remaining -= delta;
      GPR_idx += delta;
    }
    ArgOffset += ObjSize;
    if (newroot.Val)
      DAG.setRoot(newroot.getValue(1));

    ArgValues.push_back(argt);
  }
  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (F.isVarArg()) {
    VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset);
    SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    std::vector<SDOperand> MemOps;
    for (; GPR_remaining > 0; --GPR_remaining, ++GPR_idx) {
      unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
      MF.addLiveIn(GPR[GPR_idx], VReg);
      SDOperand Val = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
      SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
                                    Val, FIN, DAG.getSrcValue(NULL));
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store
      SDOperand PtrOff = DAG.getConstant(4, getPointerTy());
      FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN, PtrOff);
    }
    if (!MemOps.empty()) {
      MemOps.push_back(DAG.getRoot());
      DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps));
    }
  }

  return ArgValues;
}
std::pair<SDOperand, SDOperand>
PPCTargetLowering::LowerCallTo(SDOperand Chain,
                               const Type *RetTy, bool isVarArg,
                               unsigned CallingConv, bool isTailCall,
                               SDOperand Callee, ArgListTy &Args,
                               SelectionDAG &DAG) {
  // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
  // SelectExpr to use to put the arguments in the appropriate registers.
  std::vector<SDOperand> args_to_use;

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area.
  unsigned NumBytes = 24;

  if (Args.empty()) {
    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getConstant(NumBytes, getPointerTy()));
  } else {
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      switch (getValueType(Args[i].second)) {
      default: assert(0 && "Unknown value type!");
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::f32:
        NumBytes += 4;
        break;
      case MVT::i64:
      case MVT::f64:
        NumBytes += 8;
        break;
      }
    }

    // Just to be safe, we'll always reserve the full 24 bytes of linkage area
    // plus 32 bytes of argument space in case any called code gets funky on us.
    // (Required by ABI to support var arg)
    if (NumBytes < 56) NumBytes = 56;

    // Adjust the stack pointer for the new arguments...
    // These operations are automatically eliminated by the prolog/epilog pass
    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getConstant(NumBytes, getPointerTy()));

    // Set up a copy of the stack pointer for use loading and storing any
    // arguments that may not fit in the registers available for argument
    // passing.
    SDOperand StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

    // Figure out which arguments are going to go in registers, and which in
    // memory.  Also, if this is a vararg function, floating point operations
    // must be stored to our stack, and loaded into integer regs as well, if
    // any integer regs are available for argument passing.
    unsigned ArgOffset = 24;
    unsigned GPR_remaining = 8;
    unsigned FPR_remaining = 13;

    std::vector<SDOperand> MemOps;
1361 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
1362 // PtrOff will be used to store the current argument to the stack if a
1363 // register cannot be found for it.
1364 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
1365 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
1366 MVT::ValueType ArgVT = getValueType(Args[i].second);
1368 switch (ArgVT) {
1369 default: assert(0 && "Unexpected ValueType for argument!");
1370 case MVT::i1:
1371 case MVT::i8:
1372 case MVT::i16:
1373 // Promote the integer to 32 bits. If the input type is signed use a
1374 // sign extend, otherwise use a zero extend.
1375 if (Args[i].second->isSigned())
1376 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
1377 else
1378 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
1379 // FALL THROUGH
1380 case MVT::i32:
1381 if (GPR_remaining > 0) {
1382 args_to_use.push_back(Args[i].first);
1383 --GPR_remaining;
1384 } else {
1385 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1386 Args[i].first, PtrOff,
1387 DAG.getSrcValue(NULL)));
1389 ArgOffset += 4;
1390 break;
      case MVT::i64:
        // If we have one free GPR left, we can place the upper half of the
        // i64 in it, and store the other half to the stack.  If we have two
        // or more free GPRs, we can pass both halves of the i64 in registers.
        if (GPR_remaining > 0) {
          SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                     Args[i].first, DAG.getConstant(1, MVT::i32));
          SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                     Args[i].first, DAG.getConstant(0, MVT::i32));
          args_to_use.push_back(Hi);
          --GPR_remaining;
          if (GPR_remaining > 0) {
            args_to_use.push_back(Lo);
            --GPR_remaining;
          } else {
            SDOperand ConstFour = DAG.getConstant(4, getPointerTy());
            PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour);
            MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                         Lo, PtrOff, DAG.getSrcValue(NULL)));
          }
        } else {
          MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                       Args[i].first, PtrOff,
                                       DAG.getSrcValue(NULL)));
        }
        ArgOffset += 8;
        break;
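        // E.g., with exactly one GPR left, the high word of the i64 travels
        // in that register and the low word is stored at PtrOff+4; with two
        // or more left, both halves travel in consecutive GPRs.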
      case MVT::f32:
      case MVT::f64:
        if (FPR_remaining > 0) {
          args_to_use.push_back(Args[i].first);
          --FPR_remaining;
          if (isVarArg) {
            SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                          Args[i].first, PtrOff,
                                          DAG.getSrcValue(NULL));
            MemOps.push_back(Store);
            // Float varargs are always shadowed in available integer
            // registers.
            if (GPR_remaining > 0) {
              SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff,
                                           DAG.getSrcValue(NULL));
              MemOps.push_back(Load.getValue(1));
              args_to_use.push_back(Load);
              --GPR_remaining;
            }
            if (GPR_remaining > 0 && MVT::f64 == ArgVT) {
              SDOperand ConstFour = DAG.getConstant(4, getPointerTy());
              PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour);
              SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff,
                                           DAG.getSrcValue(NULL));
              MemOps.push_back(Load.getValue(1));
              args_to_use.push_back(Load);
              --GPR_remaining;
            }
          } else {
            // If we have any FPRs remaining, we may also have GPRs remaining.
            // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
            // GPRs.
            if (GPR_remaining > 0) {
              args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
              --GPR_remaining;
            }
            if (GPR_remaining > 0 && MVT::f64 == ArgVT) {
              args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
              --GPR_remaining;
            }
          }
        } else {
          MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                       Args[i].first, PtrOff,
                                       DAG.getSrcValue(NULL)));
        }
        ArgOffset += (ArgVT == MVT::f32) ? 4 : 8;
        break;
      }
    }
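    // Note the variadic f32/f64 handling above: the value is passed in an
    // FPR, stored to its argument slot, and its word(s) reloaded into any
    // remaining GPRs, since the callee may read the vararg as integer words.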
    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps);
  }

  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);
  MVT::ValueType ActualRetTyVT = RetTyVT;
  if (RetTyVT >= MVT::i1 && RetTyVT <= MVT::i16)
    ActualRetTyVT = MVT::i32;   // Promote result to i32.

  if (RetTyVT == MVT::i64) {
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::i32);
  } else if (RetTyVT != MVT::isVoid) {
    RetVals.push_back(ActualRetTyVT);
  }
  RetVals.push_back(MVT::Other);

  // If the callee is a GlobalAddress node (quite common, since every direct
  // call is), turn it into a TargetGlobalAddress node so that legalize
  // doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);

  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  Ops.insert(Ops.end(), args_to_use.begin(), args_to_use.end());
  SDOperand TheCall = DAG.getNode(PPCISD::CALL, RetVals, Ops);
  Chain = TheCall.getValue(TheCall.Val->getNumValues()-1);
  Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));
  SDOperand RetVal = TheCall;

  // If the result is a small value, add a note so that we keep track of the
  // information about whether it is sign or zero extended.
  if (RetTyVT != ActualRetTyVT) {
    RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext : ISD::AssertZext,
                         MVT::i32, RetVal, DAG.getValueType(RetTyVT));
    RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal);
  } else if (RetTyVT == MVT::i64) {
    RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, RetVal, RetVal.getValue(1));
  }

  return std::make_pair(RetVal, Chain);
}

MachineBasicBlock *
PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  assert((MI->getOpcode() == PPC::SELECT_CC_Int ||
          MI->getOpcode() == PPC::SELECT_CC_F4 ||
          MI->getOpcode() == PPC::SELECT_CC_F8 ||
          MI->getOpcode() == PPC::SELECT_CC_VRRC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  ilist<MachineBasicBlock>::iterator It = BB;
  ++It;

  // thisMBB:
  //   ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
  BuildMI(BB, MI->getOperand(4).getImmedValue(), 2)
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
  MachineFunction *F = BB->getParent();
  F->getBasicBlockList().insert(It, copy0MBB);
  F->getBasicBlockList().insert(It, sinkMBB);
  // Update machine-CFG edges by first adding all successors of the current
  // block to the new block which will contain the Phi node for the select.
  for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
       e = BB->succ_end(); i != e; ++i)
    sinkMBB->addSuccessor(*i);
  // Next, remove all successors of the current block, and add the true
  // and fallthrough blocks as its successors.
  while (!BB->succ_empty())
    BB->removeSuccessor(BB->succ_begin());
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges.
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //   ...
  BB = sinkMBB;
  BuildMI(BB, PPC::PHI, 4, MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  delete MI;   // The pseudo instruction is gone now.
  return BB;
}

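// A sketch of the result for a source-level select such as "X = A < B ? C : D"
// (register and block names hypothetical):
//   thisMBB:   cmpw cr0, A, B
//              blt cr0, sinkMBB        ; the true value C already lives here
//   copy0MBB:  (fallthrough; supplies the false value D)
//   sinkMBB:   X = phi [ D, copy0MBB ], [ C, thisMBB ]
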
SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  TargetMachine &TM = getTargetMachine();
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::SINT_TO_FP:
    if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
      if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
        // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without
        // load/stores.  We allow the src/dst to be either f32/f64, but the
        // intermediate type must be i64.
        if (N->getOperand(0).getValueType() == MVT::i64) {
          SDOperand Val = N->getOperand(0).getOperand(0);
          if (Val.getValueType() == MVT::f32) {
            Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
            DCI.AddToWorklist(Val.Val);
          }

          Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          if (N->getValueType(0) == MVT::f32) {
            Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val);
            DCI.AddToWorklist(Val.Val);
          }
          return Val;
        } else if (N->getOperand(0).getValueType() == MVT::i32) {
          // If the intermediate type is i32, we can avoid the load/store here
          // too.
        }
      }
    }
    break;
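  // For example, the combine above lets "(double)(long)X" select to roughly
  //   fctidz f0, f1    ; round toward zero to a 64-bit integer, in an FPR
  //   fcfid  f1, f0    ; convert the integer back to double
  // instead of spilling the intermediate i64 through memory.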
  case ISD::STORE:
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
    if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32) {
      SDOperand Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
        DCI.AddToWorklist(Val.Val);
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
      DCI.AddToWorklist(Val.Val);

      Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
                        N->getOperand(2), N->getOperand(3));
      DCI.AddToWorklist(Val.Val);
      return Val;
    }
    break;
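  // For example, the store combine above lets "*IntPtr = (int)X" select to
  //   fctiwz f0, f1      ; f0 = (int)X, kept in an FPR
  //   stfiwx f0, 0, r3   ; store the low 32 integer bits directly
  // rather than moving the value through a GPR via the stack.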
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6
    // and a normal output).

    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = 0;

      SDNode *LHSN = N->getOperand(0).Val;
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if ((*UI)->getOpcode() == PPCISD::VCMPo &&
            (*UI)->getOperand(1) == N->getOperand(1) &&
            (*UI)->getOperand(2) == N->getOperand(2) &&
            (*UI)->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there are non-zero uses of the flag value, use the VCMPo node!
      if (VCMPoNode && !VCMPoNode->hasNUsesOfValue(0, 1))
        return SDOperand(VCMPoNode, 0);
    }
    break;
  }
  }

  return SDOperand();
}

void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t Mask,
                                                       uint64_t &KnownZero,
                                                       uint64_t &KnownOne,
                                                       unsigned Depth) const {
  KnownZero = 0;
  KnownOne = 0;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

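// Because the AltiVec predicate intrinsics above produce only 0 or 1, the
// DAG combiner can use this information to, for instance, delete a redundant
// "and X, 1" or a zero-extension applied to their results.
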
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(char ConstraintLetter) const {
  switch (ConstraintLetter) {
  default: break;
  case 'b':
  case 'r':
  case 'f':
  case 'v':
  case 'y':
    return C_RegisterClass;
  }
  return TargetLowering::getConstraintType(ConstraintLetter);
}

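// In GCC's RS6000 scheme these letters mean: 'b' a base register (any GPR
// except R0), 'r' any GPR, 'f' a floating-point register, 'v' an AltiVec
// vector register, and 'y' a condition register, as the register lists below
// spell out.  A hypothetical use from C:
//   asm("add %0,%1,%2" : "=r"(Dst) : "b"(Base), "r"(Idx));
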
std::vector<unsigned> PPCTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {  // GCC RS6000 Constraint Letters
    default: break;           // Unknown constraint letter
    case 'b':
      return make_vector<unsigned>(/*no R0*/ PPC::R1 , PPC::R2 , PPC::R3 ,
                                   PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
                                   PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
                                   PPC::R12, PPC::R13, PPC::R14, PPC::R15,
                                   PPC::R16, PPC::R17, PPC::R18, PPC::R19,
                                   PPC::R20, PPC::R21, PPC::R22, PPC::R23,
                                   PPC::R24, PPC::R25, PPC::R26, PPC::R27,
                                   PPC::R28, PPC::R29, PPC::R30, PPC::R31,
                                   0);
    case 'r':
      return make_vector<unsigned>(PPC::R0 , PPC::R1 , PPC::R2 , PPC::R3 ,
                                   PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
                                   PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
                                   PPC::R12, PPC::R13, PPC::R14, PPC::R15,
                                   PPC::R16, PPC::R17, PPC::R18, PPC::R19,
                                   PPC::R20, PPC::R21, PPC::R22, PPC::R23,
                                   PPC::R24, PPC::R25, PPC::R26, PPC::R27,
                                   PPC::R28, PPC::R29, PPC::R30, PPC::R31,
                                   0);
    case 'f':
      return make_vector<unsigned>(PPC::F0 , PPC::F1 , PPC::F2 , PPC::F3 ,
                                   PPC::F4 , PPC::F5 , PPC::F6 , PPC::F7 ,
                                   PPC::F8 , PPC::F9 , PPC::F10, PPC::F11,
                                   PPC::F12, PPC::F13, PPC::F14, PPC::F15,
                                   PPC::F16, PPC::F17, PPC::F18, PPC::F19,
                                   PPC::F20, PPC::F21, PPC::F22, PPC::F23,
                                   PPC::F24, PPC::F25, PPC::F26, PPC::F27,
                                   PPC::F28, PPC::F29, PPC::F30, PPC::F31,
                                   0);
    case 'v':
      return make_vector<unsigned>(PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 ,
                                   PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 ,
                                   PPC::V8 , PPC::V9 , PPC::V10, PPC::V11,
                                   PPC::V12, PPC::V13, PPC::V14, PPC::V15,
                                   PPC::V16, PPC::V17, PPC::V18, PPC::V19,
                                   PPC::V20, PPC::V21, PPC::V22, PPC::V23,
                                   PPC::V24, PPC::V25, PPC::V26, PPC::V27,
                                   PPC::V28, PPC::V29, PPC::V30, PPC::V31,
                                   0);
    case 'y':
      return make_vector<unsigned>(PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3,
                                   PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7,
                                   0);
    }
  }

  return std::vector<unsigned>();
}

// isOperandValidForConstraint
bool PPCTargetLowering::
isOperandValidForConstraint(SDOperand Op, char Letter) {
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    if (!isa<ConstantSDNode>(Op)) return false;  // Must be an immediate.
    unsigned Value = cast<ConstantSDNode>(Op)->getValue();
    switch (Letter) {
    default: assert(0 && "Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      return (short)Value == (int)Value;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      return (short)Value == 0;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      return (Value >> 16) == 0;
    case 'M':  // "M" is a constant that is greater than 31.
      return Value > 31;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      return (int)Value > 0 && isPowerOf2_32(Value);
    case 'O':  // "O" is the constant zero.
      return Value == 0;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      return (short)-Value == (int)-Value;
    }
    break;
  }
  }

  // Handle standard constraint letters.
  return TargetLowering::isOperandValidForConstraint(Op, Letter);
}

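// For example, 'I' accepts -32768..32767, 'K' accepts 0..65535, 'J' and 'L'
// accept values such as 0x12340000 whose low 16 bits are zero, and 'P'
// accepts 32768 because its negation, -32768, fits in 16 signed bits.
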
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode.
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const {
  // PPC allows a sign-extended 16-bit immediate field.
  return V >= -(1 << 15) && V < (1 << 15);
}
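// For example, a D-form access such as "lwz r3, -4(r4)" encodes its offset
// in a signed 16-bit displacement field, so any value in [-32768, 32767] is
// a legal addressing-mode immediate here.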