//===-- SPUISelDAGToDAG.cpp - CellSPU pattern matching inst selector -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a pattern matching instruction selector for the Cell SPU,
// converting from a legalized dag to a SPU-target dag.
//
//===----------------------------------------------------------------------===//
#include "SPUTargetMachine.h"
#include "SPUISelLowering.h"
#include "SPUHazardRecognizers.h"
#include "SPUFrameInfo.h"
#include "SPURegisterNames.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Constants.h"
#include "llvm/GlobalValue.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
  //! ConstantSDNode predicate for i64 sign-extended, 10-bit immediates
  bool
  isI64IntS10Immediate(ConstantSDNode *CN)
  {
    return isS10Constant(CN->getSExtValue());
  }

  //! ConstantSDNode predicate for i32 sign-extended, 10-bit immediates
  bool
  isI32IntS10Immediate(ConstantSDNode *CN)
  {
    return isS10Constant(CN->getSExtValue());
  }

  //! ConstantSDNode predicate for i32 unsigned 10-bit immediate values
  bool
  isI32IntU10Immediate(ConstantSDNode *CN)
  {
    return isU10Constant(CN->getSExtValue());
  }

  //! ConstantSDNode predicate for i16 sign-extended, 10-bit immediate values
  bool
  isI16IntS10Immediate(ConstantSDNode *CN)
  {
    return isS10Constant(CN->getSExtValue());
  }

  //! SDNode predicate for i16 sign-extended, 10-bit immediate values
  bool
  isI16IntS10Immediate(SDNode *N)
  {
    ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
    return (CN != 0 && isI16IntS10Immediate(CN));
  }

  //! ConstantSDNode predicate for i16 unsigned 10-bit immediate values
  bool
  isI16IntU10Immediate(ConstantSDNode *CN)
  {
    return isU10Constant((short) CN->getZExtValue());
  }

  //! SDNode predicate for i16 unsigned 10-bit immediate values
  bool
  isI16IntU10Immediate(SDNode *N)
  {
    return (N->getOpcode() == ISD::Constant
            && isI16IntU10Immediate(cast<ConstantSDNode>(N)));
  }
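
  // For reference: a signed 10-bit immediate covers [-512, 511] and an
  // unsigned 10-bit immediate covers [0, 1023]; these predicates only check
  // that the ConstantSDNode's value lies in the corresponding range.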
  //! ConstantSDNode predicate for signed 16-bit values
  /*!
    \arg CN The constant SelectionDAG node holding the value
    \arg Imm The returned 16-bit value, if returning true

    This predicate tests the value in \a CN to see whether it can be
    represented as a 16-bit, sign-extended quantity. Returns true if
    this condition applies.
   */
  bool
  isIntS16Immediate(ConstantSDNode *CN, short &Imm)
  {
    EVT vt = CN->getValueType(0);
    Imm = (short) CN->getZExtValue();
    if (vt.getSimpleVT() >= MVT::i1 && vt.getSimpleVT() <= MVT::i16) {
      return true;
    } else if (vt == MVT::i32) {
      int32_t i_val = (int32_t) CN->getZExtValue();
      short s_val = (short) i_val;
      return i_val == s_val;
    } else {
      int64_t i_val = (int64_t) CN->getZExtValue();
      short s_val = (short) i_val;
      return i_val == s_val;
    }
  }

  //! SDNode predicate for signed 16-bit values.
  bool
  isIntS16Immediate(SDNode *N, short &Imm)
  {
    return (N->getOpcode() == ISD::Constant
            && isIntS16Immediate(cast<ConstantSDNode>(N), Imm));
  }
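
  // Example: for an i32 constant, isIntS16Immediate accepts -32768 (it
  // truncates to short and sign-extends back to the same value) but rejects
  // 32768, which would change when narrowed to 16 bits.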
  //! ConstantFPSDNode predicate for representing floats as 16-bit sign ext.
  bool
  isFPS16Immediate(ConstantFPSDNode *FPN, short &Imm)
  {
    EVT vt = FPN->getValueType(0);
    if (vt == MVT::f32) {
      int val = FloatToBits(FPN->getValueAPF().convertToFloat());
      int sval = (int) ((val << 16) >> 16);
      Imm = (short) val;
      return val == sval;
    }

    return false;
  }

  //! Predicate for an (SPUindirect (SPUhi ...), (SPUlo ...)) address pair
  bool
  isHighLow(const SDValue &Op)
  {
    return (Op.getOpcode() == SPUISD::IndirectAddr
            && ((Op.getOperand(0).getOpcode() == SPUISD::Hi
                 && Op.getOperand(1).getOpcode() == SPUISD::Lo)
                || (Op.getOperand(0).getOpcode() == SPUISD::Lo
                    && Op.getOperand(1).getOpcode() == SPUISD::Hi)));
  }
  //===------------------------------------------------------------------===//
  //! EVT to "useful stuff" mapping structure:

  struct valtype_map_s {
    EVT VT;
    unsigned ldresult_ins;      /// LDRESULT instruction (0 = undefined)
    bool ldresult_imm;          /// LDRESULT instruction requires immediate?
    unsigned lrinst;            /// LR instruction
  };

  const valtype_map_s valtype_map[] = {
    { MVT::i8,    SPU::ORBIr8,  true,  SPU::LRr8 },
    { MVT::i16,   SPU::ORHIr16, true,  SPU::LRr16 },
    { MVT::i32,   SPU::ORIr32,  true,  SPU::LRr32 },
    { MVT::i64,   SPU::ORr64,   false, SPU::LRr64 },
    { MVT::f32,   SPU::ORf32,   false, SPU::LRf32 },
    { MVT::f64,   SPU::ORf64,   false, SPU::LRf64 },
    // vector types... (sigh!)
    { MVT::v16i8, 0, false, SPU::LRv16i8 },
    { MVT::v8i16, 0, false, SPU::LRv8i16 },
    { MVT::v4i32, 0, false, SPU::LRv4i32 },
    { MVT::v2i64, 0, false, SPU::LRv2i64 },
    { MVT::v4f32, 0, false, SPU::LRv4f32 },
    { MVT::v2f64, 0, false, SPU::LRv2f64 }
  };

  const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);

  const valtype_map_s *getValueTypeMapEntry(EVT VT)
  {
    const valtype_map_s *retval = 0;
    for (size_t i = 0; i < n_valtype_map; ++i) {
      if (valtype_map[i].VT == VT) {
        retval = valtype_map + i;
        break;
      }
    }

#ifndef NDEBUG
    if (retval == 0) {
      std::string msg;
      raw_string_ostream Msg(msg);
      Msg << "SPUISelDAGToDAG.cpp: getValueTypeMapEntry returns NULL for "
          << VT.getEVTString();
      llvm_report_error(Msg.str());
    }
#endif

    return retval;
  }
  //! Generate the carry-generate shuffle mask.
  SDValue getCarryGenerateShufMask(SelectionDAG &DAG, DebugLoc dl) {
    SmallVector<SDValue, 16 > ShufBytes;

    // Create the shuffle mask for "rotating" the carry up one register slot
    // once the carry is generated.
    ShufBytes.push_back(DAG.getConstant(0x04050607, MVT::i32));
    ShufBytes.push_back(DAG.getConstant(0x80808080, MVT::i32));
    ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32));
    ShufBytes.push_back(DAG.getConstant(0x80808080, MVT::i32));

    return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                       &ShufBytes[0], ShufBytes.size());
  }
  //! Generate the borrow-generate shuffle mask.
  SDValue getBorrowGenerateShufMask(SelectionDAG &DAG, DebugLoc dl) {
    SmallVector<SDValue, 16 > ShufBytes;

    // Create the shuffle mask for "rotating" the borrow up one register slot
    // once the borrow is generated.
    ShufBytes.push_back(DAG.getConstant(0x04050607, MVT::i32));
    ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, MVT::i32));
    ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32));
    ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, MVT::i32));

    return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                       &ShufBytes[0], ShufBytes.size());
  }
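
  // Note on the control words above: in the SPU shufb encoding, a control
  // byte in 0x00-0x1f selects a byte of the two source registers, while a
  // byte of the form 10xxxxxx yields 0x00 and 110xxxxx yields 0xff. Thus
  // 0x04050607 and 0x0c0d0e0f pick up bytes 4-7 and 12-15 of the generated
  // carry/borrow vector ("rotating" it up one word slot), 0x80808080
  // zero-fills the remaining words for the carry case, and 0xc0c0c0c0
  // one-fills them for the borrow case.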
  //===------------------------------------------------------------------===//
  /// SPUDAGToDAGISel - Cell SPU-specific code to select SPU machine
  /// instructions for SelectionDAG operations.
  ///
  class SPUDAGToDAGISel :
    public SelectionDAGISel
  {
    SPUTargetMachine &TM;
    SPUTargetLowering &SPUtli;
    unsigned GlobalBaseReg;

  public:
    explicit SPUDAGToDAGISel(SPUTargetMachine &tm) :
      SelectionDAGISel(tm),
      TM(tm),
      SPUtli(*tm.getTargetLowering())
    { }

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      // Make sure we re-emit a set of the global base reg if necessary
      GlobalBaseReg = 0;
      SelectionDAGISel::runOnMachineFunction(MF);
      return true;
    }
    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(uint32_t Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getI64Imm - Return a target constant with the specified value, of type
    /// i64.
    inline SDValue getI64Imm(uint64_t Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i64);
    }

    /// getSmallIPtrImm - Return a target constant of pointer type.
    inline SDValue getSmallIPtrImm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, SPUtli.getPointerTy());
    }
    SDNode *emitBuildVector(SDValue build_vec) {
      EVT vecVT = build_vec.getValueType();
      EVT eltVT = vecVT.getVectorElementType();
      SDNode *bvNode = build_vec.getNode();
      DebugLoc dl = bvNode->getDebugLoc();

      // Check to see if this vector can be represented as a CellSPU immediate
      // constant by invoking all of the instruction selection predicates:
      if (((vecVT == MVT::v8i16) &&
           (SPU::get_vec_i16imm(bvNode, *CurDAG, MVT::i16).getNode() != 0)) ||
          ((vecVT == MVT::v4i32) &&
           ((SPU::get_vec_i16imm(bvNode, *CurDAG, MVT::i32).getNode() != 0) ||
            (SPU::get_ILHUvec_imm(bvNode, *CurDAG, MVT::i32).getNode() != 0) ||
            (SPU::get_vec_u18imm(bvNode, *CurDAG, MVT::i32).getNode() != 0) ||
            (SPU::get_v4i32_imm(bvNode, *CurDAG).getNode() != 0))) ||
          ((vecVT == MVT::v2i64) &&
           ((SPU::get_vec_i16imm(bvNode, *CurDAG, MVT::i64).getNode() != 0) ||
            (SPU::get_ILHUvec_imm(bvNode, *CurDAG, MVT::i64).getNode() != 0) ||
            (SPU::get_vec_u18imm(bvNode, *CurDAG, MVT::i64).getNode() != 0))))
        return Select(build_vec);

      // No, need to emit a constant pool spill:
      std::vector<Constant*> CV;

      for (size_t i = 0; i < build_vec.getNumOperands(); ++i) {
        ConstantSDNode *V = dyn_cast<ConstantSDNode>(build_vec.getOperand(i));
        CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue()));
      }

      Constant *CP = ConstantVector::get(CV);
      SDValue CPIdx = CurDAG->getConstantPool(CP, SPUtli.getPointerTy());
      unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
      SDValue CGPoolOffset =
        SPU::LowerConstantPool(CPIdx, *CurDAG,
                               SPUtli.getSPUTargetMachine());
      return SelectCode(CurDAG->getLoad(build_vec.getValueType(), dl,
                                        CurDAG->getEntryNode(), CGPoolOffset,
                                        PseudoSourceValue::getConstantPool(), 0,
                                        false, Alignment));
    }
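
    // emitBuildVector() above only falls back to the constant-pool load when
    // none of the immediate-form predicates (get_vec_i16imm, get_ILHUvec_imm,
    // get_vec_u18imm, get_v4i32_imm) can encode the build_vector directly.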
    /// Select - Convert the specified operand from a target-independent to a
    /// target-specific node if it hasn't already been changed.
    SDNode *Select(SDValue Op);

    //! Emit the instruction sequence for i64 shl
    SDNode *SelectSHLi64(SDValue &Op, EVT OpVT);

    //! Emit the instruction sequence for i64 srl
    SDNode *SelectSRLi64(SDValue &Op, EVT OpVT);

    //! Emit the instruction sequence for i64 sra
    SDNode *SelectSRAi64(SDValue &Op, EVT OpVT);

    //! Emit the necessary sequence for loading i64 constants:
    SDNode *SelectI64Constant(SDValue &Op, EVT OpVT, DebugLoc dl);

    //! Alternate instruction emit sequence for loading i64 constants
    SDNode *SelectI64Constant(uint64_t i64const, EVT OpVT, DebugLoc dl);

    //! Returns true if the address N is an A-form (local store) address
    bool SelectAFormAddr(SDValue Op, SDValue N, SDValue &Base,
                         SDValue &Index);

    //! D-form address predicate
    bool SelectDFormAddr(SDValue Op, SDValue N, SDValue &Base,
                         SDValue &Index);

    /// Alternate D-form address using i7 offset predicate
    bool SelectDForm2Addr(SDValue Op, SDValue N, SDValue &Disp,
                          SDValue &Base);

    /// D-form address selection workhorse
    bool DFormAddressPredicate(SDValue Op, SDValue N, SDValue &Disp,
                               SDValue &Base, int minOffset, int maxOffset);

    //! Address predicate if N can be expressed as an indexed [r+r] operation.
    bool SelectXFormAddr(SDValue Op, SDValue N, SDValue &Base,
                         SDValue &Index);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps) {
      SDValue Op0, Op1;
      switch (ConstraintCode) {
      default: return true;
      case 'm':   // memory
        if (!SelectDFormAddr(Op, Op, Op0, Op1)
            && !SelectAFormAddr(Op, Op, Op0, Op1))
          SelectXFormAddr(Op, Op, Op0, Op1);
        break;
      case 'o':   // offsetable
        if (!SelectDFormAddr(Op, Op, Op0, Op1)
            && !SelectAFormAddr(Op, Op, Op0, Op1)) {
          Op0 = Op;
          Op1 = getSmallIPtrImm(0);
        }
        break;
      case 'v':   // not offsetable
#if 1
        llvm_unreachable("InlineAsmMemoryOperand 'v' constraint not handled.");
#else
        SelectAddrIdxOnly(Op, Op, Op0, Op1);
#endif
        break;
      }

      OutOps.push_back(Op0);
      OutOps.push_back(Op1);
      return false;
    }

    /// InstructionSelect - This callback is invoked by
    /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
    virtual void InstructionSelect();

    virtual const char *getPassName() const {
      return "Cell SPU DAG->DAG Pattern Instruction Selection";
    }

    /// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
    /// this target when scheduling the DAG.
    virtual ScheduleHazardRecognizer *CreateTargetHazardRecognizer() {
      const TargetInstrInfo *II = TM.getInstrInfo();
      assert(II && "No InstrInfo?");
      return new SPUHazardRecognizer(*II);
    }

    // Include the pieces autogenerated from the target description.
#include "SPUGenDAGISel.inc"
  };

}
/// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
void
SPUDAGToDAGISel::InstructionSelect()
{
  // Select target instructions for the DAG.
  SelectRoot(*CurDAG);
  CurDAG->RemoveDeadNodes();
}
/*!
 \arg Op The ISD instruction operand
 \arg N The address to be tested
 \arg Base The base address
 \arg Index The base address index
 */
bool
SPUDAGToDAGISel::SelectAFormAddr(SDValue Op, SDValue N, SDValue &Base,
                                 SDValue &Index) {
  // These match the addr256k operand type:
  EVT OffsVT = MVT::i16;
  SDValue Zero = CurDAG->getTargetConstant(0, OffsVT);

  switch (N.getOpcode()) {
  case ISD::Constant:
  case ISD::ConstantPool:
  case ISD::GlobalAddress:
    llvm_report_error("SPU SelectAFormAddr: Constant/Pool/Global not lowered.");
    /*NOTREACHED*/

  case ISD::TargetConstant:
  case ISD::TargetGlobalAddress:
  case ISD::TargetJumpTable:
    llvm_report_error("SPUSelectAFormAddr: Target Constant/Pool/Global "
                      "not wrapped as A-form address.");
    /*NOTREACHED*/

  case SPUISD::AFormAddr:
    // Just load from memory if there's only a single use of the location,
    // otherwise, this will get handled below with D-form offset addresses
    if (N.hasOneUse()) {
      SDValue Op0 = N.getOperand(0);
      switch (Op0.getOpcode()) {
      case ISD::TargetConstantPool:
      case ISD::TargetJumpTable:
        Base = Op0;
        Index = Zero;
        return true;

      case ISD::TargetGlobalAddress: {
        GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op0);
        GlobalValue *GV = GSDN->getGlobal();
        if (GV->getAlignment() == 16) {
          Base = Op0;
          Index = Zero;
          return true;
        }
        break;
      }
      }
    }
    break;
  }
  return false;
}
bool
SPUDAGToDAGISel::SelectDForm2Addr(SDValue Op, SDValue N, SDValue &Disp,
                                  SDValue &Base) {
  const int minDForm2Offset = -(1 << 7);
  const int maxDForm2Offset = (1 << 7) - 1;
  return DFormAddressPredicate(Op, N, Disp, Base, minDForm2Offset,
                               maxDForm2Offset);
}
/*!
  \arg Op The ISD instruction (ignored)
  \arg N The address to be tested
  \arg Base Base address register/pointer
  \arg Index Base address index

  Examine the input address by a base register plus a signed 10-bit
  displacement, [r+I10] (D-form address).

  \return true if \a N is a D-form address with \a Base and \a Index set
  to non-empty SDValue instances.
*/
bool
SPUDAGToDAGISel::SelectDFormAddr(SDValue Op, SDValue N, SDValue &Base,
                                 SDValue &Index) {
  return DFormAddressPredicate(Op, N, Base, Index,
                               SPUFrameInfo::minFrameOffset(),
                               SPUFrameInfo::maxFrameOffset());
}
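
// DFormAddressPredicate is shared by SelectDFormAddr and SelectDForm2Addr;
// the two callers differ only in the displacement range they allow:
// SelectDFormAddr passes SPUFrameInfo::minFrameOffset()/maxFrameOffset(),
// while SelectDForm2Addr narrows the range to a signed 7-bit offset.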
bool
SPUDAGToDAGISel::DFormAddressPredicate(SDValue Op, SDValue N, SDValue &Base,
                                       SDValue &Index, int minOffset,
                                       int maxOffset) {
  unsigned Opc = N.getOpcode();
  EVT PtrTy = SPUtli.getPointerTy();

  if (Opc == ISD::FrameIndex) {
    // Stack frame index must be less than 512 (divided by 16):
    FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N);
    int FI = int(FIN->getIndex());
    DEBUG(errs() << "SelectDFormAddr: ISD::FrameIndex = "
                 << FI << "\n");
    if (SPUFrameInfo::FItoStackOffset(FI) < maxOffset) {
      Base = CurDAG->getTargetConstant(0, PtrTy);
      Index = CurDAG->getTargetFrameIndex(FI, PtrTy);
      return true;
    }
  } else if (Opc == ISD::ADD) {
    // Generated by getelementptr
    const SDValue Op0 = N.getOperand(0);
    const SDValue Op1 = N.getOperand(1);

    if ((Op0.getOpcode() == SPUISD::Hi && Op1.getOpcode() == SPUISD::Lo)
        || (Op1.getOpcode() == SPUISD::Hi && Op0.getOpcode() == SPUISD::Lo)) {
      Base = CurDAG->getTargetConstant(0, PtrTy);
      Index = N;
      return true;
    } else if (Op1.getOpcode() == ISD::Constant
               || Op1.getOpcode() == ISD::TargetConstant) {
      ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1);
      int32_t offset = int32_t(CN->getSExtValue());

      if (Op0.getOpcode() == ISD::FrameIndex) {
        FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op0);
        int FI = int(FIN->getIndex());
        DEBUG(errs() << "SelectDFormAddr: ISD::ADD offset = " << offset
                     << " frame index = " << FI << "\n");

        if (SPUFrameInfo::FItoStackOffset(FI) < maxOffset) {
          Base = CurDAG->getTargetConstant(offset, PtrTy);
          Index = CurDAG->getTargetFrameIndex(FI, PtrTy);
          return true;
        }
      } else if (offset > minOffset && offset < maxOffset) {
        Base = CurDAG->getTargetConstant(offset, PtrTy);
        Index = Op0;
        return true;
      }
    } else if (Op0.getOpcode() == ISD::Constant
               || Op0.getOpcode() == ISD::TargetConstant) {
      ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op0);
      int32_t offset = int32_t(CN->getSExtValue());

      if (Op1.getOpcode() == ISD::FrameIndex) {
        FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op1);
        int FI = int(FIN->getIndex());
        DEBUG(errs() << "SelectDFormAddr: ISD::ADD offset = " << offset
                     << " frame index = " << FI << "\n");

        if (SPUFrameInfo::FItoStackOffset(FI) < maxOffset) {
          Base = CurDAG->getTargetConstant(offset, PtrTy);
          Index = CurDAG->getTargetFrameIndex(FI, PtrTy);
          return true;
        }
      } else if (offset > minOffset && offset < maxOffset) {
        Base = CurDAG->getTargetConstant(offset, PtrTy);
        Index = Op1;
        return true;
      }
    }
  } else if (Opc == SPUISD::IndirectAddr) {
    // Indirect with constant offset -> D-Form address
    const SDValue Op0 = N.getOperand(0);
    const SDValue Op1 = N.getOperand(1);

    if (Op0.getOpcode() == SPUISD::Hi
        && Op1.getOpcode() == SPUISD::Lo) {
      // (SPUindirect (SPUhi <arg>, 0), (SPUlo <arg>, 0))
      Base = CurDAG->getTargetConstant(0, PtrTy);
      Index = N;
      return true;
    } else if (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1)) {
      int32_t offset = 0;
      SDValue idxOp;

      if (isa<ConstantSDNode>(Op1)) {
        ConstantSDNode *CN = cast<ConstantSDNode>(Op1);
        offset = int32_t(CN->getSExtValue());
        idxOp = Op0;
      } else if (isa<ConstantSDNode>(Op0)) {
        ConstantSDNode *CN = cast<ConstantSDNode>(Op0);
        offset = int32_t(CN->getSExtValue());
        idxOp = Op1;
      }

      if (offset >= minOffset && offset <= maxOffset) {
        Base = CurDAG->getTargetConstant(offset, PtrTy);
        Index = idxOp;
        return true;
      }
    }
  } else if (Opc == SPUISD::AFormAddr) {
    Base = CurDAG->getTargetConstant(0, N.getValueType());
    Index = N;
    return true;
  } else if (Opc == SPUISD::LDRESULT) {
    Base = CurDAG->getTargetConstant(0, N.getValueType());
    Index = N;
    return true;
  } else if (Opc == ISD::Register || Opc == ISD::CopyFromReg) {
    unsigned OpOpc = Op.getOpcode();

    if (OpOpc == ISD::STORE || OpOpc == ISD::LOAD) {
      // Direct load/store without getelementptr
      SDValue Addr, Offs;

      // Get the register from CopyFromReg
      if (Opc == ISD::CopyFromReg)
        Addr = N.getOperand(1);
      else
        Addr = N;                       // Register

      Offs = ((OpOpc == ISD::STORE) ? Op.getOperand(3) : Op.getOperand(2));

      if (Offs.getOpcode() == ISD::Constant || Offs.getOpcode() == ISD::UNDEF) {
        if (Offs.getOpcode() == ISD::UNDEF)
          Offs = CurDAG->getTargetConstant(0, Offs.getValueType());

        Base = Offs;
        Index = Addr;
        return true;
      }
    } else {
      /* If otherwise unadorned, default to D-form address with 0 offset: */
      if (Opc == ISD::CopyFromReg) {
        Index = N.getOperand(1);
      } else {
        Index = N;
      }

      Base = CurDAG->getTargetConstant(0, Index.getValueType());
      return true;
    }
  }

  return false;
}
/*!
  \arg Op The ISD instruction operand
  \arg N The address operand
  \arg Base The base pointer operand
  \arg Index The offset/index operand

  If the address \a N can be expressed as an A-form or D-form address, returns
  false. Otherwise, creates two operands, Base and Index, that will become the
  (r)(r) X-form address.
*/
bool
SPUDAGToDAGISel::SelectXFormAddr(SDValue Op, SDValue N, SDValue &Base,
                                 SDValue &Index) {
  if (!SelectAFormAddr(Op, N, Base, Index)
      && !SelectDFormAddr(Op, N, Base, Index)) {
    // If the address is neither A-form nor D-form, punt and use an X-form
    // address:
    Base = N.getOperand(1);
    Index = N.getOperand(0);
    return true;
  }

  return false;
}
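
// Select() below custom-selects the node kinds that need CellSPU-specific
// expansion (frame indices, i64 constants, i64 zero/any extension, the
// 64-bit add/sub/mul marker nodes, truncating right shifts, i64 shifts,
// f64/v2f64 fneg/fabs, LDRESULT and SPUISD::IndirectAddr); everything else
// is handed to the TableGen-generated SelectCode().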
//! Convert the operand from a target-independent to a target-specific node
SDNode *
SPUDAGToDAGISel::Select(SDValue Op) {
  SDNode *N = Op.getNode();
  unsigned Opc = N->getOpcode();
  int n_ops = -1;
  unsigned NewOpc;
  EVT OpVT = Op.getValueType();
  SDValue Ops[8];
  DebugLoc dl = N->getDebugLoc();

  if (N->isMachineOpcode()) {
    return NULL;   // Already selected.
  }

  if (Opc == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, Op.getValueType());
    SDValue Imm0 = CurDAG->getTargetConstant(0, Op.getValueType());

    if (FI < 128) {
      NewOpc = SPU::AIr32;
      Ops[0] = TFI;
      Ops[1] = Imm0;
      n_ops = 2;
    } else {
      NewOpc = SPU::Ar32;
      Ops[0] = CurDAG->getRegister(SPU::R1, Op.getValueType());
      Ops[1] = SDValue(CurDAG->getTargetNode(SPU::ILAr32, dl, Op.getValueType(),
                                             TFI, Imm0), 0);
      n_ops = 2;
    }
  } else if (Opc == ISD::Constant && OpVT == MVT::i64) {
    // Catch the i64 constants that end up here. Note: The backend doesn't
    // attempt to legalize the constant (it's useless because DAGCombiner
    // will insert 64-bit constants and we can't stop it).
    return SelectI64Constant(Op, OpVT, Op.getDebugLoc());
  } else if ((Opc == ISD::ZERO_EXTEND || Opc == ISD::ANY_EXTEND)
             && OpVT == MVT::i64) {
    SDValue Op0 = Op.getOperand(0);
    EVT Op0VT = Op0.getValueType();
    EVT Op0VecVT = EVT::getVectorVT(*CurDAG->getContext(),
                                    Op0VT, (128 / Op0VT.getSizeInBits()));
    EVT OpVecVT = EVT::getVectorVT(*CurDAG->getContext(),
                                   OpVT, (128 / OpVT.getSizeInBits()));
    SDValue shufMask;

    switch (Op0VT.getSimpleVT().SimpleTy) {
    default:
      llvm_report_error("CellSPU Select: Unhandled zero/any extend EVT");
      /*NOTREACHED*/
    case MVT::i32:
      shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                 CurDAG->getConstant(0x80808080, MVT::i32),
                                 CurDAG->getConstant(0x00010203, MVT::i32),
                                 CurDAG->getConstant(0x80808080, MVT::i32),
                                 CurDAG->getConstant(0x08090a0b, MVT::i32));
      break;

    case MVT::i16:
      shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                 CurDAG->getConstant(0x80808080, MVT::i32),
                                 CurDAG->getConstant(0x80800203, MVT::i32),
                                 CurDAG->getConstant(0x80808080, MVT::i32),
                                 CurDAG->getConstant(0x80800a0b, MVT::i32));
      break;

    case MVT::i8:
      shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                 CurDAG->getConstant(0x80808080, MVT::i32),
                                 CurDAG->getConstant(0x80808003, MVT::i32),
                                 CurDAG->getConstant(0x80808080, MVT::i32),
                                 CurDAG->getConstant(0x8080800b, MVT::i32));
      break;
    }

    SDNode *shufMaskLoad = emitBuildVector(shufMask);
    SDNode *PromoteScalar =
      SelectCode(CurDAG->getNode(SPUISD::PREFSLOT2VEC, dl, Op0VecVT, Op0));

    SDValue zextShuffle =
      CurDAG->getNode(SPUISD::SHUFB, dl, OpVecVT,
                      SDValue(PromoteScalar, 0),
                      SDValue(PromoteScalar, 0),
                      SDValue(shufMaskLoad, 0));

    // N.B.: BIT_CONVERT replaces and updates the zextShuffle node, so we
    // re-use it in the VEC2PREFSLOT selection without needing to explicitly
    // call SelectCode (it's already done for us.)
    SelectCode(CurDAG->getNode(ISD::BIT_CONVERT, dl, OpVecVT, zextShuffle));
    return SelectCode(CurDAG->getNode(SPUISD::VEC2PREFSLOT, dl, OpVT,
                                      zextShuffle));
  } else if (Opc == ISD::ADD && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {
    SDNode *CGLoad =
      emitBuildVector(getCarryGenerateShufMask(*CurDAG, dl));

    return SelectCode(CurDAG->getNode(SPUISD::ADD64_MARKER, dl, OpVT,
                                      Op.getOperand(0), Op.getOperand(1),
                                      SDValue(CGLoad, 0)));
  } else if (Opc == ISD::SUB && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {
    SDNode *CGLoad =
      emitBuildVector(getBorrowGenerateShufMask(*CurDAG, dl));

    return SelectCode(CurDAG->getNode(SPUISD::SUB64_MARKER, dl, OpVT,
                                      Op.getOperand(0), Op.getOperand(1),
                                      SDValue(CGLoad, 0)));
  } else if (Opc == ISD::MUL && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {
    SDNode *CGLoad =
      emitBuildVector(getCarryGenerateShufMask(*CurDAG, dl));

    return SelectCode(CurDAG->getNode(SPUISD::MUL64_MARKER, dl, OpVT,
                                      Op.getOperand(0), Op.getOperand(1),
                                      SDValue(CGLoad, 0)));
  } else if (Opc == ISD::TRUNCATE) {
    SDValue Op0 = Op.getOperand(0);
    if ((Op0.getOpcode() == ISD::SRA || Op0.getOpcode() == ISD::SRL)
        && OpVT == MVT::i32
        && Op0.getValueType() == MVT::i64) {
      // Catch (truncate:i32 ([sra|srl]:i64 arg, c), where c >= 32
      //
      // Take advantage of the fact that the upper 32 bits are in the
      // i32 preferred slot and avoid shuffle gymnastics:
      ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
      if (CN != 0) {
        unsigned shift_amt = unsigned(CN->getZExtValue());

        if (shift_amt >= 32) {
          SDNode *hi32 =
            CurDAG->getTargetNode(SPU::ORr32_r64, dl, OpVT,
                                  Op0.getOperand(0));

          shift_amt -= 32;
          if (shift_amt > 0) {
            // Take care of the additional shift, if present:
            SDValue shift = CurDAG->getTargetConstant(shift_amt, MVT::i32);
            unsigned Opc = SPU::ROTMAIr32_i32;

            if (Op0.getOpcode() == ISD::SRL)
              Opc = SPU::ROTMIr32_i32;

            hi32 = CurDAG->getTargetNode(Opc, dl, OpVT, SDValue(hi32, 0),
                                         shift);
          }

          return hi32;
        }
      }
    }
  } else if (Opc == ISD::SHL) {
    if (OpVT == MVT::i64) {
      return SelectSHLi64(Op, OpVT);
    }
  } else if (Opc == ISD::SRL) {
    if (OpVT == MVT::i64) {
      return SelectSRLi64(Op, OpVT);
    }
  } else if (Opc == ISD::SRA) {
    if (OpVT == MVT::i64) {
      return SelectSRAi64(Op, OpVT);
    }
  } else if (Opc == ISD::FNEG
             && (OpVT == MVT::f64 || OpVT == MVT::v2f64)) {
    DebugLoc dl = Op.getDebugLoc();
    // Check if the pattern is a special form of DFNMS:
    // (fneg (fsub (fmul R64FP:$rA, R64FP:$rB), R64FP:$rC))
    SDValue Op0 = Op.getOperand(0);
    if (Op0.getOpcode() == ISD::FSUB) {
      SDValue Op00 = Op0.getOperand(0);
      if (Op00.getOpcode() == ISD::FMUL) {
        unsigned Opc = SPU::DFNMSf64;
        if (OpVT == MVT::v2f64)
          Opc = SPU::DFNMSv2f64;

        return CurDAG->getTargetNode(Opc, dl, OpVT,
                                     Op00.getOperand(0), Op00.getOperand(1),
                                     Op0.getOperand(1));
      }
    }

    SDValue negConst = CurDAG->getConstant(0x8000000000000000ULL, MVT::i64);
    SDNode *signMask = 0;
    unsigned Opc = SPU::XORfneg64;

    if (OpVT == MVT::f64) {
      signMask = SelectI64Constant(negConst, MVT::i64, dl);
    } else if (OpVT == MVT::v2f64) {
      Opc = SPU::XORfnegvec;
      signMask = emitBuildVector(CurDAG->getNode(ISD::BUILD_VECTOR, dl,
                                                 MVT::v2i64,
                                                 negConst, negConst));
    }

    return CurDAG->getTargetNode(Opc, dl, OpVT,
                                 Op.getOperand(0), SDValue(signMask, 0));
  } else if (Opc == ISD::FABS) {
    if (OpVT == MVT::f64) {
      SDNode *signMask = SelectI64Constant(0x7fffffffffffffffULL, MVT::i64, dl);
      return CurDAG->getTargetNode(SPU::ANDfabs64, dl, OpVT,
                                   Op.getOperand(0), SDValue(signMask, 0));
    } else if (OpVT == MVT::v2f64) {
      SDValue absConst = CurDAG->getConstant(0x7fffffffffffffffULL, MVT::i64);
      SDValue absVec = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
                                       absConst, absConst);
      SDNode *signMask = emitBuildVector(absVec);
      return CurDAG->getTargetNode(SPU::ANDfabsvec, dl, OpVT,
                                   Op.getOperand(0), SDValue(signMask, 0));
    }
  } else if (Opc == SPUISD::LDRESULT) {
    // Custom select instructions for LDRESULT
    EVT VT = N->getValueType(0);
    SDValue Arg = N->getOperand(0);
    SDValue Chain = N->getOperand(1);
    SDNode *Result;
    const valtype_map_s *vtm = getValueTypeMapEntry(VT);

    if (vtm->ldresult_ins == 0) {
      std::string msg;
      raw_string_ostream Msg(msg);
      Msg << "LDRESULT for unsupported type: "
          << VT.getEVTString();
      llvm_report_error(Msg.str());
    }

    Opc = vtm->ldresult_ins;
    if (vtm->ldresult_imm) {
      SDValue Zero = CurDAG->getTargetConstant(0, VT);

      Result = CurDAG->getTargetNode(Opc, dl, VT, MVT::Other, Arg, Zero, Chain);
    } else {
      Result = CurDAG->getTargetNode(Opc, dl, VT, MVT::Other, Arg, Arg, Chain);
    }

    return Result;
  } else if (Opc == SPUISD::IndirectAddr) {
    // Look at the operands: SelectCode() will catch the cases that aren't
    // specifically handled here.
    //
    // SPUInstrInfo catches the following patterns:
    // (SPUindirect (SPUhi ...), (SPUlo ...))
    // (SPUindirect $sp, imm)
    EVT VT = Op.getValueType();
    SDValue Op0 = N->getOperand(0);
    SDValue Op1 = N->getOperand(1);
    RegisterSDNode *RN;

    if ((Op0.getOpcode() != SPUISD::Hi && Op1.getOpcode() != SPUISD::Lo)
        || (Op0.getOpcode() == ISD::Register
            && ((RN = dyn_cast<RegisterSDNode>(Op0.getNode())) != 0
                && RN->getReg() != SPU::R1))) {
      NewOpc = SPU::Ar32;
      if (Op1.getOpcode() == ISD::Constant) {
        ConstantSDNode *CN = cast<ConstantSDNode>(Op1);
        Op1 = CurDAG->getTargetConstant(CN->getSExtValue(), VT);
        NewOpc = (isI32IntS10Immediate(CN) ? SPU::AIr32 : SPU::Ar32);
      }
      Ops[0] = Op0;
      Ops[1] = Op1;
      n_ops = 2;
    }
  }

  if (n_ops > 0) {
    if (N->hasOneUse())
      return CurDAG->SelectNodeTo(N, NewOpc, OpVT, Ops, n_ops);
    else
      return CurDAG->getTargetNode(NewOpc, dl, OpVT, Ops, n_ops);
  } else
    return SelectCode(Op);
}
/*!
 * Emit the instruction sequence for i64 left shifts. The basic algorithm
 * is to fill the bottom two word slots with zeros so that zeros are shifted
 * in as the entire quadword is shifted left.
 *
 * \note This code could also be used to implement v2i64 shl.
 *
 * @param Op The shl operand
 * @param OpVT Op's machine value type (doesn't need to be passed, but
 * makes life easier.)
 * @return The SDNode with the entire instruction sequence
 */
SDNode *
SPUDAGToDAGISel::SelectSHLi64(SDValue &Op, EVT OpVT) {
  SDValue Op0 = Op.getOperand(0);
  EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
                               OpVT, (128 / OpVT.getSizeInBits()));
  SDValue ShiftAmt = Op.getOperand(1);
  EVT ShiftAmtVT = ShiftAmt.getValueType();
  SDNode *VecOp0, *SelMask, *ZeroFill, *Shift = 0;
  SDValue SelMaskVal;
  DebugLoc dl = Op.getDebugLoc();

  VecOp0 = CurDAG->getTargetNode(SPU::ORv2i64_i64, dl, VecVT, Op0);
  SelMaskVal = CurDAG->getTargetConstant(0xff00ULL, MVT::i16);
  SelMask = CurDAG->getTargetNode(SPU::FSMBIv2i64, dl, VecVT, SelMaskVal);
  ZeroFill = CurDAG->getTargetNode(SPU::ILv2i64, dl, VecVT,
                                   CurDAG->getTargetConstant(0, OpVT));
  VecOp0 = CurDAG->getTargetNode(SPU::SELBv2i64, dl, VecVT,
                                 SDValue(ZeroFill, 0),
                                 SDValue(VecOp0, 0),
                                 SDValue(SelMask, 0));

  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(ShiftAmt)) {
    unsigned bytes = unsigned(CN->getZExtValue()) >> 3;
    unsigned bits = unsigned(CN->getZExtValue()) & 7;

    if (bytes > 0) {
      Shift =
        CurDAG->getTargetNode(SPU::SHLQBYIv2i64, dl, VecVT,
                              SDValue(VecOp0, 0),
                              CurDAG->getTargetConstant(bytes, ShiftAmtVT));
    }

    if (bits > 0) {
      Shift =
        CurDAG->getTargetNode(SPU::SHLQBIIv2i64, dl, VecVT,
                              SDValue((Shift != 0 ? Shift : VecOp0), 0),
                              CurDAG->getTargetConstant(bits, ShiftAmtVT));
    }
  } else {
    SDNode *Bytes =
      CurDAG->getTargetNode(SPU::ROTMIr32, dl, ShiftAmtVT,
                            ShiftAmt,
                            CurDAG->getTargetConstant(3, ShiftAmtVT));
    SDNode *Bits =
      CurDAG->getTargetNode(SPU::ANDIr32, dl, ShiftAmtVT,
                            ShiftAmt,
                            CurDAG->getTargetConstant(7, ShiftAmtVT));

    Shift =
      CurDAG->getTargetNode(SPU::SHLQBYv2i64, dl, VecVT,
                            SDValue(VecOp0, 0), SDValue(Bytes, 0));
    Shift =
      CurDAG->getTargetNode(SPU::SHLQBIv2i64, dl, VecVT,
                            SDValue(Shift, 0), SDValue(Bits, 0));
  }

  return CurDAG->getTargetNode(SPU::ORi64_v2i64, dl, OpVT, SDValue(Shift, 0));
}
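
// For the variable-amount logical right shift below, the byte and bit counts
// are run through SFIr32 (subtract-from-immediate 0) before being fed to
// ROTQMBY/ROTQMBI, since those "rotate and mask" forms expect the shift
// amount in negated form.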
/*!
 * Emit the instruction sequence for i64 logical right shifts.
 *
 * @param Op The srl operand
 * @param OpVT Op's machine value type (doesn't need to be passed, but
 * makes life easier.)
 * @return The SDNode with the entire instruction sequence
 */
SDNode *
SPUDAGToDAGISel::SelectSRLi64(SDValue &Op, EVT OpVT) {
  SDValue Op0 = Op.getOperand(0);
  EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
                               OpVT, (128 / OpVT.getSizeInBits()));
  SDValue ShiftAmt = Op.getOperand(1);
  EVT ShiftAmtVT = ShiftAmt.getValueType();
  SDNode *VecOp0, *Shift = 0;
  DebugLoc dl = Op.getDebugLoc();

  VecOp0 = CurDAG->getTargetNode(SPU::ORv2i64_i64, dl, VecVT, Op0);

  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(ShiftAmt)) {
    unsigned bytes = unsigned(CN->getZExtValue()) >> 3;
    unsigned bits = unsigned(CN->getZExtValue()) & 7;

    if (bytes > 0) {
      Shift =
        CurDAG->getTargetNode(SPU::ROTQMBYIv2i64, dl, VecVT,
                              SDValue(VecOp0, 0),
                              CurDAG->getTargetConstant(bytes, ShiftAmtVT));
    }

    if (bits > 0) {
      Shift =
        CurDAG->getTargetNode(SPU::ROTQMBIIv2i64, dl, VecVT,
                              SDValue((Shift != 0 ? Shift : VecOp0), 0),
                              CurDAG->getTargetConstant(bits, ShiftAmtVT));
    }
  } else {
    SDNode *Bytes =
      CurDAG->getTargetNode(SPU::ROTMIr32, dl, ShiftAmtVT,
                            ShiftAmt,
                            CurDAG->getTargetConstant(3, ShiftAmtVT));
    SDNode *Bits =
      CurDAG->getTargetNode(SPU::ANDIr32, dl, ShiftAmtVT,
                            ShiftAmt,
                            CurDAG->getTargetConstant(7, ShiftAmtVT));

    // Ensure that the shift amounts are negated!
    Bytes = CurDAG->getTargetNode(SPU::SFIr32, dl, ShiftAmtVT,
                                  SDValue(Bytes, 0),
                                  CurDAG->getTargetConstant(0, ShiftAmtVT));

    Bits = CurDAG->getTargetNode(SPU::SFIr32, dl, ShiftAmtVT,
                                 SDValue(Bits, 0),
                                 CurDAG->getTargetConstant(0, ShiftAmtVT));

    Shift =
      CurDAG->getTargetNode(SPU::ROTQMBYv2i64, dl, VecVT,
                            SDValue(VecOp0, 0), SDValue(Bytes, 0));
    Shift =
      CurDAG->getTargetNode(SPU::ROTQMBIv2i64, dl, VecVT,
                            SDValue(Shift, 0), SDValue(Bits, 0));
  }

  return CurDAG->getTargetNode(SPU::ORi64_v2i64, dl, OpVT, SDValue(Shift, 0));
}
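
// SelectSRAi64 below adds one extra step before rotating: it replicates the
// sign of the upper 32 bits across the upper half of each doubleword (ROTMAI
// by 31, then FSM64r32) and uses SELB with the 0xff00 FSMBI mask to merge
// that sign mask with the original value, so sign bits are what get rotated
// into the high half.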
/*!
 * Emit the instruction sequence for i64 arithmetic right shifts.
 *
 * @param Op The sra operand
 * @param OpVT Op's machine value type (doesn't need to be passed, but
 * makes life easier.)
 * @return The SDNode with the entire instruction sequence
 */
SDNode *
SPUDAGToDAGISel::SelectSRAi64(SDValue &Op, EVT OpVT) {
  // Promote Op0 to vector
  EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
                               OpVT, (128 / OpVT.getSizeInBits()));
  SDValue ShiftAmt = Op.getOperand(1);
  EVT ShiftAmtVT = ShiftAmt.getValueType();
  DebugLoc dl = Op.getDebugLoc();

  SDNode *VecOp0 =
    CurDAG->getTargetNode(SPU::ORv2i64_i64, dl, VecVT, Op.getOperand(0));

  SDValue SignRotAmt = CurDAG->getTargetConstant(31, ShiftAmtVT);
  SDNode *SignRot =
    CurDAG->getTargetNode(SPU::ROTMAIv2i64_i32, dl, MVT::v2i64,
                          SDValue(VecOp0, 0), SignRotAmt);
  SDNode *UpperHalfSign =
    CurDAG->getTargetNode(SPU::ORi32_v4i32, dl, MVT::i32, SDValue(SignRot, 0));

  SDNode *UpperHalfSignMask =
    CurDAG->getTargetNode(SPU::FSM64r32, dl, VecVT, SDValue(UpperHalfSign, 0));
  SDNode *UpperLowerMask =
    CurDAG->getTargetNode(SPU::FSMBIv2i64, dl, VecVT,
                          CurDAG->getTargetConstant(0xff00ULL, MVT::i16));
  SDNode *UpperLowerSelect =
    CurDAG->getTargetNode(SPU::SELBv2i64, dl, VecVT,
                          SDValue(UpperHalfSignMask, 0),
                          SDValue(VecOp0, 0),
                          SDValue(UpperLowerMask, 0));

  SDNode *Shift = 0;

  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(ShiftAmt)) {
    unsigned bytes = unsigned(CN->getZExtValue()) >> 3;
    unsigned bits = unsigned(CN->getZExtValue()) & 7;

    if (bytes > 0) {
      bytes = 31 - bytes;
      Shift =
        CurDAG->getTargetNode(SPU::ROTQBYIv2i64, dl, VecVT,
                              SDValue(UpperLowerSelect, 0),
                              CurDAG->getTargetConstant(bytes, ShiftAmtVT));
    }

    if (bits > 0) {
      bits = 8 - bits;
      Shift =
        CurDAG->getTargetNode(SPU::ROTQBIIv2i64, dl, VecVT,
                              SDValue((Shift != 0 ? Shift : UpperLowerSelect), 0),
                              CurDAG->getTargetConstant(bits, ShiftAmtVT));
    }
  } else {
    SDNode *NegShift =
      CurDAG->getTargetNode(SPU::SFIr32, dl, ShiftAmtVT,
                            ShiftAmt, CurDAG->getTargetConstant(0, ShiftAmtVT));

    Shift =
      CurDAG->getTargetNode(SPU::ROTQBYBIv2i64_r32, dl, VecVT,
                            SDValue(UpperLowerSelect, 0), SDValue(NegShift, 0));
    Shift =
      CurDAG->getTargetNode(SPU::ROTQBIv2i64, dl, VecVT,
                            SDValue(Shift, 0), SDValue(NegShift, 0));
  }

  return CurDAG->getTargetNode(SPU::ORi64_v2i64, dl, OpVT, SDValue(Shift, 0));
}
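
// i64 constants are materialized by reusing the v2i64 splat lowering
// (SPU::LowerV2I64Splat) and then moving the preferred slot back into a
// 64-bit register with ORi64_v2i64.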
/*!
 Do the magic necessary to load an i64 constant
 */
SDNode *SPUDAGToDAGISel::SelectI64Constant(SDValue & Op, EVT OpVT,
                                           DebugLoc dl) {
  ConstantSDNode *CN = cast<ConstantSDNode>(Op.getNode());
  return SelectI64Constant(CN->getZExtValue(), OpVT, dl);
}
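
// LowerV2I64Splat can hand back one of three node shapes, each of which the
// overload below parses: a BIT_CONVERT of a splatted value (upper and lower
// words identical), a SHUFB combining two splatted halves, or a plain
// BUILD_VECTOR; anything else is reported as an error.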
SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
                                           DebugLoc dl) {
  EVT OpVecVT = EVT::getVectorVT(*CurDAG->getContext(), OpVT, 2);
  SDValue i64vec =
    SPU::LowerV2I64Splat(OpVecVT, *CurDAG, Value64, dl);

  // Here's where it gets interesting, because we have to parse out the
  // subtree handed back in i64vec:

  if (i64vec.getOpcode() == ISD::BIT_CONVERT) {
    // The degenerate case where the upper and lower bits in the splat are
    // identical:
    SDValue Op0 = i64vec.getOperand(0);

    ReplaceUses(i64vec, Op0);
    return CurDAG->getTargetNode(SPU::ORi64_v2i64, dl, OpVT,
                                 SDValue(emitBuildVector(Op0), 0));
  } else if (i64vec.getOpcode() == SPUISD::SHUFB) {
    SDValue lhs = i64vec.getOperand(0);
    SDValue rhs = i64vec.getOperand(1);
    SDValue shufmask = i64vec.getOperand(2);

    if (lhs.getOpcode() == ISD::BIT_CONVERT) {
      ReplaceUses(lhs, lhs.getOperand(0));
      lhs = lhs.getOperand(0);
    }

    SDNode *lhsNode = (lhs.getNode()->isMachineOpcode()
                       ? lhs.getNode()
                       : emitBuildVector(lhs));

    if (rhs.getOpcode() == ISD::BIT_CONVERT) {
      ReplaceUses(rhs, rhs.getOperand(0));
      rhs = rhs.getOperand(0);
    }

    SDNode *rhsNode = (rhs.getNode()->isMachineOpcode()
                       ? rhs.getNode()
                       : emitBuildVector(rhs));

    if (shufmask.getOpcode() == ISD::BIT_CONVERT) {
      ReplaceUses(shufmask, shufmask.getOperand(0));
      shufmask = shufmask.getOperand(0);
    }

    SDNode *shufMaskNode = (shufmask.getNode()->isMachineOpcode()
                            ? shufmask.getNode()
                            : emitBuildVector(shufmask));

    SDNode *shufNode =
      Select(CurDAG->getNode(SPUISD::SHUFB, dl, OpVecVT,
                             SDValue(lhsNode, 0), SDValue(rhsNode, 0),
                             SDValue(shufMaskNode, 0)));

    return CurDAG->getTargetNode(SPU::ORi64_v2i64, dl, OpVT,
                                 SDValue(shufNode, 0));
  } else if (i64vec.getOpcode() == ISD::BUILD_VECTOR) {
    return CurDAG->getTargetNode(SPU::ORi64_v2i64, dl, OpVT,
                                 SDValue(emitBuildVector(i64vec), 0));
  } else {
    llvm_report_error("SPUDAGToDAGISel::SelectI64Constant: Unhandled i64vec"
                      "condition");
  }
}
/// createSPUISelDag - This pass converts a legalized DAG into a
/// SPU-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createSPUISelDag(SPUTargetMachine &TM) {
  return new SPUDAGToDAGISel(TM);
}