//===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SPUTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SPURegisterNames.h"
#include "SPUISelLowering.h"
#include "SPUTargetMachine.h"
#include "SPUFrameInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/CallingConv.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

namespace {
  // Used in getTargetNodeName() below
  std::map<unsigned, const char *> node_names;

  //! MVT mapping to useful data for Cell SPU
  struct valtype_map_s {
    const MVT valtype;
    const int prefslot_byte;
  };

  const valtype_map_s valtype_map[] = {
    { MVT::i1,   3 },
    { MVT::i8,   3 },
    { MVT::i16,  2 },
    { MVT::i32,  0 },
    { MVT::f32,  0 },
    { MVT::i64,  0 },
    { MVT::f64,  0 },
    { MVT::i128, 0 }
  };

  const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
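
  // A scalar value lives in the "preferred slot" of the SPU's 128-bit
  // register: byte 3 for an i8, bytes 2-3 for an i16, and bytes 0-3 for
  // word-sized and wider types. prefslot_byte records that byte offset so
  // the load/store lowering below can rotate elements into place.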

  const valtype_map_s *getValueTypeMapEntry(MVT VT) {
    const valtype_map_s *retval = 0;

    for (size_t i = 0; i < n_valtype_map; ++i) {
      if (valtype_map[i].valtype == VT) {
        retval = valtype_map + i;
        break;
      }
    }

#ifndef NDEBUG
    if (retval == 0) {
      std::string msg;
      raw_string_ostream Msg(msg);
      Msg << "getValueTypeMapEntry returns NULL for "
          << VT.getMVTString();
      llvm_report_error(Msg.str());
    }
#endif

    return retval;
  }

  //! Expand a library call into an actual call DAG node
  /*!
   \note
   This code is taken from SelectionDAGLegalize, since it is not exposed as
   part of the LLVM SelectionDAG API.
   */
  static SDValue
  ExpandLibCall(RTLIB::Libcall LC, SDValue Op, SelectionDAG &DAG,
                bool isSigned, SDValue &Hi, SPUTargetLowering &TLI) {
    // The input chain to this libcall is the entry node of the function.
    // Legalizing the call will automatically add the previous call to the
    // dependence.
    SDValue InChain = DAG.getEntryNode();

    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      MVT ArgVT = Op.getOperand(i).getValueType();
      const Type *ArgTy = ArgVT.getTypeForMVT(*DAG.getContext());
      Entry.Node = Op.getOperand(i);
      Entry.Ty = ArgTy;
      Entry.isSExt = isSigned;
      Entry.isZExt = !isSigned;
      Args.push_back(Entry);
    }
    SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
                                           TLI.getPointerTy());

    // Splice the libcall in wherever FindInputOutputChains tells us to.
    const Type *RetTy =
      Op.getNode()->getValueType(0).getTypeForMVT(*DAG.getContext());
    std::pair<SDValue, SDValue> CallInfo =
      TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
                      0, CallingConv::C, false, Callee, Args, DAG,
                      Op.getDebugLoc());

    return CallInfo.first;
  }

} // end anonymous namespace
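
// A minimal usage sketch for ExpandLibCall (hypothetical caller; the real
// call sites are presumably the f64 paths that must fall back to library
// calls, such as the __fast_divdf3 libcall name registered in the
// constructor below):
//
//   SDValue Dummy;
//   SDValue Quot = ExpandLibCall(RTLIB::DIV_F64, Op, DAG,
//                                /*isSigned=*/false, Dummy, TLI);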

SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
  : TargetLowering(TM),
    SPUTM(TM) {
  // Fold away setcc operations if possible.
  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set RTLIB libcall names as used by SPU:
  setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");

  // Set up the SPU's register classes:
  addRegisterClass(MVT::i8,   SPU::R8CRegisterClass);
  addRegisterClass(MVT::i16,  SPU::R16CRegisterClass);
  addRegisterClass(MVT::i32,  SPU::R32CRegisterClass);
  addRegisterClass(MVT::i64,  SPU::R64CRegisterClass);
  addRegisterClass(MVT::f32,  SPU::R32FPRegisterClass);
  addRegisterClass(MVT::f64,  SPU::R64FPRegisterClass);
  addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);

  // SPU has no sign- or zero-extended loads for i1, i8, i16:
  setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);

  // SPU constant load actions are custom lowered:
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  // SPU's loads and stores have to be custom lowered:
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
       ++sctype) {
    MVT VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::LOAD,   VT, Custom);
    setOperationAction(ISD::STORE,  VT, Custom);
    setLoadExtAction(ISD::EXTLOAD,  VT, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT, Custom);

    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
      MVT StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);
    }
  }

  for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
       ++sctype) {
    MVT VT = (MVT::SimpleValueType) sctype;

    setOperationAction(ISD::LOAD,  VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);

    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
      MVT StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);
    }
  }
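
  // Net effect of the two loops above: every scalar integer (i8 through i64)
  // and f32 load or store funnels through LowerLOAD/LowerSTORE below, while
  // truncating stores to narrower types are synthesized by the legalizer.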

  // Expand the jumptable branches
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);

  // Custom lower SELECT_CC for most cases, but expand by default
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8,    Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16,   Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32,   Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64,   Custom);

  // SPU has no intrinsics for these particular operations:
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);

  // SPU has no SREM/UREM instructions
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);

  // Expand fsqrt to the appropriate libcall (NOTE: should use h/w fsqrt
  // where it exists)
  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
  setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // SPU can do rotate right and left, so legalize it... but customize for i8
  // because instructions don't exist.

  // FIXME: Change from "expand" to the appropriate action once ROTR is
  // supported:
  setOperationAction(ISD::ROTR, MVT::i32, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i16, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i8,  Expand /*Custom*/);

  setOperationAction(ISD::ROTL, MVT::i32, Legal);
  setOperationAction(ISD::ROTL, MVT::i16, Legal);
  setOperationAction(ISD::ROTL, MVT::i8,  Custom);

  // SPU has no native version of shift left/right for i8
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i8, Custom);

  // Make these operations legal and handle them during instruction selection:
  setOperationAction(ISD::SHL, MVT::i64, Legal);
  setOperationAction(ISD::SRL, MVT::i64, Legal);
  setOperationAction(ISD::SRA, MVT::i64, Legal);

  // Custom lower i8 multiplications; i32 and i64 multiplies are legal:
  setOperationAction(ISD::MUL, MVT::i8,  Custom);
  setOperationAction(ISD::MUL, MVT::i32, Legal);
  setOperationAction(ISD::MUL, MVT::i64, Legal);

  // Expand double-width multiplication
  // FIXME: It would probably be reasonable to support some of these operations
  setOperationAction(ISD::UMUL_LOHI, MVT::i8,  Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i8,  Expand);
  setOperationAction(ISD::MULHU,     MVT::i8,  Expand);
  setOperationAction(ISD::MULHS,     MVT::i8,  Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::MULHU,     MVT::i16, Expand);
  setOperationAction(ISD::MULHS,     MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::MULHU,     MVT::i32, Expand);
  setOperationAction(ISD::MULHS,     MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::MULHU,     MVT::i64, Expand);
  setOperationAction(ISD::MULHS,     MVT::i64, Expand);
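
  // Marking all of the *MUL_LOHI and MULH* variants Expand makes the
  // legalizer synthesize double-width multiplies from the plain MUL
  // operations above, rather than looking for SPU instructions that don't
  // exist.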

  // Need to custom handle (some) common i8, i64 math ops
  setOperationAction(ISD::ADD, MVT::i8,  Custom);
  setOperationAction(ISD::ADD, MVT::i64, Legal);
  setOperationAction(ISD::SUB, MVT::i8,  Custom);
  setOperationAction(ISD::SUB, MVT::i64, Legal);

  // SPU does not have BSWAP, but it does support CTLZ for i32.
  // CTPOP has to be custom lowered.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i8,  Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);

  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);

  setOperationAction(ISD::CTLZ, MVT::i32, Legal);

  // SPU has a version of select that implements (a&~c)|(b&c), just like
  // select ought to work:
  setOperationAction(ISD::SELECT, MVT::i8,  Legal);
  setOperationAction(ISD::SELECT, MVT::i16, Legal);
  setOperationAction(ISD::SELECT, MVT::i32, Legal);
  setOperationAction(ISD::SELECT, MVT::i64, Legal);

  setOperationAction(ISD::SETCC, MVT::i8,  Legal);
  setOperationAction(ISD::SETCC, MVT::i16, Legal);
  setOperationAction(ISD::SETCC, MVT::i32, Legal);
  setOperationAction(ISD::SETCC, MVT::i64, Legal);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);

  // Custom lower i128 -> i64 truncates
  setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);

  // SPU has a legal FP -> signed INT instruction for f32, but for f64, need
  // to expand to a libcall, hence the custom lowering:
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

  // FDIV on SPU requires custom lowering
  setOperationAction(ISD::FDIV, MVT::f64, Expand);      // to libcall

  // SPU has [U|S]INT_TO_FP for i32->f32, but not for i32->f64 or i64->f64:
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8,  Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Legal);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Support label based line numbers.
  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC,     MVT::Other, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
       ++sctype) {
    MVT VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::ConstantPool,  VT, Custom);
    setOperationAction(ISD::JumpTable,     VT, Custom);
  }

  // RET must be custom lowered, to meet ABI requirements
  setOperationAction(ISD::RET, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG,              MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
  setOperationAction(ISD::VAEND,              MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32,   Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64,   Expand);

  // Cell SPU has instructions for converting between i64 and fp.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);

  // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);

  // "Odd size" vector classes that we're willing to support:
  addRegisterClass(MVT::v2i32, SPU::VECREGRegisterClass);

  for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;

    // add/sub are legal for all supported vector VT's.
    setOperationAction(ISD::ADD, VT, Legal);
    setOperationAction(ISD::SUB, VT, Legal);
    // mul has to be custom lowered.
    setOperationAction(ISD::MUL, VT, Legal);

    setOperationAction(ISD::AND,    VT, Legal);
    setOperationAction(ISD::OR,     VT, Legal);
    setOperationAction(ISD::XOR,    VT, Legal);
    setOperationAction(ISD::LOAD,   VT, Legal);
    setOperationAction(ISD::SELECT, VT, Legal);
    setOperationAction(ISD::STORE,  VT, Legal);

    // These operations need to be expanded:
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Custom lower build_vector, constant pool spills, insert and
    // extract vector elements:
    setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
    setOperationAction(ISD::ConstantPool,       VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
  }

  setOperationAction(ISD::AND, MVT::v16i8, Custom);
  setOperationAction(ISD::OR,  MVT::v16i8, Custom);
  setOperationAction(ISD::XOR, MVT::v16i8, Custom);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);

  setOperationAction(ISD::FDIV, MVT::v4f32, Legal);

  setShiftAmountType(MVT::i32);
  setBooleanContents(ZeroOrNegativeOneBooleanContent);

  setStackPointerRegisterToSaveRestore(SPU::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  computeRegisterProperties();

  // Set pre-RA register scheduler default to BURR, which produces slightly
  // better code than the default (could also be TDRR, but TargetLowering.h
  // needs a mod to support that model):
  setSchedulingPreference(SchedulingForRegPressure);
}

const char *
SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
{
  if (node_names.empty()) {
    node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
    node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
    node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
    node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
    node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
    node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
    node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
    node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
    node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
    node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
    node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
    node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
    node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
    node_names[(unsigned) SPUISD::SHLQUAD_L_BITS] = "SPUISD::SHLQUAD_L_BITS";
    node_names[(unsigned) SPUISD::SHLQUAD_L_BYTES] = "SPUISD::SHLQUAD_L_BYTES";
    node_names[(unsigned) SPUISD::VEC_SHL] = "SPUISD::VEC_SHL";
    node_names[(unsigned) SPUISD::VEC_SRL] = "SPUISD::VEC_SRL";
    node_names[(unsigned) SPUISD::VEC_SRA] = "SPUISD::VEC_SRA";
    node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
    node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
            "SPUISD::ROTBYTES_LEFT_BITS";
    node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
    node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
    node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
    node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
    node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";
  }

  std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);

  return ((i != node_names.end()) ? i->second : 0);
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned SPUTargetLowering::getFunctionAlignment(const Function *) const {
  return 3;
}

//===----------------------------------------------------------------------===//
// Return the Cell SPU's SETCC result type
//===----------------------------------------------------------------------===//

MVT
SPUTargetLowering::getSetCCResultType(MVT VT) const {
  // i8, i16 and i32 are valid SETCC result types
  return ((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) ? VT : MVT::i32);
}

//===----------------------------------------------------------------------===//
// Calling convention code:
//===----------------------------------------------------------------------===//

#include "SPUGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Custom lower loads for CellSPU
/*!
 All CellSPU loads and stores are aligned to 16-byte boundaries, so for
 elements within a 16-byte block, we have to rotate to extract the requested
 element.

 For extending loads, we also want to ensure that the following sequence is
 emitted, e.g. for MVT::f32 extending load to MVT::f64:

\verbatim
%1  v16i8,ch = load
%2  v16i8,ch = rotate %1
%3  v4f32,ch = bitconvert %2
%4  f32      = vec2prefslot %3
%5  f64      = fp_extend %4
\endverbatim
*/
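/*
 Note: the rotate step exists because SPU loads and stores always transfer an
 entire aligned 16-byte quadword; there is no scalar load from an arbitrary
 byte address, so the requested element must be rotated into the register's
 preferred slot after the quadword is fetched.
 */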
static SDValue
LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  LoadSDNode *LN = cast<LoadSDNode>(Op);
  SDValue the_chain = LN->getChain();
  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  MVT InVT = LN->getMemoryVT();
  MVT OutVT = Op.getValueType();
  ISD::LoadExtType ExtType = LN->getExtensionType();
  unsigned alignment = LN->getAlignment();
  const valtype_map_s *vtm = getValueTypeMapEntry(InVT);
  DebugLoc dl = Op.getDebugLoc();

  switch (LN->getAddressingMode()) {
  case ISD::UNINDEXED: {
    SDValue result;
    SDValue basePtr = LN->getBasePtr();
    SDValue rotate;

    if (alignment == 16) {
      ConstantSDNode *CN;

      // Special cases for a known aligned load to simplify the base pointer
      // and the rotation amount:
      if (basePtr.getOpcode() == ISD::ADD
          && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
        // Known offset into basePtr
        int64_t offset = CN->getSExtValue();
        int64_t rotamt = int64_t((offset & 0xf) - vtm->prefslot_byte);

        if (rotamt < 0)
          rotamt += 16;

        rotate = DAG.getConstant(rotamt, MVT::i16);

        // Simplify the base pointer for this case:
        basePtr = basePtr.getOperand(0);
        if ((offset & ~0xf) > 0) {
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                basePtr,
                                DAG.getConstant((offset & ~0xf), PtrVT));
        }
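
        // Worked example (using the prefslot table above): an i16 element at
        // offset 9 within its quadword needs rotamt = (9 & 0xf) - 2 = 7, a
        // 7-byte left rotation that lands the halfword in bytes 2-3.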
      } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
                 || (basePtr.getOpcode() == SPUISD::IndirectAddr
                     && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
                     && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
        // Plain aligned a-form address: rotate into preferred slot
        // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
        int64_t rotamt = -vtm->prefslot_byte;
        if (rotamt < 0)
          rotamt += 16;
        rotate = DAG.getConstant(rotamt, MVT::i16);
      } else {
        // Offset the rotate amount by the basePtr and the preferred slot
        // byte offset
        int64_t rotamt = -vtm->prefslot_byte;
        if (rotamt < 0)
          rotamt += 16;
        rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                             basePtr,
                             DAG.getConstant(rotamt, PtrVT));
      }
    } else {
      // Unaligned load: must be more pessimistic about addressing modes:
      if (basePtr.getOpcode() == ISD::ADD) {
        MachineFunction &MF = DAG.getMachineFunction();
        MachineRegisterInfo &RegInfo = MF.getRegInfo();
        unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
        SDValue Flag;

        SDValue Op0 = basePtr.getOperand(0);
        SDValue Op1 = basePtr.getOperand(1);

        if (isa<ConstantSDNode>(Op1)) {
          // Convert the (add <ptr>, <const>) to an indirect address contained
          // in a register. Note that this is done because we need to avoid
          // creating a 0(reg) d-form address due to the SPU's block loads.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
          the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
          basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
        } else {
          // Convert the (add <arg1>, <arg2>) to an indirect address, which
          // will likely be lowered as a reg(reg) x-form address.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
        }
      } else {
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              basePtr,
                              DAG.getConstant(0, PtrVT));
      }

      // Offset the rotate amount by the basePtr and the preferred slot
      // byte offset
      rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                           basePtr,
                           DAG.getConstant(-vtm->prefslot_byte, PtrVT));
    }

    // Re-emit as a v16i8 vector load
    result = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
                         LN->getSrcValue(), LN->getSrcValueOffset(),
                         LN->isVolatile(), 16);

    the_chain = result.getValue(1);

    // Rotate into the preferred slot:
    result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::v16i8,
                         result.getValue(0), rotate);

    // Convert the loaded v16i8 vector to the appropriate vector type
    // specified by the operand:
    MVT vecVT = MVT::getVectorVT(InVT, (128 / InVT.getSizeInBits()));
    result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
                         DAG.getNode(ISD::BIT_CONVERT, dl, vecVT, result));

    // Handle extending loads by extending the scalar result:
    if (ExtType == ISD::SEXTLOAD) {
      result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
    } else if (ExtType == ISD::ZEXTLOAD) {
      result = DAG.getNode(ISD::ZERO_EXTEND, dl, OutVT, result);
    } else if (ExtType == ISD::EXTLOAD) {
      unsigned NewOpc = ISD::ANY_EXTEND;

      if (OutVT.isFloatingPoint())
        NewOpc = ISD::FP_EXTEND;

      result = DAG.getNode(NewOpc, dl, OutVT, result);
    }

    SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
    SDValue retops[2] = {
      result,
      the_chain
    };

    result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
                         retops, sizeof(retops) / sizeof(retops[0]));
    return result;
  }
  case ISD::PRE_INC:
  case ISD::PRE_DEC:
  case ISD::POST_INC:
  case ISD::POST_DEC:
  case ISD::LAST_INDEXED_MODE:
    {
      std::string msg;
      raw_string_ostream Msg(msg);
      Msg << "LowerLOAD: Got a LoadSDNode with an addr mode other than "
             "UNINDEXED\n";
      Msg << (unsigned) LN->getAddressingMode();
      llvm_report_error(Msg.str());
      /*NOTREACHED*/
    }
  }

  return SDValue();
}

/// Custom lower stores for CellSPU
/*!
 All CellSPU stores are aligned to 16-byte boundaries, so for elements
 within a 16-byte block, we have to generate a shuffle to insert the
 requested element into its place, then store the resulting block.
 */
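/*
 In other words, every scalar store becomes a read-modify-write of its
 containing quadword: load the 16-byte block, build an insertion control word
 (SHUFFLE_MASK), merge the new element in with SHUFB, then store the whole
 block back.
 */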
static SDValue
LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  StoreSDNode *SN = cast<StoreSDNode>(Op);
  SDValue Value = SN->getValue();
  MVT VT = Value.getValueType();
  MVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  unsigned alignment = SN->getAlignment();
->getAddressingMode()) {
703 case ISD::UNINDEXED
: {
704 // The vector type we really want to load from the 16-byte chunk.
705 MVT vecVT
= MVT::getVectorVT(VT
, (128 / VT
.getSizeInBits())),
706 stVecVT
= MVT::getVectorVT(StVT
, (128 / StVT
.getSizeInBits()));
708 SDValue alignLoadVec
;
709 SDValue basePtr
= SN
->getBasePtr();
710 SDValue the_chain
= SN
->getChain();
711 SDValue insertEltOffs
;
713 if (alignment
== 16) {
716 // Special cases for a known aligned load to simplify the base pointer
717 // and insertion byte:
718 if (basePtr
.getOpcode() == ISD::ADD
719 && (CN
= dyn_cast
<ConstantSDNode
>(basePtr
.getOperand(1))) != 0) {
720 // Known offset into basePtr
721 int64_t offset
= CN
->getSExtValue();
723 // Simplify the base pointer for this case:
724 basePtr
= basePtr
.getOperand(0);
725 insertEltOffs
= DAG
.getNode(SPUISD::IndirectAddr
, dl
, PtrVT
,
727 DAG
.getConstant((offset
& 0xf), PtrVT
));
729 if ((offset
& ~0xf) > 0) {
730 basePtr
= DAG
.getNode(SPUISD::IndirectAddr
, dl
, PtrVT
,
732 DAG
.getConstant((offset
& ~0xf), PtrVT
));
735 // Otherwise, assume it's at byte 0 of basePtr
736 insertEltOffs
= DAG
.getNode(SPUISD::IndirectAddr
, dl
, PtrVT
,
738 DAG
.getConstant(0, PtrVT
));
    } else {
      // Unaligned load: must be more pessimistic about addressing modes:
      if (basePtr.getOpcode() == ISD::ADD) {
        MachineFunction &MF = DAG.getMachineFunction();
        MachineRegisterInfo &RegInfo = MF.getRegInfo();
        unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
        SDValue Flag;

        SDValue Op0 = basePtr.getOperand(0);
        SDValue Op1 = basePtr.getOperand(1);

        if (isa<ConstantSDNode>(Op1)) {
          // Convert the (add <ptr>, <const>) to an indirect address contained
          // in a register. Note that this is done because we need to avoid
          // creating a 0(reg) d-form address due to the SPU's block loads.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
          the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
          basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
        } else {
          // Convert the (add <arg1>, <arg2>) to an indirect address, which
          // will likely be lowered as a reg(reg) x-form address.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
        }
      } else {
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              basePtr,
                              DAG.getConstant(0, PtrVT));
      }

      // Insertion point is solely determined by basePtr's contents
      insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
                                  basePtr,
                                  DAG.getConstant(0, PtrVT));
    }

    // Re-emit as a v16i8 vector load
    alignLoadVec = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
                               SN->getSrcValue(), SN->getSrcValueOffset(),
                               SN->isVolatile(), 16);

    the_chain = alignLoadVec.getValue(1);

    LoadSDNode *LN = cast<LoadSDNode>(alignLoadVec);
    SDValue theValue = SN->getValue();
    SDValue result;

    if (StVT != VT
        && (theValue.getOpcode() == ISD::AssertZext
            || theValue.getOpcode() == ISD::AssertSext)) {
      // Drill down and get the value for zero- and sign-extended
      // quantities
      theValue = theValue.getOperand(0);
    }

    // If the base pointer is already a D-form address, then just create
    // a new D-form address with a slot offset and the original base pointer.
    // Otherwise generate a D-form address with the slot offset relative
    // to the stack pointer, which is always aligned.
#ifndef NDEBUG
    if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
      cerr << "CellSPU LowerSTORE: basePtr = ";
      basePtr.getNode()->dump(&DAG);
      cerr << "\n";
    }
#endif

    SDValue insertEltOp =
            DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT, insertEltOffs);
    SDValue vectorizeOp =
            DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT, theValue);

    result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
                         vectorizeOp, alignLoadVec,
                         DAG.getNode(ISD::BIT_CONVERT, dl,
                                     MVT::v4i32, insertEltOp));

    result = DAG.getStore(the_chain, dl, result, basePtr,
                          LN->getSrcValue(), LN->getSrcValueOffset(),
                          LN->isVolatile(), LN->getAlignment());

#if 0 && !defined(NDEBUG)
    if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
      const SDValue &currentRoot = DAG.getRoot();

      DAG.setRoot(result);
      cerr << "------- CellSPU:LowerStore result:\n";
      DAG.dump();
      cerr << "-------\n";
      DAG.setRoot(currentRoot);
    }
#endif

    return result;
  }
  case ISD::PRE_INC:
  case ISD::PRE_DEC:
  case ISD::POST_INC:
  case ISD::POST_DEC:
  case ISD::LAST_INDEXED_MODE:
    {
      std::string msg;
      raw_string_ostream Msg(msg);
      Msg << "LowerSTORE: Got a StoreSDNode with an addr mode other than "
             "UNINDEXED\n";
      Msg << (unsigned) SN->getAddressingMode();
      llvm_report_error(Msg.str());
      /*NOTREACHED*/
    }
  }

  return SDValue();
}

//! Generate the address of a constant pool entry.
static SDValue
LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  MVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  Constant *C = CP->getConstVal();
  SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDValue Zero = DAG.getConstant(0, PtrVT);
  const TargetMachine &TM = DAG.getTarget();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      // Just return the SDValue with the constant pool address in it.
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, CPI, Zero);
    } else {
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, CPI, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, CPI, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
    }
  }

  llvm_unreachable("LowerConstantPool: Relocation model other than static"
                   " not supported.");
  return SDValue();
}

//! Alternate entry point for generating the address of a constant pool entry
SDValue
SPU::LowerConstantPool(SDValue Op, SelectionDAG &DAG,
                       const SPUTargetMachine &TM) {
  return ::LowerConstantPool(Op, DAG, TM.getSubtargetImpl());
}
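
// Addressing-model note for the Lower*Address routines: with the small
// ("non-large") memory model an address fits the 18-bit absolute A-form
// field, so a single SPUISD::AFormAddr node suffices; in large-memory mode
// the address is split into SPUISD::Hi / SPUISD::Lo halves and recombined
// through SPUISD::IndirectAddr.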

static SDValue
LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  MVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDValue Zero = DAG.getConstant(0, PtrVT);
  const TargetMachine &TM = DAG.getTarget();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, JTI, Zero);
    } else {
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, JTI, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, JTI, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
    }
  }

  llvm_unreachable("LowerJumpTable: Relocation model other than static"
                   " not supported.");
  return SDValue();
}

static SDValue
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  MVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  GlobalValue *GV = GSDN->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
  const TargetMachine &TM = DAG.getTarget();
  SDValue Zero = DAG.getConstant(0, PtrVT);
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, GA, Zero);
    } else {
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, GA, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, GA, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
    }
  } else {
    llvm_report_error("LowerGlobalAddress: Relocation model other than static"
                      " not supported.");
    /*NOTREACHED*/
  }

  return SDValue();
}

//! Custom lower double precision floating point constants
static SDValue
LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getValueType();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (VT == MVT::f64) {
    ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());

    assert((FP != 0) &&
           "LowerConstantFP: Node is not ConstantFPSDNode");

    uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
    SDValue T = DAG.getConstant(dbits, MVT::i64);
    SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
    return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
                       DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec));
  }

  return SDValue();
}

static SDValue
LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex)
{
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SmallVector<SDValue, 48> ArgValues;
  SDValue Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
  DebugLoc dl = Op.getDebugLoc();

  const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
  const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();

  unsigned ArgOffset = SPUFrameInfo::minStackSize();
  unsigned ArgRegIdx = 0;
  unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();

  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
986 for (unsigned ArgNo
= 0, e
= Op
.getNode()->getNumValues() - 1;
987 ArgNo
!= e
; ++ArgNo
) {
988 MVT ObjectVT
= Op
.getValue(ArgNo
).getValueType();
989 unsigned ObjSize
= ObjectVT
.getSizeInBits()/8;
992 if (ArgRegIdx
< NumArgRegs
) {
993 const TargetRegisterClass
*ArgRegClass
;
995 switch (ObjectVT
.getSimpleVT()) {
998 raw_string_ostream
Msg(msg
);
999 Msg
<< "LowerFORMAL_ARGUMENTS Unhandled argument type: "
1000 << ObjectVT
.getMVTString();
1001 llvm_report_error(Msg
.str());
1004 ArgRegClass
= &SPU::R8CRegClass
;
1007 ArgRegClass
= &SPU::R16CRegClass
;
1010 ArgRegClass
= &SPU::R32CRegClass
;
1013 ArgRegClass
= &SPU::R64CRegClass
;
1016 ArgRegClass
= &SPU::GPRCRegClass
;
1019 ArgRegClass
= &SPU::R32FPRegClass
;
1022 ArgRegClass
= &SPU::R64FPRegClass
;
1030 ArgRegClass
= &SPU::VECREGRegClass
;

      unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
      RegInfo.addLiveIn(ArgRegs[ArgRegIdx], VReg);
      ArgVal = DAG.getCopyFromReg(Root, dl, VReg, ObjectVT);
      ++ArgRegIdx;
    } else {
      // We need to load the argument to a virtual register if we determined
      // above that we ran out of physical registers of the appropriate type
      // or we're forced to do vararg
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Root, FIN, NULL, 0);
      ArgOffset += StackSlotSize;
    }

    ArgValues.push_back(ArgVal);
    Root = ArgVal.getOperand(0);
  }

  if (isVarArg) {
    // unsigned int ptr_size = PtrVT.getSizeInBits() / 8;
    // We will spill (79-3)+1 registers to the stack
    SmallVector<SDValue, 79-3+1> MemOps;

    // Create the frame slot
    for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
      VarArgsFrameIndex = MFI->CreateFixedObject(StackSlotSize, ArgOffset);
      SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
      SDValue ArgVal = DAG.getRegister(ArgRegs[ArgRegIdx], MVT::v16i8);
      SDValue Store = DAG.getStore(Root, dl, ArgVal, FIN, NULL, 0);
      Root = Store.getOperand(0);
      MemOps.push_back(Store);

      // Increment address by stack slot size for the next stored argument
      ArgOffset += StackSlotSize;
    }
    if (!MemOps.empty())
      Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                         &MemOps[0], MemOps.size());
  }

  ArgValues.push_back(Root);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
                     &ArgValues[0], ArgValues.size());
}

/// isLSAAddress - Return the immediate to use if the specified
/// value is representable as a LSA address.
static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return 0;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      (Addr << 14 >> 14) != Addr)
    return 0;  // Top 14 bits have to be sext of immediate.

  return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
}
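
// Example: Callee == 0x12340 passes both checks (word-aligned, sign-extends
// from 18 bits) and yields the immediate 0x12340 >> 2; 0x40000 fails the
// (Addr << 14 >> 14) sign-extension test and is not a valid LSA address.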

static SDValue
LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
  SDValue Chain = TheCall->getChain();
  SDValue Callee = TheCall->getCallee();
  unsigned NumOps = TheCall->getNumArgs();
  unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
  const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
  const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();
  DebugLoc dl = TheCall->getDebugLoc();

  // Handy pointer type
  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Accumulate how many bytes are to be pushed on the stack, including the
  // linkage area, and parameter passing area. According to the SPU ABI,
  // we minimally need space for [LR] and [SP]
  unsigned NumStackBytes = SPUFrameInfo::minStackSize();

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory.
  unsigned ArgOffset = SPUFrameInfo::minStackSize(); // Just below [LR]
  unsigned ArgRegIdx = 0;

  // Keep track of registers passing arguments
  std::vector<std::pair<unsigned, SDValue> > RegsToPass;
  // And the arguments passed on the stack
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = TheCall->getArg(i);

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
    PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);

    switch (Arg.getValueType().getSimpleVT()) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::i64:
    case MVT::i128:
      if (ArgRegIdx != NumArgRegs) {
        RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
      } else {
        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0));
        ArgOffset += StackSlotSize;
      }
      break;
    case MVT::f32:
    case MVT::f64:
      if (ArgRegIdx != NumArgRegs) {
        RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
      } else {
        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0));
        ArgOffset += StackSlotSize;
      }
      break;
    case MVT::v2i64:
    case MVT::v2f64:
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      if (ArgRegIdx != NumArgRegs) {
        RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
      } else {
        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0));
        ArgOffset += StackSlotSize;
      }
      break;
    }
  }

  // Update number of stack bytes actually used, insert a call sequence start
  NumStackBytes = (ArgOffset - SPUFrameInfo::minStackSize());
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,
                                                            true));

  if (!MemOpChains.empty()) {
    // Adjust the stack pointer for the stack arguments.
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  SmallVector<SDValue, 8> Ops;
  unsigned CallOpc = SPUISD::CALL;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    GlobalValue *GV = G->getGlobal();
    MVT CalleeVT = Callee.getValueType();
    SDValue Zero = DAG.getConstant(0, PtrVT);
    SDValue GA = DAG.getTargetGlobalAddress(GV, CalleeVT);

    if (!ST->usingLargeMem()) {
      // Turn calls to targets that are defined (i.e., have bodies) into BRSL
      // style calls, otherwise, external symbols are BRASL calls. This assumes
      // that declared/defined symbols are in the same compilation unit and can
      // be reached through PC-relative jumps.
      //
      // This may be an unsafe assumption for JIT and really large compilation
      // units.
      if (GV->isDeclaration()) {
        Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, GA, Zero);
      } else {
        Callee = DAG.getNode(SPUISD::PCRelAddr, dl, CalleeVT, GA, Zero);
      }
    } else {
      // "Large memory" mode: Turn all calls into indirect calls with an
      // X-form address.
      Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    MVT CalleeVT = Callee.getValueType();
    SDValue Zero = DAG.getConstant(0, PtrVT);
    SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
                                                 Callee.getValueType());

    if (!ST->usingLargeMem()) {
      Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, ExtSym, Zero);
    } else {
      Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, ExtSym, Zero);
    }
  } else if (SDNode *Dest = isLSAAddress(Callee, DAG)) {
    // If this is an absolute destination address that appears to be a legal
    // local store address, use the munged value.
    Callee = SDValue(Dest, 0);
  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);
  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
                      &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  if (TheCall->getValueType(0) != MVT::Other)
    InFlag = Chain.getValue(1);

  SDValue ResultVals[3];
  unsigned NumResults = 0;

  // If the call has results, copy the values out of the ret val registers.
  switch (TheCall->getValueType(0).getSimpleVT()) {
  default: llvm_unreachable("Unexpected ret value!");
  case MVT::Other: break;
  case MVT::i32:
    if (TheCall->getValueType(1) == MVT::i32) {
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R4,
                                 MVT::i32, InFlag).getValue(1);
      ResultVals[0] = Chain.getValue(0);
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
                                 Chain.getValue(2)).getValue(1);
      ResultVals[1] = Chain.getValue(0);
      NumResults = 2;
    } else {
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
                                 InFlag).getValue(1);
      ResultVals[0] = Chain.getValue(0);
      NumResults = 1;
    }
    break;
  case MVT::i64:
    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i64,
                               InFlag).getValue(1);
    ResultVals[0] = Chain.getValue(0);
    NumResults = 1;
    break;
  case MVT::i128:
    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i128,
                               InFlag).getValue(1);
    ResultVals[0] = Chain.getValue(0);
    NumResults = 1;
    break;
  case MVT::f32:
  case MVT::f64:
    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, TheCall->getValueType(0),
                               InFlag).getValue(1);
    ResultVals[0] = Chain.getValue(0);
    NumResults = 1;
    break;
  case MVT::v2f64:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v4i32:
  case MVT::v8i16:
  case MVT::v16i8:
    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, TheCall->getValueType(0),
                               InFlag).getValue(1);
    ResultVals[0] = Chain.getValue(0);
    NumResults = 1;
    break;
  }

  // If the function returns void, just return the chain.
  if (NumResults == 0)
    return Chain;

  // Otherwise, merge everything together with a MERGE_VALUES node.
  ResultVals[NumResults++] = Chain;
  SDValue Res = DAG.getMergeValues(ResultVals, NumResults, dl);
  return Res.getValue(Op.getResNo());
}

static SDValue
LowerRET(SDValue Op, SelectionDAG &DAG, TargetMachine &TM) {
  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  DebugLoc dl = Op.getDebugLoc();
  CCState CCInfo(CC, isVarArg, TM, RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Op.getNode(), RetCC_SPU);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Chain = Op.getOperand(0);
  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             Op.getOperand(i*2+1), Flag);
    Flag = Chain.getValue(1);
  }

  if (Flag.getNode())
    return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
  else
    return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
}

//===----------------------------------------------------------------------===//
// Vector related lowering:
//===----------------------------------------------------------------------===//

static ConstantSDNode *
getVecImm(SDNode *N) {
  SDValue OpVal(0, 0);

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return 0;
  }

  if (OpVal.getNode() != 0) {
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
      return CN;
    }
  }

  return 0;
}

/// get_vec_u18imm - Test if this vector is a vector filled with the same value
/// and the value fits into an unsigned 18-bit constant, and if so, return the
/// constant.
SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
                            MVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    uint64_t Value = CN->getZExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);

      if (upper != lower)
        return SDValue();

      Value = Value >> 32;
    }
    if (Value <= 0x3ffff)
      return DAG.getTargetConstant(Value, ValueType);
  }

  return SDValue();
}

/// get_vec_i16imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 16-bit constant, and if so, return the
/// constant.
SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
                            MVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    int64_t Value = CN->getSExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);

      if (upper != lower)
        return SDValue();

      Value = Value >> 32;
    }
    if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {
      return DAG.getTargetConstant(Value, ValueType);
    }
  }

  return SDValue();
}

/// get_vec_i10imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 10-bit constant, and if so, return the
/// constant.
SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
                            MVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    int64_t Value = CN->getSExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);

      if (upper != lower)
        return SDValue();

      Value = Value >> 32;
    }
    if (isS10Constant(Value))
      return DAG.getTargetConstant(Value, ValueType);
  }

  return SDValue();
}

/// get_vec_i8imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 8-bit constant, and if so, return the
/// constant.
///
/// @note: The incoming vector is v16i8 because that's the only way we can load
/// constant vectors. Thus, we test to see if the upper and lower bytes are the
/// same value.
SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
                           MVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    int Value = (int) CN->getZExtValue();
    if (ValueType == MVT::i16
        && Value <= 0xffff                 /* truncated from uint64_t */
        && ((short) Value >> 8) == ((short) Value & 0xff))
      return DAG.getTargetConstant(Value & 0xff, ValueType);
    else if (ValueType == MVT::i8
             && (Value & 0xff) == Value)
      return DAG.getTargetConstant(Value, ValueType);
  }

  return SDValue();
}

/// get_ILHUvec_imm - Test if this vector is a vector filled with the same
/// value and the value fits into a signed 16-bit constant, and if so, return
/// the constant.
SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
                             MVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    uint64_t Value = CN->getZExtValue();
    if ((ValueType == MVT::i32
         && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
        || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
      return DAG.getTargetConstant(Value >> 16, ValueType);
  }

  return SDValue();
}

/// get_v4i32_imm - Catch-all for general 32-bit constant vectors
SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
  }

  return SDValue();
}

/// get_v2i64_imm - Catch-all for general 64-bit constant vectors
SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i64);
  }

  return SDValue();
}
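
// The get_vec_* helpers above are intended for use during instruction
// selection: when a splat BUILD_VECTOR's value fits one of these immediate
// forms, it can be materialized with an immediate-load instruction
// (IL/ILH/ILHU/ILA et al.) instead of a constant-pool load.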

//! Lower a BUILD_VECTOR instruction creatively:
static SDValue
LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getValueType();
  MVT EltVT = VT.getVectorElementType();
  DebugLoc dl = Op.getDebugLoc();
  BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
  unsigned minSplatBits = EltVT.getSizeInBits();

  if (minSplatBits < 16)
    minSplatBits = 16;

  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;

  if (!BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, minSplatBits)
      || minSplatBits < SplatBitSize)
    return SDValue();   // Wasn't a constant vector or splat exceeded min

  uint64_t SplatBits = APSplatBits.getZExtValue();

  switch (VT.getSimpleVT()) {
  default: {
    std::string msg;
    raw_string_ostream Msg(msg);
    Msg << "CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = "
        << VT.getMVTString();
    llvm_report_error(Msg.str());
    /*NOTREACHED*/
  }
  case MVT::v4f32: {
    uint32_t Value32 = uint32_t(SplatBits);
    assert(SplatBitSize == 32
           && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
    // NOTE: pretend the constant is an integer. LLVM won't load FP constants
    SDValue T = DAG.getConstant(Value32, MVT::i32);
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
  }
  case MVT::v2f64: {
    uint64_t f64val = uint64_t(SplatBits);
    assert(SplatBitSize == 64
           && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
    // NOTE: pretend the constant is an integer. LLVM won't load FP constants
    SDValue T = DAG.getConstant(f64val, MVT::i64);
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
  }
  case MVT::v16i8: {
    // 8-bit constants have to be expanded to 16-bits
    unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
    SmallVector<SDValue, 8> Ops;

    Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
    return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16,
                                   &Ops[0], Ops.size()));
  }
  case MVT::v8i16: {
    unsigned short Value16 = SplatBits;
    SDValue T = DAG.getConstant(Value16, EltVT);
    SmallVector<SDValue, 8> Ops;

    Ops.assign(8, T);
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
  }
  case MVT::v4i32: {
    SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
  }
  case MVT::v2i32: {
    SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T);
  }
  case MVT::v2i64: {
    return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
  }
  }

  return SDValue();
}

SDValue
SPU::LowerV2I64Splat(MVT OpVT, SelectionDAG &DAG, uint64_t SplatVal,
                     DebugLoc dl) {
  uint32_t upper = uint32_t(SplatVal >> 32);
  uint32_t lower = uint32_t(SplatVal);

  if (upper == lower) {
    // Magic constant that can be matched by IL, ILA, et. al.
    SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
    return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                   Val, Val, Val, Val));
  } else {
    bool upper_special, lower_special;

    // NOTE: This code creates common-case shuffle masks that can be easily
    // detected as common expressions. It is not attempting to create highly
    // specialized masks to replace any and all 0's, 0xff's and 0x80's.

    // Detect if the upper or lower half is a special shuffle mask pattern:
    upper_special = (upper == 0 || upper == 0xffffffff || upper == 0x80000000);
    lower_special = (lower == 0 || lower == 0xffffffff || lower == 0x80000000);

    // Both upper and lower are special, lower to a constant pool load:
    if (lower_special && upper_special) {
      SDValue SplatValCN = DAG.getConstant(SplatVal, MVT::i64);
      return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
                         SplatValCN, SplatValCN);
    }

    SDValue LO32;
    SDValue HI32;
    SmallVector<SDValue, 16> ShufBytes;

    // Create lower vector if not a special pattern
    if (!lower_special) {
      SDValue LO32C = DAG.getConstant(lower, MVT::i32);
      LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                         DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                     LO32C, LO32C, LO32C, LO32C));
    }

    // Create upper vector if not a special pattern
    if (!upper_special) {
      SDValue HI32C = DAG.getConstant(upper, MVT::i32);
      HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                         DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                     HI32C, HI32C, HI32C, HI32C));
    }

    // If either upper or lower are special, then the two input operands are
    // the same (basically, one of them is a "don't care")
    if (lower_special)
      LO32 = HI32;
    if (upper_special)
      HI32 = LO32;

    for (int i = 0; i < 4; ++i) {
      uint64_t val = 0;
      for (int j = 0; j < 4; ++j) {
        bool process_upper, process_lower;
        val <<= 8;
        process_upper = (upper_special && (i & 1) == 0);
        process_lower = (lower_special && (i & 1) == 1);

        if (process_upper || process_lower) {
          if ((process_upper && upper == 0)
              || (process_lower && lower == 0))
            val |= 0x80;
          else if ((process_upper && upper == 0xffffffff)
                   || (process_lower && lower == 0xffffffff))
            val |= 0xc0;
          else if ((process_upper && upper == 0x80000000)
                   || (process_lower && lower == 0x80000000))
            val |= (j == 0 ? 0xe0 : 0x80);
        } else
          val |= i * 4 + j + ((i & 1) * 16);
      }

      ShufBytes.push_back(DAG.getConstant(val, MVT::i32));
    }

    return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                   &ShufBytes[0], ShufBytes.size()));
  }
}
/// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on
/// which the Cell can operate. The code inspects V3 to ascertain whether the
/// permutation vector, V3, is monotonically increasing with one "exception"
/// element, e.g., (0, 1, _, 3). If this is the case, then generate a
/// SHUFFLE_MASK synthetic instruction. Otherwise, spill V3 to the constant
/// pool. In either case, the net result is going to eventually invoke SHUFB
/// to permute/shuffle the bytes from V1 and V2.
///
/// SHUFFLE_MASK is eventually selected as one of the C*D instructions, which
/// generate a control word for byte/halfword/word insertion. This takes care
/// of a single element move from V2 into V1.
///
/// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instruction.
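///
/// As an illustration (not from the original comment): for a v4i32 shuffle,
/// the mask (0, 1, 6, 3) is monotonic with the single exception element 6,
/// which names element 2 of V2, so it lowers to a SHUFFLE_MASK/SHUFB pair
/// that inserts that one word of V2 into V1.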
static SDValue
LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();

  if (V2.getOpcode() == ISD::UNDEF) V2 = V1;

  // If we have a single element being moved from V1 to V2, this can be handled
  // using the C*[DX] compute mask instructions, but the vector elements have
  // to be monotonically increasing with one exception element.
  MVT VecVT = V1.getValueType();
  MVT EltVT = VecVT.getVectorElementType();
  unsigned EltsFromV2 = 0;
  unsigned V2Elt = 0;
  unsigned V2EltIdx0 = 0;
  unsigned CurrElt = 0;
  unsigned MaxElts = VecVT.getVectorNumElements();
  unsigned PrevElt = 0;
  unsigned V0Elt = 0;
  bool monotonic = true;
  bool rotate = true;

  if (EltVT == MVT::i8) {
    V2EltIdx0 = 16;
  } else if (EltVT == MVT::i16) {
    V2EltIdx0 = 8;
  } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
    V2EltIdx0 = 4;
  } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
    V2EltIdx0 = 2;
  } else
    llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");

  for (unsigned i = 0; i != MaxElts; ++i) {
    if (SVN->getMaskElt(i) < 0)
      continue;

    unsigned SrcElt = SVN->getMaskElt(i);

    if (monotonic) {
      if (SrcElt >= V2EltIdx0) {
        if (1 >= (++EltsFromV2)) {
          V2Elt = (V2EltIdx0 - SrcElt) << 2;
        }
      } else if (CurrElt != SrcElt) {
        monotonic = false;
      }

      ++CurrElt;
    }

    if (rotate) {
      if (PrevElt > 0 && SrcElt < MaxElts) {
        if ((PrevElt == SrcElt - 1)
            || (PrevElt == MaxElts - 1 && SrcElt == 0)) {
          PrevElt = SrcElt;
          if (SrcElt == 0)
            V0Elt = i;
        } else {
          rotate = false;
        }
      } else if (PrevElt == 0) {
        // First time through, need to keep track of previous element
        PrevElt = SrcElt;
      } else {
        // This isn't a rotation, takes elements from vector 2
        rotate = false;
      }
    }
  }

  if (EltsFromV2 == 1 && monotonic) {
    // Compute mask and shuffle
    MachineFunction &MF = DAG.getMachineFunction();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
    MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    // Initialize temporary register to 0
    SDValue InitTempReg =
      DAG.getCopyToReg(DAG.getEntryNode(), dl, VReg, DAG.getConstant(0, PtrVT));
    // Copy register's contents as index in SHUFFLE_MASK:
    SDValue ShufMaskOp =
      DAG.getNode(SPUISD::SHUFFLE_MASK, dl, MVT::v4i32,
                  DAG.getTargetConstant(V2Elt, MVT::i32),
                  DAG.getCopyFromReg(InitTempReg, dl, VReg, PtrVT));
    // Use shuffle mask in SHUFB synthetic instruction:
    return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
                       ShufMaskOp);
  } else if (rotate) {
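    // Sketch: a mask such as (1, 2, 3, 0) on v4i32 steps upward with a
    // single wrap back to 0, so it is a left rotation; V0Elt records where
    // element 0 landed (3 here), giving (4 - 3) * 4 = 4 bytes below.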
    int rotamt = (MaxElts - V0Elt) * EltVT.getSizeInBits()/8;

    return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
                       V1, DAG.getConstant(rotamt, MVT::i16));
  } else {
    // Convert the SHUFFLE_VECTOR mask's input element units to the
    // actual bytes.
    unsigned BytesPerElement = EltVT.getSizeInBits()/8;

    SmallVector<SDValue, 16> ResultMask;
    for (unsigned i = 0, e = MaxElts; i != e; ++i) {
      unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);

      for (unsigned j = 0; j < BytesPerElement; ++j)
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));
    }

    SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
                                    &ResultMask[0], ResultMask.size());
    return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
  }
}
static SDValue
LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
  SDValue Op0 = Op.getOperand(0);                     // Op0 = the scalar
  DebugLoc dl = Op.getDebugLoc();

  if (Op0.getNode()->getOpcode() == ISD::Constant) {
    // For a constant, build the appropriate constant vector, which will
    // eventually simplify to a vector register load.

    ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
    SmallVector<SDValue, 16> ConstVecValues;
    MVT VT;
    size_t n_copies;

    // Create a constant vector:
    switch (Op.getValueType().getSimpleVT()) {
    default: llvm_unreachable("Unexpected constant value type in "
                              "LowerSCALAR_TO_VECTOR");
    case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
    case MVT::v8i16: n_copies = 8;  VT = MVT::i16; break;
    case MVT::v4i32: n_copies = 4;  VT = MVT::i32; break;
    case MVT::v4f32: n_copies = 4;  VT = MVT::f32; break;
    case MVT::v2i64: n_copies = 2;  VT = MVT::i64; break;
    case MVT::v2f64: n_copies = 2;  VT = MVT::f64; break;
    }

    SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
    for (size_t j = 0; j < n_copies; ++j)
      ConstVecValues.push_back(CValue);

    return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
                       &ConstVecValues[0], ConstVecValues.size());
  } else {
    // Otherwise, copy the value from one register to another:
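    // The SPU keeps scalars in the "preferred slot" (the leftmost word or
    // doubleword) of a 128-bit register, so a scalar-to-vector move is a
    // reinterpretation of the same register rather than a data move;
    // SPUISD::PREFSLOT2VEC below expresses exactly that.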
    switch (Op0.getValueType().getSimpleVT()) {
    default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::i64:
    case MVT::f32:
    case MVT::f64:
      return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
    }
  }

  return SDValue();
}
static SDValue
LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getValueType();
  SDValue N = Op.getOperand(0);
  SDValue Elt = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  SDValue retval;

  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
    // Constant argument:
    int EltNo = (int) C->getZExtValue();

    // Sanity check the element index:
    if (VT == MVT::i8 && EltNo >= 16)
      llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
    else if (VT == MVT::i16 && EltNo >= 8)
      llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
    else if (VT == MVT::i32 && EltNo >= 4)
      llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 3");
    else if (VT == MVT::i64 && EltNo >= 2)
      llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 1");

    if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
      // i32 and i64: Element 0 is the preferred slot
      return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
    }

    // Need to generate shuffle mask and extract:
    int prefslot_begin = -1, prefslot_end = -1;
    int elt_byte = EltNo * VT.getSizeInBits() / 8;

    switch (VT.getSimpleVT()) {
    default:
      assert(false && "Invalid value type!");
    case MVT::i8: {
      prefslot_begin = prefslot_end = 3;
      break;
    }
    case MVT::i16: {
      prefslot_begin = 2; prefslot_end = 3;
      break;
    }
    case MVT::i32:
    case MVT::f32: {
      prefslot_begin = 0; prefslot_end = 3;
      break;
    }
    case MVT::i64:
    case MVT::f64: {
      prefslot_begin = 0; prefslot_end = 7;
      break;
    }
    }

    assert(prefslot_begin != -1 && prefslot_end != -1 &&
           "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
    unsigned int ShufBytes[16];
    for (int i = 0; i < 16; ++i) {
      // zero fill upper part of preferred slot, don't care about the
      // remaining bytes:
      unsigned int mask_val;
      if (i <= prefslot_end) {
        mask_val =
          ((i < prefslot_begin)
           ? 0x80
           : elt_byte + (i - prefslot_begin));

        ShufBytes[i] = mask_val;
      } else
        ShufBytes[i] = ShufBytes[i % (prefslot_end + 1)];
    }

    SDValue ShufMask[4];
    for (unsigned i = 0; i < sizeof(ShufMask)/sizeof(ShufMask[0]); ++i) {
      unsigned bidx = i * 4;
      unsigned int bits = ((ShufBytes[bidx] << 24) |
                           (ShufBytes[bidx+1] << 16) |
                           (ShufBytes[bidx+2] << 8) |
                           ShufBytes[bidx+3]);
      ShufMask[i] = DAG.getConstant(bits, MVT::i32);
    }

    SDValue ShufMaskVec =
      DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                  &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));

    retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
                         DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
                                     N, N, ShufMaskVec));
  } else {
    // Variable index: Rotate the requested element into slot 0, then replicate
    // slot 0 across the vector
    MVT VecVT = N.getValueType();
    if (!VecVT.isSimple() || !VecVT.isVector() || !VecVT.is128BitVector()) {
      llvm_report_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit"
                        " vector type!");
    }

    // Make life easier by making sure the index is zero-extended to i32
    if (Elt.getValueType() != MVT::i32)
      Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);

    // Scale the index to a bit/byte shift quantity
    APInt scaleFactor =
      APInt(32, uint64_t(16 / N.getValueType().getVectorNumElements()), false);
    unsigned scaleShift = scaleFactor.logBase2();
    SDValue vecShift;

    if (scaleShift > 0) {
      // Scale the shift factor:
      Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
                        DAG.getConstant(scaleShift, MVT::i32));
    }

    vecShift = DAG.getNode(SPUISD::SHLQUAD_L_BYTES, dl, VecVT, N, Elt);

    // Replicate the bytes starting at byte 0 across the entire vector (for
    // consistency with the notion of a unified register set)
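    //
    // The "factor" words below are shufb controls that broadcast the value
    // now sitting at byte 0 of vecShift: 0x00010203 in every word copies
    // bytes 0..3 into each word slot, 0x00010001 copies halfword 0 into
    // every halfword, 0x00000000 copies byte 0 everywhere, and the lo/hi
    // pair replays bytes 0..7 across both doublewords.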
    SDValue replicate;

    switch (VT.getSimpleVT()) {
    default:
      llvm_report_error("LowerEXTRACT_VECTOR_ELT(variable): Unhandled vector"
                        " type");
      /*NOTREACHED*/
    case MVT::i8: {
      SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
      replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                              factor, factor, factor, factor);
      break;
    }
    case MVT::i16: {
      SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
      replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                              factor, factor, factor, factor);
      break;
    }
    case MVT::i32:
    case MVT::f32: {
      SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
      replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                              factor, factor, factor, factor);
      break;
    }
    case MVT::i64:
    case MVT::f64: {
      SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
      SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
      replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                              loFactor, hiFactor, loFactor, hiFactor);
      break;
    }
    }

    retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
                         DAG.getNode(SPUISD::SHUFB, dl, VecVT,
                                     vecShift, vecShift, replicate));
  }

  return retval;
}
static SDValue
LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  SDValue VecOp = Op.getOperand(0);
  SDValue ValOp = Op.getOperand(1);
  SDValue IdxOp = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();
  MVT VT = Op.getValueType();

  ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
  assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");

  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  // Use $sp ($1) because it's always 16-byte aligned and it's available:
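  // SHUFFLE_MASK over an address selects to one of the c[bhwd]d
  // instructions, which derive an insertion mask from the low four bits of
  // the effective address; a 16-byte-aligned base like $sp guarantees that
  // (sp + element offset) mod 16 is just the element offset.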
  SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                DAG.getRegister(SPU::R1, PtrVT),
                                DAG.getConstant(CN->getSExtValue(), PtrVT));
  SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, VT, Pointer);

  SDValue result =
    DAG.getNode(SPUISD::SHUFB, dl, VT,
                DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
                VecOp,
                DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, ShufMask));

  return result;
}
static SDValue
LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
            const TargetLowering &TLI)
{
  SDValue N0 = Op.getOperand(0);      // Everything has at least one operand
  DebugLoc dl = Op.getDebugLoc();
  MVT ShiftVT = TLI.getShiftAmountTy();

  assert(Op.getValueType() == MVT::i8);
  switch (Opc) {
  default:
    llvm_unreachable("Unhandled i8 math operator");
    /*NOTREACHED*/
    break;
  case ISD::ADD: {
    // 8-bit addition: Promote the arguments up to 16-bits and truncate
    // the result:
    SDValue N1 = Op.getOperand(1);
    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
  }

  case ISD::SUB: {
    // 8-bit subtraction: Promote the arguments up to 16-bits and truncate
    // the result:
    SDValue N1 = Op.getOperand(1);
    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
  }
  case ISD::ROTR:
  case ISD::ROTL: {
    SDValue N1 = Op.getOperand(1);
    MVT N1VT = N1.getValueType();

    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
    if (!N1VT.bitsEq(ShiftVT)) {
      unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
                       ? ISD::ZERO_EXTEND
                       : ISD::TRUNCATE;
      N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
    }

    // Replicate lower 8-bits into upper 8:
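    // Why this works (sketch): with the low byte copied into the high byte
    // (0x00ab becomes 0xabab), a 16-bit rotate by 0..7 leaves exactly the
    // 8-bit rotate result in the low byte, which the TRUNCATE below keeps.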
    SDValue ExpandArg =
      DAG.getNode(ISD::OR, dl, MVT::i16, N0,
                  DAG.getNode(ISD::SHL, dl, MVT::i16,
                              N0, DAG.getConstant(8, MVT::i32)));

    // Truncate back down to i8
    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                       DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
  }
  case ISD::SRL:
  case ISD::SHL: {
    SDValue N1 = Op.getOperand(1);
    MVT N1VT = N1.getValueType();

    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
    if (!N1VT.bitsEq(ShiftVT)) {
      unsigned N1Opc = ISD::ZERO_EXTEND;

      if (N1.getValueType().bitsGT(ShiftVT))
        N1Opc = ISD::TRUNCATE;

      N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
    }

    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
  }
  case ISD::SRA: {
    SDValue N1 = Op.getOperand(1);
    MVT N1VT = N1.getValueType();

    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
    if (!N1VT.bitsEq(ShiftVT)) {
      unsigned N1Opc = ISD::SIGN_EXTEND;

      if (N1VT.bitsGT(ShiftVT))
        N1Opc = ISD::TRUNCATE;
      N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
    }

    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
  }
  case ISD::MUL: {
    SDValue N1 = Op.getOperand(1);

    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
  }
  }

  return SDValue();
}
//! Lower byte immediate operations for v16i8 vectors:
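//!
//! AND, OR and XOR of a v16i8 vector with a splat constant can use the
//! SPU's byte-immediate forms (ANDBI, ORBI, XORBI), which replicate one
//! 8-bit immediate across all sixteen bytes; the code below looks for a
//! BUILD_VECTOR splat (possibly behind a BIT_CONVERT) to take advantage
//! of that.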
static SDValue
LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
  SDValue ConstVec;
  SDValue Arg;
  MVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();

  ConstVec = Op.getOperand(0);
  Arg = Op.getOperand(1);
  if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
    if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
      ConstVec = ConstVec.getOperand(0);
    } else {
      ConstVec = Op.getOperand(1);
      Arg = Op.getOperand(0);
      if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
        ConstVec = ConstVec.getOperand(0);
      }
    }
  }

  if (ConstVec.getNode()->getOpcode() == ISD::BUILD_VECTOR) {
    BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(ConstVec.getNode());
    assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerByteImmed");

    APInt APSplatBits, APSplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    unsigned minSplatBits = VT.getVectorElementType().getSizeInBits();

    if (BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                             HasAnyUndefs, minSplatBits)
        && minSplatBits <= SplatBitSize) {
      uint64_t SplatBits = APSplatBits.getZExtValue();
      SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);

      SmallVector<SDValue, 16> tcVec;
      tcVec.assign(16, tc);
      return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
                         DAG.getNode(ISD::BUILD_VECTOR, dl, VT,
                                     &tcVec[0], tcVec.size()));
    }
  }

  // These operations (AND, OR, XOR) are legal, they just couldn't be custom
  // lowered. Return the operation, rather than a null SDValue.
  return Op;
}
//! Custom lowering for CTPOP (count population)
/*!
  Custom lowering code that counts the number of ones in the input
  operand. SPU has such an instruction, but it counts the number of
  ones per byte, which then have to be accumulated.
*/
static SDValue
LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getValueType();
  MVT vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits()));
  DebugLoc dl = Op.getDebugLoc();

  switch (VT.getSimpleVT()) {
  default:
    assert(false && "Invalid value type!");
  case MVT::i8: {
    SDValue N = Op.getOperand(0);
    SDValue Elt0 = DAG.getConstant(0, MVT::i32);

    SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
    SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);

    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
  }

  case MVT::i16: {
    MachineFunction &MF = DAG.getMachineFunction();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();

    unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);

    SDValue N = Op.getOperand(0);
    SDValue Elt0 = DAG.getConstant(0, MVT::i16);
    SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16);
    SDValue Shift1 = DAG.getConstant(8, MVT::i32);

    SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
    SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);

    // CNTB_result becomes the chain to which all of the virtual registers
    // CNTB_reg, SUM1_reg become associated:
    SDValue CNTB_result =
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);

    SDValue CNTB_rescopy =
      DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);

    SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);

    return DAG.getNode(ISD::AND, dl, MVT::i16,
                       DAG.getNode(ISD::ADD, dl, MVT::i16,
                                   DAG.getNode(ISD::SRL, dl, MVT::i16,
                                               Tmp1, Shift1),
                                   Tmp1),
                       Mask0);
  }

  case MVT::i32: {
    MachineFunction &MF = DAG.getMachineFunction();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();

    unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
    unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);

    SDValue N = Op.getOperand(0);
    SDValue Elt0 = DAG.getConstant(0, MVT::i32);
    SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
    SDValue Shift1 = DAG.getConstant(16, MVT::i32);
    SDValue Shift2 = DAG.getConstant(8, MVT::i32);
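
    // In outline, with per-byte counts (b3 b2 b1 b0) left by CNTB:
    //   x += x >> 16;  x += x >> 8;  return x & 0xff;
    // accumulates all four byte counts into the low byte.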
    SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
    SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);

    // CNTB_result becomes the chain to which all of the virtual registers
    // CNTB_reg, SUM1_reg become associated:
    SDValue CNTB_result =
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);

    SDValue CNTB_rescopy =
      DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);

    SDValue Comp1 =
      DAG.getNode(ISD::SRL, dl, MVT::i32,
                  DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
                  Shift1);

    SDValue Sum1 =
      DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
                  DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32));

    SDValue Sum1_rescopy =
      DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);

    SDValue Comp2 =
      DAG.getNode(ISD::SRL, dl, MVT::i32,
                  DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
                  Shift2);

    SDValue Sum2 =
      DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
                  DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));

    return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
  }

  case MVT::i64:
    break;
  }

  return SDValue();
}
//! Lower ISD::FP_TO_SINT, ISD::FP_TO_UINT for i32
/*!
 f32->i32 passes through unchanged, whereas f64->i32 expands to a libcall.
 All conversions to i64 are expanded to a libcall.
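 For example, an f64->i32 FP_TO_SINT is handed to ExpandLibCall with
 whatever routine RTLIB::getFPTOSINT names for that pair (conventionally
 __fixdfsi in libgcc-style runtimes).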
 */
static SDValue
LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
               SPUTargetLowering &TLI) {
  MVT OpVT = Op.getValueType();
  SDValue Op0 = Op.getOperand(0);
  MVT Op0VT = Op0.getValueType();

  if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
      || OpVT == MVT::i64) {
    // Convert f32 / f64 to i32 / i64 via libcall.
    RTLIB::Libcall LC =
      (Op.getOpcode() == ISD::FP_TO_SINT)
      ? RTLIB::getFPTOSINT(Op0VT, OpVT)
      : RTLIB::getFPTOUINT(Op0VT, OpVT);
    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-int conversion!");
    SDValue Dummy;
    return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
  }

  return Op;
}
//! Lower ISD::SINT_TO_FP, ISD::UINT_TO_FP for i32
/*!
 i32->f32 passes through unchanged, whereas i32->f64 is expanded to a libcall.
 All conversions from i64 are expanded to a libcall.
 */
static SDValue
LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
               SPUTargetLowering &TLI) {
  MVT OpVT = Op.getValueType();
  SDValue Op0 = Op.getOperand(0);
  MVT Op0VT = Op0.getValueType();

  if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
      || Op0VT == MVT::i64) {
    // Convert i32, i64 to f64 via libcall:
    RTLIB::Libcall LC =
      (Op.getOpcode() == ISD::SINT_TO_FP)
      ? RTLIB::getSINTTOFP(Op0VT, OpVT)
      : RTLIB::getUINTTOFP(Op0VT, OpVT);
    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected int-to-fp conversion!");
    SDValue Dummy;
    return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
  }

  return Op;
}
//! Lower ISD::SETCC
/*!
 This handles MVT::f64 (double floating point) condition lowering
 */
static SDValue
LowerSETCC(SDValue Op, SelectionDAG &DAG,
           const TargetLowering &TLI) {
  CondCodeSDNode *CC = dyn_cast<CondCodeSDNode>(Op.getOperand(2));
  DebugLoc dl = Op.getDebugLoc();
  assert(CC != 0 && "LowerSETCC: CondCodeSDNode should not be null here!\n");

  SDValue lhs = Op.getOperand(0);
  SDValue rhs = Op.getOperand(1);
  MVT lhsVT = lhs.getValueType();
  assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::f64\n");

  MVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
  APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
  MVT IntVT(MVT::i64);

  // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
  // selected to a NOP:
  SDValue i64lhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, lhs);
  SDValue lhsHi32 =
    DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
                DAG.getNode(ISD::SRL, dl, IntVT,
                            i64lhs, DAG.getConstant(32, MVT::i32)));
  SDValue lhsHi32abs =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
  SDValue lhsLo32 =
    DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);

  // SETO and SETUO only use the lhs operand:
  if (CC->get() == ISD::SETO) {
    // Evaluates to true if Op0 is not [SQ]NaN - lowers to the inverse of
    // SETUO:
    APInt ccResultAllOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
    return DAG.getNode(ISD::XOR, dl, ccResultVT,
                       DAG.getSetCC(dl, ccResultVT,
                                    lhs, DAG.getConstantFP(0.0, lhsVT),
                                    ISD::SETUO),
                       DAG.getConstant(ccResultAllOnes, ccResultVT));
  } else if (CC->get() == ISD::SETUO) {
    // Evaluates to true if Op0 is [SQ]NaN
    return DAG.getNode(ISD::AND, dl, ccResultVT,
                       DAG.getSetCC(dl, ccResultVT,
                                    lhsHi32abs,
                                    DAG.getConstant(0x7ff00000, MVT::i32),
                                    ISD::SETGE),
                       DAG.getSetCC(dl, ccResultVT,
                                    lhsLo32,
                                    DAG.getConstant(0, MVT::i32),
                                    ISD::SETGT));
  }

  SDValue i64rhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, rhs);
  SDValue rhsHi32 =
    DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
                DAG.getNode(ISD::SRL, dl, IntVT,
                            i64rhs, DAG.getConstant(32, MVT::i32)));

  // If a value is negative, subtract from the sign magnitude constant:
  SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);

  // Convert the sign-magnitude representation into 2's complement:
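  //
  // Sketch of the trick: IEEE-754 doubles order like sign-magnitude
  // integers. For a negative operand (sign bit set, so the SRA mask below
  // is all ones), 0x8000000000000000 - bits(x) yields minus the magnitude,
  // after which a plain signed i64 comparison orders every pair of
  // non-NaN operands correctly.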
  SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
                                      lhsHi32, DAG.getConstant(31, MVT::i32));
  SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
  SDValue lhsSelect =
    DAG.getNode(ISD::SELECT, dl, IntVT,
                lhsSelectMask, lhsSignMag2TC, i64lhs);

  SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
                                      rhsHi32, DAG.getConstant(31, MVT::i32));
  SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
  SDValue rhsSelect =
    DAG.getNode(ISD::SELECT, dl, IntVT,
                rhsSelectMask, rhsSignMag2TC, i64rhs);

  unsigned compareOp;

  switch (CC->get()) {
  case ISD::SETOEQ:
  case ISD::SETUEQ:
    compareOp = ISD::SETEQ; break;
  case ISD::SETOGT:
  case ISD::SETUGT:
    compareOp = ISD::SETGT; break;
  case ISD::SETOGE:
  case ISD::SETUGE:
    compareOp = ISD::SETGE; break;
  case ISD::SETOLT:
  case ISD::SETULT:
    compareOp = ISD::SETLT; break;
  case ISD::SETOLE:
  case ISD::SETULE:
    compareOp = ISD::SETLE; break;
  case ISD::SETUNE:
  case ISD::SETONE:
    compareOp = ISD::SETNE; break;
  default:
    llvm_report_error("CellSPU ISel Select: unimplemented f64 condition");
  }

  SDValue result =
    DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
                 (ISD::CondCode) compareOp);

  if ((CC->get() & 0x8) == 0) {
    // Ordered comparison:
    SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
                                  lhs, DAG.getConstantFP(0.0, MVT::f64),
                                  ISD::SETO);
    SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
                                  rhs, DAG.getConstantFP(0.0, MVT::f64),
                                  ISD::SETO);
    SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);

    result = DAG.getNode(ISD::AND, dl, ccResultVT, ordered, result);
  }

  return result;
}
//! Lower ISD::SELECT_CC
/*!
  ISD::SELECT_CC can (generally) be implemented directly on the SPU using the
  SELB instruction.

  \note Need to revisit this in the future: if the code path through the true
  and false value computations is longer than the latency of a branch (6
  cycles), then it would be more advantageous to branch and insert a new basic
  block and branch on the condition. However, this code does not make that
  assumption, given the simplistic uses so far.
 */
static SDValue
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
               const TargetLowering &TLI) {
  MVT VT = Op.getValueType();
  SDValue lhs = Op.getOperand(0);
  SDValue rhs = Op.getOperand(1);
  SDValue trueval = Op.getOperand(2);
  SDValue falseval = Op.getOperand(3);
  SDValue condition = Op.getOperand(4);
  DebugLoc dl = Op.getDebugLoc();

  // NOTE: SELB's arguments: $rA, $rB, $mask
  //
  // SELB selects bits from $rA where bits in $mask are 0, bits from $rB
  // where bits in $mask are 1. CCond will be inverted, having 1s where the
  // condition was true and 0s where the condition was false. Hence, the
  // arguments to SELB get reversed.
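  //
  // Concretely, selb $rT, $rA, $rB, $rM computes ($rA & ~$rM) | ($rB & $rM)
  // bitwise, so passing (falseval, trueval, compare) yields trueval where
  // the setcc mask is all ones and falseval where it is all zeros.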
  // Note: Really should be ISD::SELECT instead of SPUISD::SELB, but LLVM's
  // legalizer insists on combining SETCC/SELECT into SELECT_CC, so we end up
  // with another "cannot select select_cc" assert:

  SDValue compare = DAG.getNode(ISD::SETCC, dl,
                                TLI.getSetCCResultType(Op.getValueType()),
                                lhs, rhs, condition);
  return DAG.getNode(SPUISD::SELB, dl, VT, falseval, trueval, compare);
}
//! Custom lower ISD::TRUNCATE
static SDValue
LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
{
  // Type to truncate to
  MVT VT = Op.getValueType();
  MVT::SimpleValueType simpleVT = VT.getSimpleVT();
  MVT VecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits()));
  DebugLoc dl = Op.getDebugLoc();

  // Type to truncate from
  SDValue Op0 = Op.getOperand(0);
  MVT Op0VT = Op0.getValueType();

  if (Op0VT.getSimpleVT() == MVT::i128 && simpleVT == MVT::i64) {
    // Create shuffle mask, least significant doubleword of quadword
    unsigned maskHigh = 0x08090a0b;
    unsigned maskLow = 0x0c0d0e0f;
    // Use a shuffle to perform the truncation
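    // (Byte indices 8..15 address the least significant doubleword of the
    // big-endian quadword, so this mask steers that doubleword into bytes
    // 0..7, the i64 preferred slot; the pair is repeated to fill out the
    // 16-byte control word.)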
    SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                   DAG.getConstant(maskHigh, MVT::i32),
                                   DAG.getConstant(maskLow, MVT::i32),
                                   DAG.getConstant(maskHigh, MVT::i32),
                                   DAG.getConstant(maskLow, MVT::i32));

    SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
                                       Op0, Op0, shufMask);

    return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
  }

  return SDValue();             // Leave the truncate unmolested
}
//! Custom (target-specific) lowering entry point
/*!
  This is where LLVM's DAG selection process calls to do target-specific
  lowering of nodes.
 */
SDValue
SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
{
  unsigned Opc = (unsigned) Op.getOpcode();
  MVT VT = Op.getValueType();

  switch (Opc) {
  default: {
    cerr << "SPUTargetLowering::LowerOperation(): need to lower this!\n";
    cerr << "Op.getOpcode() = " << Opc << "\n";
    cerr << "*Op.getNode():\n";
    Op.getNode()->dump();

    llvm_unreachable(0);
  }
  case ISD::LOAD:
  case ISD::EXTLOAD:
  case ISD::SEXTLOAD:
  case ISD::ZEXTLOAD:
    return LowerLOAD(Op, DAG, SPUTM.getSubtargetImpl());
  case ISD::STORE:
    return LowerSTORE(Op, DAG, SPUTM.getSubtargetImpl());
  case ISD::ConstantPool:
    return LowerConstantPool(Op, DAG, SPUTM.getSubtargetImpl());
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG, SPUTM.getSubtargetImpl());
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
  case ISD::ConstantFP:
    return LowerConstantFP(Op, DAG);
  case ISD::FORMAL_ARGUMENTS:
    return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex);
  case ISD::CALL:
    return LowerCALL(Op, DAG, SPUTM.getSubtargetImpl());
  case ISD::RET:
    return LowerRET(Op, DAG, getTargetMachine());

  // i8, i64 math ops:
  case ISD::ADD:
  case ISD::SUB:
  case ISD::ROTR:
  case ISD::ROTL:
  case ISD::SRL:
  case ISD::SHL:
  case ISD::SRA: {
    if (VT == MVT::i8)
      return LowerI8Math(Op, DAG, Opc, *this);
    break;
  }

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    return LowerFP_TO_INT(Op, DAG, *this);

  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return LowerINT_TO_FP(Op, DAG, *this);

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:
    return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return LowerINSERT_VECTOR_ELT(Op, DAG);

  // Look for ANDBI, ORBI and XORBI opportunities and lower appropriately:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return LowerByteImmed(Op, DAG);

  // Vector and i8 multiply:
  case ISD::MUL:
    if (VT == MVT::i8)
      return LowerI8Math(Op, DAG, Opc, *this);

  case ISD::CTPOP:
    return LowerCTPOP(Op, DAG);

  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG, *this);

  case ISD::SETCC:
    return LowerSETCC(Op, DAG, *this);

  case ISD::TRUNCATE:
    return LowerTRUNCATE(Op, DAG);
  }

  return SDValue();
}
void SPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue>&Results,
                                           SelectionDAG &DAG)
{
#if 0
  unsigned Opc = (unsigned) N->getOpcode();
  MVT OpVT = N->getValueType(0);

  switch (Opc) {
  default: {
    cerr << "SPUTargetLowering::ReplaceNodeResults(): need to fix this!\n";
    cerr << "Op.getOpcode() = " << Opc << "\n";
    cerr << "*Op.getNode():\n";
    N->dump();
    abort();
    /*NOTREACHED*/
  }
  }
#endif

  /* Otherwise, return unchanged */
}
//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue
SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
{
#if 0
  TargetMachine &TM = getTargetMachine();
#endif
  const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op0 = N->getOperand(0);       // everything has at least one operand
  MVT NodeVT = N->getValueType(0);      // The node's value type
  MVT Op0VT = Op0.getValueType();       // The first operand's result
  SDValue Result;                       // Initially, empty result
  DebugLoc dl = N->getDebugLoc();

  switch (N->getOpcode()) {
  default: break;
  case ISD::ADD: {
    SDValue Op1 = N->getOperand(1);

    if (Op0.getOpcode() == SPUISD::IndirectAddr
        || Op1.getOpcode() == SPUISD::IndirectAddr) {
      // Normalize the operands to reduce repeated code
      SDValue IndirectArg = Op0, AddArg = Op1;

      if (Op1.getOpcode() == SPUISD::IndirectAddr) {
        IndirectArg = Op1;
        AddArg = Op0;
      }

      if (isa<ConstantSDNode>(AddArg)) {
        ConstantSDNode *CN0 = cast<ConstantSDNode> (AddArg);
        SDValue IndOp1 = IndirectArg.getOperand(1);

        if (CN0->isNullValue()) {
          // (add (SPUindirect <arg>, <arg>), 0) ->
          // (SPUindirect <arg>, <arg>)

#if !defined(NDEBUG)
          if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
            cerr << "\n"
                 << "Replace: (add (SPUindirect <arg>, <arg>), 0)\n"
                 << "With: (SPUindirect <arg>, <arg>)\n";
          }
#endif

          return IndirectArg;
        } else if (isa<ConstantSDNode>(IndOp1)) {
          // (add (SPUindirect <arg>, <const>), <const>) ->
          // (SPUindirect <arg>, <const + const>)
          ConstantSDNode *CN1 = cast<ConstantSDNode> (IndOp1);
          int64_t combinedConst = CN0->getSExtValue() + CN1->getSExtValue();
          SDValue combinedValue = DAG.getConstant(combinedConst, Op0VT);

#if !defined(NDEBUG)
          if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
            cerr << "\n"
                 << "Replace: (add (SPUindirect <arg>, " << CN1->getSExtValue()
                 << "), " << CN0->getSExtValue() << ")\n"
                 << "With: (SPUindirect <arg>, "
                 << combinedConst << ")\n";
          }
#endif

          return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
                             IndirectArg, combinedValue);
        }
      }
    }
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND: {
    if (Op0.getOpcode() == SPUISD::VEC2PREFSLOT && NodeVT == Op0VT) {
      // (any_extend (SPUextract_elt0 <arg>)) ->
      // (SPUextract_elt0 <arg>)
      // Types must match, however...
#if !defined(NDEBUG)
      if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
        cerr << "\nReplace: ";
        N->dump(&DAG);
        cerr << "\nWith: ";
        Op0.getNode()->dump(&DAG);
        cerr << "\n";
      }
#endif

      return Op0;
    }
    break;
  }
  case SPUISD::IndirectAddr: {
    if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
      ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
      if (CN != 0 && CN->getZExtValue() == 0) {
        // (SPUindirect (SPUaform <addr>, 0), 0) ->
        // (SPUaform <addr>, 0)

        DEBUG(cerr << "Replace: ");
        DEBUG(N->dump(&DAG));
        DEBUG(cerr << "\nWith: ");
        DEBUG(Op0.getNode()->dump(&DAG));
        DEBUG(cerr << "\n");

        return Op0;
      }
    } else if (Op0.getOpcode() == ISD::ADD) {
      SDValue Op1 = N->getOperand(1);
      if (ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(Op1)) {
        // (SPUindirect (add <arg>, <arg>), 0) ->
        // (SPUindirect <arg>, <arg>)
        if (CN1->isNullValue()) {

#if !defined(NDEBUG)
          if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
            cerr << "\n"
                 << "Replace: (SPUindirect (add <arg>, <arg>), 0)\n"
                 << "With: (SPUindirect <arg>, <arg>)\n";
          }
#endif

          return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
                             Op0.getOperand(0), Op0.getOperand(1));
        }
      }
    }
    break;
  }
  case SPUISD::SHLQUAD_L_BITS:
  case SPUISD::SHLQUAD_L_BYTES:
  case SPUISD::VEC_SHL:
  case SPUISD::VEC_SRL:
  case SPUISD::VEC_SRA:
  case SPUISD::ROTBYTES_LEFT: {
    SDValue Op1 = N->getOperand(1);

    // Kill degenerate vector shifts:
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
      if (CN->isNullValue()) {
        Result = Op0;
      }
    }
    break;
  }
  case SPUISD::PREFSLOT2VEC: {
    switch (Op0.getOpcode()) {
    default:
      break;
    case ISD::ANY_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::SIGN_EXTEND: {
      // (SPUprefslot2vec (any|zero|sign_extend (SPUvec2prefslot <arg>))) ->
      // <arg>
      // but only if the SPUprefslot2vec and <arg> types match.
      SDValue Op00 = Op0.getOperand(0);
      if (Op00.getOpcode() == SPUISD::VEC2PREFSLOT) {
        SDValue Op000 = Op00.getOperand(0);
        if (Op000.getValueType() == NodeVT) {
          Result = Op000;
        }
      }
      break;
    }
    case SPUISD::VEC2PREFSLOT: {
      // (SPUprefslot2vec (SPUvec2prefslot <arg>)) ->
      // <arg>
      Result = Op0.getOperand(0);
      break;
    }
    }
    break;
  }
  }

  // Otherwise, return unchanged.
  if (Result.getNode()) {
    DEBUG(cerr << "\nReplace.SPU: ");
    DEBUG(N->dump(&DAG));
    DEBUG(cerr << "\nWith: ");
    DEBUG(Result.getNode()->dump(&DAG));
    DEBUG(cerr << "\n");
  }

  return Result;
}
//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SPUTargetLowering::ConstraintType
SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const {
  if (ConstraintLetter.size() == 1) {
    switch (ConstraintLetter[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'v':
    case 'y':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(ConstraintLetter);
}

std::pair<unsigned, const TargetRegisterClass*>
SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const
{
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
    case 'r':   // R0-R31
      if (VT == MVT::i64)
        return std::make_pair(0U, SPU::R64CRegisterClass);
      return std::make_pair(0U, SPU::R32CRegisterClass);
    case 'f':
      if (VT == MVT::f32)
        return std::make_pair(0U, SPU::R32FPRegisterClass);
      else if (VT == MVT::f64)
        return std::make_pair(0U, SPU::R64FPRegisterClass);
      break;
    case 'v':
      return std::make_pair(0U, SPU::GPRCRegisterClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
//! Compute used/known bits for a SPU operand
void
SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                  const APInt &Mask,
                                                  APInt &KnownZero,
                                                  APInt &KnownOne,
                                                  const SelectionDAG &DAG,
                                                  unsigned Depth) const {
#if 0
  const uint64_t uint64_sizebits = sizeof(uint64_t) * CHAR_BIT;

  switch (Op.getOpcode()) {
  default:
    // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
    break;

  case SPUISD::PREFSLOT2VEC:
  case SPUISD::LDRESULT:
  case SPUISD::VEC2PREFSLOT:
  case SPUISD::SHLQUAD_L_BITS:
  case SPUISD::SHLQUAD_L_BYTES:
  case SPUISD::VEC_SHL:
  case SPUISD::VEC_SRL:
  case SPUISD::VEC_SRA:
  case SPUISD::VEC_ROTL:
  case SPUISD::VEC_ROTR:
  case SPUISD::ROTBYTES_LEFT:
  case SPUISD::SELECT_MASK:
  case SPUISD::SELB:
    break;
  }
#endif
}

unsigned
SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    return 1;

  case ISD::SETCC: {
    MVT VT = Op.getValueType();

    if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
      VT = MVT::i32;
    }
    return VT.getSizeInBits();
  }
  }
}
// LowerAsmOperandForConstraint
void
SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                char ConstraintLetter,
                                                bool hasMemory,
                                                std::vector<SDValue> &Ops,
                                                SelectionDAG &DAG) const {
  // Default, for the time being, to the base class handler
  TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, hasMemory,
                                               Ops, DAG);
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode.
bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
                                                const Type *Ty) const {
  // SPU's addresses are 256K:
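  // (Local store is 2^18 bytes, so any in-range offset fits in a signed
  // 18-bit field; the bounds below are deliberately conservative at both
  // extremes.)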
  return (V > -(1 << 18) && V < (1 << 18) - 1);
}

bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
  return false;
}

bool
SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The SPU target isn't yet aware of offsets.
  return false;
}