//===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a TargetTransformInfo analysis pass specific to the
// SystemZ target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//
17 #include "SystemZTargetTransformInfo.h"
18 #include "llvm/Analysis/TargetTransformInfo.h"
19 #include "llvm/CodeGen/BasicTTIImpl.h"
20 #include "llvm/CodeGen/CostTable.h"
21 #include "llvm/CodeGen/TargetLowering.h"
22 #include "llvm/IR/IntrinsicInst.h"
23 #include "llvm/Support/Debug.h"
26 #define DEBUG_TYPE "systemztti"
//===----------------------------------------------------------------------===//
//
// SystemZ cost model.
//
//===----------------------------------------------------------------------===//
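
// Return the cost of materializing the integer immediate Imm in a register of
// type Ty, in units of TTI::TCC_Basic (roughly one unit per instruction
// needed to load the constant).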
int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    // Constants loaded via lgfi.
    if (isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llilf.
    if (isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llihf:
    if ((Imm.getZExtValue() & 0xffffffff) == 0)
      return TTI::TCC_Basic;

    return 2 * TTI::TCC_Basic;
  }

  return 4 * TTI::TCC_Basic;
}

int SystemZTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    if (Idx == 0 && Imm.getBitWidth() <= 64) {
      // Any 8-bit immediate store can be implemented via mvi.
      if (BitSize == 8)
        return TTI::TCC_Free;
      // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
      if (isInt<16>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::ICmp:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Comparisons against signed 32-bit immediates implemented via cgfi.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
      // Comparisons against unsigned 32-bit immediates implemented via clgfi.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Add:
  case Instruction::Sub:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Or their negation, by swapping addition vs. subtraction.
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Mul:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use msgfi to multiply by 32-bit signed immediates.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Or:
  case Instruction::Xor:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Masks supported by oilf/xilf.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Masks supported by oihf/xihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0)
        return TTI::TCC_Free;
    }
    break;
  case Instruction::And:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Any 32-bit AND operation can be implemented via nilf.
      if (BitSize <= 32)
        return TTI::TCC_Free;
      // 64-bit masks supported by nilf.
      if (isUInt<32>(~Imm.getZExtValue()))
        return TTI::TCC_Free;
      // 64-bit masks supported by nihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
        return TTI::TCC_Free;
      // Some 64-bit AND operations can be implemented via risbg.
      const SystemZInstrInfo *TII = ST->getInstrInfo();
      unsigned Start, End;
      if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // Always return TCC_Free for the shift value of a shift instruction.
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}

int SystemZTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    // These get expanded to include a normal addition/subtraction.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    // These get expanded to include a normal multiplication.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
  if (ST->hasPopulationCount() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Find out if L contains a call and how many stores there are.
  bool HasCall = false;
  unsigned NumStores = 0;
  for (auto &BB : L->blocks())
    for (auto &I : *BB) {
      if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (isLoweredToCall(F))
            HasCall = true;
          if (F->getIntrinsicID() == Intrinsic::memcpy ||
              F->getIntrinsicID() == Intrinsic::memset)
            NumStores++;
        } else { // indirect call.
          HasCall = true;
        }
      }
      if (isa<StoreInst>(&I)) {
        Type *MemAccessTy = I.getOperand(0)->getType();
        NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, 0, 0);
      }
    }

  // The z13 processor will run out of store tags if too many stores
  // are fed into it too quickly. Therefore make sure there are not
  // too many stores in the resulting unrolled loop.
  unsigned const Max = (NumStores ? (12 / NumStores) : UINT_MAX);

  if (HasCall) {
    // Only allow full unrolling if the loop has any calls.
    UP.FullUnrollMaxCount = Max;
    UP.MaxCount = 1;
    return;
  }

  UP.MaxCount = Max;
  if (UP.MaxCount <= 1)
    return;

  // Allow partial and runtime trip count unrolling.
  UP.Partial = UP.Runtime = true;

  UP.PartialThreshold = 75;
  UP.DefaultUnrollRuntimeCount = 4;

  // Allow expensive instructions in the pre-header of the loop.
  UP.AllowExpensiveTripCount = true;
}

bool SystemZTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                                   TargetTransformInfo::LSRCost &C2) {
  // SystemZ specific: check instruction count (first), and don't care about
  // ImmCost, since offsets are checked explicitly.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.SetupCost) <
    std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
             C2.NumIVMuls, C2.NumBaseAdds,
             C2.ScaleCost, C2.SetupCost);
}

unsigned SystemZTTIImpl::getNumberOfRegisters(bool Vector) {
  if (!Vector)
    // Discount the stack pointer. Also leave out %r0, since it can't
    // be used in an address.
    return 14;
  if (ST->hasVector())
    return 32;
  return 0;
}

unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (!Vector)
    return 64;
  if (ST->hasVector())
    return 128;
  return 0;
}

bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
}

// Return the bit size for the scalar type or vector element
// type. getScalarSizeInBits() returns 0 for a pointer type.
static unsigned getScalarSizeInBits(Type *Ty) {
  unsigned Size =
    (Ty->isPtrOrPtrVectorTy() ? 64U : Ty->getScalarSizeInBits());
  assert(Size > 0 && "Element must have non-zero size.");
  return Size;
}

// getNumberOfParts() calls getTypeLegalizationCost() which splits the vector
// type until it is legal. This would e.g. return 4 for <6 x i64>, instead of
// 3.
static unsigned getNumVectorRegs(Type *Ty) {
  assert(Ty->isVectorTy() && "Expected vector type");
  unsigned WideBits = getScalarSizeInBits(Ty) * Ty->getVectorNumElements();
  assert(WideBits > 0 && "Could not compute size of vector");
  return ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));
}

int SystemZTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {

  // TODO: return a good value for BB-VECTORIZER that includes the
  // immediate loads, which we do not want to count for the loop
  // vectorizer, since they are hopefully hoisted out of the loop. This
  // would require a new parameter 'InLoop', but not sure if constant
  // args are common enough to motivate this.

  unsigned ScalarBits = Ty->getScalarSizeInBits();

  // Div with a constant which is a power of 2 will be converted by
  // DAGCombiner to use shifts. With vector shift-element instructions, a
  // vector sdiv costs about as much as a scalar one.
  const unsigned SDivCostEstimate = 4;
  bool SDivPow2 = false;
  bool UDivPow2 = false;
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv) &&
      Args.size() == 2) {
    const ConstantInt *CI = nullptr;
    if (const Constant *C = dyn_cast<Constant>(Args[1])) {
      if (C->getType()->isVectorTy())
        CI = dyn_cast_or_null<const ConstantInt>(C->getSplatValue());
      else
        CI = dyn_cast<const ConstantInt>(C);
    }
    if (CI != nullptr &&
        (CI->getValue().isPowerOf2() || (-CI->getValue()).isPowerOf2())) {
      if (Opcode == Instruction::SDiv)
        SDivPow2 = true;
      else
        UDivPow2 = true;
    }
  }

  if (Ty->isVectorTy()) {
    assert (ST->hasVector() && "getArithmeticInstrCost() called with vector type.");
    unsigned VF = Ty->getVectorNumElements();
    unsigned NumVectors = getNumVectorRegs(Ty);

    // These vector operations are custom handled, but are still supported
    // with one instruction per vector, regardless of element size.
    if (Opcode == Instruction::Shl || Opcode == Instruction::LShr ||
        Opcode == Instruction::AShr || UDivPow2) {
      return NumVectors;
    }

    if (SDivPow2)
      return (NumVectors * SDivCostEstimate);

    // Temporary hack: disable high vectorization factors with integer
    // division/remainder, which will get scalarized and handled with GR128
    // registers. The mischeduler is not clever enough to avoid spilling yet.
    if ((Opcode == Instruction::UDiv || Opcode == Instruction::SDiv ||
         Opcode == Instruction::URem || Opcode == Instruction::SRem) && VF > 4)
      return 1000;

    // These FP operations are supported with a single vector instruction for
    // double (base implementation assumes float generally costs 2). For
    // FP128, the scalar cost is 1, and there is no overhead since the values
    // are already in scalar registers.
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv) {
      switch (ScalarBits) {
      case 32: {
        // The vector enhancements facility 1 provides v4f32 instructions.
        if (ST->hasVectorEnhancements1())
          return NumVectors;
        // Return the cost of multiple scalar invocation plus the cost of
        // inserting and extracting the values.
        unsigned ScalarCost = getArithmeticInstrCost(Opcode, Ty->getScalarType());
        unsigned Cost = (VF * ScalarCost) + getScalarizationOverhead(Ty, Args);
        // FIXME: VF 2 for these FP operations is currently just as
        // expensive as for VF 4.
        if (VF == 2)
          Cost *= 2;
        return Cost;
      }
      case 64:
      case 128:
        return NumVectors;
      default:
        break;
      }
    }

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem) {
      unsigned Cost = (VF * LIBCALL_COST) + getScalarizationOverhead(Ty, Args);
      // FIXME: VF 2 for float is currently just as expensive as for VF 4.
      if (VF == 2 && ScalarBits == 32)
        Cost *= 2;
      return Cost;
    }
  }
  else {  // Scalar:
    // These FP operations are supported with a dedicated instruction for
    // float, double and fp128 (base implementation assumes float generally
    // costs 2).
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv)
      return 1;

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem)
      return LIBCALL_COST;

    if (Opcode == Instruction::LShr || Opcode == Instruction::AShr)
      return (ScalarBits >= 32 ? 1 : 2 /*ext*/);

    // Or requires one instruction, although it has custom handling for i64.
    if (Opcode == Instruction::Or)
      return 1;

    if (Opcode == Instruction::Xor && ScalarBits == 1) {
      if (ST->hasLoadStoreOnCond2())
        return 5; // 2 * (li 0; loc 1); xor
      return 7; // 2 * ipm sequences ; xor ; shift ; compare
    }

    if (UDivPow2)
      return 1;
    if (SDivPow2)
      return SDivCostEstimate;

    // An extra extension for narrow types is needed.
    if ((Opcode == Instruction::SDiv || Opcode == Instruction::SRem))
      // sext of op(s) for narrow types
      return (ScalarBits < 32 ? 4 : (ScalarBits == 32 ? 2 : 1));

    if (Opcode == Instruction::UDiv || Opcode == Instruction::URem)
      // Clearing of low 64 bit reg + sext of op(s) for narrow types + dl[g]r
      return (ScalarBits < 32 ? 4 : 2);
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args);
}

int SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                   Type *SubTp) {
  assert (Tp->isVectorTy());
  assert (ST->hasVector() && "getShuffleCost() called.");
  unsigned NumVectors = getNumVectorRegs(Tp);

  // TODO: Since fp32 is expanded, the shuffle cost should always be 0.

  // FP128 values are always in scalar registers, so there is no work
  // involved with a shuffle, except for broadcast. In that case register
  // moves are done with a single instruction per element.
  if (Tp->getScalarType()->isFP128Ty())
    return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0);

  switch (Kind) {
  case TargetTransformInfo::SK_ExtractSubvector:
    // ExtractSubvector Index indicates start offset.

    // Extracting a subvector from first index is a noop.
    return (Index == 0 ? 0 : NumVectors);

  case TargetTransformInfo::SK_Broadcast:
    // Loop vectorizer calls here to figure out the extra cost of
    // broadcasting a loaded value to all elements of a vector. Since vlrep
    // loads and replicates with a single instruction, adjust the returned
    // value.
    return NumVectors - 1;

  default:
    // SystemZ supports single instruction permutation / replication.
    return NumVectors;
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

// Return the log2 difference of the element sizes of the two vector types.
static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
  unsigned Bits0 = Ty0->getScalarSizeInBits();
  unsigned Bits1 = Ty1->getScalarSizeInBits();

  if (Bits1 > Bits0)
    return (Log2_32(Bits1) - Log2_32(Bits0));

  return (Log2_32(Bits0) - Log2_32(Bits1));
}

// Return the number of instructions needed to truncate SrcTy to DstTy.
unsigned SystemZTTIImpl::
getVectorTruncCost(Type *SrcTy, Type *DstTy) {
  assert (SrcTy->isVectorTy() && DstTy->isVectorTy());
  assert (SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() &&
          "Packing must reduce size of vector type.");
  assert (SrcTy->getVectorNumElements() == DstTy->getVectorNumElements() &&
          "Packing should not change number of elements.");

  // TODO: Since fp32 is expanded, the extract cost should always be 0.

  unsigned NumParts = getNumVectorRegs(SrcTy);
  if (NumParts <= 2)
    // Up to 2 vector registers can be truncated efficiently with pack or
    // permute. The latter requires an immediate mask to be loaded, which
    // typically gets hoisted out of a loop. TODO: return a good value for
    // BB-VECTORIZER that includes the immediate loads, which we do not want
    // to count for the loop vectorizer.
    return 1;

  unsigned Cost = 0;
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  unsigned VF = SrcTy->getVectorNumElements();
  for (unsigned P = 0; P < Log2Diff; ++P) {
    if (NumParts > 1)
      NumParts /= 2;
    Cost += NumParts;
  }

  // Currently, a general mix of permutes and pack instructions is output by
  // isel, which follows the cost computation above except for this case,
  // which is one instruction less:
  if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 &&
      DstTy->getScalarSizeInBits() == 8)
    Cost--;

  return Cost;
}

// Return the cost of converting a vector bitmask produced by a compare
// (SrcTy), to the type of the select or extend instruction (DstTy).
unsigned SystemZTTIImpl::
getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) {
  assert (SrcTy->isVectorTy() && DstTy->isVectorTy() &&
          "Should only be called with vector types.");

  unsigned PackCost = 0;
  unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBits = DstTy->getScalarSizeInBits();
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  if (SrcScalarBits > DstScalarBits)
    // The bitmask will be truncated.
    PackCost = getVectorTruncCost(SrcTy, DstTy);
  else if (SrcScalarBits < DstScalarBits) {
    unsigned DstNumParts = getNumVectorRegs(DstTy);
    // Each vector select needs its part of the bitmask unpacked.
    PackCost = Log2Diff * DstNumParts;
    // Extra cost for moving part of mask before unpacking.
    PackCost += DstNumParts - 1;
  }

  return PackCost;
}

// Return the type of the compared operands. This is needed to compute the
// cost for a Select / ZExt or SExt instruction.
static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
  Type *OpTy = nullptr;
  if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
    OpTy = CI->getOperand(0)->getType();
  else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
    if (LogicI->getNumOperands() == 2)
      if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
        if (isa<CmpInst>(LogicI->getOperand(1)))
          OpTy = CI0->getOperand(0)->getType();

  if (OpTy != nullptr) {
    if (VF == 1) {
      assert (!OpTy->isVectorTy() && "Expected scalar type");
      return OpTy;
    }
    // Return the potentially vectorized type based on 'I' and 'VF'. 'I' may
    // be either scalar or already vectorized with a same or lesser VF.
    Type *ElTy = OpTy->getScalarType();
    return VectorType::get(ElTy, VF);
  }

  return nullptr;
}

int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     const Instruction *I) {
  unsigned DstScalarBits = Dst->getScalarSizeInBits();
  unsigned SrcScalarBits = Src->getScalarSizeInBits();

  if (Src->isVectorTy()) {
    assert (ST->hasVector() && "getCastInstrCost() called with vector type.");
    assert (Dst->isVectorTy());
    unsigned VF = Src->getVectorNumElements();
    unsigned NumDstVectors = getNumVectorRegs(Dst);
    unsigned NumSrcVectors = getNumVectorRegs(Src);

    if (Opcode == Instruction::Trunc) {
      if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits())
        return 0; // Check for NOOP conversions.
      return getVectorTruncCost(Src, Dst);
    }

    if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
      if (SrcScalarBits >= 8) {
        // ZExt/SExt will be handled with one unpack per doubling of width.
        unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst);

        // For types that span multiple vector registers, some additional
        // instructions are used to setup the unpacking.
        unsigned NumSrcVectorOps =
          (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors)
                          : (NumDstVectors / 2));

        return (NumUnpacks * NumDstVectors) + NumSrcVectorOps;
      }
      else if (SrcScalarBits == 1) {
        // This should be extension of a compare i1 result.
        // If we know the widths of the compared operands, get the cost of
        // converting them to Dst. Otherwise assume same widths.
        unsigned Cost = 0;
        Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
        if (CmpOpTy != nullptr)
          Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
        if (Opcode == Instruction::ZExt)
          // One 'vn' per dst vector with an immediate mask.
          Cost += NumDstVectors;
        return Cost;
      }
    }

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
        Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
      // TODO: Fix base implementation which could simplify things a bit here
      // (seems to miss on differentiating on scalar/vector types).

      // Only 64 bit vector conversions are natively supported.
      if (SrcScalarBits == 64 && DstScalarBits == 64)
        return NumDstVectors;

      // Return the cost of multiple scalar invocation plus the cost of
      // inserting and extracting the values. Base implementation does not
      // realize float->int gets scalarized.
      unsigned ScalarCost = getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());
      unsigned TotCost = VF * ScalarCost;
      bool NeedsInserts = true, NeedsExtracts = true;
      // FP128 registers do not get inserted or extracted.
      if (DstScalarBits == 128 &&
          (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP))
        NeedsInserts = false;
      if (SrcScalarBits == 128 &&
          (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI))
        NeedsExtracts = false;

      TotCost += getScalarizationOverhead(Dst, NeedsInserts, NeedsExtracts);

      // FIXME: VF 2 for float<->i32 is currently just as expensive as for VF 4.
      if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32)
        TotCost *= 2;

      return TotCost;
    }

    if (Opcode == Instruction::FPTrunc) {
      if (SrcScalarBits == 128)  // fp128 -> double/float + inserts of elements.
        return VF /*ldxbr/lexbr*/ + getScalarizationOverhead(Dst, true, false);
      else // double -> float
        return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/);
    }

    if (Opcode == Instruction::FPExt) {
      if (SrcScalarBits == 32 && DstScalarBits == 64) {
        // float -> double is very rare and currently unoptimized. Instead of
        // using vldeb, which can do two at a time, all conversions are
        // scalarized.
        return VF * 2;
      }
      // -> fp128. VF * lxdb/lxeb + extraction of elements.
      return VF + getScalarizationOverhead(Src, false, true);
    }
  }
  else { // Scalar
    assert (!Dst->isVectorTy());

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP)
      return (SrcScalarBits >= 32 ? 1 : 2 /*i8/i16 extend*/);

    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
        Src->isIntegerTy(1)) {
      if (ST->hasLoadStoreOnCond2())
        return 2; // li 0; loc 1

      // This should be extension of a compare i1 result, which is done with
      // ipm and a varying sequence of instructions.
      unsigned Cost = 0;
      if (Opcode == Instruction::SExt)
        Cost = (DstScalarBits < 64 ? 3 : 4);
      if (Opcode == Instruction::ZExt)
        Cost = 3;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr);
      if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy())
        // If operands of an fp-type were compared, this costs +1.
        Cost++;

      return Cost;
    }
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
}

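// Cost of compare and select instructions, including the extra instructions
// needed for some predicates and the bitmask conversions around vector selects.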
int SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                       const Instruction *I) {
  if (ValTy->isVectorTy()) {
    assert (ST->hasVector() && "getCmpSelInstrCost() called with vector type.");
    unsigned VF = ValTy->getVectorNumElements();

    // Called with a compare instruction.
    if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
      unsigned PredicateExtraCost = 0;
      if (I != nullptr) {
        // Some predicates cost one or two extra instructions.
        switch (cast<CmpInst>(I)->getPredicate()) {
        case CmpInst::Predicate::ICMP_NE:
        case CmpInst::Predicate::ICMP_UGE:
        case CmpInst::Predicate::ICMP_ULE:
        case CmpInst::Predicate::ICMP_SGE:
        case CmpInst::Predicate::ICMP_SLE:
          PredicateExtraCost = 1;
          break;
        case CmpInst::Predicate::FCMP_ONE:
        case CmpInst::Predicate::FCMP_ORD:
        case CmpInst::Predicate::FCMP_UEQ:
        case CmpInst::Predicate::FCMP_UNO:
          PredicateExtraCost = 2;
          break;
        default:
          break;
        }
      }

      // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of
      // floats. FIXME: <2 x float> generates same code as <4 x float>.
      unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1);
      unsigned NumVecs_cmp = getNumVectorRegs(ValTy);

      unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost));
      return Cost;
    }
    else { // Called with a select instruction.
      assert (Opcode == Instruction::Select);

      // We can figure out the extra cost of packing / unpacking if the
      // instruction was passed and the compare instruction is found.
      unsigned PackCost = 0;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
      if (CmpOpTy != nullptr)
        PackCost =
          getVectorBitmaskConversionCost(CmpOpTy, ValTy);

      return getNumVectorRegs(ValTy) /*vsel*/ + PackCost;
    }
  }
  else { // Scalar
    switch (Opcode) {
    case Instruction::ICmp: {
      unsigned Cost = 1;
      if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16)
        Cost += 2; // extend both operands
      return Cost;
    }
    case Instruction::Select:
      if (ValTy->isFloatingPointTy())
        return 4; // No load on condition for FP, so this costs a conditional jump.
      return 1; // Load On Condition.
    }
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, nullptr);
}

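// Cost of inserting an element into or extracting an element from a vector
// register.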
int SystemZTTIImpl::
getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  // vlvgp will insert two grs into a vector register, so only count half the
  // number of instructions.
  if (Opcode == Instruction::InsertElement && Val->isIntOrIntVectorTy(64))
    return ((Index % 2 == 0) ? 1 : 0);

  if (Opcode == Instruction::ExtractElement) {
    int Cost = ((getScalarSizeInBits(Val) == 1) ? 2 /*+test-under-mask*/ : 1);

    // Give a slight penalty for moving out of vector pipeline to FXU unit.
    if (Index == 0 && Val->isIntOrIntVectorTy())
      Cost += 1;

    return Cost;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

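// Cost of a load or store. A scalar load that can be folded into its single
// (binary) user as a memory operand may be free.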
int SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment, unsigned AddressSpace,
                                    const Instruction *I) {
  assert(!Src->isVoidTy() && "Invalid type");

  if (!Src->isVectorTy() && Opcode == Instruction::Load &&
      I != nullptr && I->hasOneUse()) {
      const Instruction *UserI = cast<Instruction>(*I->user_begin());
      unsigned Bits = getScalarSizeInBits(Src);
      bool FoldsLoad = false;
      switch (UserI->getOpcode()) {
      case Instruction::ICmp:
      case Instruction::Add:
      case Instruction::Sub:
      case Instruction::Mul:
      case Instruction::SDiv:
      case Instruction::UDiv:
      case Instruction::And:
      case Instruction::Or:
      case Instruction::Xor:
      // This also makes sense for float operations, but disabled for now due
      // to regressions.
      // case Instruction::FCmp:
      // case Instruction::FAdd:
      // case Instruction::FSub:
      // case Instruction::FMul:
      // case Instruction::FDiv:
        FoldsLoad = (Bits == 32 || Bits == 64);
        break;
      }

      if (FoldsLoad) {
        assert (UserI->getNumOperands() == 2 &&
                "Expected to only handle binops.");

        // UserI can't fold two loads, so in that case return 0 cost only
        // half of the time.
        for (unsigned i = 0; i < 2; ++i) {
          if (UserI->getOperand(i) == I)
            continue;
          if (LoadInst *LI = dyn_cast<LoadInst>(UserI->getOperand(i))) {
            if (LI->hasOneUse())
              return i == 0 ? 0 : 1;
          }
        }

        return 0;
      }
  }

  unsigned NumOps =
    (Src->isVectorTy() ? getNumVectorRegs(Src) : getNumberOfParts(Src));

  if (Src->getScalarSizeInBits() == 128)
    // 128 bit scalars are held in a pair of two 64 bit registers.
    NumOps *= 2;

  return NumOps;
}

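// Cost of an interleaved load/store group: one wide memory access per vector
// register part plus the permutes needed to (de)interleave the elements.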
int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace,
                                               bool IsMasked) {
  if (IsMasked)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, IsMasked);
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  int NumWideParts = getNumVectorRegs(VecTy);

  // How many source vectors are handled to produce a vectorized operand?
  int NumElsPerVector = (VecTy->getVectorNumElements() / NumWideParts);
  int NumSrcParts =
    ((NumWideParts > NumElsPerVector) ? NumElsPerVector : NumWideParts);

  // A Load group may have gaps.
  unsigned NumOperands =
    ((Opcode == Instruction::Load) ? Indices.size() : Factor);

  // Each needed permute takes two vectors as input.
  if (NumSrcParts > 1)
    NumSrcParts--;
  int NumPermutes = NumSrcParts * NumOperands;

  // Cost of load/store operations and the permutations needed.
  return NumWideParts + NumPermutes;
}