//===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a TargetTransformInfo analysis pass specific to the
// SystemZ target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//
16 #include "SystemZTargetTransformInfo.h"
17 #include "llvm/Analysis/TargetTransformInfo.h"
18 #include "llvm/CodeGen/BasicTTIImpl.h"
19 #include "llvm/CodeGen/CostTable.h"
20 #include "llvm/CodeGen/TargetLowering.h"
21 #include "llvm/IR/IntrinsicInst.h"
22 #include "llvm/Support/Debug.h"
25 #define DEBUG_TYPE "systemztti"
27 //===----------------------------------------------------------------------===//
29 // SystemZ cost model.
31 //===----------------------------------------------------------------------===//
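
// Note: the immediate cost functions below are deliberately coarse. Roughly,
// anything a single load-immediate instruction can materialize (lgfi, llilf
// or llihf, see below) counts as one basic instruction, any other value that
// fits in 64 bits is assumed to take an instruction pair, and wider constants
// are estimated at four instructions.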
int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model is implemented yet for operations on integers larger than
  // 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    // Constants loaded via lgfi.
    if (isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llilf.
    if (isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llihf.
    if ((Imm.getZExtValue() & 0xffffffff) == 0)
      return TTI::TCC_Basic;

    return 2 * TTI::TCC_Basic;
  }

  return 4 * TTI::TCC_Basic;
}
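
// Cost of an integer immediate when it appears as operand 'Idx' of an
// instruction with the given opcode. Returning TCC_Free tells constant
// hoisting that the value can be encoded directly in the instruction (for
// example in the signed 32-bit field of cgfi for a compare), so hoisting it
// into a register would not help.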
int SystemZTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model is implemented yet for operations on integers larger than
  // 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    if (Idx == 0 && Imm.getBitWidth() <= 64) {
      // Any 8-bit immediate store can be implemented via mvi.
      if (BitSize == 8)
        return TTI::TCC_Free;
      // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
      if (isInt<16>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::ICmp:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Comparisons against signed 32-bit immediates are implemented via cgfi.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
      // Comparisons against unsigned 32-bit immediates are implemented via
      // clgfi.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Add:
  case Instruction::Sub:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Or their negation, by swapping addition vs. subtraction.
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Mul:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use msgfi to multiply by 32-bit signed immediates.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Or:
  case Instruction::Xor:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Masks supported by oilf/xilf.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Masks supported by oihf/xihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0)
        return TTI::TCC_Free;
    }
    break;
  case Instruction::And:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Any 32-bit AND operation can be implemented via nilf.
      if (BitSize <= 32)
        return TTI::TCC_Free;
      // 64-bit masks supported by nilf.
      if (isUInt<32>(~Imm.getZExtValue()))
        return TTI::TCC_Free;
      // 64-bit masks supported by nilh.
      if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
        return TTI::TCC_Free;
      // Some 64-bit AND operations can be implemented via risbg.
      const SystemZInstrInfo *TII = ST->getInstrInfo();
      unsigned Start, End;
      if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // Always return TCC_Free for the shift value of a shift instruction.
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}
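
// Cost of an integer immediate used by an intrinsic call. The
// *_with_overflow intrinsics expand to an ordinary add/sub/mul, so the same
// immediate ranges as in the instruction case above apply to them.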
int SystemZTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model is implemented yet for operations on integers larger than
  // 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    // These get expanded to include a normal addition/subtraction.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    // These get expanded to include a normal multiplication.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}
TargetTransformInfo::PopcntSupportKind
SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
  if (ST->hasPopulationCount() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}
void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Find out if L contains a call, what the machine instruction count
  // estimate is, and how many stores there are.
  bool HasCall = false;
  unsigned NumStores = 0;
  for (auto &BB : L->blocks())
    for (auto &I : *BB) {
      if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (isLoweredToCall(F))
            HasCall = true;
          if (F->getIntrinsicID() == Intrinsic::memcpy ||
              F->getIntrinsicID() == Intrinsic::memset)
            NumStores++;
        } else { // indirect call.
          HasCall = true;
        }
      }
      if (isa<StoreInst>(&I)) {
        Type *MemAccessTy = I.getOperand(0)->getType();
        NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, 0, 0);
      }
    }

  // The z13 processor will run out of store tags if too many stores
  // are fed into it too quickly. Therefore make sure there are not
  // too many stores in the resulting unrolled loop.
  unsigned const Max = (NumStores ? (12 / NumStores) : UINT_MAX);
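  // For example, a loop body with three unit-cost stores gets
  // Max = 12 / 3 = 4, so the unrolled body holds roughly a dozen stores at
  // most.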

  if (HasCall) {
    // Only allow full unrolling if loop has any calls.
    UP.FullUnrollMaxCount = Max;
    UP.MaxCount = 1;
    return;
  }

  UP.MaxCount = Max;
  if (UP.MaxCount <= 1)
    return;

  // Allow partial and runtime trip count unrolling.
  UP.Partial = UP.Runtime = true;

  UP.PartialThreshold = 75;
  UP.DefaultUnrollRuntimeCount = 4;

  // Allow expensive instructions in the pre-header of the loop.
  UP.AllowExpensiveTripCount = true;
}
bool SystemZTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                                   TargetTransformInfo::LSRCost &C2) {
  // SystemZ specific: check instruction count (first), and don't care about
  // ImmCost, since offsets are checked explicitly.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
                  C2.NumIVMuls, C2.NumBaseAdds,
                  C2.ScaleCost, C2.SetupCost);
}
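
// SystemZ has sixteen 64-bit GPRs; %r15 is the stack pointer and %r0 cannot
// be used in an address, which is why only 14 are reported below. The vector
// facility (z13 and later) adds 32 vector registers of 128 bits each.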
unsigned SystemZTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (!Vector)
    // Discount the stack pointer. Also leave out %r0, since it can't
    // be used in an address.
    return 14;
  if (ST->hasVector())
    return 32;
  return 0;
}

unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (!Vector)
    return 64;
  if (ST->hasVector())
    return 128;
  return 0;
}
bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
}
// Return the bit size for the scalar type or vector element
// type. getScalarSizeInBits() returns 0 for a pointer type.
static unsigned getScalarSizeInBits(Type *Ty) {
  unsigned Size =
    (Ty->isPtrOrPtrVectorTy() ? 64U : Ty->getScalarSizeInBits());
  assert(Size > 0 && "Element must have non-zero size.");
  return Size;
}
// getNumberOfParts() calls getTypeLegalizationCost() which splits the vector
// type until it is legal. This would e.g. return 4 for <6 x i64>, instead of
// 3, the number of 128-bit vector registers actually needed. This helper
// returns that simple register count instead.
static unsigned getNumVectorRegs(Type *Ty) {
  assert(Ty->isVectorTy() && "Expected vector type");
  unsigned WideBits = getScalarSizeInBits(Ty) * Ty->getVectorNumElements();
  assert(WideBits > 0 && "Could not compute size of vector");
  return ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));
}
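
// For instance, <6 x i64> is 384 bits wide and thus occupies
// ceil(384 / 128) = 3 vector registers.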
int SystemZTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {

  // TODO: return a good value for BB-VECTORIZER that includes the
  // immediate loads, which we do not want to count for the loop
  // vectorizer, since they are hopefully hoisted out of the loop. This
  // would require a new parameter 'InLoop', but not sure if constant
  // args are common enough to motivate this.

  unsigned ScalarBits = Ty->getScalarSizeInBits();

  // There are three cases of division and remainder: Dividing with a register
  // needs a divide instruction. A divisor which is a power of two constant
  // can be implemented with a sequence of shifts. Any other constant needs a
  // multiply and shifts.
  const unsigned DivInstrCost = 20;
  const unsigned DivMulSeqCost = 10;
  const unsigned SDivPow2Cost = 4;
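  // With these weights, e.g. an sdiv by 8 is costed as a short shift-based
  // sequence (SDivPow2Cost), an sdiv by another constant as a multiply-high
  // plus shifts (DivMulSeqCost), and an sdiv by a register value as a full
  // divide (DivInstrCost).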

  bool SignedDivRem =
      Opcode == Instruction::SDiv || Opcode == Instruction::SRem;
  bool UnsignedDivRem =
      Opcode == Instruction::UDiv || Opcode == Instruction::URem;

  // Check for a constant divisor.
  bool DivRemConst = false;
  bool DivRemConstPow2 = false;
  if ((SignedDivRem || UnsignedDivRem) && Args.size() == 2) {
    if (const Constant *C = dyn_cast<Constant>(Args[1])) {
      const ConstantInt *CVal =
          (C->getType()->isVectorTy()
               ? dyn_cast_or_null<const ConstantInt>(C->getSplatValue())
               : dyn_cast<const ConstantInt>(C));
      if (CVal != nullptr &&
          (CVal->getValue().isPowerOf2() || (-CVal->getValue()).isPowerOf2()))
        DivRemConstPow2 = true;
      else
        DivRemConst = true;
    }
  }

  if (Ty->isVectorTy()) {
    assert(ST->hasVector() &&
           "getArithmeticInstrCost() called with vector type.");
    unsigned VF = Ty->getVectorNumElements();
    unsigned NumVectors = getNumVectorRegs(Ty);

    // These vector operations are custom handled, but are still supported
    // with one instruction per vector, regardless of element size.
    if (Opcode == Instruction::Shl || Opcode == Instruction::LShr ||
        Opcode == Instruction::AShr) {
      return NumVectors;
    }

    if (DivRemConstPow2)
      return (NumVectors * (SignedDivRem ? SDivPow2Cost : 1));
    if (DivRemConst)
      return VF * DivMulSeqCost + getScalarizationOverhead(Ty, Args);
    if ((SignedDivRem || UnsignedDivRem) && VF > 4)
      // Temporary hack: disable high vectorization factors with integer
      // division/remainder, which will get scalarized and handled with
      // GR128 registers. The mischeduler is not clever enough to avoid
      // spilling yet.
      return 1000;

    // These FP operations are supported with a single vector instruction for
    // double (base implementation assumes float generally costs 2). For
    // FP128, the scalar cost is 1, and there is no overhead since the values
    // are already in scalar registers.
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv) {
      switch (ScalarBits) {
      case 32: {
        // The vector enhancements facility 1 provides v4f32 instructions.
        if (ST->hasVectorEnhancements1())
          return NumVectors;
        // Return the cost of multiple scalar invocation plus the cost of
        // inserting and extracting the values.
        unsigned ScalarCost =
            getArithmeticInstrCost(Opcode, Ty->getScalarType());
        unsigned Cost = (VF * ScalarCost) + getScalarizationOverhead(Ty, Args);
        // FIXME: VF 2 for these FP operations are currently just as
        // expensive as for VF 4.
        if (VF == 2)
          Cost *= 2;
        return Cost;
      }
      case 64:
      case 128:
        return NumVectors;
      default:
        break;
      }
    }

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem) {
      unsigned Cost = (VF * LIBCALL_COST) + getScalarizationOverhead(Ty, Args);
      // FIXME: VF 2 for float is currently just as expensive as for VF 4.
      if (VF == 2 && ScalarBits == 32)
        Cost *= 2;
      return Cost;
    }
  }
  else { // Scalar:
    // These FP operations are supported with a dedicated instruction for
    // float, double and fp128 (base implementation assumes float generally
    // costs 2).
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv)
      return 1;

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem)
      return LIBCALL_COST;

    // Give discount for some combined logical operations if supported.
    if (Args.size() == 2 && ST->hasMiscellaneousExtensions3()) {
      if (Opcode == Instruction::Xor) {
        for (const Value *A : Args) {
          if (const Instruction *I = dyn_cast<Instruction>(A))
            if (I->hasOneUse() &&
                (I->getOpcode() == Instruction::And ||
                 I->getOpcode() == Instruction::Or ||
                 I->getOpcode() == Instruction::Xor))
              return 0;
        }
      }
      else if (Opcode == Instruction::Or || Opcode == Instruction::And) {
        for (const Value *A : Args) {
          if (const Instruction *I = dyn_cast<Instruction>(A))
            if (I->hasOneUse() && I->getOpcode() == Instruction::Xor)
              return 0;
        }
      }
    }

    // Or requires one instruction, although it has custom handling for i64.
    if (Opcode == Instruction::Or)
      return 1;

    if (Opcode == Instruction::Xor && ScalarBits == 1) {
      if (ST->hasLoadStoreOnCond2())
        return 5; // 2 * (li 0; loc 1); xor
      return 7; // 2 * ipm sequences ; xor ; shift ; compare
    }

    if (DivRemConstPow2)
      return (SignedDivRem ? SDivPow2Cost : 1);
    if (DivRemConst)
      return DivMulSeqCost;
    if (SignedDivRem || UnsignedDivRem)
      return DivInstrCost;
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args);
}
int SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                   Type *SubTp) {
  assert (Tp->isVectorTy());
  assert (ST->hasVector() && "getShuffleCost() called.");
  unsigned NumVectors = getNumVectorRegs(Tp);

  // TODO: Since fp32 is expanded, the shuffle cost should always be 0.

  // FP128 values are always in scalar registers, so there is no work
  // involved with a shuffle, except for broadcast. In that case register
  // moves are done with a single instruction per element.
  if (Tp->getScalarType()->isFP128Ty())
    return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0);

  switch (Kind) {
  case TargetTransformInfo::SK_ExtractSubvector:
    // ExtractSubvector Index indicates start offset.

    // Extracting a subvector from first index is a noop.
    return (Index == 0 ? 0 : NumVectors);

  case TargetTransformInfo::SK_Broadcast:
    // Loop vectorizer calls here to figure out the extra cost of
    // broadcasting a loaded value to all elements of a vector. Since vlrep
    // loads and replicates with a single instruction, adjust the returned
    // value.
    return NumVectors - 1;

  default:
    // SystemZ supports single instruction permutation / replication.
    return NumVectors;
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
// Return the log2 difference of the element sizes of the two vector types.
static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
  unsigned Bits0 = Ty0->getScalarSizeInBits();
  unsigned Bits1 = Ty1->getScalarSizeInBits();

  if (Bits1 > Bits0)
    return (Log2_32(Bits1) - Log2_32(Bits0));

  return (Log2_32(Bits0) - Log2_32(Bits1));
}
// Return the number of instructions needed to truncate SrcTy to DstTy.
unsigned SystemZTTIImpl::
getVectorTruncCost(Type *SrcTy, Type *DstTy) {
  assert (SrcTy->isVectorTy() && DstTy->isVectorTy());
  assert (SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() &&
          "Packing must reduce size of vector type.");
  assert (SrcTy->getVectorNumElements() == DstTy->getVectorNumElements() &&
          "Packing should not change number of elements.");

  // TODO: Since fp32 is expanded, the extract cost should always be 0.

  unsigned NumParts = getNumVectorRegs(SrcTy);
  if (NumParts <= 2)
    // Up to 2 vector registers can be truncated efficiently with pack or
    // permute. The latter requires an immediate mask to be loaded, which
    // typically gets hoisted out of a loop. TODO: return a good value for
    // BB-VECTORIZER that includes the immediate loads, which we do not want
    // to count for the loop vectorizer.
    return 1;

  unsigned Cost = 0;
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  unsigned VF = SrcTy->getVectorNumElements();
  for (unsigned P = 0; P < Log2Diff; ++P) {
    if (NumParts > 1)
      NumParts /= 2;
    Cost += NumParts;
  }

  // Currently, a general mix of permutes and pack instructions is output by
  // isel, which follow the cost computation above except for this case which
  // is one instruction less:
  if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 &&
      DstTy->getScalarSizeInBits() == 8)
    Cost--;

  return Cost;
}
// Return the cost of converting a vector bitmask produced by a compare
// (SrcTy), to the type of the select or extend instruction (DstTy).
unsigned SystemZTTIImpl::
getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) {
  assert (SrcTy->isVectorTy() && DstTy->isVectorTy() &&
          "Should only be called with vector types.");

  unsigned PackCost = 0;
  unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBits = DstTy->getScalarSizeInBits();
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  if (SrcScalarBits > DstScalarBits)
    // The bitmask will be truncated.
    PackCost = getVectorTruncCost(SrcTy, DstTy);
  else if (SrcScalarBits < DstScalarBits) {
    unsigned DstNumParts = getNumVectorRegs(DstTy);
    // Each vector select needs its part of the bitmask unpacked.
    PackCost = Log2Diff * DstNumParts;
    // Extra cost for moving part of mask before unpacking.
    PackCost += DstNumParts - 1;
  }

  return PackCost;
}
// Return the type of the compared operands. This is needed to compute the
// cost for a Select / ZExt or SExt instruction.
static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
  Type *OpTy = nullptr;
  if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
    OpTy = CI->getOperand(0)->getType();
  else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
    if (LogicI->getNumOperands() == 2)
      if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
        if (isa<CmpInst>(LogicI->getOperand(1)))
          OpTy = CI0->getOperand(0)->getType();

  if (OpTy != nullptr) {
    if (VF == 1) {
      assert (!OpTy->isVectorTy() && "Expected scalar type");
      return OpTy;
    }
    // Return the potentially vectorized type based on 'I' and 'VF'. 'I' may
    // be either scalar or already vectorized with a same or lesser VF.
    Type *ElTy = OpTy->getScalarType();
    return VectorType::get(ElTy, VF);
  }

  return nullptr;
}
// Get the cost of converting a boolean vector to a vector with same width
// and element size as Dst, plus the cost of zero extending if needed.
unsigned SystemZTTIImpl::
getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst,
                              const Instruction *I) {
  assert (Dst->isVectorTy());
  unsigned VF = Dst->getVectorNumElements();
  unsigned Cost = 0;
  // If we know the widths of the compared operands, get any cost of
  // converting them to match Dst. Otherwise assume the widths are the same.
  Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
  if (CmpOpTy != nullptr)
    Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
  if (Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP)
    // One 'vn' per dst vector with an immediate mask.
    Cost += getNumVectorRegs(Dst);
  return Cost;
}
int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     const Instruction *I) {
  unsigned DstScalarBits = Dst->getScalarSizeInBits();
  unsigned SrcScalarBits = Src->getScalarSizeInBits();

  if (Src->isVectorTy()) {
    assert (ST->hasVector() && "getCastInstrCost() called with vector type.");
    assert (Dst->isVectorTy());
    unsigned VF = Src->getVectorNumElements();
    unsigned NumDstVectors = getNumVectorRegs(Dst);
    unsigned NumSrcVectors = getNumVectorRegs(Src);

    if (Opcode == Instruction::Trunc) {
      if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits())
        return 0; // Check for NOOP conversions.
      return getVectorTruncCost(Src, Dst);
    }

    if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
      if (SrcScalarBits >= 8) {
        // ZExt/SExt will be handled with one unpack per doubling of width.
        unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst);

        // For types that span multiple vector registers, some additional
        // instructions are used to setup the unpacking.
        unsigned NumSrcVectorOps =
          (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors)
                          : (NumDstVectors / 2));

        return (NumUnpacks * NumDstVectors) + NumSrcVectorOps;
      }
      else if (SrcScalarBits == 1)
        return getBoolVecToIntConversionCost(Opcode, Dst, I);
    }

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
        Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
      // TODO: Fix base implementation which could simplify things a bit here
      // (seems to miss on differentiating on scalar/vector types).

      // Only 64 bit vector conversions are natively supported before z15.
      if (DstScalarBits == 64 || ST->hasVectorEnhancements2()) {
        if (SrcScalarBits == DstScalarBits)
          return NumDstVectors;

        if (SrcScalarBits == 1)
          return getBoolVecToIntConversionCost(Opcode, Dst, I) + NumDstVectors;
      }

      // Return the cost of multiple scalar invocation plus the cost of
      // inserting and extracting the values. Base implementation does not
      // realize float->int gets scalarized.
      unsigned ScalarCost = getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());
      unsigned TotCost = VF * ScalarCost;
      bool NeedsInserts = true, NeedsExtracts = true;
      // FP128 registers do not get inserted or extracted.
      if (DstScalarBits == 128 &&
          (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP))
        NeedsInserts = false;
      if (SrcScalarBits == 128 &&
          (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI))
        NeedsExtracts = false;

      TotCost += getScalarizationOverhead(Src, false, NeedsExtracts);
      TotCost += getScalarizationOverhead(Dst, NeedsInserts, false);

      // FIXME: VF 2 for float<->i32 is currently just as expensive as for VF 4.
      if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32)
        TotCost *= 2;

      return TotCost;
    }

    if (Opcode == Instruction::FPTrunc) {
      if (SrcScalarBits == 128) // fp128 -> double/float + inserts of elements.
        return VF /*ldxbr/lexbr*/ + getScalarizationOverhead(Dst, true, false);
      else // double -> float
        return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/);
    }

    if (Opcode == Instruction::FPExt) {
      if (SrcScalarBits == 32 && DstScalarBits == 64) {
        // float -> double is very rare and currently unoptimized. Instead of
        // using vldeb, which can do two at a time, all conversions are
        // scalarized.
        return VF * 2;
      }
      // -> fp128. VF * lxdb/lxeb + extraction of elements.
      return VF + getScalarizationOverhead(Src, false, true);
    }
  }
  else { // Scalar
    assert (!Dst->isVectorTy());

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP) {
      if (SrcScalarBits >= 32 ||
          (I != nullptr && isa<LoadInst>(I->getOperand(0))))
        return 1;
      return SrcScalarBits > 1 ? 2 /*i8/i16 extend*/ : 5 /*branch seq.*/;
    }

    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
        Src->isIntegerTy(1)) {
      if (ST->hasLoadStoreOnCond2())
        return 2; // li 0; loc 1

      // This should be extension of a compare i1 result, which is done with
      // ipm and a varying sequence of instructions.
      unsigned Cost = 0;
      if (Opcode == Instruction::SExt)
        Cost = (DstScalarBits < 64 ? 3 : 4);
      if (Opcode == Instruction::ZExt)
        Cost = 3;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr);
      if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy())
        // If operands of an fp-type were compared, this costs +1.
        Cost++;
      return Cost;
    }
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
}
// Scalar i8 / i16 operations will typically be made after first extending
// the operands to i32.
static unsigned getOperandsExtensionCost(const Instruction *I) {
  unsigned ExtCost = 0;
  for (Value *Op : I->operands())
    // A load of i8 or i16 sign/zero extends to i32.
    if (!isa<LoadInst>(Op) && !isa<ConstantInt>(Op))
      ExtCost++;

  return ExtCost;
}
int SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy, const Instruction *I) {
  if (ValTy->isVectorTy()) {
    assert (ST->hasVector() && "getCmpSelInstrCost() called with vector type.");
    unsigned VF = ValTy->getVectorNumElements();

    // Called with a compare instruction.
    if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
      unsigned PredicateExtraCost = 0;
      if (I != nullptr) {
        // Some predicates cost one or two extra instructions.
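        // (The vector compare instructions only produce a limited set of
        // predicates, such as equal and greater-than, directly; the remaining
        // predicates are formed by combining or negating those results, which
        // is what the extra cost accounts for.)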
        switch (cast<CmpInst>(I)->getPredicate()) {
        case CmpInst::Predicate::ICMP_NE:
        case CmpInst::Predicate::ICMP_UGE:
        case CmpInst::Predicate::ICMP_ULE:
        case CmpInst::Predicate::ICMP_SGE:
        case CmpInst::Predicate::ICMP_SLE:
          PredicateExtraCost = 1;
          break;
        case CmpInst::Predicate::FCMP_ONE:
        case CmpInst::Predicate::FCMP_ORD:
        case CmpInst::Predicate::FCMP_UEQ:
        case CmpInst::Predicate::FCMP_UNO:
          PredicateExtraCost = 2;
          break;
        default:
          break;
        }
      }

      // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of
      // floats. FIXME: <2 x float> generates same code as <4 x float>.
      unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1);
      unsigned NumVecs_cmp = getNumVectorRegs(ValTy);

      unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost));
      return Cost;
    }
    else { // Called with a select instruction.
      assert (Opcode == Instruction::Select);

      // We can figure out the extra cost of packing / unpacking if the
      // instruction was passed and the compare instruction is found.
      unsigned PackCost = 0;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
      if (CmpOpTy != nullptr)
        PackCost =
          getVectorBitmaskConversionCost(CmpOpTy, ValTy);

      return getNumVectorRegs(ValTy) /*vsel*/ + PackCost;
    }
  }
  else { // Scalar
    switch (Opcode) {
    case Instruction::ICmp: {
      // A loaded value compared with 0 with multiple users becomes Load and
      // Test. The load is then not foldable, so return 0 cost for the ICmp.
      unsigned ScalarBits = ValTy->getScalarSizeInBits();
      if (I != nullptr && ScalarBits >= 32)
        if (LoadInst *Ld = dyn_cast<LoadInst>(I->getOperand(0)))
          if (const ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
            if (!Ld->hasOneUse() && Ld->getParent() == I->getParent() &&
                C->getZExtValue() == 0)
              return 0;

      unsigned Cost = 1;
      if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16)
        Cost += (I != nullptr ? getOperandsExtensionCost(I) : 2);
      return Cost;
    }
    case Instruction::Select:
      if (ValTy->isFloatingPointTy())
        return 4; // No load on condition for FP - costs a conditional jump.
      return 1; // Load On Condition / Select Register.
    }
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, nullptr);
}
int SystemZTTIImpl::
getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  // vlvgp will insert two grs into a vector register, so only count half the
  // number of instructions.
  if (Opcode == Instruction::InsertElement && Val->isIntOrIntVectorTy(64))
    return ((Index % 2 == 0) ? 1 : 0);

  if (Opcode == Instruction::ExtractElement) {
    int Cost = ((getScalarSizeInBits(Val) == 1) ? 2 /*+test-under-mask*/ : 1);

    // Give a slight penalty for moving out of vector pipeline to FXU unit.
    if (Index == 0 && Val->isIntOrIntVectorTy())
      Cost += 1;

    return Cost;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
// Check if a load may be folded as a memory operand in its user.
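// Many SystemZ arithmetic and compare instructions have register-memory
// forms (for example AG adds a 64-bit value directly from storage), so a
// single-use load feeding such an instruction needs no separate load
// instruction.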
bool SystemZTTIImpl::
isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue) {
  if (!Ld->hasOneUse())
    return false;
  FoldedValue = Ld;
  const Instruction *UserI = cast<Instruction>(*Ld->user_begin());
  unsigned LoadedBits = getScalarSizeInBits(Ld->getType());
  unsigned TruncBits = 0;
  unsigned SExtBits = 0;
  unsigned ZExtBits = 0;
  if (UserI->hasOneUse()) {
    unsigned UserBits = UserI->getType()->getScalarSizeInBits();
    if (isa<TruncInst>(UserI))
      TruncBits = UserBits;
    else if (isa<SExtInst>(UserI))
      SExtBits = UserBits;
    else if (isa<ZExtInst>(UserI))
      ZExtBits = UserBits;
  }
  if (TruncBits || SExtBits || ZExtBits) {
    FoldedValue = UserI;
    UserI = cast<Instruction>(*UserI->user_begin());
    // Load (single use) -> trunc/extend (single use) -> UserI
  }
  if ((UserI->getOpcode() == Instruction::Sub ||
       UserI->getOpcode() == Instruction::SDiv ||
       UserI->getOpcode() == Instruction::UDiv) &&
      UserI->getOperand(1) != FoldedValue)
    return false; // Not commutative, only RHS foldable.
  // LoadOrTruncBits holds the number of effectively loaded bits, but 0 if an
  // extension was made of the load.
  unsigned LoadOrTruncBits =
      ((SExtBits || ZExtBits) ? 0 : (TruncBits ? TruncBits : LoadedBits));
  switch (UserI->getOpcode()) {
  case Instruction::Add: // SE: 16->32, 16/32->64, z14:16->64. ZE: 32->64
  case Instruction::Sub:
  case Instruction::ICmp:
    if (LoadedBits == 32 && ZExtBits == 64)
      return true;
    LLVM_FALLTHROUGH;
  case Instruction::Mul: // SE: 16->32, 32->64, z14:16->64
    if (UserI->getOpcode() != Instruction::ICmp) {
      if (LoadedBits == 16 &&
          (SExtBits == 32 ||
           (SExtBits == 64 && ST->hasMiscellaneousExtensions2())))
        return true;
      if (LoadOrTruncBits == 16)
        return true;
    }
    LLVM_FALLTHROUGH;
  case Instruction::SDiv: // SE: 32->64
    if (LoadedBits == 32 && SExtBits == 64)
      return true;
    LLVM_FALLTHROUGH;
  case Instruction::UDiv:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // This also makes sense for float operations, but disabled for now due
    // to regressions.
    // case Instruction::FCmp:
    // case Instruction::FAdd:
    // case Instruction::FSub:
    // case Instruction::FMul:
    // case Instruction::FDiv:

    // All possible extensions of memory checked above.

    // Comparison between memory and immediate.
    if (UserI->getOpcode() == Instruction::ICmp)
      if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1)))
        if (isUInt<16>(CI->getZExtValue()))
          return true;
    return (LoadOrTruncBits == 32 || LoadOrTruncBits == 64);
  }

  return false;
}
static bool isBswapIntrinsicCall(const Value *V) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    if (auto *CI = dyn_cast<CallInst>(I))
      if (auto *F = CI->getCalledFunction())
        if (F->getIntrinsicID() == Intrinsic::bswap)
          return true;
  return false;
}
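
// A load or store whose only purpose is to be byte-swapped can be emitted as
// a byte-reversed load/store (e.g. lrvg/strvg, or vlbr/vstbr with vector
// enhancements 2), so getMemoryOpCost() below treats the memory access in a
// load -> bswap or bswap -> store pattern as free and lets the bswap carry
// the cost.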
int SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment, unsigned AddressSpace,
                                    const Instruction *I) {
  assert(!Src->isVoidTy() && "Invalid type");

  if (!Src->isVectorTy() && Opcode == Instruction::Load && I != nullptr) {
    // Store the load or its truncated or extended value in FoldedValue.
    const Instruction *FoldedValue = nullptr;
    if (isFoldableLoad(cast<LoadInst>(I), FoldedValue)) {
      const Instruction *UserI = cast<Instruction>(*FoldedValue->user_begin());
      assert (UserI->getNumOperands() == 2 && "Expected a binop.");

      // UserI can't fold two loads, so in that case return 0 cost only
      // half of the time.
      for (unsigned i = 0; i < 2; ++i) {
        if (UserI->getOperand(i) == FoldedValue)
          continue;

        if (Instruction *OtherOp = dyn_cast<Instruction>(UserI->getOperand(i))) {
          LoadInst *OtherLoad = dyn_cast<LoadInst>(OtherOp);
          if (!OtherLoad &&
              (isa<TruncInst>(OtherOp) || isa<SExtInst>(OtherOp) ||
               isa<ZExtInst>(OtherOp)))
            OtherLoad = dyn_cast<LoadInst>(OtherOp->getOperand(0));
          if (OtherLoad && isFoldableLoad(OtherLoad, FoldedValue/*dummy*/))
            return i == 0; // Both operands foldable.
        }
      }

      return 0; // Only I is foldable in user.
    }
  }

  unsigned NumOps =
    (Src->isVectorTy() ? getNumVectorRegs(Src) : getNumberOfParts(Src));

  // Store/Load reversed saves one instruction.
  if (((!Src->isVectorTy() && NumOps == 1) || ST->hasVectorEnhancements2()) &&
      I != nullptr) {
    if (Opcode == Instruction::Load && I->hasOneUse()) {
      const Instruction *LdUser = cast<Instruction>(*I->user_begin());
      // In case of load -> bswap -> store, return normal cost for the load.
      if (isBswapIntrinsicCall(LdUser) &&
          (!LdUser->hasOneUse() || !isa<StoreInst>(*LdUser->user_begin())))
        return 0;
    }
    else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
      const Value *StoredVal = SI->getValueOperand();
      if (StoredVal->hasOneUse() && isBswapIntrinsicCall(StoredVal))
        return 0;
    }
  }

  if (Src->getScalarSizeInBits() == 128)
    // 128 bit scalars are held in a pair of two 64 bit registers.
    NumOps *= 2;

  return NumOps;
}
// The generic implementation of getInterleavedMemoryOpCost() is based on
// adding costs of the memory operations plus all the extracts and inserts
// needed for using / defining the vector operands. The SystemZ version does
// roughly the same but bases the computations on vector permutations
// instead.
int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace,
                                               bool UseMaskForCond,
                                               bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Return the ceiling of dividing A by B.
  auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };

  unsigned NumElts = VecTy->getVectorNumElements();
  assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
  unsigned VF = NumElts / Factor;
  unsigned NumEltsPerVecReg = (128U / getScalarSizeInBits(VecTy));
  unsigned NumVectorMemOps = getNumVectorRegs(VecTy);
  unsigned NumPermutes = 0;

  if (Opcode == Instruction::Load) {
    // Loading interleave groups may have gaps, which may mean fewer
    // loads. Find out how many vectors will be loaded in total, and in how
    // many of them each value will be in.
    BitVector UsedInsts(NumVectorMemOps, false);
    std::vector<BitVector> ValueVecs(Factor, BitVector(NumVectorMemOps, false));
    for (unsigned Index : Indices)
      for (unsigned Elt = 0; Elt < VF; ++Elt) {
        unsigned Vec = (Index + Elt * Factor) / NumEltsPerVecReg;
        UsedInsts.set(Vec);
        ValueVecs[Index].set(Vec);
      }
    NumVectorMemOps = UsedInsts.count();

    for (unsigned Index : Indices) {
      // Estimate that each loaded source vector containing this Index
      // requires one operation, except that vperm can handle two input
      // registers first time for each dst vector.
      unsigned NumSrcVecs = ValueVecs[Index].count();
      unsigned NumDstVecs = ceil(VF * getScalarSizeInBits(VecTy), 128U);
      assert (NumSrcVecs >= NumDstVecs && "Expected at least as many sources");
      NumPermutes += std::max(1U, NumSrcVecs - NumDstVecs);
    }
  } else {
    // Estimate the permutes for each stored vector as the smaller of the
    // number of elements and the number of source vectors. Subtract one per
    // dst vector for vperm (S.A.).
    unsigned NumSrcVecs = std::min(NumEltsPerVecReg, Factor);
    unsigned NumDstVecs = NumVectorMemOps;
    assert (NumSrcVecs > 1 && "Expected at least two source vectors.");
    NumPermutes += (NumDstVecs * NumSrcVecs) - NumDstVecs;
  }

  // Cost of load/store operations and the permutations needed.
  return NumVectorMemOps + NumPermutes;
}
static int getVectorIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy) {
  if (RetTy->isVectorTy() && ID == Intrinsic::bswap)
    return getNumVectorRegs(RetTy); // VPERM

  return -1;
}
int SystemZTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                          ArrayRef<Value *> Args,
                                          FastMathFlags FMF, unsigned VF) {
  int Cost = getVectorIntrinsicInstrCost(ID, RetTy);
  if (Cost != -1)
    return Cost;
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
}

int SystemZTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                          ArrayRef<Type *> Tys,
                                          FastMathFlags FMF,
                                          unsigned ScalarizationCostPassed) {
  int Cost = getVectorIntrinsicInstrCost(ID, RetTy);
  if (Cost != -1)
    return Cost;
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Tys,
                                      FMF, ScalarizationCostPassed);
}