//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"
static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);
// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));
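// Illustrative usage (not part of the original source): the cache line size
// assumed by the loop data prefetch pass can be overridden on the command
// line, e.g.:
//   llc -ppc-loop-prefetch-cache-line=128 file.ll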
static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));
//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//
TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}
int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}
int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}
int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}
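// Illustrative example (not part of the original source): for
// `and %x, 0x00FF0000`, the mask does not fit in 16 bits, but
// isShiftedMask_32(0x00FF0000) is true, so with RunFree set the immediate is
// TCC_Free: rlwinm can encode any contiguous (possibly inverted) mask
// directly in the rotate-and-mask instruction.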
unsigned PPCTTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands);
  }

  return BaseT::getUserCost(U, Operands);
}
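// Illustrative example (not part of the original source): on a target with
// 128-bit vector registers, a v8i32 operation legalizes to two v4i32 halves,
// so LT.first == 2 and the base user cost is doubled.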
void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}
// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions which are cold at
// all call sites when the callers of the functions are not calling any other
// non-coldcc functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}
bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. This makes combining the loads much more
  // likely (compared to only using concatenation unrolling).
  if (ST->getDarwinDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}
const PPCTTIImpl::TTI::MemCmpExpansionOptions *
PPCTTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
  static const auto Options = []() {
    TTI::MemCmpExpansionOptions Options;
    Options.LoadSizes.push_back(8);
    Options.LoadSizes.push_back(4);
    Options.LoadSizes.push_back(2);
    Options.LoadSizes.push_back(1);
    return Options;
  }();
  return &Options;
}
bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}
unsigned PPCTTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasAltivec() && !ST->hasQPX())
    return 0;
  return ST->hasVSX() ? 64 : 32;
}
unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}
unsigned PPCTTIImpl::getCacheLineSize() {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // On P7, P8 or P9 we have a cache line size of 128.
  unsigned Directive = ST->getDarwinDirective();
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}
unsigned PPCTTIImpl::getPrefetchDistance() {
  // This seems like a reasonable default for the BG/Q (this pass is enabled, by
  // default, only on the BG/Q).
  return 300;
}
unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}
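// The P7/P8 factor above is just latency times width (illustrative note, not
// part of the original source): with two FP units and a 6-cycle latency,
// 2 * 6 = 12 independent operations must be in flight to keep both pipelines
// busy, hence the 12x interleave factor.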
// Adjust the cost of vector instructions on targets on which there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code wrt. scalar code.
int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
                                     Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}
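// Illustrative example (not part of the original source): on a subtarget
// where vectorsUseTwoUnits() is true, an add on v4i32 (legal, not split)
// comes back doubled, while an add on v8i32 splits into two registers
// (LT1.first == 2) and is returned unadjusted here, so the doubling is not
// compounded across legalization steps.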
int PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fallback to the default implementation.
  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}
int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}
int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src);
  return vectorCostAdjustment(Cost, Opcode, Dst, Src);
}
int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}
int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return Cost;
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}
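// Illustrative arithmetic (not part of the original source): on an
// Altivec-only subtarget (no VSX, no QPX), an insertelement is charged the
// base cost plus LHSPenalty = 2 + 7 = 9 to model the store/reload stall,
// while an extractelement is charged the base cost plus 2.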
int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  Cost += LT.first * (SrcBytes / Alignment - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores, loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}
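// Illustrative example (not part of the original source): storing a v4i32
// with 4-byte alignment on an Altivec-only subtarget (no VSX, no unaligned
// vector access) has SrcBytes = 16, so the decomposition adds
// LT.first * (16/4 - 1) = 3 scalar stores, plus four extractelement costs
// from the scalarization loop.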
int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // Firstly, the cost of load/store operation.
  int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}
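// Illustrative example (not part of the original source): an interleaved
// load with Factor = 2 of a v8i32 wide vector on a 128-bit vector target
// legalizes to LT.first = 2 registers, so on top of the memory cost it adds
// Factor * (LT.first - 1) = 2 shuffles to de-interleave the two result
// vectors.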