//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

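// Report what popcount support the subtarget provides for a type of this
// width: software only, or hardware via popcntd, which some cores implement
// slowly enough that callers may still prefer a software sequence.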
TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

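// Estimate the cost of materializing an integer constant without hoisting.
// A 16-bit signed immediate fits in a single li/addi; a 32-bit value is one
// lis if its low halfword is clear, otherwise lis+ori; anything wider
// typically needs a longer sequence (e.g. lis/ori/rldicr/oris/ori), which is
// approximated below as 4 * TCC_Basic.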
int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

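// As above, but for an immediate used as operand Idx of an intrinsic call.
// Immediates that the matching instruction can encode directly, and
// stackmap/patchpoint constants (which are recorded rather than
// materialized), are free for hoisting purposes.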
int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

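// As above, but for an immediate used as operand Idx of a plain instruction.
// The switch records which operand position, if any, the corresponding PPC
// instructions can encode directly: signed 16-bit (D-form arithmetic),
// unsigned 16-bit (logical compares), a shifted 16-bit halfword
// (addis/oris/xoris), or a contiguous mask run for the rotate-and-mask
// instructions when the operand feeds an 'and'.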
int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

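// Scale the generic user cost by the number of pieces a vector type is split
// into during legalization, so values that must be split cost
// proportionally more.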
unsigned PPCTTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands);
  }

  return BaseT::getUserCost(U, Operands);
}

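// Conservatively determine whether anything in this block might clobber the
// CTR register, either directly or by expanding into a runtime call; such a
// block cannot be part of a CTR-based hardware loop.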
bool PPCTTIImpl::mightUseCTR(BasicBlock *BB,
                             TargetLibraryInfo *LibInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
            return true;
    }
    return false;
  };

  // Determining the address of a TLS variable results in a function call in
  // certain TLS models.
  std::function<bool(const Value *)> memAddrUsesCTR =
      [&memAddrUsesCTR, &TM](const Value *MemAddr) -> bool {
    const auto *GV = dyn_cast<GlobalValue>(MemAddr);
    if (!GV) {
      // Recurse to check for constants that refer to TLS global variables.
      if (const auto *CV = dyn_cast<Constant>(MemAddr))
        for (const auto &CO : CV->operands())
          if (memAddrUsesCTR(CO))
            return true;

      return false;
    }

    if (!GV->isThreadLocal())
      return false;
    TLSModel::Model Model = TM.getTLSModel(GV);
    return Model == TLSModel::GeneralDynamic ||
           Model == TLSModel::LocalDynamic;
  };

  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to the set_loop_iterations or loop_decrement
          // intrinsics, we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
                       !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
          // let's return it to _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt:               Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:              Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:               Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:              Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:               Opcode = ISD::FRINT;      break;
          case Intrinsic::nearbyint:          Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:              Opcode = ISD::FROUND;     break;
          case Intrinsic::minnum:             Opcode = ISD::FMINNUM;    break;
          case Intrinsic::maxnum:             Opcode = ISD::FMAXNUM;    break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO;      break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO;      break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch(J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand))
        return true;
  }

  return false;
}

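// Decide whether forming a CTR loop pays for the latency of mtctr: reject
// short constant-trip-count loops, loops containing anything that might use
// the CTR, and loops whose profile data says an exit edge is more likely
// than another iteration.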
bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert small, short loops to CTR loops.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo))
      return false;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if ((TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions which are cold at
// all call sites when the callers of the functions are not calling any other
// non coldcc functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaving makes such
  // combining much more likely (compared to only using concatenation
  // unrolling).
  if (ST->getDarwinDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

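// Allow inline memcmp expansion using the full range of scalar load widths
// (8/4/2/1 bytes); the maximum number of loads is taken from the target
// lowering's memcmp expansion limit.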
PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.LoadSizes = {8, 4, 2, 1};
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasAltivec() && !ST->hasQPX())
    return 0;
  return ST->hasVSX() ? 64 : 32;
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // On P7, P8 or P9 we have a cache line size of 128.
  unsigned Directive = ST->getDarwinDirective();
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() {
  // This seems like a reasonable default for the BG/Q (this pass is enabled, by
  // default, only on the BG/Q).
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Adjust the cost of vector instructions on targets on which there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code wrt. scalar code.
int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
                                     Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

int PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fallback to the default implementation.
  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src);
  return vectorCostAdjustment(Cost, Opcode, Dst, Src);
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return Cost;

  } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
    if (ST->hasP9Altivec()) {
      if (ISD == ISD::INSERT_VECTOR_ELT)
        // A move-to VSR and a permute/insert. Assume vector operation cost
        // for both (cost will be 2x on P9).
        return vectorCostAdjustment(2, Opcode, Val, nullptr);

      // It's an extract. Maybe we can do a cheap move-from VSR.
      unsigned EltSize = Val->getScalarSizeInBits();
      if (EltSize == 64) {
        unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
        if (Index == MfvsrdIndex)
          return 1;
      } else if (EltSize == 32) {
        unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
        if (Index == MfvsrwzIndex)
          return 1;
      }

      // We need a vector extract (or mfvsrld). Assume vector operation cost.
      // The cost of the load constant for a vector extract is disregarded
      // (invariant, easily schedulable).
      return vectorCostAdjustment(1, Opcode, Val, nullptr);

    } else if (ST->hasDirectMove())
      // Assume permute has standard cost.
      // Assume move-to/move-from VSR have 2x standard cost.
      return 3;
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

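// Memory-op cost: start from the generic estimate, then apply PPC specifics:
// cheap VSX scalar loads, free well-aligned accesses, the permutation-based
// unaligned load sequence, and per-element scalarization for unaligned
// vector stores.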
int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  Cost += LT.first * (SrcBytes / Alignment - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores; loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // First, the cost of the load/store operation.
  int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}

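// Returns true when this loop is expected to become a CTR hardware loop,
// whose decrement-and-branch makes the latch compare redundant; *BI is set
// to the exit branch that would be rewritten. A candidate nested inside
// stops the search, since a loop nest can only use the single CTR once.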
bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}