[Alignment][NFC] Use Align with TargetLowering::setMinFunctionAlignment
[llvm-core.git] / lib / Target / AArch64 / AArch64TargetTransformInfo.h
blob95cda63b01744668e2ce5becb6d7151014916f9c
//===- AArch64TargetTransformInfo.h - AArch64 specific TTI ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the AArch64 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
16 #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H
17 #define LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H
19 #include "AArch64.h"
20 #include "AArch64Subtarget.h"
21 #include "AArch64TargetMachine.h"
22 #include "llvm/ADT/ArrayRef.h"
23 #include "llvm/Analysis/TargetTransformInfo.h"
24 #include "llvm/CodeGen/BasicTTIImpl.h"
25 #include "llvm/IR/Function.h"
26 #include "llvm/IR/Intrinsics.h"
27 #include <cstdint>
29 namespace llvm {
31 class APInt;
32 class Instruction;
33 class IntrinsicInst;
34 class Loop;
35 class SCEV;
36 class ScalarEvolution;
37 class Type;
38 class Value;
39 class VectorType;
41 class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
42 using BaseT = BasicTTIImplBase<AArch64TTIImpl>;
43 using TTI = TargetTransformInfo;
45 friend BaseT;
47 const AArch64Subtarget *ST;
48 const AArch64TargetLowering *TLI;
50 const AArch64Subtarget *getST() const { return ST; }
51 const AArch64TargetLowering *getTLI() const { return TLI; }
53 enum MemIntrinsicType {
54 VECTOR_LDST_TWO_ELEMENTS,
55 VECTOR_LDST_THREE_ELEMENTS,
56 VECTOR_LDST_FOUR_ELEMENTS
59 bool isWideningInstruction(Type *Ty, unsigned Opcode,
60 ArrayRef<const Value *> Args);
62 public:
63 explicit AArch64TTIImpl(const AArch64TargetMachine *TM, const Function &F)
64 : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
65 TLI(ST->getTargetLowering()) {}
67 bool areInlineCompatible(const Function *Caller,
68 const Function *Callee) const;
70 /// \name Scalar TTI Implementations
71 /// @{
73 using BaseT::getIntImmCost;
74 int getIntImmCost(int64_t Val);
75 int getIntImmCost(const APInt &Imm, Type *Ty);
76 int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
77 int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
78 Type *Ty);
79 TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
81 /// @}
83 /// \name Vector TTI Implementations
84 /// @{
86 bool enableInterleavedAccessVectorization() { return true; }
88 unsigned getNumberOfRegisters(bool Vector) {
89 if (Vector) {
90 if (ST->hasNEON())
91 return 32;
92 return 0;
94 return 31;
97 unsigned getRegisterBitWidth(bool Vector) const {
98 if (Vector) {
99 if (ST->hasNEON())
100 return 128;
101 return 0;
103 return 64;
106 unsigned getMinVectorRegisterBitWidth() {
107 return ST->getMinVectorRegisterBitWidth();
110 unsigned getMaxInterleaveFactor(unsigned VF);
112 int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
113 const Instruction *I = nullptr);
115 int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
116 unsigned Index);
118 int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
120 int getArithmeticInstrCost(
121 unsigned Opcode, Type *Ty,
122 TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
123 TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
124 TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
125 TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
126 ArrayRef<const Value *> Args = ArrayRef<const Value *>());
128 int getAddressComputationCost(Type *Ty, ScalarEvolution *SE, const SCEV *Ptr);
130 int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
131 const Instruction *I = nullptr);
133 TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
134 bool IsZeroCmp) const;
136 int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
137 unsigned AddressSpace, const Instruction *I = nullptr);
139 int getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);
141 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
142 TTI::UnrollingPreferences &UP);
144 Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
145 Type *ExpectedType);
147 bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
149 int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
150 ArrayRef<unsigned> Indices, unsigned Alignment,
151 unsigned AddressSpace,
152 bool UseMaskForCond = false,
153 bool UseMaskForGaps = false);
155 bool
156 shouldConsiderAddressTypePromotion(const Instruction &I,
157 bool &AllowPromotionWithoutCommonHeader);
159 unsigned getCacheLineSize();
161 unsigned getPrefetchDistance();
163 unsigned getMinPrefetchStride();
165 unsigned getMaxPrefetchIterationsAhead();
167 bool shouldExpandReduction(const IntrinsicInst *II) const {
168 return false;
171 unsigned getGISelRematGlobalCost() const {
172 return 2;
175 bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
176 TTI::ReductionFlags Flags) const;
178 int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
179 bool IsPairwiseForm);
181 int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
182 /// @}
185 } // end namespace llvm
187 #endif // LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H