//===- AArch64TargetTransformInfo.h - AArch64 specific TTI ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the AArch64 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H

#include "AArch64.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include <cstdint>

namespace llvm {

class APInt;
class Instruction;
class IntrinsicInst;
class Loop;
class SCEV;
class ScalarEvolution;
class Type;
class Value;
class VectorType;

class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
  using BaseT = BasicTTIImplBase<AArch64TTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const AArch64Subtarget *ST;
  const AArch64TargetLowering *TLI;

  const AArch64Subtarget *getST() const { return ST; }
  const AArch64TargetLowering *getTLI() const { return TLI; }
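
  // Element counts distinguishing the NEON structure load/store (ldN/stN)
  // memory intrinsics.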
  enum MemIntrinsicType {
    VECTOR_LDST_TWO_ELEMENTS,
    VECTOR_LDST_THREE_ELEMENTS,
    VECTOR_LDST_FOUR_ELEMENTS
  };
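
  // Whether an operation of Opcode on type Ty with the given (extended)
  // operands corresponds to a widening vector instruction (e.g. NEON
  // uaddl/saddw), in which case the operand extension is effectively free.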
  bool isWideningInstruction(Type *Ty, unsigned Opcode,
                             ArrayRef<const Value *> Args);

public:
  explicit AArch64TTIImpl(const AArch64TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  /// \name Scalar TTI Implementations
  /// @{
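
  // Costs of materializing an integer immediate, either standalone or as an
  // operand of the given instruction or intrinsic.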
  using BaseT::getIntImmCost;
  int getIntImmCost(int64_t Val);
  int getIntImmCost(const APInt &Imm, Type *Ty);
  int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty);
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  bool enableInterleavedAccessVectorization() { return true; }
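
  // ClassID 1 selects the vector register class. AArch64 has 31 general
  // purpose registers (X0-X30); NEON adds 32 vector registers (V0-V31).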
  unsigned getNumberOfRegisters(unsigned ClassID) const {
    bool Vector = (ClassID == 1);
    if (Vector) {
      if (ST->hasNEON())
        return 32;
      return 0;
    }
    return 31;
  }
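
  // NEON Q registers are 128 bits wide; general purpose registers are 64 bits.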
  unsigned getRegisterBitWidth(bool Vector) const {
    if (Vector) {
      if (ST->hasNEON())
        return 128;
      return 0;
    }
    return 64;
  }

  unsigned getMinVectorRegisterBitWidth() {
    return ST->getMinVectorRegisterBitWidth();
  }

  unsigned getMaxInterleaveFactor(unsigned VF);

  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       const Instruction *I = nullptr);

  int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
                               unsigned Index);

  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);

  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>());

  int getAddressComputationCost(Type *Ty, ScalarEvolution *SE, const SCEV *Ptr);

  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                         const Instruction *I = nullptr);

  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;

  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace, const Instruction *I = nullptr);

  int getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType);

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
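
  // Cost of an interleaved vector load or store; interleave factors of two to
  // four can be lowered to the NEON ldN/stN structure load/store instructions.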
  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                 ArrayRef<unsigned> Indices, unsigned Alignment,
                                 unsigned AddressSpace,
                                 bool UseMaskForCond = false,
                                 bool UseMaskForGaps = false);

  bool
  shouldConsiderAddressTypePromotion(const Instruction &I,
                                     bool &AllowPromotionWithoutCommonHeader);

  bool shouldExpandReduction(const IntrinsicInst *II) const {
    return false;
  }

  unsigned getGISelRematGlobalCost() const {
    return 2;
  }

  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             TTI::ReductionFlags Flags) const;

  int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                 bool IsPairwiseForm);

  int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
  /// @}
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H