Recommit r373598 "[yaml2obj/obj2yaml] - Add support for SHT_LLVM_ADDRSIG sections."
[llvm-complete.git] / lib / Target / NVPTX / NVPTXTargetTransformInfo.h
blob b179a28fa713b2e498bf0e6b79b088fd60d1fdb1
//===-- NVPTXTargetTransformInfo.h - NVPTX specific TTI ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the NVPTX target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H

#include "NVPTXTargetMachine.h"
#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {
  typedef BasicTTIImplBase<NVPTXTTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const NVPTXSubtarget *ST;
  const NVPTXTargetLowering *TLI;

  const NVPTXSubtarget *getST() const { return ST; }
  const NVPTXTargetLowering *getTLI() const { return TLI; }

public:
  explicit NVPTXTTIImpl(const NVPTXTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl()),
        TLI(ST->getTargetLowering()) {}

  bool hasBranchDivergence() { return true; }

  bool isSourceOfDivergence(const Value *V);

  unsigned getFlatAddressSpace() const {
    return AddressSpace::ADDRESS_SPACE_GENERIC;
  }
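
  // Note: NVPTXBaseInfo.h numbers the generic address space 0, so IR pointers
  // without an explicit addrspace qualifier already live in the flat space.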

  // Loads and stores can be vectorized if the alignment is at least as big as
  // the load/store we want to vectorize.
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const {
    return Alignment >= ChainSizeInBytes;
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const {
    return isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment, AddrSpace);
  }
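
  // Example (illustrative): a chain of two adjacent i32 loads spans 8 bytes,
  // so it may become a single v2i32 load only if the chain start is known to
  // be 8-byte aligned; with only 4-byte alignment the chain stays scalar.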

  // PTX exposes an unlimited number of virtual registers of every kind, but
  // the physical machine does not. We conservatively return 1 here, which is
  // just enough to enable the vectorizers but disables heuristics based on
  // the number of registers.
  // FIXME: Return a more reasonable number, while keeping an eye on
  // LoopVectorizer's unrolling heuristics.
  unsigned getNumberOfRegisters(bool Vector) const { return 1; }

  // Only <2 x half> should be vectorized, so always return 32 for the vector
  // register size.
  unsigned getRegisterBitWidth(bool Vector) const { return 32; }
  unsigned getMinVectorRegisterBitWidth() const { return 32; }
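
  // Example (illustrative): <2 x half> is exactly 32 bits and fits this
  // budget, while <4 x float> (128 bits) does not, steering the vectorizers
  // toward f16x2 and away from wider vector types.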

  // We don't want to prevent inlining because of target-cpu and -features
  // attributes that were added to newer versions of LLVM/Clang: there are
  // no incompatible functions in PTX; ptxas will report errors in such cases.
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return true;
  }

  // Increase the inlining cost threshold by a factor of 5, reflecting that
  // calls are particularly expensive in NVPTX.
  unsigned getInliningThresholdMultiplier() { return 5; }
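
  // Example (illustrative): with LLVM's default inline threshold of 225,
  // this multiplier yields an effective threshold of roughly 5 * 225 = 1125
  // for call sites in NVPTX functions.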

  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>());
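
  // Defined in NVPTXTargetTransformInfo.cpp. For example (illustrative),
  // i64 arithmetic, which the machine emulates with pairs of 32-bit
  // registers, can be costed higher than the equivalent i32 operation.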

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);
  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) {
    // Volatile loads/stores are only supported for shared and global address
    // spaces, or for generic AS that maps to them.
    if (!(AddrSpace == llvm::ADDRESS_SPACE_GENERIC ||
          AddrSpace == llvm::ADDRESS_SPACE_GLOBAL ||
          AddrSpace == llvm::ADDRESS_SPACE_SHARED))
      return false;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::Load:
    case Instruction::Store:
      return true;
    }
  }
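
  // Example (illustrative): "store volatile float %v, float addrspace(3)* %p"
  // (shared memory) has a volatile PTX form, so this returns true; the same
  // store to addrspace(5) (local) is rejected by the address-space check.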
};

} // end namespace llvm

#endif
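
These hooks are not called directly by optimizations: passes request the
TargetTransformInfo analysis, which routes each query to the active target's
implementation. The sketch below is a minimal legacy-pass-manager function
pass that reads the NVPTX answers declared above. The QueryTTIExample pass
and its body are illustrative and not part of this file or of LLVM, while
the TargetTransformInfoWrapperPass API it uses is real; expected NVPTX
results are noted in the comments.

// Sketch only: consult TTI from a pass; on NVPTX each query below is
// answered by NVPTXTTIImpl.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"

using namespace llvm;

namespace {
struct QueryTTIExample : public FunctionPass {
  static char ID;
  QueryTTIExample() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesAll();
  }

  bool runOnFunction(Function &F) override {
    const TargetTransformInfo &TTI =
        getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    bool Divergent = TTI.hasBranchDivergence();       // true on NVPTX
    unsigned FlatAS = TTI.getFlatAddressSpace();      // 0 (generic)
    unsigned VecBits = TTI.getRegisterBitWidth(true); // 32 on NVPTX
    (void)Divergent; (void)FlatAS; (void)VecBits;
    return false; // Query-only: the IR is never modified.
  }
};
} // end anonymous namespace

char QueryTTIExample::ID = 0;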