//===-- NVPTXTargetTransformInfo.cpp - NVPTX specific TTI -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "NVPTXTargetTransformInfo.h"
#include "NVPTXUtilities.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "NVPTXtti"

// Whether the given intrinsic reads threadIdx.x/y/z.
static bool readsThreadIndex(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::nvvm_read_ptx_sreg_tid_x:
    case Intrinsic::nvvm_read_ptx_sreg_tid_y:
    case Intrinsic::nvvm_read_ptx_sreg_tid_z:
      return true;
  }
}

static bool readsLaneId(const IntrinsicInst *II) {
  return II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_laneid;
}
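
// Note that the CUDA builtin threadIdx.x is lowered to a call to
// llvm.nvvm.read.ptx.sreg.tid.x (and likewise for .y/.z), and the PTX %laneid
// special register is read through llvm.nvvm.read.ptx.sreg.laneid, which is
// what the two helpers above match.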

// Whether the given intrinsic is an atomic instruction in PTX.
static bool isNVVMAtomic(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::nvvm_atomic_load_add_f32:
    case Intrinsic::nvvm_atomic_load_inc_32:
    case Intrinsic::nvvm_atomic_load_dec_32:

    case Intrinsic::nvvm_atomic_add_gen_f_cta:
    case Intrinsic::nvvm_atomic_add_gen_f_sys:
    case Intrinsic::nvvm_atomic_add_gen_i_cta:
    case Intrinsic::nvvm_atomic_add_gen_i_sys:
    case Intrinsic::nvvm_atomic_and_gen_i_cta:
    case Intrinsic::nvvm_atomic_and_gen_i_sys:
    case Intrinsic::nvvm_atomic_cas_gen_i_cta:
    case Intrinsic::nvvm_atomic_cas_gen_i_sys:
    case Intrinsic::nvvm_atomic_dec_gen_i_cta:
    case Intrinsic::nvvm_atomic_dec_gen_i_sys:
    case Intrinsic::nvvm_atomic_inc_gen_i_cta:
    case Intrinsic::nvvm_atomic_inc_gen_i_sys:
    case Intrinsic::nvvm_atomic_max_gen_i_cta:
    case Intrinsic::nvvm_atomic_max_gen_i_sys:
    case Intrinsic::nvvm_atomic_min_gen_i_cta:
    case Intrinsic::nvvm_atomic_min_gen_i_sys:
    case Intrinsic::nvvm_atomic_or_gen_i_cta:
    case Intrinsic::nvvm_atomic_or_gen_i_sys:
    case Intrinsic::nvvm_atomic_exch_gen_i_cta:
    case Intrinsic::nvvm_atomic_exch_gen_i_sys:
    case Intrinsic::nvvm_atomic_xor_gen_i_cta:
    case Intrinsic::nvvm_atomic_xor_gen_i_sys:
      return true;
  }
}

bool NVPTXTTIImpl::isSourceOfDivergence(const Value *V) {
  // Without inter-procedural analysis, we conservatively assume that arguments
  // to __device__ functions are divergent.
  if (const Argument *Arg = dyn_cast<Argument>(V))
    return !isKernelFunction(*Arg->getParent());
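  // For example, given
  //   __device__ int f(int x) { return x; }
  //   __global__ void k() { f(threadIdx.x); }
  // the parameter x of f is treated as divergent here, whereas the parameters
  // of the kernel k itself are uniform: every thread is launched with the same
  // kernel arguments.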

  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    // Without pointer analysis, we conservatively assume values loaded from
    // generic or local address space are divergent.
    if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
      unsigned AS = LI->getPointerAddressSpace();
      return AS == ADDRESS_SPACE_GENERIC || AS == ADDRESS_SPACE_LOCAL;
    }
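    // (For instance, a value reloaded from the per-thread local stack may
    // differ between the threads of a warp.)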
    // Atomic instructions may cause divergence. Atomic instructions are
    // executed sequentially across all threads in a warp. Therefore, an earlier
    // executed thread may see different memory inputs than a later executed
    // thread. For example, suppose *a = 0 initially.
    //
    //   atom.global.add.s32 d, [a], 1
    //
    // returns 0 for the first thread that enters the critical region, and 1 for
    // the second thread.
    if (I->isAtomic())
      return true;
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      // Instructions that read threadIdx are obviously divergent.
      if (readsThreadIndex(II) || readsLaneId(II))
        return true;
      // Handle the NVPTX atomic intrinsics that cannot be represented as an
      // atomic IR instruction.
      if (isNVVMAtomic(II))
        return true;
    }
    // Conservatively consider the return value of function calls as divergent.
    // We could analyze callees with bodies more precisely using
    // inter-procedural analysis.
    if (isa<CallInst>(I))
      return true;
  }

  return false;
}

int NVPTXTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // The machine code (SASS) simulates an i64 with two i32. Therefore, we
    // estimate that arithmetic operations on i64 are twice as expensive as
    // those on types that can fit into one machine register.
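    // (For instance, ptxas typically expands a 64-bit integer add into an
    // add / add-with-carry pair of 32-bit instructions.)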
    if (LT.second.SimpleTy == MVT::i64)
      return 2 * LT.first;
    // Delegate other cases to the basic TTI.
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }
}

void NVPTXTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::UnrollingPreferences &UP) {
  BaseT::getUnrollingPreferences(L, SE, UP);

  // Enable partial unrolling and runtime unrolling, but reduce the
  // threshold. This partially unrolls small loops which are often
  // unrolled by the PTX to SASS compiler and unrolling earlier can be
  // beneficial.
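  // (Doing it at the IR level also lets later IR passes see and optimize the
  // unrolled body, which is not possible once the loop reaches ptxas.)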
  UP.Partial = UP.Runtime = true;
  UP.PartialThreshold = UP.Threshold / 4;
}