//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Interface definition of the TargetLowering class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/TargetLowering.h"
namespace llvm {

class AMDGPUMachineFunction;
class AMDGPUSubtarget;
struct ArgDescriptor;

class AMDGPUTargetLowering : public TargetLowering {
private:
  const AMDGPUSubtarget *Subtarget;
  /// \returns AMDGPUISD::FFBH_U32 node if the incoming \p Op may have been
  /// legalized from a smaller type VT. Need to match pre-legalized type because
  /// the generic legalization inserts the add/sub between the select and
  /// compare.
  SDValue getFFBX_U32(SelectionDAG &DAG, SDValue Op, const SDLoc &DL,
                      unsigned Opc) const;
  /// \returns The minimum number of bits needed to store the value of \p Op as
  /// an unsigned integer. Truncating to this size and then zero-extending to
  /// the original size will not change the value.
  static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG);

  /// \returns The minimum number of bits needed to store the value of \p Op as
  /// a signed integer. Truncating to this size and then sign-extending to the
  /// original size will not change the value.
  static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG);
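  // Illustrative example (not from the original header): for an i32 node
  // known to equal 5, numBitsUnsigned returns 3 (0b101); for a node known to
  // equal -3, numBitsSigned returns 3, since truncating to 3 bits and
  // sign-extending restores -3.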
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  /// Split a vector store into multiple scalar stores.
  /// \returns The resulting chain.
  SDValue ScalarizeVectorStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFROUNDEVEN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
  static bool allowApproxFunc(const SelectionDAG &DAG, SDNodeFlags Flags);
  static bool needsDenormHandlingF32(const SelectionDAG &DAG, SDValue Src,
                                     SDNodeFlags Flags);
  SDValue getIsLtSmallestNormal(SelectionDAG &DAG, SDValue Op,
                                SDNodeFlags Flags) const;
  SDValue getIsFinite(SelectionDAG &DAG, SDValue Op, SDNodeFlags Flags) const;
  std::pair<SDValue, SDValue> getScaledLogInput(SelectionDAG &DAG,
                                                const SDLoc SL, SDValue Op,
                                                SDNodeFlags Flags) const;
  SDValue LowerFLOG2(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLOGCommon(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLOG10(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLOGUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG,
                          bool IsLog10, SDNodeFlags Flags) const;
  SDValue lowerFEXP2(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerFEXPUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG,
                          SDNodeFlags Flags) const;
  SDValue lowerFEXP10Unsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG,
                            SDNodeFlags Flags) const;
  SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCTLZResults(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
protected:
  bool shouldCombineMemoryType(EVT VT) const;
  SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performIntrinsicWOChainCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL,
                                       unsigned Opc, SDValue LHS,
                                       uint32_t ValLo, uint32_t ValHi) const;
  SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulLoHiCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
                                  SDValue RHS, DAGCombinerInfo &DCI) const;
  SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
                               SDValue N) const;
  SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  TargetLowering::NegatibleCost
  getConstantNegateCost(const ConstantFPSDNode *C) const;

  bool isConstantCostlierToNegate(SDValue N) const;
  bool isConstantCheaperToNegate(SDValue N) const;
  SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);

  virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                                     SelectionDAG &DAG) const;
  /// Return 64-bit value Op as two 32-bit integers.
  std::pair<SDValue, SDValue> split64BitValue(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const;
  SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const;
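  // Illustrative example (not from the original header): splitting an i64
  // constant 0x0123456789ABCDEF yields Lo = 0x89ABCDEF and Hi = 0x01234567;
  // getLoHalf64/getHiHalf64 return the corresponding half individually.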
  /// Split a vector type into two parts. The first part is a power of two
  /// vector. The second part is whatever is left over, and is a scalar if it
  /// would otherwise be a 1-vector.
  std::pair<EVT, EVT> getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const;
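  // Illustrative examples (assumed, not from the original header):
  //   v3i32 -> (v2i32, i32)    (the leftover 1-vector becomes a scalar)
  //   v7i16 -> (v4i16, v3i16)  (the leftover part stays a vector)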
  /// Split a vector value into two parts of types LoVT and HiVT. HiVT could be
  /// empty.
  std::pair<SDValue, SDValue> splitVector(const SDValue &N, const SDLoc &DL,
                                          const EVT &LoVT, const EVT &HighVT,
                                          SelectionDAG &DAG) const;
  /// Split a vector load into 2 loads of half the vector.
  SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Widen a suitably aligned v3 load. For all other cases, split the input
  /// vector load.
  SDValue WidenOrSplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Split a vector store into 2 stores of half the vector.
  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
  void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
                      SmallVectorImpl<SDValue> &Results) const;

  void analyzeFormalArgumentsCompute(
      CCState &State,
      const SmallVectorImpl<ISD::InputArg> &Ins) const;
public:
  AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);

  bool mayIgnoreSignedZero(SDValue Op) const;

  static inline SDValue stripBitcast(SDValue Val) {
    return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
  }
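  // Usage sketch (illustrative): stripBitcast looks through a single bitcast,
  // so for N = (i32 bitcast (v2f16 X)), stripBitcast(N) yields X; any
  // non-bitcast node is returned unchanged.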
  static bool shouldFoldFNegIntoSrc(SDNode *FNeg, SDValue FNegSrc);
  static bool allUsesHaveSourceMods(const SDNode *N,
                                    unsigned CostThreshold = 4);
  bool isFAbsFree(EVT VT) const override;
  bool isFNegFree(EVT VT) const override;
  bool isTruncateFree(EVT Src, EVT Dest) const override;
  bool isTruncateFree(Type *Src, Type *Dest) const override;

  bool isZExtFree(Type *Src, Type *Dest) const override;
  bool isZExtFree(EVT Src, EVT Dest) const override;
  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                               bool LegalOperations, bool ForCodeSize,
                               NegatibleCost &Cost,
                               unsigned Depth) const override;

  bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const override;
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                          ISD::NodeType ExtendKind) const override;

  MVT getVectorIdxTy(const DataLayout &) const override;
  bool isSelectSupported(SelectSupportKind) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override;
  bool shouldReduceLoadWidth(SDNode *Load,
                             ISD::LoadExtType ExtType,
                             EVT ExtVT) const override;

  bool isLoadBitCastBeneficial(EVT, EVT, const SelectionDAG &DAG,
                               const MachineMemOperand &MMO) const final;

  bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
                                    unsigned NumElem,
                                    unsigned AS) const override;
  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override;
  bool isCheapToSpeculateCttz(Type *Ty) const override;
  bool isCheapToSpeculateCtlz(Type *Ty) const override;

  bool isSDNodeAlwaysUniform(const SDNode *N) const override;
  // FIXME: This hook should not exist.
  AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override {
    return AtomicExpansionKind::None;
  }

  AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override {
    return AtomicExpansionKind::None;
  }

  AtomicExpansionKind shouldCastAtomicRMWIInIR(AtomicRMWInst *) const override {
    return AtomicExpansionKind::None;
  }
  static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
  static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue addTokenForArgument(SDValue Chain,
                              MachineFrameInfo &MFI,
                              int ClobberedFI) const;

  SDValue lowerUnhandledCall(CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals,
                             StringRef Reason) const;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  void ReplaceNodeResults(SDNode *N,
                          SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue combineFMinMaxLegacyImpl(const SDLoc &DL, EVT VT, SDValue LHS,
                                   SDValue RHS, SDValue True, SDValue False,
                                   SDValue CC, DAGCombinerInfo &DCI) const;

  SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS,
                               SDValue RHS, SDValue True, SDValue False,
                               SDValue CC, DAGCombinerInfo &DCI) const;

  const char *getTargetNodeName(unsigned Opcode) const override;
  // FIXME: Turn off MergeConsecutiveStores() before Instruction Selection for
  // AMDGPU. Commit r319036
  // (https://github.com/llvm/llvm-project/commit/db77e57ea86d941a4262ef60261692f4cb6893e6)
  // turned on MergeConsecutiveStores() before Instruction Selection for all
  // targets. Enough AMDGPU compiles go into an infinite loop (
  // MergeConsecutiveStores() merges two stores; LegalizeStoreOps() un-merges;
  // MergeConsecutiveStores() re-merges, etc. ) to warrant turning it off for
  // now.
  bool mergeStoresAfterLegalization(EVT) const override { return false; }

  bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override {
    return true;
  }
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &RefinementSteps, bool &UseOneConstNR,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &RefinementSteps) const override;

  virtual SDNode *PostISelFolding(MachineSDNode *N,
                                  SelectionDAG &DAG) const = 0;
  /// Determine which of the bits specified in \p Mask are known to be
  /// either zero or one and return them in the \p KnownZero and \p KnownOne
  /// bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth = 0) const override;
  unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
                                            Register R,
                                            const APInt &DemandedElts,
                                            const MachineRegisterInfo &MRI,
                                            unsigned Depth = 0) const override;
  bool isKnownNeverNaNForTargetNode(SDValue Op,
                                    const SelectionDAG &DAG,
                                    bool SNaN = false,
                                    unsigned Depth = 0) const override;

  bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
                           Register N1) const override;
  /// Helper function that adds Reg to the LiveIn list of the DAG's
  /// MachineFunction.
  ///
  /// \returns a RegisterSDNode representing Reg if \p RawReg is true, otherwise
  /// a copy from the register.
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               Register Reg, EVT VT,
                               const SDLoc &SL,
                               bool RawReg = false) const;
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               Register Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()));
  }
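  // Usage sketch (illustrative, names assumed): materializing an incoming
  // 64-bit SGPR pair as an i64 value might look like
  //   SDValue V = CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg,
  //                                    MVT::i64);
  // which adds Reg as a live-in and returns a copy from it.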
  // Returns the raw live in register rather than a copy from it.
  SDValue CreateLiveInRegisterRaw(SelectionDAG &DAG,
                                  const TargetRegisterClass *RC,
                                  Register Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()),
                                true);
  }
  /// Similar to CreateLiveInRegister, except value may be loaded from a stack
  /// slot rather than passed in a register.
  SDValue loadStackInputValue(SelectionDAG &DAG,
                              EVT VT,
                              const SDLoc &SL,
                              int64_t Offset) const;

  SDValue storeStackInputValue(SelectionDAG &DAG,
                               const SDLoc &SL,
                               SDValue Chain,
                               SDValue ArgVal,
                               int64_t Offset) const;

  SDValue loadInputValue(SelectionDAG &DAG,
                         const TargetRegisterClass *RC,
                         EVT VT, const SDLoc &SL,
                         const ArgDescriptor &Arg) const;
  enum ImplicitParameter {
    FIRST_IMPLICIT,
    PRIVATE_BASE,
    SHARED_BASE,
    QUEUE_PTR,
  };
  /// Helper function that returns the byte offset of the given
  /// type of implicit parameter.
  uint32_t getImplicitParameterOffset(const MachineFunction &MF,
                                      const ImplicitParameter Param) const;
  uint32_t getImplicitParameterOffset(const uint64_t ExplicitKernArgSize,
                                      const ImplicitParameter Param) const;
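  // Conceptually (an illustration, not spelled out in this header): the
  // result is the explicit kernel argument size rounded up to the implicit
  // argument alignment, plus the byte offset of Param within the implicit
  // argument block.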
  MVT getFenceOperandTy(const DataLayout &DL) const override {
    return MVT::i32;
  }

  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;
};
namespace AMDGPUISD {

enum NodeType : unsigned {
  // AMDIL ISD Opcodes
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  UMUL, // 32-bit unsigned multiplication
  BRANCH_COND,
  // End AMDIL ISD Opcodes
  // Masked control flow nodes.
  IF,
  ELSE,
  LOOP,

  // A uniform kernel return that terminates the wavefront.
  ENDPGM,

  // s_endpgm, but we may want to insert it in the middle of the block.
  ENDPGM_TRAP,
  // "s_trap 2" equivalent on hardware that does not support it.
  SIMULATED_TRAP,
  // Return to a shader part's epilog code.
  RETURN_TO_EPILOG,

  // Return with values from a non-entry function.
  RET_GLUE,

  // Convert an unswizzled wave uniform stack address to an address compatible
  // with a vector offset for use in stack access.
  WAVE_ADDRESS,

  /// CLAMP value between 0.0 and 1.0. NaN clamped to 0, following clamp output
  /// modifier behavior with dx10_enable.
  CLAMP,
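  // Illustrative semantics (not from the original header):
  //   CLAMP(-0.5) -> 0.0, CLAMP(1.5) -> 1.0, CLAMP(NaN) -> 0.0.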
  // This is SETCC with the full mask result which is used for a compare with a
  // result bit per item in the wavefront.
  SETCC,

  // FP ops with input and output chain.
  FMA_W_CHAIN,
  FMUL_W_CHAIN,
  // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to 100 pi.
  // Denormals handled on some parts.
  SIN_HW,
  COS_HW,
  // For emitting ISD::FMAD when f32 denormals are enabled because mac/mad is
  // treated as an illegal operation.
  FMAD_FTZ,
  // RCP, RSQ - For f32, 1 ULP max error, no denormal handling.
  // For f64, max error 2^29 ULP, handles denormals.
  RCP,
  RSQ,
  // log2, no denormal handling for f32.
  LOG,
  // exp2, no denormal handling for f32.
  EXP,
  BFE_U32,  // Extract range of bits with zero extension to 32-bits.
  BFE_I32,  // Extract range of bits with sign extension to 32-bits.
  BFI,      // (src0 & src1) | (~src0 & src2)
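  // Illustrative: with a constant select mask, BFI(0xFF00, a, b) computes
  // (a & 0xFF00) | (b & 0x00FF).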
  BFM,      // Insert a range of bits into a 32-bit word.
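  // Illustrative: BFM(width, offset) builds ((1 << width) - 1) << offset,
  // e.g. BFM(4, 8) == 0x00000F00.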
  FFBH_U32, // ctlz with -1 if input is zero.
  FFBH_I32,
  FFBL_B32, // cttz with -1 if input is zero.
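  // Illustrative: FFBH_U32(0x0000FFFF) == 16 (16 leading zeros) and
  // FFBL_B32(0x00010000) == 16 (16 trailing zeros); both return -1 for 0.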
  // These cvt_f32_ubyte* nodes need to remain consecutive and in order.
  CVT_F32_UBYTE0,
  CVT_F32_UBYTE1,
  CVT_F32_UBYTE2,
  CVT_F32_UBYTE3,

  // Convert two float 32 numbers into a single register holding two packed f16
  // with round to zero.
  CVT_PKRTZ_F16_F32,

  // Same as the standard node, except the high bits of the resulting integer
  // are known 0.
  FP_TO_FP16,
  /// This node is for VLIW targets and it is used to represent a vector
  /// that is stored in consecutive registers with the same channel.
  BUILD_VERTICAL_VECTOR,
  /// Pointer to the start of the shader's constant data.
  CONST_DATA_PTR,
  FPTRUNC_ROUND_UPWARD,
  FPTRUNC_ROUND_DOWNWARD,
  FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
  TBUFFER_STORE_FORMAT,
  TBUFFER_STORE_FORMAT_D16,
  TBUFFER_LOAD_FORMAT,
  TBUFFER_LOAD_FORMAT_D16,
  BUFFER_LOAD_UBYTE_TFE,
  BUFFER_LOAD_USHORT_TFE,
  BUFFER_LOAD_BYTE_TFE,
  BUFFER_LOAD_SHORT_TFE,
  BUFFER_LOAD_FORMAT,
  BUFFER_LOAD_FORMAT_TFE,
  BUFFER_LOAD_FORMAT_D16,
  BUFFER_STORE_FORMAT_D16,
  BUFFER_ATOMIC_CMPSWAP,
  BUFFER_ATOMIC_COND_SUB_U32,

  LAST_AMDGPU_ISD_NUMBER
};
} // End namespace AMDGPUISD
} // End namespace llvm

#endif