//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Interface definition of the TargetLowering class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H

#include "AMDGPU.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

class AMDGPUMachineFunction;
class AMDGPUSubtarget;
struct ArgDescriptor;

class AMDGPUTargetLowering : public TargetLowering {
private:
  const AMDGPUSubtarget *Subtarget;

  /// \returns AMDGPUISD::FFBH_U32 node if the incoming \p Op may have been
  /// legalized from a smaller type VT. Need to match pre-legalized type because
  /// the generic legalization inserts the add/sub between the select and
  /// compare.
  SDValue getFFBX_U32(SelectionDAG &DAG, SDValue Op, const SDLoc &DL,
                      unsigned Opc) const;

public:
  static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG);
  static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG);

protected:
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  /// Split a vector store into multiple scalar stores.
  /// \returns The resulting chain.

  SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFROUND32_16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFROUND64(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
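  // Note: LowerFLOG implements a log of arbitrary base via log2:
  //   log_b(x) = log2(x) * Log2BaseInverted, where Log2BaseInverted = 1/log2(b)
  //   (e.g. ~0.30103 for log10).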
  SDValue LowerFLOG(SDValue Op, SelectionDAG &DAG,
                    double Log2BaseInverted) const;
  SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

protected:
  bool shouldCombineMemoryType(EVT VT) const;
  SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL,
                                       unsigned Opc, SDValue LHS,
                                       uint32_t ValLo, uint32_t ValHi) const;
  SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulLoHi24Combine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
                                  SDValue RHS, DAGCombinerInfo &DCI) const;
  SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  bool isConstantCostlierToNegate(SDValue N) const;
  SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);

  virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                                     SelectionDAG &DAG) const;

  /// Return 64-bit value Op as two 32-bit integers.
  std::pair<SDValue, SDValue> split64BitValue(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const;
  SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const;

  /// Split a vector load into 2 loads of half the vector.
  SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Split a vector store into 2 stores of half the vector.
  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
  void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
                      SmallVectorImpl<SDValue> &Results) const;

  void analyzeFormalArgumentsCompute(
    CCState &State,
    const SmallVectorImpl<ISD::InputArg> &Ins) const;

public:
  AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);
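
  /// \returns true if -0.0 may be treated as +0.0 for \p Op: either the
  /// NoSignedZerosFPMath target option is set, or the node itself carries the
  /// no-signed-zeros fast-math flag.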
  bool mayIgnoreSignedZero(SDValue Op) const {
    if (getTargetMachine().Options.NoSignedZerosFPMath)
      return true;

    const auto Flags = Op.getNode()->getFlags();
    if (Flags.isDefined())
      return Flags.hasNoSignedZeros();

    return false;
  }
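
  // Peeks through a single ISD::BITCAST to the underlying value, if present.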
  static inline SDValue stripBitcast(SDValue Val) {
    return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
  }

  static bool allUsesHaveSourceMods(const SDNode *N,
                                    unsigned CostThreshold = 4);
  bool isFAbsFree(EVT VT) const override;
  bool isFNegFree(EVT VT) const override;
  bool isTruncateFree(EVT Src, EVT Dest) const override;
  bool isTruncateFree(Type *Src, Type *Dest) const override;

  bool isZExtFree(Type *Src, Type *Dest) const override;
  bool isZExtFree(EVT Src, EVT Dest) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;

  MVT getVectorIdxTy(const DataLayout &) const override;
  bool isSelectSupported(SelectSupportKind) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override;
  bool shouldReduceLoadWidth(SDNode *Load,
                             ISD::LoadExtType ExtType,
                             EVT ExtVT) const override;

  bool isLoadBitCastBeneficial(EVT, EVT) const final;

  bool storeOfVectorConstantIsCheap(EVT MemVT,
                                    unsigned NumElem,
                                    unsigned AS) const override;
  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;

  bool isSDNodeAlwaysUniform(const SDNode *N) const override;
  static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
  static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue addTokenForArgument(SDValue Chain,
                              SelectionDAG &DAG,
                              MachineFrameInfo &MFI,
                              int ClobberedFI) const;

  SDValue lowerUnhandledCall(CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals,
                             StringRef Reason) const;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op,
                                  SelectionDAG &DAG) const;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  void ReplaceNodeResults(SDNode *N,
                          SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS,
                               SDValue RHS, SDValue True, SDValue False,
                               SDValue CC, DAGCombinerInfo &DCI) const;

  const char* getTargetNodeName(unsigned Opcode) const override;

  // FIXME: Turn off MergeConsecutiveStores() before Instruction Selection for
  // AMDGPU. Commit r319036
  // (https://github.com/llvm/llvm-project/commit/db77e57ea86d941a4262ef60261692f4cb6893e6)
  // turned on MergeConsecutiveStores() before Instruction Selection for all
  // targets. Enough AMDGPU compiles go into an infinite loop
  // (MergeConsecutiveStores() merges two stores; LegalizeStoreOps() un-merges;
  // MergeConsecutiveStores() re-merges, etc.) to warrant turning it off for
  // now.
  bool mergeStoresAfterLegalization() const override { return false; }

  bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override {
    return true;
  }
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &RefinementSteps, bool &UseOneConstNR,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &RefinementSteps) const override;

  virtual SDNode *PostISelFolding(MachineSDNode *N,
                                  SelectionDAG &DAG) const = 0;

  /// Determine which of the bits specified in \p Mask are known to be
  /// either zero or one and return them in the \p KnownZero and \p KnownOne
  /// bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth = 0) const override;

  bool isKnownNeverNaNForTargetNode(SDValue Op,
                                    const SelectionDAG &DAG,
                                    bool SNaN = false,
                                    unsigned Depth = 0) const override;

  /// Helper function that adds Reg to the LiveIn list of the DAG's
  /// MachineFunction.
  ///
  /// \returns a RegisterSDNode representing Reg if \p RawReg is true, otherwise
  /// a copy from the register.
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               unsigned Reg, EVT VT,
                               const SDLoc &SL,
                               bool RawReg = false) const;
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               unsigned Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()));
  }

  // Returns the raw live in register rather than a copy from it.
  SDValue CreateLiveInRegisterRaw(SelectionDAG &DAG,
                                  const TargetRegisterClass *RC,
                                  unsigned Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()),
                                true);
  }

  /// Similar to CreateLiveInRegister, except the value may be loaded from a
  /// stack slot rather than passed in a register.
  SDValue loadStackInputValue(SelectionDAG &DAG,
                              EVT VT,
                              const SDLoc &SL,
                              int64_t Offset) const;

  SDValue storeStackInputValue(SelectionDAG &DAG,
                               const SDLoc &SL,
                               SDValue Chain,
                               SDValue ArgVal,
                               int64_t Offset) const;

  SDValue loadInputValue(SelectionDAG &DAG,
                         const TargetRegisterClass *RC,
                         EVT VT, const SDLoc &SL,
                         const ArgDescriptor &Arg) const;

  enum ImplicitParameter {
    FIRST_IMPLICIT,
    GRID_DIM = FIRST_IMPLICIT,
    GRID_OFFSET,
  };

  /// Helper function that returns the byte offset of the given
  /// type of implicit parameter.
  uint32_t getImplicitParameterOffset(const MachineFunction &MF,
                                      const ImplicitParameter Param) const;

  MVT getFenceOperandTy(const DataLayout &DL) const override {
    return MVT::i32;
  }

  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
};

namespace AMDGPUISD {

enum NodeType : unsigned {
  // AMDIL ISD Opcodes
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  UMUL, // 32bit unsigned multiplication
  BRANCH_COND,
  // End AMDIL ISD Opcodes

  // Function call.
  CALL,
  TC_RETURN,
  TRAP,

  // Masked control flow nodes.
  IF,
  ELSE,
  LOOP,

  // A uniform kernel return that terminates the wavefront.
  ENDPGM,

  // Return to a shader part's epilog code.
  RETURN_TO_EPILOG,

  // Return with values from a non-entry function.
  RET_FLAG,

  DWORDADDR,
  FRACT,

  /// CLAMP value between 0.0 and 1.0. NaN clamped to 0, following clamp output
  /// modifier behavior with dx10_enable.
  CLAMP,
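  // Illustrative semantics: CLAMP(x) = min(max(x, 0.0), 1.0), with
  // CLAMP(NaN) = 0.0 (dx10_enable behavior, per the note above).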

  // This is SETCC with the full mask result which is used for a compare with a
  // result bit per item in the wavefront.
  SETCC,
  SETREG,
  // FP ops with input and output chain.
  FMA_W_CHAIN,
  FMUL_W_CHAIN,

  // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to 100 pi.
  // Denormals handled on some parts.
  COS_HW,
  SIN_HW,
  FMAX_LEGACY,
  FMIN_LEGACY,

  FMAX3,
  SMAX3,
  UMAX3,
  FMIN3,
  SMIN3,
  UMIN3,
  FMED3,
  SMED3,
  UMED3,
  FDOT2,
  URECIP,
  DIV_SCALE,
  DIV_FMAS,
  DIV_FIXUP,
  // For emitting ISD::FMAD when f32 denormals are enabled because mac/mad is
  // treated as an illegal operation.
  FMAD_FTZ,
  TRIG_PREOP, // 1 ULP max error for f64

  // RCP, RSQ - For f32, 1 ULP max error, no denormal handling.
  // For f64, max error 2^29 ULP, handles denormals.
  RCP,
  RSQ,
  RCP_LEGACY,
  RSQ_LEGACY,
  RCP_IFLAG,
  FMUL_LEGACY,
  RSQ_CLAMP,
  LDEXP,
  FP_CLASS,
  DOT4,
  CARRY,
  BORROW,
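  // BFE_U32/BFE_I32 extract a bitfield; illustratively (operand order assumed),
  //   BFE_U32(src, offset, width) ~ (src >> offset) & ((1u << width) - 1).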
  BFE_U32, // Extract range of bits with zero extension to 32-bits.
  BFE_I32, // Extract range of bits with sign extension to 32-bits.
  BFI, // (src0 & src1) | (~src0 & src2)
  BFM, // Insert a range of bits into a 32-bit word.
  FFBH_U32, // ctlz with -1 if input is zero.
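  //   e.g. FFBH_U32(0x00800000) = 8, FFBH_U32(0) = 0xffffffff.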
  FFBH_I32,
  FFBL_B32, // cttz with -1 if input is zero.
  MUL_U24,
  MUL_I24,
  MULHI_U24,
  MULHI_I24,
  MAD_U24,
  MAD_I24,
  MAD_U64_U32,
  MAD_I64_I32,
  MUL_LOHI_I24,
  MUL_LOHI_U24,
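  // PERM: byte-wise permute of a pair of 32-bit sources (cf. v_perm_b32;
  // mapping assumed here).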
  PERM,
  TEXTURE_FETCH,
  EXPORT, // exp on SI+
  EXPORT_DONE, // exp on SI+ with done bit set
  R600_EXPORT,
  CONST_ADDRESS,
  REGISTER_LOAD,
  REGISTER_STORE,
  SAMPLE,
  SAMPLEB,
  SAMPLED,
  SAMPLEL,

  // These cvt_f32_ubyte* nodes need to remain consecutive and in order.
  CVT_F32_UBYTE0,
  CVT_F32_UBYTE1,
  CVT_F32_UBYTE2,
  CVT_F32_UBYTE3,
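  //   CVT_F32_UBYTEn converts byte n of an i32 to float; illustratively,
  //   CVT_F32_UBYTE2(0x00ff0000) = 255.0f.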

  // Convert two float 32 numbers into a single register holding two packed f16
  // with round to zero.
  CVT_PKRTZ_F16_F32,
  CVT_PKNORM_I16_F32,
  CVT_PKNORM_U16_F32,
  CVT_PK_I16_I32,
  CVT_PK_U16_U32,

  // Same as the standard node, except the high bits of the resulting integer
  // are known 0.
  FP_TO_FP16,

  // Wrapper around fp16 results that are known to zero the high bits.
  FP16_ZEXT,

  /// This node is for VLIW targets and it is used to represent a vector
  /// that is stored in consecutive registers with the same channel.
  /// For example:
  ///   |X  |Y|Z|W|
  /// T0|v.x| | | |
  /// T1|v.y| | | |
  /// T2|v.z| | | |
  /// T3|v.w| | | |
  BUILD_VERTICAL_VECTOR,
  /// Pointer to the start of the shader's constant data.
  CONST_DATA_PTR,
  INIT_EXEC,
  INIT_EXEC_FROM_INPUT,
  SENDMSG,
  SENDMSGHALT,
  INTERP_MOV,
  INTERP_P1,
  INTERP_P2,
  INTERP_P1LL_F16,
  INTERP_P1LV_F16,
  INTERP_P2_F16,
  PC_ADD_REL_OFFSET,
  KILL,
  DUMMY_CHAIN,
  FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
  STORE_MSKOR,
  LOAD_CONSTANT,
  TBUFFER_STORE_FORMAT,
  TBUFFER_STORE_FORMAT_X3,
  TBUFFER_STORE_FORMAT_D16,
  TBUFFER_LOAD_FORMAT,
  TBUFFER_LOAD_FORMAT_D16,
  DS_ORDERED_COUNT,
  ATOMIC_CMP_SWAP,
  ATOMIC_INC,
  ATOMIC_DEC,
  ATOMIC_LOAD_FMIN,
  ATOMIC_LOAD_FMAX,
  BUFFER_LOAD,
  BUFFER_LOAD_FORMAT,
  BUFFER_LOAD_FORMAT_D16,
  SBUFFER_LOAD,
  BUFFER_STORE,
  BUFFER_STORE_FORMAT,
  BUFFER_STORE_FORMAT_D16,
  BUFFER_ATOMIC_SWAP,
  BUFFER_ATOMIC_ADD,
  BUFFER_ATOMIC_SUB,
  BUFFER_ATOMIC_SMIN,
  BUFFER_ATOMIC_UMIN,
  BUFFER_ATOMIC_SMAX,
  BUFFER_ATOMIC_UMAX,
  BUFFER_ATOMIC_AND,
  BUFFER_ATOMIC_OR,
  BUFFER_ATOMIC_XOR,
  BUFFER_ATOMIC_CMPSWAP,

  LAST_AMDGPU_ISD_NUMBER
};

} // End namespace AMDGPUISD

} // End namespace llvm

#endif