//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Interface definition of the TargetLowering class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

class AMDGPUMachineFunction;
class AMDGPUSubtarget;
struct ArgDescriptor;

class AMDGPUTargetLowering : public TargetLowering {
private:
  const AMDGPUSubtarget *Subtarget;

  /// \returns AMDGPUISD::FFBH_U32 node if the incoming \p Op may have been
  /// legalized from a smaller type VT. Need to match pre-legalized type because
  /// the generic legalization inserts the add/sub between the select and
  /// compare.
  SDValue getFFBX_U32(SelectionDAG &DAG, SDValue Op, const SDLoc &DL,
                      unsigned Opc) const;

public:
  /// \returns The minimum number of bits needed to store the value of \p Op
  /// as an unsigned integer. Truncating to this size and then zero-extending
  /// to the original size will not change the value.
  static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG);

  /// \returns The minimum number of bits needed to store the value of \p Op
  /// as a signed integer. Truncating to this size and then sign-extending to
  /// the original size will not change the value.
  static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG);
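
  // Example: for an i32 value produced by (and x, 0xff), the known-bits
  // analysis proves the top 24 bits are zero, so numBitsUnsigned returns 8;
  // truncating to i8 and zero-extending back to i32 preserves the value.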

protected:
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  /// Split a vector store into multiple scalar stores.
  /// \returns The resulting chain.

  SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLOG(SDValue Op, SelectionDAG &DAG,
                    double Log2BaseInverted) const;
  SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

protected:
  bool shouldCombineMemoryType(EVT VT) const;
  SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performIntrinsicWOChainCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL,
                                       unsigned Opc, SDValue LHS,
                                       uint32_t ValLo, uint32_t ValHi) const;
  SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulLoHiCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
                                  SDValue RHS, DAGCombinerInfo &DCI) const;
  SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  bool isConstantCostlierToNegate(SDValue N) const;
  SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);

  virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                                     SelectionDAG &DAG) const;

  /// Return 64-bit value Op as two 32-bit integers.
  std::pair<SDValue, SDValue> split64BitValue(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const;
  SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const;
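
  // Example: splitting the i64 constant 0x1111111122222222 yields the pair
  // (Lo = 0x22222222, Hi = 0x11111111); getLoHalf64 / getHiHalf64 return just
  // one half.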

  /// Split a vector type into two parts. The first part is a power of two
  /// vector. The second part is whatever is left over, and is a scalar if it
  /// would otherwise be a 1-vector.
  std::pair<EVT, EVT> getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const;
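
  // Example: v3i32 splits into (v2i32, i32), with the single leftover element
  // returned as a scalar rather than a 1-element vector; v8i16 splits into
  // (v4i16, v4i16).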

  /// Split a vector value into two parts of types LoVT and HiVT. HiVT could be
  /// scalar.
  std::pair<SDValue, SDValue> splitVector(const SDValue &N, const SDLoc &DL,
                                          const EVT &LoVT, const EVT &HighVT,
                                          SelectionDAG &DAG) const;

  /// Split a vector load into 2 loads of half the vector.
  SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Widen a suitably aligned v3 load. For all other cases, split the input
  /// vector load.
  SDValue WidenOrSplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;
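
  // Example: a sufficiently aligned v3i32 load is widened to a v4i32 load and
  // the extra lane discarded; an under-aligned one is instead split in two as
  // above.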

  /// Split a vector store into 2 stores of half the vector.
  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool Sign) const;
  void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
                      SmallVectorImpl<SDValue> &Results) const;

  void analyzeFormalArgumentsCompute(
      CCState &State, const SmallVectorImpl<ISD::InputArg> &Ins) const;

public:
  AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);

  bool mayIgnoreSignedZero(SDValue Op) const;

  static inline SDValue stripBitcast(SDValue Val) {
    return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
  }
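
  // Example: for Val = (i32 (bitcast (f32 X))), stripBitcast returns X; any
  // non-bitcast value is returned unchanged. Only a single bitcast is peeled,
  // not a chain of them.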

  static bool allUsesHaveSourceMods(const SDNode *N,
                                    unsigned CostThreshold = 4);
  bool isFAbsFree(EVT VT) const override;
  bool isFNegFree(EVT VT) const override;
  bool isTruncateFree(EVT Src, EVT Dest) const override;
  bool isTruncateFree(Type *Src, Type *Dest) const override;

  bool isZExtFree(Type *Src, Type *Dest) const override;
  bool isZExtFree(EVT Src, EVT Dest) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                               bool LegalOperations, bool ForCodeSize,
                               NegatibleCost &Cost,
                               unsigned Depth) const override;

  bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;

  EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                          ISD::NodeType ExtendKind) const override;

  MVT getVectorIdxTy(const DataLayout &) const override;
  bool isSelectSupported(SelectSupportKind) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override;
  bool shouldReduceLoadWidth(SDNode *Load,
                             ISD::LoadExtType ExtType,
                             EVT ExtVT) const override;

  bool isLoadBitCastBeneficial(EVT, EVT, const SelectionDAG &DAG,
                               const MachineMemOperand &MMO) const final;

  bool storeOfVectorConstantIsCheap(EVT MemVT,
                                    unsigned NumElem,
                                    unsigned AS) const override;
  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;

  bool isSDNodeAlwaysUniform(const SDNode *N) const override;
  static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
  static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue addTokenForArgument(SDValue Chain,
                              SelectionDAG &DAG,
                              MachineFrameInfo &MFI,
                              int ClobberedFI) const;

  SDValue lowerUnhandledCall(CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals,
                             StringRef Reason) const;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  void ReplaceNodeResults(SDNode *N,
                          SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS,
                               SDValue RHS, SDValue True, SDValue False,
                               SDValue CC, DAGCombinerInfo &DCI) const;

  const char *getTargetNodeName(unsigned Opcode) const override;

  // FIXME: Turn off MergeConsecutiveStores() before Instruction Selection for
  // AMDGPU. Commit r319036
  // (https://github.com/llvm/llvm-project/commit/db77e57ea86d941a4262ef60261692f4cb6893e6)
  // turned on MergeConsecutiveStores() before Instruction Selection for all
  // targets. Enough AMDGPU compiles go into an infinite loop
  // (MergeConsecutiveStores() merges two stores; LegalizeStoreOps() un-merges;
  // MergeConsecutiveStores() re-merges, etc.) to warrant turning it off for
  // now.
  bool mergeStoresAfterLegalization(EVT) const override { return false; }

  bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override {
    return true;
  }
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &RefinementSteps, bool &UseOneConstNR,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &RefinementSteps) const override;

  virtual SDNode *PostISelFolding(MachineSDNode *N,
                                  SelectionDAG &DAG) const = 0;

  /// Determine which bits of \p Op are known to be either zero or one and
  /// return them in \p Known.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth = 0) const override;

  unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
                                            Register R,
                                            const APInt &DemandedElts,
                                            const MachineRegisterInfo &MRI,
                                            unsigned Depth = 0) const override;

  bool isKnownNeverNaNForTargetNode(SDValue Op,
                                    const SelectionDAG &DAG,
                                    bool SNaN = false,
                                    unsigned Depth = 0) const override;

  /// Helper function that adds Reg to the LiveIn list of the DAG's
  /// MachineFunction.
  ///
  /// \returns a RegisterSDNode representing Reg if \p RawReg is true, otherwise
  /// a copy from the register.
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               Register Reg, EVT VT,
                               const SDLoc &SL,
                               bool RawReg = false) const;
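
  // Illustrative sketch (Reg stands for whichever physical register holds the
  // argument): materializing an incoming SGPR pair as an i64 pointer:
  //   SDValue Ptr = CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg,
  //                                      MVT::i64, SL);
  // With RawReg = false this adds Reg as a live-in and returns a CopyFromReg;
  // with RawReg = true the raw RegisterSDNode is returned instead.
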
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               Register Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()));
  }

  // Returns the raw live-in register rather than a copy from it.
  SDValue CreateLiveInRegisterRaw(SelectionDAG &DAG,
                                  const TargetRegisterClass *RC,
                                  Register Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()),
                                true);
  }

  /// Similar to CreateLiveInRegister, except the value may be loaded from a
  /// stack slot rather than passed in a register.
  SDValue loadStackInputValue(SelectionDAG &DAG,
                              EVT VT,
                              const SDLoc &SL,
                              int64_t Offset) const;

  SDValue storeStackInputValue(SelectionDAG &DAG,
                               const SDLoc &SL,
                               SDValue Chain,
                               SDValue ArgVal,
                               int64_t Offset) const;

  SDValue loadInputValue(SelectionDAG &DAG,
                         const TargetRegisterClass *RC,
                         EVT VT, const SDLoc &SL,
                         const ArgDescriptor &Arg) const;

  enum ImplicitParameter {
    FIRST_IMPLICIT,
    PRIVATE_BASE,
    SHARED_BASE,
    QUEUE_PTR,
  };

  /// Helper function that returns the byte offset of the given
  /// type of implicit parameter.
  uint32_t getImplicitParameterOffset(const MachineFunction &MF,
                                      const ImplicitParameter Param) const;
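
  // Example (illustrative): querying the offset of the private aperture base,
  // which lives past the end of the explicit kernel arguments:
  //   uint32_t Off = getImplicitParameterOffset(MF, PRIVATE_BASE);
  // The returned byte offset is added to the kernarg segment pointer to form
  // the implicit argument's address.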

  MVT getFenceOperandTy(const DataLayout &DL) const override {
    return MVT::i32;
  }

  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;

  bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
                                              LLT Ty2) const override;
};

namespace AMDGPUISD {

enum NodeType : unsigned {
  // AMDIL ISD Opcodes
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  UMUL, // 32-bit unsigned multiplication
  BRANCH_COND,
  // End AMDIL ISD Opcodes

  // Function call.
  CALL,
  TC_RETURN,
  TRAP,

  // Masked control flow nodes.
  IF,
  ELSE,
  LOOP,

  // A uniform kernel return that terminates the wavefront.
  ENDPGM,

  // Return to a shader part's epilog code.
  RETURN_TO_EPILOG,

  // Return with values from a non-entry function.
  RET_FLAG,

  DWORDADDR,
  FRACT,

  /// CLAMP value between 0.0 and 1.0. NaN clamped to 0, following the clamp
  /// output modifier behavior with dx10_enable.
  CLAMP,

  // This is SETCC with the full mask result which is used for a compare with a
  // result bit per item in the wavefront.
  SETCC,
  SETREG,

  DENORM_MODE,

  // FP ops with input and output chain.
  FMA_W_CHAIN,
  FMUL_W_CHAIN,

  // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to
  // 100 pi. Denormals handled on some parts.
  COS_HW,
  SIN_HW,
  FMAX_LEGACY,
  FMIN_LEGACY,

  FMAX3,
  SMAX3,
  UMAX3,
  FMIN3,
  SMIN3,
  UMIN3,
  FMED3,
  SMED3,
  UMED3,
  FDOT2,
  URECIP,
  DIV_SCALE,
  DIV_FMAS,
  DIV_FIXUP,
  // For emitting ISD::FMAD when f32 denormals are enabled because mac/mad is
  // treated as an illegal operation.
  FMAD_FTZ,

  // RCP, RSQ - For f32, 1 ULP max error, no denormal handling.
  // For f64, max error 2^29 ULP, handles denormals.
  RCP,
  RSQ,
  RCP_LEGACY,
  RCP_IFLAG,
  FMUL_LEGACY,
  RSQ_CLAMP,
  LDEXP,
  FP_CLASS,
  DOT4,
  CARRY,
  BORROW,
  BFE_U32, // Extract range of bits with zero extension to 32 bits.
  BFE_I32, // Extract range of bits with sign extension to 32 bits.
  BFI,     // (src0 & src1) | (~src0 & src2)
  BFM,     // Insert a range of bits into a 32-bit word.
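
  // Illustrative semantics: BFE_U32(src, offset, width) computes
  // (src >> offset) & ((1 << width) - 1); BFI(src0, src1, src2) takes bits of
  // src1 where src0 is set and bits of src2 where it is clear.
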
  FFBH_U32, // ctlz with -1 if input is zero.
  FFBH_I32,
  FFBL_B32, // cttz with -1 if input is zero.
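
  // Unlike ISD::CTLZ_ZERO_UNDEF / ISD::CTTZ_ZERO_UNDEF, these are defined at
  // zero: FFBH_U32(0) and FFBL_B32(0) both yield -1, matching the hardware
  // v_ffbh / v_ffbl behavior.
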
  MUL_U24,
  MUL_I24,
  MULHI_U24,
  MULHI_I24,
  MAD_U24,
  MAD_I24,
  MAD_U64_U32,
  MAD_I64_I32,
  PERM,
  TEXTURE_FETCH,
  R600_EXPORT,
  CONST_ADDRESS,
  REGISTER_LOAD,
  REGISTER_STORE,
  SAMPLE,
  SAMPLEB,
  SAMPLED,
  SAMPLEL,

  // These cvt_f32_ubyte* nodes need to remain consecutive and in order.
  CVT_F32_UBYTE0,
  CVT_F32_UBYTE1,
  CVT_F32_UBYTE2,
  CVT_F32_UBYTE3,
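
  // Example: CVT_F32_UBYTE2 converts byte 2 of its operand, so applied to
  // 0x40302010 it produces 48.0f (0x30 = 48), as v_cvt_f32_ubyte2 does.
  // Keeping the four opcodes consecutive lets combines select the variant as
  // CVT_F32_UBYTE0 + byte index.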

  // Convert two f32 values into a single register holding two packed f16 with
  // round to zero.
  CVT_PKRTZ_F16_F32,
  CVT_PKNORM_I16_F32,
  CVT_PKNORM_U16_F32,
  CVT_PK_I16_I32,
  CVT_PK_U16_U32,
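
  // Example: CVT_PKRTZ_F16_F32(1.0f, 2.0f) rounds each input to f16 toward
  // zero and packs the results as 0x40003C00 (1.0 -> 0x3C00 in the low half,
  // 2.0 -> 0x4000 in the high half), as v_cvt_pkrtz_f16_f32 does.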

  // Same as the standard node, except the high bits of the resulting integer
  // are known 0.
  FP_TO_FP16,

  /// This node is for VLIW targets and it is used to represent a vector
  /// that is stored in consecutive registers with the same channel.
  /// For example:
  ///   |X  |Y|Z|W|
  /// T0|v.x| | | |
  /// T1|v.y| | | |
  /// T2|v.z| | | |
  /// T3|v.w| | | |
  BUILD_VERTICAL_VECTOR,
  /// Pointer to the start of the shader's constant data.
  CONST_DATA_PTR,
  PC_ADD_REL_OFFSET,
  LDS,
  FPTRUNC_ROUND_UPWARD,
  FPTRUNC_ROUND_DOWNWARD,

  DUMMY_CHAIN,
  FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LOAD_D16_HI,
  LOAD_D16_LO,
  LOAD_D16_HI_I8,
  LOAD_D16_HI_U8,
  LOAD_D16_LO_I8,
  LOAD_D16_LO_U8,

  STORE_MSKOR,
  LOAD_CONSTANT,
  TBUFFER_STORE_FORMAT,
  TBUFFER_STORE_FORMAT_D16,
  TBUFFER_LOAD_FORMAT,
  TBUFFER_LOAD_FORMAT_D16,
  DS_ORDERED_COUNT,
  ATOMIC_CMP_SWAP,
  ATOMIC_INC,
  ATOMIC_DEC,
  ATOMIC_LOAD_FMIN,
  ATOMIC_LOAD_FMAX,
  BUFFER_LOAD,
  BUFFER_LOAD_UBYTE,
  BUFFER_LOAD_USHORT,
  BUFFER_LOAD_BYTE,
  BUFFER_LOAD_SHORT,
  BUFFER_LOAD_FORMAT,
  BUFFER_LOAD_FORMAT_D16,
  SBUFFER_LOAD,
  BUFFER_STORE,
  BUFFER_STORE_BYTE,
  BUFFER_STORE_SHORT,
  BUFFER_STORE_FORMAT,
  BUFFER_STORE_FORMAT_D16,
  BUFFER_ATOMIC_SWAP,
  BUFFER_ATOMIC_ADD,
  BUFFER_ATOMIC_SUB,
  BUFFER_ATOMIC_SMIN,
  BUFFER_ATOMIC_UMIN,
  BUFFER_ATOMIC_SMAX,
  BUFFER_ATOMIC_UMAX,
  BUFFER_ATOMIC_AND,
  BUFFER_ATOMIC_OR,
  BUFFER_ATOMIC_XOR,
  BUFFER_ATOMIC_INC,
  BUFFER_ATOMIC_DEC,
  BUFFER_ATOMIC_CMPSWAP,
  BUFFER_ATOMIC_CSUB,
  BUFFER_ATOMIC_FADD,
  BUFFER_ATOMIC_FMIN,
  BUFFER_ATOMIC_FMAX,

  LAST_AMDGPU_ISD_NUMBER
};

} // End namespace AMDGPUISD

} // End namespace llvm

#endif