//===- AMDGPUBaseInfo.h - Top level definitions for AMDGPU ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPUBASEINFO_H
#define LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPUBASEINFO_H

#include "AMDGPU.h"
#include "AMDKernelCodeT.h"
#include "SIDefines.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetParser.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <utility>

namespace llvm {

class Argument;
class AMDGPUSubtarget;
class FeatureBitset;
class Function;
class GCNSubtarget;
class GlobalValue;
class MCContext;
class MCRegisterClass;
class MCRegisterInfo;
class MCSection;
class MCSubtargetInfo;
class MachineMemOperand;
class raw_ostream;
class Triple;

namespace AMDGPU {

#define GET_MIMGBaseOpcode_DECL
#define GET_MIMGDim_DECL
#define GET_MIMGEncoding_DECL
#define GET_MIMGLZMapping_DECL
#define GET_MIMGMIPMapping_DECL
#include "AMDGPUGenSearchableTables.inc"

namespace IsaInfo {

enum {
  // The closed Vulkan driver sets 96, which limits the wave count to 8 but
  // doesn't spill SGPRs as much as when 80 is set.
  FIXED_NUM_SGPRS_FOR_INIT_BUG = 96,
  TRAP_NUM_SGPRS = 16
};

/// Streams isa version string for given subtarget \p STI into \p Stream.
void streamIsaVersion(const MCSubtargetInfo *STI, raw_ostream &Stream);

/// \returns True if given subtarget \p STI supports code object version 3,
/// false otherwise.
bool hasCodeObjectV3(const MCSubtargetInfo *STI);

/// \returns Wavefront size for given subtarget \p STI.
unsigned getWavefrontSize(const MCSubtargetInfo *STI);

/// \returns Local memory size in bytes for given subtarget \p STI.
unsigned getLocalMemorySize(const MCSubtargetInfo *STI);

/// \returns Number of execution units per compute unit for given subtarget \p
/// STI.
unsigned getEUsPerCU(const MCSubtargetInfo *STI);

/// \returns Maximum number of work groups per compute unit for given subtarget
/// \p STI and limited by given \p FlatWorkGroupSize.
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize);

/// \returns Maximum number of waves per compute unit for given subtarget \p
/// STI without any kind of limitation.
unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI);

/// \returns Maximum number of waves per compute unit for given subtarget \p
/// STI and limited by given \p FlatWorkGroupSize.
unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize);

/// \returns Minimum number of waves per execution unit for given subtarget \p
/// STI.
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI);

/// \returns Maximum number of waves per execution unit for given subtarget \p
/// STI without any kind of limitation.
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI);

/// \returns Maximum number of waves per execution unit for given subtarget \p
/// STI and limited by given \p FlatWorkGroupSize.
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize);

/// \returns Minimum flat work group size for given subtarget \p STI.
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI);

/// \returns Maximum flat work group size for given subtarget \p STI.
unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI);

/// \returns Number of waves per work group for given subtarget \p STI and
/// limited by given \p FlatWorkGroupSize.
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize);

/// \returns SGPR allocation granularity for given subtarget \p STI.
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI);

/// \returns SGPR encoding granularity for given subtarget \p STI.
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI);

/// \returns Total number of SGPRs for given subtarget \p STI.
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI);

/// \returns Addressable number of SGPRs for given subtarget \p STI.
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI);

/// \returns Minimum number of SGPRs that meets the given number of waves per
/// execution unit requirement for given subtarget \p STI.
unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU);

/// \returns Maximum number of SGPRs that meets the given number of waves per
/// execution unit requirement for given subtarget \p STI.
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        bool Addressable);

/// \returns Number of extra SGPRs implicitly required by given subtarget \p
/// STI when the given special registers are used.
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed);

/// \returns Number of extra SGPRs implicitly required by given subtarget \p
/// STI when the given special registers are used. XNACK is inferred from
/// \p STI.
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed);

/// \returns Number of SGPR blocks needed for given subtarget \p STI when
/// \p NumSGPRs are used. \p NumSGPRs should already include any special
/// register counts.
unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs);
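
// A minimal usage sketch (illustrative only; STI is assumed to be an
// MCSubtargetInfo pointer in scope and the usage counts are hypothetical
// placeholders): the SGPR helpers above compose as follows when computing a
// kernel's SGPR block count.
//
//   unsigned NumSGPRs = 32; // e.g. from register allocation
//   NumSGPRs += getNumExtraSGPRs(STI, /*VCCUsed=*/true,
//                                /*FlatScrUsed=*/false);
//   unsigned Blocks = getNumSGPRBlocks(STI, NumSGPRs);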

/// \returns VGPR allocation granularity for given subtarget \p STI.
///
/// For subtargets which support it, \p EnableWavefrontSize32 should match
/// the ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
                             Optional<bool> EnableWavefrontSize32 = None);

/// \returns VGPR encoding granularity for given subtarget \p STI.
///
/// For subtargets which support it, \p EnableWavefrontSize32 should match
/// the ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
                                Optional<bool> EnableWavefrontSize32 = None);

/// \returns Total number of VGPRs for given subtarget \p STI.
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI);

/// \returns Addressable number of VGPRs for given subtarget \p STI.
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI);

/// \returns Minimum number of VGPRs that meets given number of waves per
/// execution unit requirement for given subtarget \p STI.
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU);

/// \returns Maximum number of VGPRs that meets given number of waves per
/// execution unit requirement for given subtarget \p STI.
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU);

/// \returns Number of VGPR blocks needed for given subtarget \p STI when
/// \p NumVGPRs are used.
///
/// For subtargets which support it, \p EnableWavefrontSize32 should match the
/// ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
                          Optional<bool> EnableWavefrontSize32 = None);

} // end namespace IsaInfo

LLVM_READONLY
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx);

LLVM_READONLY
int getSOPPWithRelaxation(uint16_t Opcode);

struct MIMGBaseOpcodeInfo {
  MIMGBaseOpcode BaseOpcode;
  bool Store;
  bool Atomic;
  bool AtomicX2;
  bool Sampler;
  bool Gather4;

  uint8_t NumExtraArgs;
  bool Gradients;
  bool Coordinates;
  bool LodOrClampOrMip;
  bool HasD16;
};

LLVM_READONLY
const MIMGBaseOpcodeInfo *getMIMGBaseOpcodeInfo(unsigned BaseOpcode);

struct MIMGDimInfo {
  MIMGDim Dim;
  uint8_t NumCoords;
  uint8_t NumGradients;
  bool DA;
  uint8_t Encoding;
  const char *AsmSuffix;
};

LLVM_READONLY
const MIMGDimInfo *getMIMGDimInfo(unsigned DimEnum);

LLVM_READONLY
const MIMGDimInfo *getMIMGDimInfoByEncoding(uint8_t DimEnc);

LLVM_READONLY
const MIMGDimInfo *getMIMGDimInfoByAsmSuffix(StringRef AsmSuffix);

struct MIMGLZMappingInfo {
  MIMGBaseOpcode L;
  MIMGBaseOpcode LZ;
};

struct MIMGMIPMappingInfo {
  MIMGBaseOpcode MIP;
  MIMGBaseOpcode NONMIP;
};

LLVM_READONLY
const MIMGLZMappingInfo *getMIMGLZMappingInfo(unsigned L);

LLVM_READONLY
const MIMGMIPMappingInfo *getMIMGMIPMappingInfo(unsigned MIP);

LLVM_READONLY
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords);

LLVM_READONLY
int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels);
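
// An illustrative lookup (a sketch; BaseOpcode and MIMGEncoding are assumed
// to be in scope, and the -1 "not found" sentinel is an assumption about the
// generated table lookup): ask for a variant of the same base opcode with
// different data/address widths.
//
//   int NewOpc = getMIMGOpcode(BaseOpcode, MIMGEncoding,
//                              /*VDataDwords=*/2, /*VAddrDwords=*/4);
//   if (NewOpc != -1) { /* use the rewritten opcode */ }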

struct MIMGInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t MIMGEncoding;
  uint8_t VDataDwords;
  uint8_t VAddrDwords;
};

LLVM_READONLY
const MIMGInfo *getMIMGInfo(unsigned Opc);

LLVM_READONLY
int getMUBUFBaseOpcode(unsigned Opc);

LLVM_READONLY
int getMUBUFOpcode(unsigned BaseOpc, unsigned Dwords);

LLVM_READONLY
int getMUBUFDwords(unsigned Opc);

LLVM_READONLY
bool getMUBUFHasVAddr(unsigned Opc);

LLVM_READONLY
bool getMUBUFHasSrsrc(unsigned Opc);

LLVM_READONLY
bool getMUBUFHasSoffset(unsigned Opc);

LLVM_READONLY
int getMCOpcode(uint16_t Opcode, unsigned Gen);

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const MCSubtargetInfo *STI);

amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
    const MCSubtargetInfo *STI);

bool isGroupSegment(const GlobalValue *GV);
bool isGlobalSegment(const GlobalValue *GV);
bool isReadOnlySegment(const GlobalValue *GV);

/// \returns True if constants should be emitted to .text section for given
/// target triple \p TT, false otherwise.
bool shouldEmitConstantsToTextSection(const Triple &TT);

/// \returns Integer value requested using \p F's \p Name attribute.
///
/// \returns \p Default if attribute is not present.
///
/// \returns \p Default and emits error if requested value cannot be converted
/// to integer.
int getIntegerAttribute(const Function &F, StringRef Name, int Default);

/// \returns A pair of integer values requested using \p F's \p Name attribute
/// in "first[,second]" format ("second" is optional if \p OnlyFirstRequired
/// is true).
///
/// \returns \p Default if attribute is not present.
///
/// \returns \p Default and emits error if one of the requested values cannot
/// be converted to integer, or \p OnlyFirstRequired is false and "second"
/// value is not present.
std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired = false);
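
// Illustrative use (a sketch, not a normative example; F is an assumed
// in-scope Function reference and the default range is a placeholder):
// attributes such as "amdgpu-flat-work-group-size" use the "first[,second]"
// form described above, e.g. "128,256", and can be read as:
//
//   std::pair<int, int> Range =
//       getIntegerPairAttribute(F, "amdgpu-flat-work-group-size",
//                               /*Default=*/{1, 1024});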

/// Represents the counter values to wait for in an s_waitcnt instruction.
///
/// Large values (including the maximum possible integer) can be used to
/// represent "don't care" waits.
struct Waitcnt {
  unsigned VmCnt = ~0u;
  unsigned ExpCnt = ~0u;
  unsigned LgkmCnt = ~0u;
  unsigned VsCnt = ~0u;

  Waitcnt() {}
  Waitcnt(unsigned VmCnt, unsigned ExpCnt, unsigned LgkmCnt, unsigned VsCnt)
      : VmCnt(VmCnt), ExpCnt(ExpCnt), LgkmCnt(LgkmCnt), VsCnt(VsCnt) {}

  static Waitcnt allZero(const IsaVersion &Version) {
    return Waitcnt(0, 0, 0, Version.Major >= 10 ? 0 : ~0u);
  }

  static Waitcnt allZeroExceptVsCnt() { return Waitcnt(0, 0, 0, ~0u); }

  bool hasWait() const {
    return VmCnt != ~0u || ExpCnt != ~0u || LgkmCnt != ~0u || VsCnt != ~0u;
  }

  bool dominates(const Waitcnt &Other) const {
    return VmCnt <= Other.VmCnt && ExpCnt <= Other.ExpCnt &&
           LgkmCnt <= Other.LgkmCnt && VsCnt <= Other.VsCnt;
  }

  Waitcnt combined(const Waitcnt &Other) const {
    return Waitcnt(std::min(VmCnt, Other.VmCnt), std::min(ExpCnt, Other.ExpCnt),
                   std::min(LgkmCnt, Other.LgkmCnt),
                   std::min(VsCnt, Other.VsCnt));
  }
};
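
// A small worked example (illustrative only): combining two waits keeps the
// strictest (smallest) value of each counter, so the result dominates both
// inputs.
//
//   Waitcnt A(/*VmCnt=*/0, /*ExpCnt=*/~0u, /*LgkmCnt=*/~0u, /*VsCnt=*/~0u);
//   Waitcnt B(/*VmCnt=*/~0u, /*ExpCnt=*/0, /*LgkmCnt=*/~0u, /*VsCnt=*/~0u);
//   Waitcnt C = A.combined(B); // VmCnt == 0, ExpCnt == 0, rest "don't care"
//   assert(C.dominates(A) && C.dominates(B));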

/// \returns Vmcnt bit mask for given isa \p Version.
unsigned getVmcntBitMask(const IsaVersion &Version);

/// \returns Expcnt bit mask for given isa \p Version.
unsigned getExpcntBitMask(const IsaVersion &Version);

/// \returns Lgkmcnt bit mask for given isa \p Version.
unsigned getLgkmcntBitMask(const IsaVersion &Version);

/// \returns Waitcnt bit mask for given isa \p Version.
unsigned getWaitcntBitMask(const IsaVersion &Version);

/// \returns Decoded Vmcnt from given \p Waitcnt for given isa \p Version.
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt);

/// \returns Decoded Expcnt from given \p Waitcnt for given isa \p Version.
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt);

/// \returns Decoded Lgkmcnt from given \p Waitcnt for given isa \p Version.
unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt);

/// Decodes Vmcnt, Expcnt and Lgkmcnt from given \p Waitcnt for given isa
/// \p Version, and writes decoded values into \p Vmcnt, \p Expcnt and
/// \p Lgkmcnt respectively.
///
/// \details \p Vmcnt, \p Expcnt and \p Lgkmcnt are decoded as follows:
///     \p Vmcnt = \p Waitcnt[3:0]                      (pre-gfx9 only)
///     \p Vmcnt = \p Waitcnt[3:0] | \p Waitcnt[15:14]  (gfx9+ only)
///     \p Expcnt = \p Waitcnt[6:4]
///     \p Lgkmcnt = \p Waitcnt[11:8]                   (pre-gfx10 only)
///     \p Lgkmcnt = \p Waitcnt[13:8]                   (gfx10+ only)
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt);

Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded);
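
// A worked decode of the layout above, assuming a pre-gfx9 IsaVersion named
// Version is in scope (so Vmcnt is Waitcnt[3:0]): for an encoded value of
// 0x74, Vmcnt = 0x4, Expcnt = 0x7 and Lgkmcnt = 0x0.
//
//   unsigned Vmcnt, Expcnt, Lgkmcnt;
//   decodeWaitcnt(Version, 0x74, Vmcnt, Expcnt, Lgkmcnt);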

/// \returns \p Waitcnt with encoded \p Vmcnt for given isa \p Version.
unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt);

/// \returns \p Waitcnt with encoded \p Expcnt for given isa \p Version.
unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt);

/// \returns \p Waitcnt with encoded \p Lgkmcnt for given isa \p Version.
unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt);

/// Encodes \p Vmcnt, \p Expcnt and \p Lgkmcnt into Waitcnt for given isa
/// \p Version.
///
/// \details \p Vmcnt, \p Expcnt and \p Lgkmcnt are encoded as follows:
///     Waitcnt[3:0]   = \p Vmcnt      (pre-gfx9 only)
///     Waitcnt[3:0]   = \p Vmcnt[3:0] (gfx9+ only)
///     Waitcnt[6:4]   = \p Expcnt
///     Waitcnt[11:8]  = \p Lgkmcnt    (pre-gfx10 only)
///     Waitcnt[13:8]  = \p Lgkmcnt    (gfx10+ only)
///     Waitcnt[15:14] = \p Vmcnt[5:4] (gfx9+ only)
///
/// \returns Waitcnt with encoded \p Vmcnt, \p Expcnt and \p Lgkmcnt for given
/// isa \p Version.
unsigned encodeWaitcnt(const IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt);

unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded);
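
// Conversely (again assuming a pre-gfx9 Version in scope), encoding
// Vmcnt = 4, Expcnt = 7 and Lgkmcnt = 0 produces 0x74 per the layout above:
//
//   unsigned Enc = encodeWaitcnt(Version, /*Vmcnt=*/4, /*Expcnt=*/7,
//                                /*Lgkmcnt=*/0);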

namespace Hwreg {

LLVM_READONLY
int64_t getHwregId(StringRef Name);

LLVM_READNONE
bool isValidHwreg(int64_t Id, const MCSubtargetInfo &STI);

LLVM_READNONE
bool isValidHwreg(int64_t Id);

LLVM_READNONE
bool isValidHwregOffset(int64_t Offset);

LLVM_READNONE
bool isValidHwregWidth(int64_t Width);

LLVM_READNONE
uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width);

LLVM_READNONE
StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI);

void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width);

} // namespace Hwreg

namespace SendMsg {

LLVM_READONLY
int64_t getMsgId(StringRef Name);

LLVM_READONLY
int64_t getMsgOpId(int64_t MsgId, StringRef Name);

LLVM_READNONE
StringRef getMsgName(int64_t MsgId);

LLVM_READNONE
StringRef getMsgOpName(int64_t MsgId, int64_t OpId);

LLVM_READNONE
bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI,
                  bool Strict = true);

LLVM_READNONE
bool isValidMsgOp(int64_t MsgId, int64_t OpId, bool Strict = true);

LLVM_READNONE
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
                      bool Strict = true);

LLVM_READNONE
bool msgRequiresOp(int64_t MsgId);

LLVM_READNONE
bool msgSupportsStream(int64_t MsgId, int64_t OpId);

void decodeMsg(unsigned Val,
               uint16_t &MsgId,
               uint16_t &OpId,
               uint16_t &StreamId);

LLVM_READNONE
uint64_t encodeMsg(uint64_t MsgId,
                   uint64_t OpId,
                   uint64_t StreamId);

} // namespace SendMsg

unsigned getInitialPSInputAddr(const Function &F);

LLVM_READNONE
bool isShader(CallingConv::ID CC);

LLVM_READNONE
bool isCompute(CallingConv::ID CC);

LLVM_READNONE
bool isEntryFunctionCC(CallingConv::ID CC);

// FIXME: Remove this when calling conventions are cleaned up.
LLVM_READNONE
inline bool isKernel(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  default:
    return false;
  }
}

bool hasXNACK(const MCSubtargetInfo &STI);
bool hasSRAMECC(const MCSubtargetInfo &STI);
bool hasMIMG_R128(const MCSubtargetInfo &STI);
bool hasPackedD16(const MCSubtargetInfo &STI);

bool isSI(const MCSubtargetInfo &STI);
bool isCI(const MCSubtargetInfo &STI);
bool isVI(const MCSubtargetInfo &STI);
bool isGFX9(const MCSubtargetInfo &STI);
bool isGFX10(const MCSubtargetInfo &STI);

/// Is \p Reg a scalar register?
bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI);

/// Do \p Reg0 and \p Reg1 overlap (share any hardware register)?
bool isRegIntersect(unsigned Reg0, unsigned Reg1, const MCRegisterInfo* TRI);

/// If \p Reg is a pseudo reg, return the correct hardware register given
/// \p STI otherwise return \p Reg.
unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI);

/// Convert hardware register \p Reg to a pseudo register.
LLVM_READNONE
unsigned mc2PseudoReg(unsigned Reg);

/// Can this operand also contain immediate values?
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo);

/// Is this a floating-point operand?
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo);

/// Does this operand support only inlinable literals?
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo);

/// Get the size in bits of a register from the register class with ID \p RCID.
unsigned getRegBitWidth(unsigned RCID);

/// Get the size in bits of a register from the register class \p RC.
unsigned getRegBitWidth(const MCRegisterClass &RC);

/// Get the size of a register operand.
unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo);

LLVM_READNONE
inline unsigned getOperandSize(const MCOperandInfo &OpInfo) {
  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
    return 4;

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    return 8;

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
    return 2;

  default:
    llvm_unreachable("unhandled operand type");
  }
}

LLVM_READNONE
inline unsigned getOperandSize(const MCInstrDesc &Desc, unsigned OpNo) {
  return getOperandSize(Desc.OpInfo[OpNo]);
}

/// Is this literal inlinable?
LLVM_READNONE
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi);

LLVM_READNONE
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi);

LLVM_READNONE
bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi);

LLVM_READNONE
bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi);
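
// For orientation (a hedged summary; the subtarget ISA documents are
// authoritative): the inlinable literals are the integers -16..64, the
// floating-point constants 0.0, +/-0.5, +/-1.0, +/-2.0 and +/-4.0 in the
// operand's format, and 1/(2*pi) when \p HasInv2Pi is true. For example,
// passing the bit pattern of 1.0f:
//
//   bool Inlinable = isInlinableLiteral32(0x3f800000, /*HasInv2Pi=*/false);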

bool isArgPassedInSGPR(const Argument *Arg);

/// \returns The encoding that will be used for \p ByteOffset in the SMRD
/// offset field.
int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset);

/// \returns true if this offset is small enough to fit in the SMRD
/// offset field. \p ByteOffset should be the offset in bytes and
/// not the encoded offset.
bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset);
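
// Note the byte/encoded distinction above. As a hedged illustration (ST is
// an assumed in-scope subtarget): on generations that encode the SMRD offset
// in dwords, a ByteOffset of 16 is encoded as 4, while byte-addressed
// generations return it unchanged.
//
//   int64_t Enc = getSMRDEncodedOffset(ST, /*ByteOffset=*/16);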

bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
                      const GCNSubtarget *Subtarget, uint32_t Align = 4);

/// \returns true if the intrinsic is divergent
bool isIntrinsicSourceOfDivergence(unsigned IntrID);

// Track defaults for fields in the MODE register.
struct SIModeRegisterDefaults {
  /// Floating point opcodes that support exception flag gathering quiet and
  /// propagate signaling NaN inputs per IEEE 754-2008. Min_dx10 and max_dx10
  /// become IEEE 754-2008 compliant due to signaling NaN propagation and
  /// quieting.
  bool IEEE : 1;

  /// Used by the vector ALU to force DX10-style treatment of NaNs: when set,
  /// clamp NaN to zero; otherwise, pass NaN through.
  bool DX10Clamp : 1;

  // TODO: FP mode fields

  SIModeRegisterDefaults() :
    IEEE(true),
    DX10Clamp(true) {}

  SIModeRegisterDefaults(const Function &F);

  static SIModeRegisterDefaults getDefaultForCallingConv(CallingConv::ID CC) {
    SIModeRegisterDefaults Mode;
    Mode.DX10Clamp = true;
    Mode.IEEE = AMDGPU::isCompute(CC);
    return Mode;
  }

  bool operator ==(const SIModeRegisterDefaults Other) const {
    return IEEE == Other.IEEE && DX10Clamp == Other.DX10Clamp;
  }

  // FIXME: Inlining should be OK for dx10-clamp, since the caller's mode
  // should be able to override.
  bool isInlineCompatible(SIModeRegisterDefaults CalleeMode) const {
    return *this == CalleeMode;
  }
};
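
// A short usage sketch (illustrative): per getDefaultForCallingConv above,
// compute calling conventions default to IEEE mode while graphics shaders
// do not, and DX10 clamping defaults to on for both.
//
//   SIModeRegisterDefaults Mode =
//       SIModeRegisterDefaults::getDefaultForCallingConv(
//           CallingConv::AMDGPU_KERNEL);
//   // Mode.IEEE == true, Mode.DX10Clamp == true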

} // end namespace AMDGPU
} // end namespace llvm

#endif // LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPUBASEINFO_H