//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPU.h"
#include "SIDefines.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
#undef GET_INSTRMAP_INFO
#undef GET_INSTRINFO_NAMED_OPS

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  Dst &= ~(1 << Shift) & ~getBitMask(Shift, Width);
  Dst |= (Src << Shift) & getBitMask(Shift, Width);
  return Dst;
}

/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}

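// Worked example for the helpers above: with Shift = 4 and Width = 3,
//   getBitMask(4, 3)          == 0x70
//   packBits(5, 0xffff, 4, 3) == 0xffdf  (field [6:4] overwritten with 5)
//   unpackBits(0xffdf, 4, 3)  == 5
// i.e. packBits/unpackBits round-trip a field without touching other bits.
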
/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo() { return 0; }

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth() { return 4; }

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi() { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi() { return 2; }

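// Taken together, the getters above describe the s_waitcnt bit layout:
//   vmcnt (low bits)   bits [3:0]
//   expcnt             bits [6:4]
//   lgkmcnt            bits [11:8]
//   vmcnt (high bits)  bits [15:14] (gfx9 and later only)
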
} // end namespace anonymous

namespace llvm {

namespace AMDGPU {

struct MIMGInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t MIMGEncoding;
  uint8_t VDataDwords;
  uint8_t VAddrDwords;
};

#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
                                             VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
  const MIMGInfo *NewInfo =
      getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
                          NewChannels, OrigInfo->VAddrDwords);
  return NewInfo ? NewInfo->Opcode : -1;
}

struct MUBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t dwords;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
};

#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMUBUFBaseOpcode(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMUBUFOpcode(unsigned BaseOpc, unsigned Dwords) {
  const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndDwords(BaseOpc, Dwords);
  return Info ? Info->Opcode : -1;
}

int getMUBUFDwords(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->dwords : 0;
}

bool getMUBUFHasVAddr(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMUBUFHasSrsrc(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMUBUFHasSoffset(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

namespace IsaInfo {

void streamIsaVersion(const MCSubtargetInfo *STI, raw_ostream &Stream) {
  auto TargetTriple = STI->getTargetTriple();
  auto Version = getIsaVersion(STI->getCPU());

  Stream << TargetTriple.getArchName() << '-'
         << TargetTriple.getVendorName() << '-'
         << TargetTriple.getOSName() << '-'
         << TargetTriple.getEnvironmentName() << '-'
         << "gfx"
         << Version.Major
         << Version.Minor
         << Version.Stepping;

  if (hasXNACK(*STI))
    Stream << "+xnack";
  if (hasSRAMECC(*STI))
    Stream << "+sram-ecc";
}

bool hasCodeObjectV3(const MCSubtargetInfo *STI) {
  return STI->getTargetTriple().getOS() == Triple::AMDHSA &&
         STI->getFeatureBits().test(FeatureCodeObjectV3);
}

unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
    return 16;
  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
    return 32768;
  if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}

unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);
  if (STI->getTargetTriple().getArch() != Triple::amdgcn)
    return 8;
  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}

unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI) {
  return getMaxWavesPerEU() * getEUsPerCU(STI);
}

unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize) {
  return getWavesPerWorkGroup(STI, FlatWorkGroupSize);
}

unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxWavesPerEU() {
  // FIXME: Need to take scratch memory into account.
  return 10;
}

unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize) {
  return alignTo(getMaxWavesPerCU(STI, FlatWorkGroupSize),
                 getEUsPerCU(STI)) / getEUsPerCU(STI);
}

unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 2048;
}

unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
  return alignTo(FlatWorkGroupSize, getWavefrontSize(STI)) /
         getWavefrontSize(STI);
}

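// For example, with a 64-wide wavefront, a flat work group size of 200 rounds
// up to alignTo(200, 64) == 256 and therefore occupies 4 waves.
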
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
  return 8;
}

unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU())
    return 0;

  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
}

unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(STI->getCPU());
  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;

    if (FlatScrUsed)
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed) {
  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
                          STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
}

unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
  NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
  // SGPRBlocks is actual number of SGPR blocks minus 1.
  return NumSGPRs / getSGPREncodingGranule(STI) - 1;
}

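// For example, with an encoding granule of 8, NumSGPRs == 10 rounds up to
// alignTo(10, 8) == 16 and is emitted as 16 / 8 - 1 == 1.
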
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI) {
  return 4;
}

unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI) {
  return getVGPRAllocGranule(STI);
}

unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
  return 256;
}

unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
  return getTotalNumVGPRs(STI);
}

unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU())
    return 0;
  unsigned MinNumVGPRs =
      alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1),
                getVGPRAllocGranule(STI)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI));
}

unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
                                   getVGPRAllocGranule(STI));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs) {
  NumVGPRs = alignTo(std::max(1u, NumVGPRs), getVGPREncodingGranule(STI));
  // VGPRBlocks is actual number of VGPR blocks minus 1.
  return NumVGPRs / getVGPREncodingGranule(STI) - 1;
}

} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 2;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = Version.Major;
  Header.amd_machine_version_minor = Version.Minor;
  Header.amd_machine_version_stepping = Version.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  // wavefront_size is specified as a power of 2: 2^6 = 64 threads.
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value must
  // be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;
}

amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor() {
  amdhsa::kernel_descriptor_t KD;
  memset(&KD, 0, sizeof(KD));
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
                  amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
                  amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
  return KD;
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
         GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getOS() != Triple::AMDHSA;
}

int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}

std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}

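// For example, an attribute string of the form "128,256" (as used by
// attributes such as "amdgpu-flat-work-group-size") parses to the pair
// (128, 256).
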
unsigned getVmcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

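// For example: pre-gfx9 targets get a 4-bit mask (0xf); gfx9 and later get
// 0xf | (0x3 << 4) == 0x3f, i.e. a 6-bit vmcnt value.
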
unsigned getExpcntBitMask(const IsaVersion &Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

unsigned getLgkmcntBitMask(const IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth()) - 1;
}

unsigned getWaitcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(), getLgkmcntBitWidth());
  unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
  if (Version.Major < 9)
    return Waitcnt;

  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
  return Waitcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo =
      unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi =
      unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
  VmcntHi <<= getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

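// For example, on gfx9 a vmcnt value of 0x2b is stored split, 0xb in
// Waitcnt[3:0] and 0x2 in Waitcnt[15:14]; the shift above re-joins the two
// halves into the full 6-bit value.
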
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
  Waitcnt Decoded;
  Decoded.VmCnt = decodeVmcnt(Version, Encoded);
  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
  Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
  return Decoded;
}

unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt =
      packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return Waitcnt;

  Vmcnt >>= getVmcntBitWidthLo();
  return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
}

unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

unsigned encodeWaitcnt(const IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}

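// Starting from getWaitcntBitMask(Version) means every counter field begins
// at its maximum ("no wait") value before being overwritten; for example,
// encodeWaitcnt(Version, 0, 0, 0) == 0 on gfx9, a wait for all counters to
// drain.
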
unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
}

unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool isShader(CallingConv::ID cc) {
  switch(cc) {
    case CallingConv::AMDGPU_VS:
    case CallingConv::AMDGPU_LS:
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_ES:
    case CallingConv::AMDGPU_GS:
    case CallingConv::AMDGPU_PS:
    case CallingConv::AMDGPU_CS:
      return true;
    default:
      return false;
  }
}

bool isCompute(CallingConv::ID cc) {
  return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
}

bool isEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
    return true;
  default:
    return false;
  }
}

bool hasXNACK(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
}

bool hasSRAMECC(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
}

bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128];
}

bool hasPackedD16(const MCSubtargetInfo &STI) {
  return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem];
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool isGFX9(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
}

bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, 1);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
    Reg == AMDGPU::SCC;
}

bool isRegIntersect(unsigned Reg0, unsigned Reg1, const MCRegisterInfo* TRI) {
  for (MCRegAliasIterator R(Reg0, TRI, true); R.isValid(); ++R) {
    if (*R == Reg1) return true;
  }
  return false;
}

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch(Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9(TTMP0) \
  CASE_VI_GFX9(TTMP1) \
  CASE_VI_GFX9(TTMP2) \
  CASE_VI_GFX9(TTMP3) \
  CASE_VI_GFX9(TTMP4) \
  CASE_VI_GFX9(TTMP5) \
  CASE_VI_GFX9(TTMP6) \
  CASE_VI_GFX9(TTMP7) \
  CASE_VI_GFX9(TTMP8) \
  CASE_VI_GFX9(TTMP9) \
  CASE_VI_GFX9(TTMP10) \
  CASE_VI_GFX9(TTMP11) \
  CASE_VI_GFX9(TTMP12) \
  CASE_VI_GFX9(TTMP13) \
  CASE_VI_GFX9(TTMP14) \
  CASE_VI_GFX9(TTMP15) \
  CASE_VI_GFX9(TTMP0_TTMP1) \
  CASE_VI_GFX9(TTMP2_TTMP3) \
  CASE_VI_GFX9(TTMP4_TTMP5) \
  CASE_VI_GFX9(TTMP6_TTMP7) \
  CASE_VI_GFX9(TTMP8_TTMP9) \
  CASE_VI_GFX9(TTMP10_TTMP11) \
  CASE_VI_GFX9(TTMP12_TTMP13) \
  CASE_VI_GFX9(TTMP14_TTMP15) \
  CASE_VI_GFX9(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  }

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
  case node: return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9(node) \
  case node: return isGFX9(STI) ? node##_gfx9 : node##_vi;

unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  if (STI.getTargetTriple().getArch() == Triple::r600)
    return Reg;
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9

#define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
#define CASE_VI_GFX9(node) case node##_vi: case node##_gfx9: return node;

unsigned mc2PseudoReg(unsigned Reg) {
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
    return 64;
  case AMDGPU::VReg_96RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
    return 128;
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
    return 256;
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
    return 512;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
}

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi);
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (Literal >= -16 && Literal <= 64)
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}

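// For example, 0x3c003c00 (both halves 1.0) is an inlinable v2f16 literal,
// while 0x3c004000 (halves 1.0 and 2.0) is not, since the halves differ.
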
bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
    // Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  if (isGCN3Encoding(ST))
    return ByteOffset;
  return ByteOffset >> 2;
}

bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
  return isGCN3Encoding(ST) ?
    isUInt<20>(EncodedOffset) : isUInt<8>(EncodedOffset);
}

// Given Imm, split it into the values to put into the SOffset and ImmOffset
// fields in an MUBUF instruction. Return false if it is not possible (due to a
// hardware bug needing a workaround).
//
// The required alignment ensures that individual address components remain
// aligned if they are aligned to begin with. It also ensures that additional
// offsets within the given alignment can be added to the resulting ImmOffset.
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
                      const GCNSubtarget *Subtarget, uint32_t Align) {
  const uint32_t MaxImm = alignDown(4095, Align);
  uint32_t Overflow = 0;

  if (Imm > MaxImm) {
    if (Imm <= MaxImm + 64) {
      // Use an SOffset inline constant for 4..64
      Overflow = Imm - MaxImm;
      Imm = MaxImm;
    } else {
      // Try to keep the same value in SOffset for adjacent loads, so that
      // the corresponding register contents can be re-used.
      //
      // Load values with all low-bits (except for alignment bits) set into
      // SOffset, so that a larger range of values can be covered using
      // s_movk_i32.
      //
      // Atomic operations fail to work correctly when individual address
      // components are unaligned, even if their sum is aligned.
      uint32_t High = (Imm + Align) & ~4095;
      uint32_t Low = (Imm + Align) & 4095;
      Imm = Low;
      Overflow = High - Align;
    }
  }

  // There is a hardware bug in SI and CI which prevents address clamping in
  // MUBUF instructions from working correctly with SOffsets. The immediate
  // offset is unaffected.
  if (Overflow > 0 &&
      Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  ImmOffset = Imm;
  SOffset = Overflow;
  return true;
}

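// Worked example: Imm == 4100 with Align == 4 gives MaxImm == 4092; since
// 4100 <= MaxImm + 64, the split is ImmOffset == 4092 and SOffset == 8 (an
// SOffset inline constant). On SI/CI the same split fails because
// Overflow > 0.
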
namespace {

struct SourceOfDivergence {
  unsigned Intr;
};
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#include "AMDGPUGenSearchableTables.inc"

} // end anonymous namespace

bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  return lookupSourceOfDivergence(IntrID);
}

} // namespace AMDGPU

} // namespace llvm