//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPU.h"
#include "SIDefines.h"
#include "AMDGPUAsmUtils.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
#undef GET_INSTRMAP_INFO
#undef GET_INSTRINFO_NAMED_OPS

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  Dst &= ~(1 << Shift) & ~getBitMask(Shift, Width);
  Dst |= (Src << Shift) & getBitMask(Shift, Width);
  return Dst;
}

/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}

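// Illustrative worked example (editor's note, not from the original source):
// with Shift = 4 and Width = 4, getBitMask(4, 4) == 0xF0. Packing the value 3
// into an empty destination, packBits(3, 0, 4, 4), clears bits 7:4 of Dst and
// ORs in (3 << 4) & 0xF0 == 0x30; unpackBits(0x30, 4, 4) recovers 3.
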
/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo() { return 0; }

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
  return (VersionMajor >= 10) ? 6 : 4;
}

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi() { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi() { return 2; }

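// Layout summary (editor's note, derived from the helpers above): on targets
// with the split vmcnt field (gfx9 and later), the waitcnt word holds
// vmcnt[3:0] at bits 3:0, expcnt at bits 6:4, lgkmcnt at bits 11:8 (widened
// to bits 13:8 on gfx10), and vmcnt[5:4] at bits 15:14.
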
} // end namespace anonymous

namespace llvm {

namespace AMDGPU {

#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
                                             VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
  const MIMGInfo *Info = getMIMGInfo(Opc);
  return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
  const MIMGInfo *NewInfo =
      getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
                          NewChannels, OrigInfo->VAddrDwords);
  return NewInfo ? NewInfo->Opcode : -1;
}

struct MUBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t elements;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
};

struct MTBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t elements;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
};

#define GET_MTBUFInfoTable_DECL
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMTBUFBaseOpcode(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MTBUFInfo *Info = getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

int getMTBUFElements(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->elements : 0;
}

bool getMTBUFHasVAddr(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMTBUFHasSrsrc(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMTBUFHasSoffset(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

int getMUBUFBaseOpcode(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

int getMUBUFElements(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->elements : 0;
}

bool getMUBUFHasVAddr(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMUBUFHasSrsrc(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMUBUFHasSoffset(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead of an enum.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

namespace IsaInfo {

void streamIsaVersion(const MCSubtargetInfo *STI, raw_ostream &Stream) {
  auto TargetTriple = STI->getTargetTriple();
  auto Version = getIsaVersion(STI->getCPU());

  Stream << TargetTriple.getArchName() << '-'
         << TargetTriple.getVendorName() << '-'
         << TargetTriple.getOSName() << '-'
         << TargetTriple.getEnvironmentName() << '-'
         << "gfx"
         << Version.Major
         << Version.Minor
         << Version.Stepping;

  if (hasXNACK(*STI))
    Stream << "+xnack";
  if (hasSRAMECC(*STI))
    Stream << "+sram-ecc";
}

bool hasCodeObjectV3(const MCSubtargetInfo *STI) {
  return STI->getTargetTriple().getOS() == Triple::AMDHSA &&
         STI->getFeatureBits().test(FeatureCodeObjectV3);
}

unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
    return 16;
  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
    return 32768;
  if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}

unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);
  if (STI->getTargetTriple().getArch() != Triple::amdgcn)
    return 8;
  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}

unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI) {
  return getMaxWavesPerEU(STI) * getEUsPerCU(STI);
}

unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize) {
  return getWavesPerWorkGroup(STI, FlatWorkGroupSize);
}

unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
  // FIXME: Need to take scratch memory into account.
  if (!isGFX10(*STI))
    return 10;
  return 20;
}

unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize) {
  return alignTo(getMaxWavesPerCU(STI, FlatWorkGroupSize),
                 getEUsPerCU(STI)) / getEUsPerCU(STI);
}

unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 2048;
}

unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
  return alignTo(FlatWorkGroupSize, getWavefrontSize(STI)) /
         getWavefrontSize(STI);
}

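// Worked example (editor's note, not from the original source): for
// FlatWorkGroupSize = 240 on a wave64 target, alignTo(240, 64) == 256, so the
// workgroup occupies 256 / 64 == 4 waves; a partial wave rounds up to a full
// one.
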
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return getAddressableNumSGPRs(STI);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
  return 8;
}

unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 106;
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 0;

  if (WavesPerEU >= getMaxWavesPerEU(STI))
    return 0;

  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
}

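// Worked example (editor's note, not from the original source): on a gfx8
// target (800 total SGPRs, allocation granule 16, no trap handler) with
// WavesPerEU == 8, 800 / 9 == 88 and alignDown(88, 16) + 1 == 81, so a kernel
// must use at least 81 SGPRs before occupancy can drop to 8 waves per EU.
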
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return ExtraSGPRs;

  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;

    if (FlatScrUsed)
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed) {
  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
                          STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
}

unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
  NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
  // SGPRBlocks is actual number of SGPR blocks minus 1.
  return NumSGPRs / getSGPREncodingGranule(STI) - 1;
}

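// Worked example (editor's note, not from the original source): with the
// SGPR encoding granule of 8, a kernel using 10 SGPRs rounds up to
// alignTo(10, 8) == 16 and encodes as 16 / 8 - 1 == 1.
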
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
                             Optional<bool> EnableWavefrontSize32) {
  bool IsWave32 = EnableWavefrontSize32 ?
      *EnableWavefrontSize32 :
      STI->getFeatureBits().test(FeatureWavefrontSize32);
  return IsWave32 ? 8 : 4;
}

unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
                                Optional<bool> EnableWavefrontSize32) {
  return getVGPRAllocGranule(STI, EnableWavefrontSize32);
}

unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
  if (!isGFX10(*STI))
    return 256;
  return STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1024 : 512;
}

unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
  return 256;
}

unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU(STI))
    return 0;
  unsigned MinNumVGPRs =
      alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1),
                getVGPRAllocGranule(STI)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI));
}

unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
                                   getVGPRAllocGranule(STI));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
                          Optional<bool> EnableWavefrontSize32) {
  NumVGPRs = alignTo(std::max(1u, NumVGPRs),
                     getVGPREncodingGranule(STI, EnableWavefrontSize32));
  // VGPRBlocks is actual number of VGPR blocks minus 1.
  return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1;
}

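// Worked example (editor's note, not from the original source): on a wave64
// target the VGPR encoding granule is 4, so 10 VGPRs round up to
// alignTo(10, 4) == 12 and encode as 12 / 4 - 1 == 2; on wave32 (granule 8)
// the same kernel would encode as 16 / 8 - 1 == 1.
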
} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 2;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = Version.Major;
  Header.amd_machine_version_minor = Version.Minor;
  Header.amd_machine_version_stepping = Version.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;

  if (Version.Major >= 10) {
    if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
      Header.wavefront_size = 5;
      Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
    }
    Header.compute_pgm_resource_registers |=
      S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
      S_00B848_MEM_ORDERED(1);
  }
}

amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
    const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  amdhsa::kernel_descriptor_t KD;
  memset(&KD, 0, sizeof(KD));

  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
                  amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
                  amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
  if (Version.Major >= 10) {
    AMDHSA_BITS_SET(KD.kernel_code_properties,
                    amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
                    STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
                    STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
  }
  return KD;
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
         GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getOS() == Triple::AMDPAL;
}

int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}

std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}

unsigned getVmcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned getExpcntBitMask(const IsaVersion &Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

unsigned getLgkmcntBitMask(const IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
}

unsigned getWaitcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(),
                                getLgkmcntBitWidth(Version.Major));
  unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
  if (Version.Major < 9)
    return Waitcnt;

  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
  return Waitcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo =
      unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi =
      unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
  VmcntHi <<= getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(),
                    getLgkmcntBitWidth(Version.Major));
}

void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
  Waitcnt Decoded;
  Decoded.VmCnt = decodeVmcnt(Version, Encoded);
  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
  Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
  return Decoded;
}

unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt =
      packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return Waitcnt;

  Vmcnt >>= getVmcntBitWidthLo();
  return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
}

unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(),
                  getLgkmcntBitWidth(Version.Major));
}

unsigned encodeWaitcnt(const IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}

unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
}

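// Worked example (editor's note, not from the original source): on gfx9,
// where vmcnt[3:0] sits at bits 3:0, expcnt at bits 6:4, and lgkmcnt at bits
// 11:8, encodeWaitcnt(Version, /*Vmcnt=*/3, /*Expcnt=*/1, /*Lgkmcnt=*/2)
// starts from the fully-set mask returned by getWaitcntBitMask and overwrites
// each field in turn, yielding 0x213.
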
//===----------------------------------------------------------------------===//
// hwreg
//===----------------------------------------------------------------------===//

namespace Hwreg {

int64_t getHwregId(const StringRef Name) {
  for (int Id = ID_SYMBOLIC_FIRST_; Id < ID_SYMBOLIC_LAST_; ++Id) {
    if (IdSymbolic[Id] && Name == IdSymbolic[Id])
      return Id;
  }
  return ID_UNKNOWN_;
}

static unsigned getLastSymbolicHwreg(const MCSubtargetInfo &STI) {
  if (isSI(STI) || isCI(STI) || isVI(STI))
    return ID_SYMBOLIC_FIRST_GFX9_;
  else if (isGFX9(STI))
    return ID_SYMBOLIC_FIRST_GFX10_;
  else
    return ID_SYMBOLIC_LAST_;
}

bool isValidHwreg(int64_t Id, const MCSubtargetInfo &STI) {
  return ID_SYMBOLIC_FIRST_ <= Id && Id < getLastSymbolicHwreg(STI) &&
         IdSymbolic[Id];
}

bool isValidHwreg(int64_t Id) {
  return 0 <= Id && isUInt<ID_WIDTH_>(Id);
}

bool isValidHwregOffset(int64_t Offset) {
  return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
}

bool isValidHwregWidth(int64_t Width) {
  return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
}

uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) {
  return (Id << ID_SHIFT_) |
         (Offset << OFFSET_SHIFT_) |
         ((Width - 1) << WIDTH_M1_SHIFT_);
}

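// Round-trip property (editor's note, not from the original source): for any
// Id, Offset, and Width accepted by the validity checks above, decodeHwreg
// recovers exactly the triple passed to encodeHwreg, since the three fields
// occupy disjoint bit ranges.
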
StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
  return isValidHwreg(Id, STI) ? IdSymbolic[Id] : "";
}

void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) {
  Id = (Val & ID_MASK_) >> ID_SHIFT_;
  Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
  Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
}

} // namespace Hwreg

//===----------------------------------------------------------------------===//
// sendmsg
//===----------------------------------------------------------------------===//

namespace SendMsg {

int64_t getMsgId(const StringRef Name) {
  for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
    if (IdSymbolic[i] && Name == IdSymbolic[i])
      return i;
  }
  return ID_UNKNOWN_;
}

static bool isValidMsgId(int64_t MsgId) {
  return (ID_GAPS_FIRST_ <= MsgId && MsgId < ID_GAPS_LAST_) && IdSymbolic[MsgId];
}

bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI, bool Strict) {
  if (Strict) {
    if (MsgId == ID_GS_ALLOC_REQ || MsgId == ID_GET_DOORBELL)
      return isGFX9(STI) || isGFX10(STI);
    else
      return isValidMsgId(MsgId);
  } else {
    return 0 <= MsgId && isUInt<ID_WIDTH_>(MsgId);
  }
}

StringRef getMsgName(int64_t MsgId) {
  return isValidMsgId(MsgId)? IdSymbolic[MsgId] : "";
}

int64_t getMsgOpId(int64_t MsgId, const StringRef Name) {
  const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
  const int F = (MsgId == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
  const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
  for (int i = F; i < L; ++i) {
    if (S[i] && Name == S[i])
      return i;
  }
  return OP_UNKNOWN_;
}

bool isValidMsgOp(int64_t MsgId, int64_t OpId, bool Strict) {
  if (!Strict)
    return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);

  switch (MsgId) {
  case ID_GS:
    return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP;
  case ID_GS_DONE:
    return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_;
  case ID_SYSMSG:
    return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_;
  default:
    return OpId == OP_NONE_;
  }
}

StringRef getMsgOpName(int64_t MsgId, int64_t OpId) {
  assert(msgRequiresOp(MsgId));
  return (MsgId == ID_SYSMSG)? OpSysSymbolic[OpId] : OpGsSymbolic[OpId];
}

bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, bool Strict) {
  if (!Strict)
    return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);

  switch (MsgId) {
  case ID_GS:
    return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
  case ID_GS_DONE:
    return (OpId == OP_GS_NOP)?
           (StreamId == STREAM_ID_NONE_) :
           (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
  default:
    return StreamId == STREAM_ID_NONE_;
  }
}

bool msgRequiresOp(int64_t MsgId) {
  return MsgId == ID_GS || MsgId == ID_GS_DONE || MsgId == ID_SYSMSG;
}

bool msgSupportsStream(int64_t MsgId, int64_t OpId) {
  return (MsgId == ID_GS || MsgId == ID_GS_DONE) && OpId != OP_GS_NOP;
}

void decodeMsg(unsigned Val,
               uint16_t &MsgId,
               uint16_t &OpId,
               uint16_t &StreamId) {
  MsgId = Val & ID_MASK_;
  OpId = (Val & OP_MASK_) >> OP_SHIFT_;
  StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
}

uint64_t encodeMsg(uint64_t MsgId,
                   uint64_t OpId,
                   uint64_t StreamId) {
  return (MsgId << ID_SHIFT_) |
         (OpId << OP_SHIFT_) |
         (StreamId << STREAM_ID_SHIFT_);
}

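// Round-trip property (editor's note, not from the original source): decodeMsg
// recovers the MsgId, OpId, and StreamId passed to encodeMsg, since the three
// fields occupy disjoint bit ranges of the sendmsg immediate.
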
} // namespace SendMsg

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool isShader(CallingConv::ID cc) {
  switch (cc) {
    case CallingConv::AMDGPU_VS:
    case CallingConv::AMDGPU_LS:
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_ES:
    case CallingConv::AMDGPU_GS:
    case CallingConv::AMDGPU_PS:
    case CallingConv::AMDGPU_CS:
      return true;
    default:
      return false;
  }
}

bool isCompute(CallingConv::ID cc) {
  return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
}

bool isEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
    return true;
  default:
    return false;
  }
}

bool hasXNACK(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
}

bool hasSRAMECC(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
}

bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128];
}

bool hasPackedD16(const MCSubtargetInfo &STI) {
  return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem];
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool isGFX9(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool isGFX10(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}

bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
}

bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, 1);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
    Reg == AMDGPU::SCC;
}

bool isRegIntersect(unsigned Reg0, unsigned Reg1, const MCRegisterInfo* TRI) {
  for (MCRegAliasIterator R(Reg0, TRI, true); R.isValid(); ++R) {
    if (*R == Reg1) return true;
  }
  return false;
}

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch(Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9_GFX10(TTMP0) \
  CASE_VI_GFX9_GFX10(TTMP1) \
  CASE_VI_GFX9_GFX10(TTMP2) \
  CASE_VI_GFX9_GFX10(TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4) \
  CASE_VI_GFX9_GFX10(TTMP5) \
  CASE_VI_GFX9_GFX10(TTMP6) \
  CASE_VI_GFX9_GFX10(TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8) \
  CASE_VI_GFX9_GFX10(TTMP9) \
  CASE_VI_GFX9_GFX10(TTMP10) \
  CASE_VI_GFX9_GFX10(TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12) \
  CASE_VI_GFX9_GFX10(TTMP13) \
  CASE_VI_GFX9_GFX10(TTMP14) \
  CASE_VI_GFX9_GFX10(TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1) \
  CASE_VI_GFX9_GFX10(TTMP2_TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5) \
  CASE_VI_GFX9_GFX10(TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9) \
  CASE_VI_GFX9_GFX10(TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12_TTMP13) \
  CASE_VI_GFX9_GFX10(TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  }

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
  case node: return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9_GFX10(node) \
  case node: return (isGFX9(STI) || isGFX10(STI)) ? node##_gfx9_gfx10 : node##_vi;

unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  if (STI.getTargetTriple().getArch() == Triple::r600)
    return Reg;
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9_GFX10

#define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
#define CASE_VI_GFX9_GFX10(node) case node##_vi: case node##_gfx9_gfx10: return node;

unsigned mc2PseudoReg(unsigned Reg) {
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9_GFX10

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::AGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::AV_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::AV_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::AReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
    return 64;
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
  case AMDGPU::AReg_128RegClassID:
    return 128;
  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
    return 160;
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
    return 256;
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
  case AMDGPU::AReg_512RegClassID:
    return 512;
  case AMDGPU::SReg_1024RegClassID:
  case AMDGPU::VReg_1024RegClassID:
  case AMDGPU::AReg_1024RegClassID:
    return 1024;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
}

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi);
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (Literal >= -16 && Literal <= 64)
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
    int16_t Trunc = static_cast<int16_t>(Literal);
    return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
  }
  if (!(Literal & 0xffff))
    return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}

bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
  return isGCN3Encoding(ST) || isGFX10(ST);
}

int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  if (hasSMEMByteOffset(ST))
    return ByteOffset;
  return ByteOffset >> 2;
}

bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
  return (hasSMEMByteOffset(ST)) ?
      isUInt<20>(EncodedOffset) : isUInt<8>(EncodedOffset);
}

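// Worked example (editor's note, not from the original source): on SI/CI the
// SMRD offset is encoded in dwords, so ByteOffset == 400 encodes as 100 and
// is legal because it fits in 8 bits; targets with byte offsets (GCN3
// encoding and gfx10) instead accept any byte offset fitting in 20 bits.
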
// Given Imm, split it into the values to put into the SOffset and ImmOffset
// fields in an MUBUF instruction. Return false if it is not possible (due to a
// hardware bug needing a workaround).
//
// The required alignment ensures that individual address components remain
// aligned if they are aligned to begin with. It also ensures that additional
// offsets within the given alignment can be added to the resulting ImmOffset.
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
                      const GCNSubtarget *Subtarget, uint32_t Align) {
  const uint32_t MaxImm = alignDown(4095, Align);
  uint32_t Overflow = 0;

  if (Imm > MaxImm) {
    if (Imm <= MaxImm + 64) {
      // Use an SOffset inline constant for 4..64
      Overflow = Imm - MaxImm;
      Imm = MaxImm;
    } else {
      // Try to keep the same value in SOffset for adjacent loads, so that
      // the corresponding register contents can be re-used.
      //
      // Load values with all low-bits (except for alignment bits) set into
      // SOffset, so that a larger range of values can be covered using
      // s_movk_i32.
      //
      // Atomic operations fail to work correctly when individual address
      // components are unaligned, even if their sum is aligned.
      uint32_t High = (Imm + Align) & ~4095;
      uint32_t Low = (Imm + Align) & 4095;
      Imm = Low;
      Overflow = High - Align;
    }
  }

  // There is a hardware bug in SI and CI which prevents address clamping in
  // MUBUF instructions from working correctly with SOffsets. The immediate
  // offset is unaffected.
  if (Overflow > 0 &&
      Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  ImmOffset = Imm;
  SOffset = Overflow;
  return true;
}

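// Worked example (editor's note, not from the original source): with
// Imm == 4120 and Align == 4, MaxImm == alignDown(4095, 4) == 4092; since
// 4120 <= 4092 + 64, the inline-constant path applies and the split is
// SOffset == 28, ImmOffset == 4092 (no workaround needed past SI/CI).
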
SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) {
  *this = getDefaultForCallingConv(F.getCallingConv());

  StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString();
  if (!IEEEAttr.empty())
    IEEE = IEEEAttr == "true";

  StringRef DX10ClampAttr
    = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString();
  if (!DX10ClampAttr.empty())
    DX10Clamp = DX10ClampAttr == "true";
}

namespace {

struct SourceOfDivergence {
  unsigned Intr;
};
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#include "AMDGPUGenSearchableTables.inc"

} // end anonymous namespace

bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  return lookupSourceOfDivergence(IntrID);
}

} // namespace AMDGPU
} // namespace llvm