//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains definition for AMDGPU ISA disassembler
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "Disassembler/AMDGPUDisassembler.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/DisassemblerTypes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"
#define SGPR_MAX                                                               \
  (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10                           \
                 : AMDGPU::EncValues::SGPR_MAX_SI)

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
                                       MCContext &Ctx,
                                       MCInstrInfo const *MCII) :
  MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
  TargetMaxInstBytes(Ctx.getAsmInfo()->getMaxInstLength(&STI)) {

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10Plus())
    report_fatal_error("Disassembly not yet supported for subtarget");
}
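
// Append an operand to the instruction and translate its validity into a
// DecodeStatus for the table-driven decoder.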
inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::Fail;
}
static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}
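
// The static decode* functions below are the callbacks referenced by the
// TableGen-generated decoder tables. Each receives the raw encoded field and
// the AMDGPUDisassembler instance passed in through the opaque Decoder
// pointer.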
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Our branches take a simm16, but we need two extra bits to account for the
  // factor of 4.
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}
static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm,
                                     uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  int64_t Offset;
  if (DAsm->isVI()) {         // VI supports 20-bit unsigned offsets.
    Offset = Imm & 0xFFFFF;
  } else {                    // GFX9+ supports 21-bit signed offsets.
    Offset = SignExtend64<21>(Imm);
  }
  return addOperand(Inst, MCOperand::createImm(Offset));
}
static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val,
                                  uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeBoolReg(Val));
}
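
// DECODE_OPERAND expands to a static trampoline that forwards the encoded
// value to the named AMDGPUDisassembler member decoder; DECODE_OPERAND_REG
// derives both names from the register class.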
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)
DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VRegOrLds_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)
DECODE_OPERAND_REG(VReg_256)
DECODE_OPERAND_REG(VReg_512)
DECODE_OPERAND_REG(VReg_1024)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SRegOrLds_32)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

DECODE_OPERAND_REG(AGPR_32)
DECODE_OPERAND_REG(AReg_64)
DECODE_OPERAND_REG(AReg_128)
DECODE_OPERAND_REG(AReg_256)
DECODE_OPERAND_REG(AReg_512)
DECODE_OPERAND_REG(AReg_1024)
DECODE_OPERAND_REG(AV_32)
DECODE_OPERAND_REG(AV_64)
static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

static DecodeStatus decodeOperand_VSrcV232(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV232(Imm));
}

static DecodeStatus decodeOperand_VS_16(MCInst &Inst,
                                        unsigned Imm,
                                        uint64_t Addr,
                                        const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VS_32(MCInst &Inst,
                                        unsigned Imm,
                                        uint64_t Addr,
                                        const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VS_32(Imm));
}
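
// For the accumulator (AGPR) operands below, bit 512 is or'ed into the
// encoded value so that decodeSrcOp() selects the AGPR register classes
// instead of the VGPR ones.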
static DecodeStatus decodeOperand_AReg_64(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW64, Imm | 512));
}

static DecodeStatus decodeOperand_AReg_128(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW128, Imm | 512));
}

static DecodeStatus decodeOperand_AReg_256(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW256, Imm | 512));
}

static DecodeStatus decodeOperand_AReg_512(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW512, Imm | 512));
}

static DecodeStatus decodeOperand_AReg_1024(MCInst &Inst,
                                            unsigned Imm,
                                            uint64_t Addr,
                                            const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW1024, Imm | 512));
}

static DecodeStatus decodeOperand_VReg_64(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW64, Imm));
}

static DecodeStatus decodeOperand_VReg_128(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW128, Imm));
}

static DecodeStatus decodeOperand_VReg_256(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW256, Imm));
}

static DecodeStatus decodeOperand_VReg_512(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW512, Imm));
}

static DecodeStatus decodeOperand_VReg_1024(MCInst &Inst,
                                            unsigned Imm,
                                            uint64_t Addr,
                                            const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW1024, Imm));
}
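
// Returns true if the operand at OpIdx is an AGPR. For register tuples the
// check is performed on the first sub-register.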
static bool IsAGPROperand(const MCInst &Inst, int OpIdx,
                          const MCRegisterInfo *MRI) {
  if (OpIdx < 0)
    return false;

  const MCOperand &Op = Inst.getOperand(OpIdx);
  if (!Op.isReg())
    return false;

  unsigned Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
  auto Reg = Sub ? Sub : Op.getReg();
  return Reg >= AMDGPU::AGPR0 && Reg <= AMDGPU::AGPR255;
}
static DecodeStatus decodeOperand_AVLdSt_Any(MCInst &Inst,
                                             unsigned Imm,
                                             AMDGPUDisassembler::OpWidthTy Opw,
                                             const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  if (!DAsm->isGFX90A()) {
    Imm &= 511;
  } else {
    // If atomic has both vdata and vdst their register classes are tied.
    // The bit is decoded along with the vdst, first operand. We need to
    // change register class to AGPR if vdst was AGPR.
    // If a DS instruction has both data0 and data1 their register classes
    // are also tied.
    unsigned Opc = Inst.getOpcode();
    uint64_t TSFlags = DAsm->getMCII()->get(Opc).TSFlags;
    uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
                                                        : AMDGPU::OpName::vdata;
    const MCRegisterInfo *MRI = DAsm->getContext().getRegisterInfo();
    int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataNameIdx);
    if ((int)Inst.getNumOperands() == DataIdx) {
      int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (IsAGPROperand(Inst, DstIdx, MRI))
        Imm |= 512;
    }

    if (TSFlags & SIInstrFlags::DS) {
      int Data2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
      if ((int)Inst.getNumOperands() == Data2Idx &&
          IsAGPROperand(Inst, DataIdx, MRI))
        Imm |= 512;
    }
  }
  return addOperand(Inst, DAsm->decodeSrcOp(Opw, Imm | 256));
}
static DecodeStatus DecodeAVLdSt_32RegisterClass(MCInst &Inst,
                                                 unsigned Imm,
                                                 uint64_t Addr,
                                                 const void *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm,
                                  AMDGPUDisassembler::OPW32, Decoder);
}

static DecodeStatus DecodeAVLdSt_64RegisterClass(MCInst &Inst,
                                                 unsigned Imm,
                                                 uint64_t Addr,
                                                 const void *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm,
                                  AMDGPUDisassembler::OPW64, Decoder);
}

static DecodeStatus DecodeAVLdSt_96RegisterClass(MCInst &Inst,
                                                 unsigned Imm,
                                                 uint64_t Addr,
                                                 const void *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm,
                                  AMDGPUDisassembler::OPW96, Decoder);
}

static DecodeStatus DecodeAVLdSt_128RegisterClass(MCInst &Inst,
                                                  unsigned Imm,
                                                  uint64_t Addr,
                                                  const void *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm,
                                  AMDGPUDisassembler::OPW128, Decoder);
}
static DecodeStatus decodeOperand_SReg_32(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_SReg_32(Imm));
}

static DecodeStatus decodeOperand_VGPR_32(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW32, Imm));
}
#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
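
// Read a value of type T from the front of Bytes (little-endian) and advance
// the ArrayRef past the consumed bytes.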
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res =
      support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}
// The disassembler is greedy, so we need to check FI operand value to
// not parse a dpp if the correct literal is not set. For dpp16 the
// autogenerated decoder checks the dpp literal
static bool isValidDPP8(const MCInst &MI) {
  using namespace llvm::AMDGPU::DPP;
  int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi);
  if ((unsigned)FiIdx >= MI.getNumOperands())
    return false;
  unsigned Fi = MI.getOperand(FiIdx).getImm();
  return Fi == DPP8_FI_0 || Fi == DPP8_FI_1;
}
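
// Main decode entry point. The 64-bit DPP/SDWA encodings are tried first to
// resolve their overlap with VOP1/VOP2, then the 32-bit tables, then the
// remaining 64-bit tables; the decoded MCInst is finally post-processed
// (dummy modifier operands, cpol/tfe/swz fix-ups, MIMG/NSA widening, tied
// vdst_in operands) before the consumed size is reported.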
DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: better to switch encoding length using some bit predicate
    // but it is unknown yet, so try all we can

    // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
    // encodings
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);

      if (STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]) {
        Res = tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address);
        if (Res) {
          if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8)
              == -1)
            break;
          if (convertDPP8Inst(MI) == MCDisassembler::Success)
            break;
          MI = MCInst(); // clear
        }
      }

      Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address);
      if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
        break;

      MI = MCInst(); // clear

      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true;  break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true;  break; }

      Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address);
      if (Res) { IsSDWA = true;  break; }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res)
          break;
      }

      // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
      // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
      // table first so we print the correct name.
      if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
        Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
        if (Res)
          break;
      }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try decode 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    if (STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts]) {
      Res = tryDecodeInst(DecoderTableGFX90A32, MI, DW, Address);
      if (Res)
        break;
    }

    if (STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]) {
      Res = tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address);
      if (Res) break;
    }

    Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address);
    if (Res) break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;

    if (STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts]) {
      Res = tryDecodeInst(DecoderTableGFX90A64, MI, QW, Address);
      if (Res)
        break;
    }

    Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
              MI.getOpcode() == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F64_e64_gfx90a ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_FMAC_F16_e64_gfx10)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags &
          (SIInstrFlags::MUBUF | SIInstrFlags::FLAT | SIInstrFlags::SMRD))) {
    int CPolPos = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                             AMDGPU::OpName::cpol);
    if (CPolPos != -1) {
      unsigned CPol =
          (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsAtomicRet) ?
              AMDGPU::CPol::GLC : 0;
      if (MI.getNumOperands() <= (unsigned)CPolPos) {
        insertNamedMCOperand(MI, MCOperand::createImm(CPol),
                             AMDGPU::OpName::cpol);
      } else if (CPol) {
        MI.getOperand(CPolPos).setImm(MI.getOperand(CPolPos).getImm() | CPol);
      }
    }
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags &
              (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) &&
             (STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts])) {
    // GFX90A lost TFE, its place is occupied by ACC.
    int TFEOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
    if (TFEOpIdx != -1) {
      auto TFEIter = MI.begin();
      std::advance(TFEIter, TFEOpIdx);
      MI.insert(TFEIter, MCOperand::createImm(0));
    }
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags &
              (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF))) {
    int SWZOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
    if (SWZOpIdx != -1) {
      auto SWZIter = MI.begin();
      std::advance(SWZIter, SWZOpIdx);
      MI.insert(SWZIter, MCOperand::createImm(0));
    }
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    int VAddr0Idx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
    int RsrcIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
    unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
    if (VAddr0Idx >= 0 && NSAArgs > 0) {
      unsigned NSAWords = (NSAArgs + 3) / 4;
      if (Bytes.size() < 4 * NSAWords) {
        Res = MCDisassembler::Fail;
      } else {
        for (unsigned i = 0; i < NSAArgs; ++i) {
          MI.insert(MI.begin() + VAddr0Idx + 1 + i,
                    decodeOperand_VGPR_32(Bytes[i]));
        }
        Bytes = Bytes.slice(4 * NSAWords);
      }
    }

    if (Res)
      Res = convertMIMGInst(MI);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::vdst_in);
  if (VDstIn_Idx != -1) {
    int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
                             MCOI::OperandConstraint::TIED_TO);
    if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
         !MI.getOperand(VDstIn_Idx).isReg() ||
         MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
      if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
        MI.erase(&MI.getOperand(VDstIn_Idx));
      insertNamedMCOperand(MI,
        MCOperand::createReg(MI.getOperand(Tied).getReg()),
        AMDGPU::OpName::vdst_in);
    }
  }

  // if the opcode was not recognized we'll assume a Size of 4 bytes
  // (unless there are fewer bytes left)
  Size = Res ? (MaxInstBytesNum - Bytes.size())
             : std::min((size_t)4, Bytes_.size());
  return Res;
}
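
// SDWA instructions come out of the decoder without some of the operands the
// MC layer expects; add the missing clamp/omod operands, or the implicit VCC
// sdst on VI, depending on the subtarget.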
DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

// We must check FI == literal to reject not genuine dpp8 insts, and we must
// first add optional MI operands to check FI
DecodeStatus AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();

  // Insert dummy unused src modifiers.
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1)
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src0_modifiers);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers) != -1)
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src1_modifiers);

  return isValidDPP8(MI) ? MCDisassembler::Success : MCDisassembler::SoftFail;
}
// Note that before gfx10, the MIMG encoding provided no information about
// VADDR size. Consequently, decoded instructions always show address as if it
// has 1 dword, which could be not really so.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::tfe);
  int D16Idx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::d16);

  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
      AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);

  assert(VDataIdx != -1);
  if (BaseOpcode->BVH) {
    // Add A16 operand for intersect_ray instructions
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16) > -1) {
      addOperand(MI, MCOperand::createImm(1));
    }
    return MCDisassembler::Success;
  }

  bool IsAtomic = (VDstIdx != -1);
  bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4;
  bool IsNSA = false;
  unsigned AddrSize = Info->VAddrDwords;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    int DimIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
    int A16Idx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16);
    const AMDGPU::MIMGDimInfo *Dim =
        AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());
    const bool IsA16 = (A16Idx != -1 && MI.getOperand(A16Idx).getImm());

    AddrSize =
        AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, AMDGPU::hasG16(STI));

    IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA;
    if (!IsNSA) {
      if (AddrSize > 8)
        AddrSize = 16;
    } else {
      if (AddrSize > Info->VAddrDwords) {
        // The NSA encoding does not contain enough operands for the combination
        // of base opcode / dimension. Should this be an error?
        return MCDisassembler::Success;
      }
    }
  }

  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  unsigned DstSize = IsGather4 ? 4 : std::max(countPopulation(DMask), 1u);

  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  if (TFEIdx != -1 && MI.getOperand(TFEIdx).getImm())
    DstSize += 1;

  if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
    return MCDisassembler::Success;

  int NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
  if (NewOpcode == -1)
    return MCDisassembler::Success;

  // Widen the register to the correct number of enabled channels.
  unsigned NewVdata = AMDGPU::NoRegister;
  if (DstSize != Info->VDataDwords) {
    auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

    // Get first subregister of VData
    unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
    unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
    Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

    NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                       &MRI.getRegClass(DataRCID));
    if (NewVdata == AMDGPU::NoRegister) {
      // It's possible to encode this such that the low register + enabled
      // components exceeds the register count.
      return MCDisassembler::Success;
    }
  }

  unsigned NewVAddr0 = AMDGPU::NoRegister;
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10] && !IsNSA &&
      AddrSize != Info->VAddrDwords) {
    unsigned VAddr0 = MI.getOperand(VAddr0Idx).getReg();
    unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0);
    VAddr0 = (VAddrSub0 != 0) ? VAddrSub0 : VAddr0;

    auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass;
    NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0,
                                        &MRI.getRegClass(AddrRCID));
    if (NewVAddr0 == AMDGPU::NoRegister)
      return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);

  if (NewVdata != AMDGPU::NoRegister) {
    MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

    if (IsAtomic) {
      // Atomic operations have an additional operand (a copy of data)
      MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
    }
  }

  if (NewVAddr0 != AMDGPU::NoRegister) {
    MI.getOperand(VAddr0Idx) = MCOperand::createReg(NewVAddr0);
  } else if (IsNSA) {
    assert(AddrSize <= Info->VAddrDwords);
    MI.erase(MI.begin() + VAddr0Idx + AddrSize,
             MI.begin() + VAddr0Idx + Info->VAddrDwords);
  }

  return MCDisassembler::Success;
}
const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
        getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}
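
// Scalar registers are encoded by the number of their first 32-bit register,
// while the tuple register classes index whole tuples, so the raw value is
// shifted right by the tuple alignment before forming the operand.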
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accepting as much as we can, let assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
    // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
    // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}
MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV232(unsigned Val) const {
  return decodeSrcOp(OPWV232, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // encoding bits before forming the register operand.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}
MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_AGPR_32(unsigned Val) const {
  return createRegOperand(AMDGPU::AGPR_32RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_64RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_128RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_256(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_256RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_512(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_512RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_1024(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_1024RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AV_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_AV_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_256(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_512(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_512RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_1024(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_1024RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // table-gen generated disassembler doesn't care about operand types
  // leaving only registry class so SSrc_32 operand turns into SReg_32
  // and therefore we accept immediates and literals here as well
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
  // table-gen generated disassembler doesn't care about operand types
  // leaving only registry class so SSrc_32 operand turns into SReg_32
  // and therefore we accept immediates and literals here as well
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}
MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integer
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (Bytes.size() < 4) {
    return errOperand(0, "cannot read literal, inst bytes left " +
                      Twine(Bytes.size()));
  }
  Literal = eatBytes<uint32_t>(Bytes);
  return MCOperand::createImm(Literal);
}
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}
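
// The following helpers map the 8-bit inline-constant encodings 240..248 to
// the bit patterns of +-0.5, +-1.0, +-2.0, +-4.0 and 1/(2*pi) in the
// corresponding floating-point format.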
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}
MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
  case OPW128: // splat constants
  case OPW512:
  case OPW1024:
  case OPWV232:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}
unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64:
  case OPWV232: return VReg_64RegClassID;
  case OPW96: return VReg_96RegClassID;
  case OPW128: return VReg_128RegClassID;
  case OPW160: return VReg_160RegClassID;
  case OPW256: return VReg_256RegClassID;
  case OPW512: return VReg_512RegClassID;
  case OPW1024: return VReg_1024RegClassID;
  }
}
unsigned AMDGPUDisassembler::getAgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return AGPR_32RegClassID;
  case OPW64:
  case OPWV232: return AReg_64RegClassID;
  case OPW96: return AReg_96RegClassID;
  case OPW128: return AReg_128RegClassID;
  case OPW160: return AReg_160RegClassID;
  case OPW256: return AReg_256RegClassID;
  case OPW512: return AReg_512RegClassID;
  case OPW1024: return AReg_1024RegClassID;
  }
}
unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64:
  case OPWV232: return SGPR_64RegClassID;
  case OPW96: return SGPR_96RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW160: return SGPR_160RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}
unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64:
  case OPWV232: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}
int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
  using namespace AMDGPU::EncValues;

  unsigned TTmpMin = isGFX9Plus() ? TTMP_GFX9PLUS_MIN : TTMP_VI_MIN;
  unsigned TTmpMax = isGFX9Plus() ? TTMP_GFX9PLUS_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
}
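
// Decode a 10-bit source operand encoding: VGPRs/AGPRs (bit 512 selects the
// AGPR file), then SGPRs, TTMPs, inline integer and floating-point constants,
// the literal-constant marker, and finally the special registers.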
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 1024); // enum10

  bool IsAGPR = Val & 512;
  Val &= 511;

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(IsAGPR ? getAgprClassId(Width)
                                   : getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    // "SGPR_MIN <= Val" is always true and causes compilation warning.
    static_assert(SGPR_MIN == 0, "");
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
  case OPWV232:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}
MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Width == OPW256 || Width == OPW512);

  if (Val <= SGPR_MAX) {
    // "SGPR_MIN <= Val" is always true and causes compilation warning.
    static_assert(SGPR_MIN == 0, "");
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  llvm_unreachable("unknown dst register");
}
MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 125: return createRegOperand(SGPR_NULL);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: return createRegOperand(SRC_VCCZ);
  case 252: return createRegOperand(SRC_EXECZ);
  case 253: return createRegOperand(SRC_SCC);
  case 254: return createRegOperand(LDS_DIRECT);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}
MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 125: return createRegOperand(SGPR_NULL);
  case 126: return createRegOperand(EXEC);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: return createRegOperand(SRC_VCCZ);
  case 252: return createRegOperand(SRC_EXECZ);
  case 253: return createRegOperand(SRC_SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}
MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    // XXX: cast to int is needed to avoid stupid warning:
    // compare with unsigned is always true
    if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= (isGFX10Plus() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
                              : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}
MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}
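
// On GFX9+ the SDWA VOPC destination either names VCC implicitly or, when the
// VCC-mask bit is set, carries an explicit SGPR/TTMP/special register whose
// width follows the wavefront size.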
MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert((STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
          STI.getFeatureBits()[AMDGPU::FeatureGFX10]) &&
         "SDWAVopcDst should be present only on GFX9+");

  bool IsWave64 = STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64];

  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;

    int TTmpIdx = getTTmpIdx(Val);
    if (TTmpIdx >= 0) {
      auto TTmpClsId = getTtmpClassId(IsWave64 ? OPW64 : OPW32);
      return createSRegOperand(TTmpClsId, TTmpIdx);
    } else if (Val > SGPR_MAX) {
      return IsWave64 ? decodeSpecialReg64(Val)
                      : decodeSpecialReg32(Val);
    } else {
      return createSRegOperand(getSgprClassId(IsWave64 ? OPW64 : OPW32), Val);
    }
  }
  return createRegOperand(IsWave64 ? AMDGPU::VCC : AMDGPU::VCC_LO);
}
MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
  return STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64] ?
    decodeOperand_SReg_64(Val) : decodeOperand_SReg_32(Val);
}

bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool AMDGPUDisassembler::isGFX9() const { return AMDGPU::isGFX9(STI); }

bool AMDGPUDisassembler::isGFX90A() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts];
}

bool AMDGPUDisassembler::isGFX9Plus() const { return AMDGPU::isGFX9Plus(STI); }

bool AMDGPUDisassembler::isGFX10() const { return AMDGPU::isGFX10(STI); }

bool AMDGPUDisassembler::isGFX10Plus() const {
  return AMDGPU::isGFX10Plus(STI);
}

bool AMDGPUDisassembler::hasArchitectedFlatScratch() const {
  return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
}
//===----------------------------------------------------------------------===//
// AMDGPU specific symbol handling
//===----------------------------------------------------------------------===//
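
// PRINT_DIRECTIVE emits one .amdhsa_* directive whose value is the MASK field
// extracted from the descriptor word currently held in FourByteBuffer.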
#define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
  do {                                                                         \
    KdStream << Indent << DIRECTIVE " "                                        \
             << ((FourByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';           \
  } while (0)
// NOLINTNEXTLINE(readability-identifier-naming)
MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1(
    uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
  using namespace amdhsa;
  StringRef Indent = "\t";

  // We cannot accurately backward compute #VGPRs used from
  // GRANULATED_WORKITEM_VGPR_COUNT. But we are concerned with getting the same
  // value of GRANULATED_WORKITEM_VGPR_COUNT in the reassembled binary. So we
  // simply calculate the inverse of what the assembler does.

  uint32_t GranulatedWorkitemVGPRCount =
      (FourByteBuffer & COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT) >>
      COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_SHIFT;

  uint32_t NextFreeVGPR = (GranulatedWorkitemVGPRCount + 1) *
                          AMDGPU::IsaInfo::getVGPREncodingGranule(&STI);

  KdStream << Indent << ".amdhsa_next_free_vgpr " << NextFreeVGPR << '\n';

  // We cannot backward compute values used to calculate
  // GRANULATED_WAVEFRONT_SGPR_COUNT. Hence the original values for following
  // directives can't be computed:
  // .amdhsa_reserve_vcc
  // .amdhsa_reserve_flat_scratch
  // .amdhsa_reserve_xnack_mask
  // They take their respective default values if not specified in the assembly.
  //
  // GRANULATED_WAVEFRONT_SGPR_COUNT
  //    = f(NEXT_FREE_SGPR + VCC + FLAT_SCRATCH + XNACK_MASK)
  //
  // We compute the inverse as though all directives apart from NEXT_FREE_SGPR
  // are set to 0. So while disassembling we consider that:
  //
  // GRANULATED_WAVEFRONT_SGPR_COUNT
  //    = f(NEXT_FREE_SGPR + 0 + 0 + 0)
  //
  // The disassembler cannot recover the original values of those 3 directives.

  uint32_t GranulatedWavefrontSGPRCount =
      (FourByteBuffer & COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT) >>
      COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_SHIFT;

  if (isGFX10Plus() && GranulatedWavefrontSGPRCount)
    return MCDisassembler::Fail;

  uint32_t NextFreeSGPR = (GranulatedWavefrontSGPRCount + 1) *
                          AMDGPU::IsaInfo::getSGPREncodingGranule(&STI);

  KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n';
  if (!hasArchitectedFlatScratch())
    KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n';
  KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n';
  KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n";

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_PRIORITY)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_float_round_mode_32",
                  COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
  PRINT_DIRECTIVE(".amdhsa_float_round_mode_16_64",
                  COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
  PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_32",
                  COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
  PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_16_64",
                  COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_PRIV)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_dx10_clamp", COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_DEBUG_MODE)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_ieee_mode", COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_BULKY)
    return MCDisassembler::Fail;

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_CDBG_USER)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_fp16_overflow", COMPUTE_PGM_RSRC1_FP16_OVFL);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_RESERVED0)
    return MCDisassembler::Fail;

  if (isGFX10Plus()) {
    PRINT_DIRECTIVE(".amdhsa_workgroup_processor_mode",
                    COMPUTE_PGM_RSRC1_WGP_MODE);
    PRINT_DIRECTIVE(".amdhsa_memory_ordered", COMPUTE_PGM_RSRC1_MEM_ORDERED);
    PRINT_DIRECTIVE(".amdhsa_forward_progress", COMPUTE_PGM_RSRC1_FWD_PROGRESS);
  }
  return MCDisassembler::Success;
}
// NOLINTNEXTLINE(readability-identifier-naming)
MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC2(
    uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
  using namespace amdhsa;
  StringRef Indent = "\t";
  if (hasArchitectedFlatScratch())
    PRINT_DIRECTIVE(".amdhsa_enable_private_segment",
                    COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
  else
    PRINT_DIRECTIVE(".amdhsa_system_sgpr_private_segment_wavefront_offset",
                    COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_x",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_y",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_z",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_info",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
  PRINT_DIRECTIVE(".amdhsa_system_vgpr_workitem_id",
                  COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH)
    return MCDisassembler::Fail;

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY)
    return MCDisassembler::Fail;

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(
      ".amdhsa_exception_fp_ieee_invalid_op",
      COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_denorm_src",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
  PRINT_DIRECTIVE(
      ".amdhsa_exception_fp_ieee_div_zero",
      COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_overflow",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_underflow",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_inexact",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
  PRINT_DIRECTIVE(".amdhsa_exception_int_div_zero",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_RESERVED0)
    return MCDisassembler::Fail;

  return MCDisassembler::Success;
}

#undef PRINT_DIRECTIVE
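
// Walk one field of the 64-byte kernel descriptor, switching on the byte
// offset of the cursor, and print the corresponding .amdhsa_* directives.
// Reserved ranges must decode to zero.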
MCDisassembler::DecodeStatus
AMDGPUDisassembler::decodeKernelDescriptorDirective(
    DataExtractor::Cursor &Cursor, ArrayRef<uint8_t> Bytes,
    raw_string_ostream &KdStream) const {
#define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
  do {                                                                         \
    KdStream << Indent << DIRECTIVE " "                                        \
             << ((TwoByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';            \
  } while (0)

  uint16_t TwoByteBuffer = 0;
  uint32_t FourByteBuffer = 0;

  StringRef ReservedBytes;
  StringRef Indent = "\t";

  assert(Bytes.size() == 64);
  DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);

  switch (Cursor.tell()) {
  case amdhsa::GROUP_SEGMENT_FIXED_SIZE_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer
             << '\n';
    return MCDisassembler::Success;

  case amdhsa::PRIVATE_SEGMENT_FIXED_SIZE_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    KdStream << Indent << ".amdhsa_private_segment_fixed_size "
             << FourByteBuffer << '\n';
    return MCDisassembler::Success;

  case amdhsa::KERNARG_SIZE_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    KdStream << Indent << ".amdhsa_kernarg_size "
             << FourByteBuffer << '\n';
    return MCDisassembler::Success;

  case amdhsa::RESERVED0_OFFSET:
    // 4 reserved bytes, must be 0.
    ReservedBytes = DE.getBytes(Cursor, 4);
    for (int I = 0; I < 4; ++I) {
      if (ReservedBytes[I] != 0) {
        return MCDisassembler::Fail;
      }
    }
    return MCDisassembler::Success;

  case amdhsa::KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET:
    // KERNEL_CODE_ENTRY_BYTE_OFFSET
    // So far no directive controls this for Code Object V3, so simply skip for
    // disassembly.
    DE.skip(Cursor, 8);
    return MCDisassembler::Success;

  case amdhsa::RESERVED1_OFFSET:
    // 20 reserved bytes, must be 0.
    ReservedBytes = DE.getBytes(Cursor, 20);
    for (int I = 0; I < 20; ++I) {
      if (ReservedBytes[I] != 0) {
        return MCDisassembler::Fail;
      }
    }
    return MCDisassembler::Success;

  case amdhsa::COMPUTE_PGM_RSRC3_OFFSET:
    // COMPUTE_PGM_RSRC3
    //  - Only set for GFX10, GFX6-9 have this to be 0.
    //  - Currently no directives directly control this.
    FourByteBuffer = DE.getU32(Cursor);
    if (!isGFX10Plus() && FourByteBuffer) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::COMPUTE_PGM_RSRC1_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    if (decodeCOMPUTE_PGM_RSRC1(FourByteBuffer, KdStream) ==
        MCDisassembler::Fail) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::COMPUTE_PGM_RSRC2_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    if (decodeCOMPUTE_PGM_RSRC2(FourByteBuffer, KdStream) ==
        MCDisassembler::Fail) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::KERNEL_CODE_PROPERTIES_OFFSET:
    using namespace amdhsa;
    TwoByteBuffer = DE.getU16(Cursor);

    if (!hasArchitectedFlatScratch())
      PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_buffer",
                      KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_ptr",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_queue_ptr",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_segment_ptr",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_id",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
    if (!hasArchitectedFlatScratch())
      PRINT_DIRECTIVE(".amdhsa_user_sgpr_flat_scratch_init",
                      KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_size",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);

    if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED0)
      return MCDisassembler::Fail;

    // Reserved for GFX9
    if (isGFX9() &&
        (TwoByteBuffer & KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32)) {
      return MCDisassembler::Fail;
    } else if (isGFX10Plus()) {
      PRINT_DIRECTIVE(".amdhsa_wavefront_size32",
                      KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
    }

    if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED1)
      return MCDisassembler::Fail;

    return MCDisassembler::Success;

  case amdhsa::RESERVED2_OFFSET:
    // 6 bytes from here are reserved, must be 0.
    ReservedBytes = DE.getBytes(Cursor, 6);
    for (int I = 0; I < 6; ++I) {
      if (ReservedBytes[I] != 0)
        return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;
  }

  llvm_unreachable("Unhandled index. Case statements cover everything.");
  return MCDisassembler::Fail;
}
#undef PRINT_DIRECTIVE
MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeKernelDescriptor(
    StringRef KdName, ArrayRef<uint8_t> Bytes, uint64_t KdAddress) const {
  // CP microcode requires the kernel descriptor to be 64 aligned.
  if (Bytes.size() != 64 || KdAddress % 64 != 0)
    return MCDisassembler::Fail;

  std::string Kd;
  raw_string_ostream KdStream(Kd);
  KdStream << ".amdhsa_kernel " << KdName << '\n';

  DataExtractor::Cursor C(0);
  while (C && C.tell() < Bytes.size()) {
    MCDisassembler::DecodeStatus Status =
        decodeKernelDescriptorDirective(C, Bytes, KdStream);

    cantFail(C.takeError());

    if (Status == MCDisassembler::Fail)
      return MCDisassembler::Fail;
  }
  KdStream << ".end_amdhsa_kernel\n";
  outs() << KdStream.str();
  return MCDisassembler::Success;
}
Optional<MCDisassembler::DecodeStatus>
AMDGPUDisassembler::onSymbolStart(SymbolInfoTy &Symbol, uint64_t &Size,
                                  ArrayRef<uint8_t> Bytes, uint64_t Address,
                                  raw_ostream &CStream) const {
  // Right now only kernel descriptor needs to be handled.
  // We ignore all other symbols for target specific handling.
  // TODO:
  // Fix the spurious symbol issue for AMDGPU kernels. Exists for both Code
  // Object V2 and V3 when symbols are marked protected.

  // amd_kernel_code_t for Code Object V2.
  if (Symbol.Type == ELF::STT_AMDGPU_HSA_KERNEL) {
    Size = 256;
    return MCDisassembler::Fail;
  }

  // Code Object V3 kernel descriptors.
  StringRef Name = Symbol.Name;
  if (Symbol.Type == ELF::STT_OBJECT && Name.endswith(StringRef(".kd"))) {
    Size = 64; // Size = 64 regardless of success or failure.
    return decodeKernelDescriptor(Name.drop_back(3), Bytes, Address);
  }
  return None;
}
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Try to find symbol name for specified label
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  if (!Symbols)
    return false;

  auto Result = llvm::find_if(*Symbols, [Value](const SymbolInfoTy &Val) {
    return Val.Addr == static_cast<uint64_t>(Value) &&
           Val.Type == ELF::STT_NOTYPE;
  });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(Result->Name);
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }

  // Add to list of referenced addresses, so caller can synthesize a label.
  ReferencedAddresses.push_back(static_cast<uint64_t>(Value));
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}