//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
19 #include "Disassembler/AMDGPUDisassembler.h"
21 #include "AMDGPURegisterInfo.h"
22 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
23 #include "SIDefines.h"
24 #include "TargetInfo/AMDGPUTargetInfo.h"
25 #include "Utils/AMDGPUBaseInfo.h"
26 #include "llvm-c/Disassembler.h"
27 #include "llvm/ADT/APInt.h"
28 #include "llvm/ADT/ArrayRef.h"
29 #include "llvm/ADT/Twine.h"
30 #include "llvm/BinaryFormat/ELF.h"
31 #include "llvm/MC/MCAsmInfo.h"
32 #include "llvm/MC/MCContext.h"
33 #include "llvm/MC/MCDisassembler/MCDisassembler.h"
34 #include "llvm/MC/MCExpr.h"
35 #include "llvm/MC/MCFixedLenDisassembler.h"
36 #include "llvm/MC/MCInst.h"
37 #include "llvm/MC/MCSubtargetInfo.h"
38 #include "llvm/Support/Endian.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Support/TargetRegistry.h"
42 #include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "amdgpu-disassembler"

#define SGPR_MAX (isGFX10() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
                            : AMDGPU::EncValues::SGPR_MAX_SI)
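// Note: SGPR_MAX expands inside AMDGPUDisassembler member functions, so the
// isGFX10() call resolves against the subtarget currently being disassembled.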
using DecodeStatus = llvm::MCDisassembler::DecodeStatus;
AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
                                       MCContext &Ctx,
                                       MCInstrInfo const *MCII) :
  MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
  TargetMaxInstBytes(Ctx.getAsmInfo()->getMaxInstLength(&STI)) {

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10())
    report_fatal_error("Disassembly not yet supported for subtarget");
}
inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}
static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Our branches take a simm16, but we need two extra bits to account for the
  // factor of 4.
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}
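// For reference, the target computed above is Addr + 4 + simm16 * 4; e.g. a
// simm16 of -1 (Imm == 0xffff) yields Offset == Addr, i.e. a branch back to
// the 4-byte SOPP instruction itself.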
static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val,
                                  uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeBoolReg(Val));
}
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)
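// As an illustration, DECODE_OPERAND_REG(VGPR_32) expands to roughly:
//
//   static DecodeStatus DecodeVGPR_32RegisterClass(MCInst &Inst, unsigned Imm,
//                                                  uint64_t /*Addr*/,
//                                                  const void *Decoder) {
//     auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
//     return addOperand(Inst, DAsm->decodeOperand_VGPR_32(Imm));
//   }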
DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VRegOrLds_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SRegOrLds_32)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

DECODE_OPERAND_REG(AGPR_32)
DECODE_OPERAND_REG(AReg_128)
DECODE_OPERAND_REG(AReg_512)
DECODE_OPERAND_REG(AReg_1024)
DECODE_OPERAND_REG(AV_32)
DECODE_OPERAND_REG(AV_64)
static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

static DecodeStatus decodeOperand_VS_16(MCInst &Inst,
                                        unsigned Imm,
                                        uint64_t Addr,
                                        const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VS_32(MCInst &Inst,
                                        unsigned Imm,
                                        uint64_t Addr,
                                        const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VS_32(Imm));
}

static DecodeStatus decodeOperand_AReg_128(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW128, Imm | 512));
}

static DecodeStatus decodeOperand_AReg_512(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW512, Imm | 512));
}

static DecodeStatus decodeOperand_AReg_1024(MCInst &Inst,
                                            unsigned Imm,
                                            uint64_t Addr,
                                            const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW1024, Imm | 512));
}

static DecodeStatus decodeOperand_SReg_32(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_SReg_32(Imm));
}

static DecodeStatus decodeOperand_VGPR_32(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW32, Imm));
}
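// Note on the "Imm | 512" in the AReg decoders above: decodeSrcOp treats its
// argument as a 10-bit value whose bit 9 selects the accumulator (AGPR) file,
// so these decoders force that bit before dispatching.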
#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
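// eatBytes consumes sizeof(T) bytes from the front of Bytes as a
// little-endian value and advances the ArrayRef past them.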
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}
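// A note on tryDecodeInst above: a decoder callback may consume a trailing
// literal via decodeLiteralConstant while a table is being tried, so Bytes is
// restored from SavedBytes on failure to leave the byte stream untouched for
// the next table.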
static bool isValidDPP8(const MCInst &MI) {
  using namespace llvm::AMDGPU::DPP;
  int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi);
  assert(FiIdx != -1);
  if ((unsigned)FiIdx >= MI.getNumOperands())
    return false;
  unsigned Fi = MI.getOperand(FiIdx).getImm();
  return Fi == DPP8_FI_0 || Fi == DPP8_FI_1;
}
DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: better to switch encoding length using some bit predicate
    // but it is unknown yet, so try all we can

    // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
    // encodings
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);

      if (STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
        Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address);
        if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
          break;

        MI = MCInst(); // clear
      }

      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }
      // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
      // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
      // table first so we print the correct name.
      if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
        Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
        if (Res)
          break;
      }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res)
          break;
      }
    }
    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try decode 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address);
    if (Res) break;
    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address);
  } while (false);
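  // A note on the ordering above: the 64-bit DPP/SDWA tables are tried first
  // because those encodings would otherwise alias plain VOP1/VOP2, then the
  // 32-bit tables, and finally the remaining 64-bit tables.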
  if (Res && (MaxInstBytesNum - Bytes.size()) == 12 && (!HasLiteral ||
        !(MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3))) {
    MaxInstBytesNum = 8;
    Bytes = Bytes_.slice(0, MaxInstBytesNum);
    eatBytes<uint64_t>(Bytes);
  }
  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_FMAC_F16_e64_gfx10)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }
  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    int VAddr0Idx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
    int RsrcIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
    unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
    if (VAddr0Idx >= 0 && NSAArgs > 0) {
      unsigned NSAWords = (NSAArgs + 3) / 4;
      if (Bytes.size() < 4 * NSAWords) {
        Res = MCDisassembler::Fail;
      } else {
        for (unsigned i = 0; i < NSAArgs; ++i) {
          MI.insert(MI.begin() + VAddr0Idx + 1 + i,
                    decodeOperand_VGPR_32(Bytes[i]));
        }
        Bytes = Bytes.slice(4 * NSAWords);
      }
    }

    if (Res)
      Res = convertMIMGInst(MI);
  }
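  // Each NSA dword packs four extra 8-bit VGPR addresses, hence the round-up
  // above: e.g. 5 extra addresses occupy (5 + 3) / 4 = 2 additional dwords of
  // encoding.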
  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);
  int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::vdst_in);
  if (VDstIn_Idx != -1) {
    int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
                             MCOI::OperandConstraint::TIED_TO);
    if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
         !MI.getOperand(VDstIn_Idx).isReg() ||
         MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
      if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
        MI.erase(&MI.getOperand(VDstIn_Idx));
      insertNamedMCOperand(MI,
        MCOperand::createReg(MI.getOperand(Tied).getReg()),
        AMDGPU::OpName::vdst_in);
    }
  }
  // if the opcode was not recognized we'll assume a Size of 4 bytes
  // (unless there are fewer bytes left)
  Size = Res ? (MaxInstBytesNum - Bytes.size())
             : std::min((size_t)4, Bytes_.size());
  return Res;
}
DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}
DecodeStatus AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();

  // Insert dummy unused src modifiers.
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1)
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src0_modifiers);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers) != -1)
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src1_modifiers);

  return isValidDPP8(MI) ? MCDisassembler::Success : MCDisassembler::SoftFail;
}
// Note that before gfx10, the MIMG encoding provided no information about
// VADDR size. Consequently, decoded instructions always show the address as
// if it were 1 dword, which is not necessarily the case.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::tfe);
  int D16Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::d16);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  bool IsAtomic = (VDstIdx != -1);
  bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4;
  bool IsNSA = false;
  unsigned AddrSize = Info->VAddrDwords;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    int DimIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
    const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
        AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
    const AMDGPU::MIMGDimInfo *Dim =
        AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());

    AddrSize = BaseOpcode->NumExtraArgs +
               (BaseOpcode->Gradients ? Dim->NumGradients : 0) +
               (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
               (BaseOpcode->LodOrClampOrMip ? 1 : 0);
    IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA;
    if (!IsNSA) {
      if (AddrSize > 8)
        AddrSize = 16;
      else if (AddrSize > 4)
        AddrSize = 8;
    } else {
      if (AddrSize > Info->VAddrDwords) {
        // The NSA encoding does not contain enough operands for the
        // combination of base opcode / dimension. Should this be an error?
        return MCDisassembler::Success;
      }
    }
  }
  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  unsigned DstSize = IsGather4 ? 4 : std::max(countPopulation(DMask), 1u);

  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  // FIXME: Add tfe support
  if (MI.getOperand(TFEIdx).getImm())
    return MCDisassembler::Success;

  if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
    return MCDisassembler::Success;

  int NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
  if (NewOpcode == -1)
    return MCDisassembler::Success;
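  // For example, dmask == 0b1011 enables three channels, so a non-gather load
  // writes three dwords; with packed D16 that is halved (rounding up) to two.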
  // Widen the register to the correct number of enabled channels.
  unsigned NewVdata = AMDGPU::NoRegister;
  if (DstSize != Info->VDataDwords) {
    auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

    // Get first subregister of VData
    unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
    unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
    Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

    NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                       &MRI.getRegClass(DataRCID));
    if (NewVdata == AMDGPU::NoRegister) {
      // It's possible to encode this such that the low register + enabled
      // components exceeds the register count.
      return MCDisassembler::Success;
    }
  }
  unsigned NewVAddr0 = AMDGPU::NoRegister;
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10] && !IsNSA &&
      AddrSize != Info->VAddrDwords) {
    unsigned VAddr0 = MI.getOperand(VAddr0Idx).getReg();
    unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0);
    VAddr0 = (VAddrSub0 != 0) ? VAddrSub0 : VAddr0;

    auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass;
    NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0,
                                        &MRI.getRegClass(AddrRCID));
    if (NewVAddr0 == AMDGPU::NoRegister)
      return MCDisassembler::Success;
  }
  MI.setOpcode(NewOpcode);

  if (NewVdata != AMDGPU::NoRegister) {
    MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

    if (IsAtomic) {
      // Atomic operations have an additional operand (a copy of data)
      MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
    }
  }

  if (NewVAddr0 != AMDGPU::NoRegister) {
    MI.getOperand(VAddr0Idx) = MCOperand::createReg(NewVAddr0);
  } else if (IsNSA) {
    assert(AddrSize <= Info->VAddrDwords);
    MI.erase(MI.begin() + VAddr0Idx + AddrSize,
             MI.begin() + VAddr0Idx + Info->VAddrDwords);
  }

  return MCDisassembler::Success;
}
const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accept as much as we can, let the assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}
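// The shift above converts the encoded SGPR number into a register-class
// index: 64-bit classes advance in aligned pairs and 128-bit and wider
// classes in aligned quadruples, so e.g. s[4:5] (Val == 4) is entry 2 of
// SGPR_64.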
MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // encoding bits before using the value as a VGPR index.
  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_AGPR_32(unsigned Val) const {
  return createRegOperand(AMDGPU::AGPR_32RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_128RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_512(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_512RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_1024(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_1024RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AV_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_AV_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_256(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_512(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_512RegClassID, Val);
}
MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The table-gen generated disassembler doesn't care about operand types;
  // it keeps only the register class, so an SSrc_32 operand turns into
  // SReg_32 and we therefore accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
  // The table-gen generated disassembler doesn't care about operand types;
  // it keeps only the register class, so an SSrc_32 operand turns into
  // SReg_32 and we therefore accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}
MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integer
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  // The casts prevent negative overflow.
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
}
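// The inline integer encoding maps 128..192 onto 0..64 and 193..208 onto
// -1..-16; e.g. Imm == 129 decodes to 1 and Imm == 193 decodes to -1.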
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}
static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}
static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800; // 0.5
  case 241:
    return 0xB800; // -0.5
  case 242:
    return 0x3C00; // 1.0
  case 243:
    return 0xBC00; // -1.0
  case 244:
    return 0x4000; // 2.0
  case 245:
    return 0xC000; // -2.0
  case 246:
    return 0x4400; // 4.0
  case 247:
    return 0xC400; // -4.0
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}
MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
  case OPW128: // splat constants
  case OPW512:
  case OPW1024:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}
unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}
unsigned AMDGPUDisassembler::getAgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return AGPR_32RegClassID;
  case OPW64: return AReg_64RegClassID;
  case OPW128: return AReg_128RegClassID;
  case OPW512: return AReg_512RegClassID;
  case OPW1024: return AReg_1024RegClassID;
  }
}
unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}
unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}
int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
  using namespace AMDGPU::EncValues;

  unsigned TTmpMin =
      (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MIN : TTMP_VI_MIN;
  unsigned TTmpMax =
      (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
}
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 1024); // enum10

  bool IsAGPR = Val & 512;
  Val &= 511;

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(IsAGPR ? getAgprClassId(Width)
                                   : getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}
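// Decode order for a source operand, mirroring the checks above: VGPRs first
// (with bit 9 of the raw 10-bit value redirecting to the AGPR file), then
// SGPRs, trap temporaries, inline integer constants, inline float constants,
// the trailing-literal marker, and finally the special registers.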
MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 128);
  assert(Width == OPW256 || Width == OPW512);

  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  llvm_unreachable("unknown dst register");
}
MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 125: return createRegOperand(SGPR_NULL);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: return createRegOperand(SRC_VCCZ);
  case 252: return createRegOperand(SRC_EXECZ);
  case 253: return createRegOperand(SRC_SCC);
  case 254: return createRegOperand(LDS_DIRECT);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}
MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: return createRegOperand(SRC_VCCZ);
  case 252: return createRegOperand(SRC_EXECZ);
  case 253: return createRegOperand(SRC_SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}
MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    // XXX: the cast to int avoids a "comparison with unsigned is always true"
    // warning.
    if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= (isGFX10() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
                          : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}
MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}
AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val
) const {
1162 using namespace AMDGPU::SDWA
;
1164 assert((STI
.getFeatureBits()[AMDGPU::FeatureGFX9
] ||
1165 STI
.getFeatureBits()[AMDGPU::FeatureGFX10
]) &&
1166 "SDWAVopcDst should be present only on GFX9+");
1168 bool IsWave64
= STI
.getFeatureBits()[AMDGPU::FeatureWavefrontSize64
];
1170 if (Val
& SDWA9EncValues::VOPC_DST_VCC_MASK
) {
1171 Val
&= SDWA9EncValues::VOPC_DST_SGPR_MASK
;
1173 int TTmpIdx
= getTTmpIdx(Val
);
1175 return createSRegOperand(getTtmpClassId(OPW64
), TTmpIdx
);
1176 } else if (Val
> SGPR_MAX
) {
1177 return IsWave64
? decodeSpecialReg64(Val
)
1178 : decodeSpecialReg32(Val
);
1180 return createSRegOperand(getSgprClassId(IsWave64
? OPW64
: OPW32
), Val
);
1183 return createRegOperand(IsWave64
? AMDGPU::VCC
: AMDGPU::VCC_LO
);
MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
  return STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64] ?
    decodeOperand_SReg_64(Val) : decodeOperand_SReg_32(Val);
}
bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool AMDGPUDisassembler::isGFX9() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool AMDGPUDisassembler::isGFX10() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}
//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//
// Try to find symbol name for specified label
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
  using SectionSymbolsTy = std::vector<SymbolInfoTy>;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  if (!Symbols)
    return false;

  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                               return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                   && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}
void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t /*Value*/,
                                                       uint64_t /*Address*/) {
  llvm_unreachable("unimplemented");
}
//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//
static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}
static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}
1263 extern "C" void LLVMInitializeAMDGPUDisassembler() {
1264 TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
1265 createAMDGPUDisassembler
);
1266 TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
1267 createAMDGPUSymbolizer
);