//==-- AArch64InstPrinter.cpp - Convert AArch64 MCInst to assembly syntax --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class prints an AArch64 MCInst to a .s file.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstPrinter.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "asm-printer"

#define GET_INSTRUCTION_NAME
#define PRINT_ALIAS_INSTR
#include "AArch64GenAsmWriter.inc"
#define GET_INSTRUCTION_NAME
#define PRINT_ALIAS_INSTR
#include "AArch64GenAsmWriter1.inc"
AArch64InstPrinter::AArch64InstPrinter(const MCAsmInfo &MAI,
                                       const MCInstrInfo &MII,
                                       const MCRegisterInfo &MRI)
    : MCInstPrinter(MAI, MII, MRI) {}
AArch64AppleInstPrinter::AArch64AppleInstPrinter(const MCAsmInfo &MAI,
                                                 const MCInstrInfo &MII,
                                                 const MCRegisterInfo &MRI)
    : AArch64InstPrinter(MAI, MII, MRI) {}
bool AArch64InstPrinter::applyTargetSpecificCLOption(StringRef Opt) {
  if (Opt == "no-aliases") {
    PrintAliases = false;
    return true;
  }
  return false;
}
void AArch64InstPrinter::printRegName(raw_ostream &OS, MCRegister Reg) {
  markup(OS, Markup::Register) << getRegisterName(Reg);
}
void AArch64InstPrinter::printRegName(raw_ostream &OS, MCRegister Reg,
                                      unsigned AltIdx) {
  markup(OS, Markup::Register) << getRegisterName(Reg, AltIdx);
}
StringRef AArch64InstPrinter::getRegName(MCRegister Reg) const {
  return getRegisterName(Reg);
}
void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
                                   StringRef Annot, const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  // Check for special encodings and print the canonical alias instead.

  unsigned Opcode = MI->getOpcode();

  if (Opcode == AArch64::SYSxt)
    if (printSysAlias(MI, STI, O)) {
      printAnnotation(O, Annot);
      return;
    }
  if (Opcode == AArch64::SYSPxt || Opcode == AArch64::SYSPxt_XZR)
    if (printSyspAlias(MI, STI, O)) {
      printAnnotation(O, Annot);
      return;
    }
  // RPRFM overlaps PRFM (reg), so try to print it as RPRFM here.
  if ((Opcode == AArch64::PRFMroX) || (Opcode == AArch64::PRFMroW)) {
    if (printRangePrefetchAlias(MI, STI, O, Annot))
      return;
  }
  // SBFM/UBFM should print to a nicer aliased form if possible.
  if (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri ||
      Opcode == AArch64::UBFMXri || Opcode == AArch64::UBFMWri) {
    const MCOperand &Op0 = MI->getOperand(0);
    const MCOperand &Op1 = MI->getOperand(1);
    const MCOperand &Op2 = MI->getOperand(2);
    const MCOperand &Op3 = MI->getOperand(3);

    bool IsSigned = (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri);
    bool Is64Bit = (Opcode == AArch64::SBFMXri || Opcode == AArch64::UBFMXri);
    if (Op2.isImm() && Op2.getImm() == 0 && Op3.isImm()) {
      const char *AsmMnemonic = nullptr;

      switch (Op3.getImm()) {
      default:
        break;
      case 7:
        if (IsSigned)
          AsmMnemonic = "sxtb";
        else if (!Is64Bit)
          AsmMnemonic = "uxtb";
        break;
      case 15:
        if (IsSigned)
          AsmMnemonic = "sxth";
        else if (!Is64Bit)
          AsmMnemonic = "uxth";
        break;
      case 31:
        // *xtw is only valid for signed 64-bit operations.
        if (Is64Bit && IsSigned)
          AsmMnemonic = "sxtw";
        break;
      }

      if (AsmMnemonic) {
        O << '\t' << AsmMnemonic << '\t';
        printRegName(O, Op0.getReg());
        O << ", ";
        printRegName(O, getWRegFromXReg(Op1.getReg()));
        printAnnotation(O, Annot);
        return;
      }
    }
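    // For illustration (not part of the upstream comments): with the aliasing
    // above, the encoding SBFMXri x0, x1, #0, #7 is printed as "sxtb x0, w1";
    // the destination keeps its X form while the source is narrowed to a W
    // register via getWRegFromXReg().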
    // All immediate shifts are aliases, implemented using the Bitfield
    // instruction. In all cases the immediate shift amount shift must be in
    // the range 0 to (reg.size -1).
    if (Op2.isImm() && Op3.isImm()) {
      const char *AsmMnemonic = nullptr;
      int shift = 0;
      int64_t immr = Op2.getImm();
      int64_t imms = Op3.getImm();
      if (Opcode == AArch64::UBFMWri && imms != 0x1F && ((imms + 1) == immr)) {
        AsmMnemonic = "lsl";
        shift = 31 - imms;
      } else if (Opcode == AArch64::UBFMXri && imms != 0x3f &&
                 ((imms + 1 == immr))) {
        AsmMnemonic = "lsl";
        shift = 63 - imms;
      } else if (Opcode == AArch64::UBFMWri && imms == 0x1f) {
        AsmMnemonic = "lsr";
        shift = immr;
      } else if (Opcode == AArch64::UBFMXri && imms == 0x3f) {
        AsmMnemonic = "lsr";
        shift = immr;
      } else if (Opcode == AArch64::SBFMWri && imms == 0x1f) {
        AsmMnemonic = "asr";
        shift = immr;
      } else if (Opcode == AArch64::SBFMXri && imms == 0x3f) {
        AsmMnemonic = "asr";
        shift = immr;
      }

      if (AsmMnemonic) {
        O << '\t' << AsmMnemonic << '\t';
        printRegName(O, Op0.getReg());
        O << ", ";
        printRegName(O, Op1.getReg());
        O << ", ";
        markup(O, Markup::Immediate) << "#" << shift;
        printAnnotation(O, Annot);
        return;
      }
    }
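    // For illustration (not part of the upstream comments): UBFMXri x0, x1,
    // #61, #60 satisfies the first branch above (imms + 1 == immr) and prints
    // as "lsl x0, x1, #3" (shift = 63 - imms), while UBFMXri x0, x1, #4, #63
    // takes the imms == 0x3f branch and prints as "lsr x0, x1, #4".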
    // SBFIZ/UBFIZ aliases
    if (Op2.getImm() > Op3.getImm()) {
      O << '\t' << (IsSigned ? "sbfiz" : "ubfiz") << '\t';
      printRegName(O, Op0.getReg());
      O << ", ";
      printRegName(O, Op1.getReg());
      O << ", ";
      markup(O, Markup::Immediate) << "#" << (Is64Bit ? 64 : 32) - Op2.getImm();
      O << ", ";
      markup(O, Markup::Immediate) << "#" << Op3.getImm() + 1;
      printAnnotation(O, Annot);
      return;
    }

    // Otherwise SBFX/UBFX is the preferred form
    O << '\t' << (IsSigned ? "sbfx" : "ubfx") << '\t';
    printRegName(O, Op0.getReg());
    O << ", ";
    printRegName(O, Op1.getReg());
    O << ", ";
    markup(O, Markup::Immediate) << "#" << Op2.getImm();
    O << ", ";
    markup(O, Markup::Immediate) << "#" << Op3.getImm() - Op2.getImm() + 1;
    printAnnotation(O, Annot);
    return;
  }
  if (Opcode == AArch64::BFMXri || Opcode == AArch64::BFMWri) {
    const MCOperand &Op0 = MI->getOperand(0); // Op1 == Op0
    const MCOperand &Op2 = MI->getOperand(2);
    int ImmR = MI->getOperand(3).getImm();
    int ImmS = MI->getOperand(4).getImm();

    if ((Op2.getReg() == AArch64::WZR || Op2.getReg() == AArch64::XZR) &&
        (ImmR == 0 || ImmS < ImmR) && STI.hasFeature(AArch64::HasV8_2aOps)) {
      // BFC takes precedence over its entire range, slightly differently to
      // BFI.
      int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
      int LSB = (BitWidth - ImmR) % BitWidth;
      int Width = ImmS + 1;

      O << "\tbfc\t";
      printRegName(O, Op0.getReg());
      O << ", ";
      markup(O, Markup::Immediate) << "#" << LSB;
      O << ", ";
      markup(O, Markup::Immediate) << "#" << Width;
      printAnnotation(O, Annot);
      return;
    } else if (ImmS < ImmR) {
      // BFI alias
      int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
      int LSB = (BitWidth - ImmR) % BitWidth;
      int Width = ImmS + 1;

      O << "\tbfi\t";
      printRegName(O, Op0.getReg());
      O << ", ";
      printRegName(O, Op2.getReg());
      O << ", ";
      markup(O, Markup::Immediate) << "#" << LSB;
      O << ", ";
      markup(O, Markup::Immediate) << "#" << Width;
      printAnnotation(O, Annot);
      return;
    }

    int LSB = ImmR;
    int Width = ImmS - ImmR + 1;
    // Otherwise BFXIL the preferred form
    O << "\tbfxil\t";
    printRegName(O, Op0.getReg());
    O << ", ";
    printRegName(O, Op2.getReg());
    O << ", ";
    markup(O, Markup::Immediate) << "#" << LSB;
    O << ", ";
    markup(O, Markup::Immediate) << "#" << Width;
    printAnnotation(O, Annot);
    return;
  }
  // Symbolic operands for MOVZ, MOVN and MOVK already imply a shift
  // (e.g. :gottprel_g1: is always going to be "lsl #16") so it should not be
  // printed.
  if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi ||
       Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
      MI->getOperand(1).isExpr()) {
    if (Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi)
      O << "\tmovz\t";
    else
      O << "\tmovn\t";

    printRegName(O, MI->getOperand(0).getReg());
    O << ", ";
    {
      WithMarkup M = markup(O, Markup::Immediate);
      O << "#";
      MI->getOperand(1).getExpr()->print(O, &MAI);
    }
    return;
  }

  if ((Opcode == AArch64::MOVKXi || Opcode == AArch64::MOVKWi) &&
      MI->getOperand(2).isExpr()) {
    O << "\tmovk\t";
    printRegName(O, MI->getOperand(0).getReg());
    O << ", ";
    {
      WithMarkup M = markup(O, Markup::Immediate);
      O << "#";
      MI->getOperand(2).getExpr()->print(O, &MAI);
    }
    return;
  }
  auto PrintMovImm = [&](uint64_t Value, int RegWidth) {
    int64_t SExtVal = SignExtend64(Value, RegWidth);
    O << "\tmov\t";
    printRegName(O, MI->getOperand(0).getReg());
    O << ", ";
    markup(O, Markup::Immediate) << "#" << formatImm(SExtVal);
    if (CommentStream) {
      // Do the opposite to that used for instruction operands.
      if (getPrintImmHex())
        *CommentStream << '=' << formatDec(SExtVal) << '\n';
      else {
        uint64_t Mask = maskTrailingOnes<uint64_t>(RegWidth);
        *CommentStream << '=' << formatHex(SExtVal & Mask) << '\n';
      }
    }
  };
  // MOVZ, MOVN and "ORR wzr, #imm" instructions are aliases for MOV, but their
  // domains overlap so they need to be prioritized. The chain is "MOVZ lsl #0 >
  // MOVZ lsl #N > MOVN lsl #0 > MOVN lsl #N > ORR". The highest instruction
  // that can represent the move is the MOV alias, and the rest get printed
  // normally.
  if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi) &&
      MI->getOperand(1).isImm() && MI->getOperand(2).isImm()) {
    int RegWidth = Opcode == AArch64::MOVZXi ? 64 : 32;
    int Shift = MI->getOperand(2).getImm();
    uint64_t Value = (uint64_t)MI->getOperand(1).getImm() << Shift;

    if (AArch64_AM::isMOVZMovAlias(Value, Shift,
                                   Opcode == AArch64::MOVZXi ? 64 : 32)) {
      PrintMovImm(Value, RegWidth);
      return;
    }
  }
  if ((Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
      MI->getOperand(1).isImm() && MI->getOperand(2).isImm()) {
    int RegWidth = Opcode == AArch64::MOVNXi ? 64 : 32;
    int Shift = MI->getOperand(2).getImm();
    uint64_t Value = ~((uint64_t)MI->getOperand(1).getImm() << Shift);
    if (RegWidth == 32)
      Value = Value & 0xffffffff;

    if (AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth)) {
      PrintMovImm(Value, RegWidth);
      return;
    }
  }
  if ((Opcode == AArch64::ORRXri || Opcode == AArch64::ORRWri) &&
      (MI->getOperand(1).getReg() == AArch64::XZR ||
       MI->getOperand(1).getReg() == AArch64::WZR) &&
      MI->getOperand(2).isImm()) {
    int RegWidth = Opcode == AArch64::ORRXri ? 64 : 32;
    uint64_t Value = AArch64_AM::decodeLogicalImmediate(
        MI->getOperand(2).getImm(), RegWidth);
    if (!AArch64_AM::isAnyMOVWMovAlias(Value, RegWidth)) {
      PrintMovImm(Value, RegWidth);
      return;
    }
  }
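  // For illustration (not part of the upstream comments): "movz w0, #42" is
  // the highest-priority form in the chain described above and prints as
  // "mov w0, #42"; an ORR immediate only prints as "mov" when no MOVZ/MOVN
  // form could encode the same value (the !isAnyMOVWMovAlias check above).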
  if (Opcode == AArch64::SPACE) {
    O << '\t' << MAI.getCommentString() << " SPACE "
      << MI->getOperand(1).getImm();
    printAnnotation(O, Annot);
    return;
  }

  // Instruction TSB is specified as a one operand instruction, but 'csync' is
  // not encoded, so for printing it is treated as a special case here:
  if (Opcode == AArch64::TSB) {
    O << "\ttsb\tcsync";
    return;
  }
  if (!PrintAliases || !printAliasInstr(MI, Address, STI, O))
    printInstruction(MI, Address, STI, O);

  printAnnotation(O, Annot);

  if (atomicBarrierDroppedOnZero(Opcode) &&
      (MI->getOperand(0).getReg() == AArch64::XZR ||
       MI->getOperand(0).getReg() == AArch64::WZR)) {
    printAnnotation(O, "acquire semantics dropped since destination is zero");
  }
}
static bool isTblTbxInstruction(unsigned Opcode, StringRef &Layout,
                                bool &IsTbx) {
  switch (Opcode) {
  case AArch64::TBXv8i8One:
  case AArch64::TBXv8i8Two:
  case AArch64::TBXv8i8Three:
  case AArch64::TBXv8i8Four:
    IsTbx = true;
    Layout = ".8b";
    return true;
  case AArch64::TBLv8i8One:
  case AArch64::TBLv8i8Two:
  case AArch64::TBLv8i8Three:
  case AArch64::TBLv8i8Four:
    IsTbx = false;
    Layout = ".8b";
    return true;
  case AArch64::TBXv16i8One:
  case AArch64::TBXv16i8Two:
  case AArch64::TBXv16i8Three:
  case AArch64::TBXv16i8Four:
    IsTbx = true;
    Layout = ".16b";
    return true;
  case AArch64::TBLv16i8One:
  case AArch64::TBLv16i8Two:
  case AArch64::TBLv16i8Three:
  case AArch64::TBLv16i8Four:
    IsTbx = false;
    Layout = ".16b";
    return true;
  default:
    return false;
  }
}
struct LdStNInstrDesc {
  unsigned Opcode;
  const char *Mnemonic;
  const char *Layout;
  int ListOperand;
  bool HasLane;
  int NaturalOffset;
};
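// Each entry in the table below maps an LD1..LD4/ST1..ST4 opcode to the
// mnemonic and layout suffix to print, the index of the vector-list operand,
// whether a lane index follows the list, and the natural post-increment
// offset in bytes (0 for the non-post-indexed forms).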
static const LdStNInstrDesc LdStNInstInfo[] = {
  { AArch64::LD1i8, "ld1", ".b", 1, true, 0 },
  { AArch64::LD1i16, "ld1", ".h", 1, true, 0 },
  { AArch64::LD1i32, "ld1", ".s", 1, true, 0 },
  { AArch64::LD1i64, "ld1", ".d", 1, true, 0 },
  { AArch64::LD1i8_POST, "ld1", ".b", 2, true, 1 },
  { AArch64::LD1i16_POST, "ld1", ".h", 2, true, 2 },
  { AArch64::LD1i32_POST, "ld1", ".s", 2, true, 4 },
  { AArch64::LD1i64_POST, "ld1", ".d", 2, true, 8 },
  { AArch64::LD1Rv16b, "ld1r", ".16b", 0, false, 0 },
  { AArch64::LD1Rv8h, "ld1r", ".8h", 0, false, 0 },
  { AArch64::LD1Rv4s, "ld1r", ".4s", 0, false, 0 },
  { AArch64::LD1Rv2d, "ld1r", ".2d", 0, false, 0 },
  { AArch64::LD1Rv8b, "ld1r", ".8b", 0, false, 0 },
  { AArch64::LD1Rv4h, "ld1r", ".4h", 0, false, 0 },
  { AArch64::LD1Rv2s, "ld1r", ".2s", 0, false, 0 },
  { AArch64::LD1Rv1d, "ld1r", ".1d", 0, false, 0 },
  { AArch64::LD1Rv16b_POST, "ld1r", ".16b", 1, false, 1 },
  { AArch64::LD1Rv8h_POST, "ld1r", ".8h", 1, false, 2 },
  { AArch64::LD1Rv4s_POST, "ld1r", ".4s", 1, false, 4 },
  { AArch64::LD1Rv2d_POST, "ld1r", ".2d", 1, false, 8 },
  { AArch64::LD1Rv8b_POST, "ld1r", ".8b", 1, false, 1 },
  { AArch64::LD1Rv4h_POST, "ld1r", ".4h", 1, false, 2 },
  { AArch64::LD1Rv2s_POST, "ld1r", ".2s", 1, false, 4 },
  { AArch64::LD1Rv1d_POST, "ld1r", ".1d", 1, false, 8 },
  { AArch64::LD1Onev16b, "ld1", ".16b", 0, false, 0 },
  { AArch64::LD1Onev8h, "ld1", ".8h", 0, false, 0 },
  { AArch64::LD1Onev4s, "ld1", ".4s", 0, false, 0 },
  { AArch64::LD1Onev2d, "ld1", ".2d", 0, false, 0 },
  { AArch64::LD1Onev8b, "ld1", ".8b", 0, false, 0 },
  { AArch64::LD1Onev4h, "ld1", ".4h", 0, false, 0 },
  { AArch64::LD1Onev2s, "ld1", ".2s", 0, false, 0 },
  { AArch64::LD1Onev1d, "ld1", ".1d", 0, false, 0 },
  { AArch64::LD1Onev16b_POST, "ld1", ".16b", 1, false, 16 },
  { AArch64::LD1Onev8h_POST, "ld1", ".8h", 1, false, 16 },
  { AArch64::LD1Onev4s_POST, "ld1", ".4s", 1, false, 16 },
  { AArch64::LD1Onev2d_POST, "ld1", ".2d", 1, false, 16 },
  { AArch64::LD1Onev8b_POST, "ld1", ".8b", 1, false, 8 },
  { AArch64::LD1Onev4h_POST, "ld1", ".4h", 1, false, 8 },
  { AArch64::LD1Onev2s_POST, "ld1", ".2s", 1, false, 8 },
  { AArch64::LD1Onev1d_POST, "ld1", ".1d", 1, false, 8 },
  { AArch64::LD1Twov16b, "ld1", ".16b", 0, false, 0 },
  { AArch64::LD1Twov8h, "ld1", ".8h", 0, false, 0 },
  { AArch64::LD1Twov4s, "ld1", ".4s", 0, false, 0 },
  { AArch64::LD1Twov2d, "ld1", ".2d", 0, false, 0 },
  { AArch64::LD1Twov8b, "ld1", ".8b", 0, false, 0 },
  { AArch64::LD1Twov4h, "ld1", ".4h", 0, false, 0 },
  { AArch64::LD1Twov2s, "ld1", ".2s", 0, false, 0 },
  { AArch64::LD1Twov1d, "ld1", ".1d", 0, false, 0 },
  { AArch64::LD1Twov16b_POST, "ld1", ".16b", 1, false, 32 },
  { AArch64::LD1Twov8h_POST, "ld1", ".8h", 1, false, 32 },
  { AArch64::LD1Twov4s_POST, "ld1", ".4s", 1, false, 32 },
  { AArch64::LD1Twov2d_POST, "ld1", ".2d", 1, false, 32 },
  { AArch64::LD1Twov8b_POST, "ld1", ".8b", 1, false, 16 },
  { AArch64::LD1Twov4h_POST, "ld1", ".4h", 1, false, 16 },
  { AArch64::LD1Twov2s_POST, "ld1", ".2s", 1, false, 16 },
  { AArch64::LD1Twov1d_POST, "ld1", ".1d", 1, false, 16 },
  { AArch64::LD1Threev16b, "ld1", ".16b", 0, false, 0 },
  { AArch64::LD1Threev8h, "ld1", ".8h", 0, false, 0 },
  { AArch64::LD1Threev4s, "ld1", ".4s", 0, false, 0 },
  { AArch64::LD1Threev2d, "ld1", ".2d", 0, false, 0 },
  { AArch64::LD1Threev8b, "ld1", ".8b", 0, false, 0 },
  { AArch64::LD1Threev4h, "ld1", ".4h", 0, false, 0 },
  { AArch64::LD1Threev2s, "ld1", ".2s", 0, false, 0 },
  { AArch64::LD1Threev1d, "ld1", ".1d", 0, false, 0 },
  { AArch64::LD1Threev16b_POST, "ld1", ".16b", 1, false, 48 },
  { AArch64::LD1Threev8h_POST, "ld1", ".8h", 1, false, 48 },
  { AArch64::LD1Threev4s_POST, "ld1", ".4s", 1, false, 48 },
  { AArch64::LD1Threev2d_POST, "ld1", ".2d", 1, false, 48 },
  { AArch64::LD1Threev8b_POST, "ld1", ".8b", 1, false, 24 },
  { AArch64::LD1Threev4h_POST, "ld1", ".4h", 1, false, 24 },
  { AArch64::LD1Threev2s_POST, "ld1", ".2s", 1, false, 24 },
  { AArch64::LD1Threev1d_POST, "ld1", ".1d", 1, false, 24 },
  { AArch64::LD1Fourv16b, "ld1", ".16b", 0, false, 0 },
  { AArch64::LD1Fourv8h, "ld1", ".8h", 0, false, 0 },
  { AArch64::LD1Fourv4s, "ld1", ".4s", 0, false, 0 },
  { AArch64::LD1Fourv2d, "ld1", ".2d", 0, false, 0 },
  { AArch64::LD1Fourv8b, "ld1", ".8b", 0, false, 0 },
  { AArch64::LD1Fourv4h, "ld1", ".4h", 0, false, 0 },
  { AArch64::LD1Fourv2s, "ld1", ".2s", 0, false, 0 },
  { AArch64::LD1Fourv1d, "ld1", ".1d", 0, false, 0 },
  { AArch64::LD1Fourv16b_POST, "ld1", ".16b", 1, false, 64 },
  { AArch64::LD1Fourv8h_POST, "ld1", ".8h", 1, false, 64 },
  { AArch64::LD1Fourv4s_POST, "ld1", ".4s", 1, false, 64 },
  { AArch64::LD1Fourv2d_POST, "ld1", ".2d", 1, false, 64 },
  { AArch64::LD1Fourv8b_POST, "ld1", ".8b", 1, false, 32 },
  { AArch64::LD1Fourv4h_POST, "ld1", ".4h", 1, false, 32 },
  { AArch64::LD1Fourv2s_POST, "ld1", ".2s", 1, false, 32 },
  { AArch64::LD1Fourv1d_POST, "ld1", ".1d", 1, false, 32 },
  { AArch64::LD2i8, "ld2", ".b", 1, true, 0 },
  { AArch64::LD2i16, "ld2", ".h", 1, true, 0 },
  { AArch64::LD2i32, "ld2", ".s", 1, true, 0 },
  { AArch64::LD2i64, "ld2", ".d", 1, true, 0 },
  { AArch64::LD2i8_POST, "ld2", ".b", 2, true, 2 },
  { AArch64::LD2i16_POST, "ld2", ".h", 2, true, 4 },
  { AArch64::LD2i32_POST, "ld2", ".s", 2, true, 8 },
  { AArch64::LD2i64_POST, "ld2", ".d", 2, true, 16 },
  { AArch64::LD2Rv16b, "ld2r", ".16b", 0, false, 0 },
  { AArch64::LD2Rv8h, "ld2r", ".8h", 0, false, 0 },
  { AArch64::LD2Rv4s, "ld2r", ".4s", 0, false, 0 },
  { AArch64::LD2Rv2d, "ld2r", ".2d", 0, false, 0 },
  { AArch64::LD2Rv8b, "ld2r", ".8b", 0, false, 0 },
  { AArch64::LD2Rv4h, "ld2r", ".4h", 0, false, 0 },
  { AArch64::LD2Rv2s, "ld2r", ".2s", 0, false, 0 },
  { AArch64::LD2Rv1d, "ld2r", ".1d", 0, false, 0 },
  { AArch64::LD2Rv16b_POST, "ld2r", ".16b", 1, false, 2 },
  { AArch64::LD2Rv8h_POST, "ld2r", ".8h", 1, false, 4 },
  { AArch64::LD2Rv4s_POST, "ld2r", ".4s", 1, false, 8 },
  { AArch64::LD2Rv2d_POST, "ld2r", ".2d", 1, false, 16 },
  { AArch64::LD2Rv8b_POST, "ld2r", ".8b", 1, false, 2 },
  { AArch64::LD2Rv4h_POST, "ld2r", ".4h", 1, false, 4 },
  { AArch64::LD2Rv2s_POST, "ld2r", ".2s", 1, false, 8 },
  { AArch64::LD2Rv1d_POST, "ld2r", ".1d", 1, false, 16 },
  { AArch64::LD2Twov16b, "ld2", ".16b", 0, false, 0 },
  { AArch64::LD2Twov8h, "ld2", ".8h", 0, false, 0 },
  { AArch64::LD2Twov4s, "ld2", ".4s", 0, false, 0 },
  { AArch64::LD2Twov2d, "ld2", ".2d", 0, false, 0 },
  { AArch64::LD2Twov8b, "ld2", ".8b", 0, false, 0 },
  { AArch64::LD2Twov4h, "ld2", ".4h", 0, false, 0 },
  { AArch64::LD2Twov2s, "ld2", ".2s", 0, false, 0 },
  { AArch64::LD2Twov16b_POST, "ld2", ".16b", 1, false, 32 },
  { AArch64::LD2Twov8h_POST, "ld2", ".8h", 1, false, 32 },
  { AArch64::LD2Twov4s_POST, "ld2", ".4s", 1, false, 32 },
  { AArch64::LD2Twov2d_POST, "ld2", ".2d", 1, false, 32 },
  { AArch64::LD2Twov8b_POST, "ld2", ".8b", 1, false, 16 },
  { AArch64::LD2Twov4h_POST, "ld2", ".4h", 1, false, 16 },
  { AArch64::LD2Twov2s_POST, "ld2", ".2s", 1, false, 16 },
  { AArch64::LD3i8, "ld3", ".b", 1, true, 0 },
  { AArch64::LD3i16, "ld3", ".h", 1, true, 0 },
  { AArch64::LD3i32, "ld3", ".s", 1, true, 0 },
  { AArch64::LD3i64, "ld3", ".d", 1, true, 0 },
  { AArch64::LD3i8_POST, "ld3", ".b", 2, true, 3 },
  { AArch64::LD3i16_POST, "ld3", ".h", 2, true, 6 },
  { AArch64::LD3i32_POST, "ld3", ".s", 2, true, 12 },
  { AArch64::LD3i64_POST, "ld3", ".d", 2, true, 24 },
  { AArch64::LD3Rv16b, "ld3r", ".16b", 0, false, 0 },
  { AArch64::LD3Rv8h, "ld3r", ".8h", 0, false, 0 },
  { AArch64::LD3Rv4s, "ld3r", ".4s", 0, false, 0 },
  { AArch64::LD3Rv2d, "ld3r", ".2d", 0, false, 0 },
  { AArch64::LD3Rv8b, "ld3r", ".8b", 0, false, 0 },
  { AArch64::LD3Rv4h, "ld3r", ".4h", 0, false, 0 },
  { AArch64::LD3Rv2s, "ld3r", ".2s", 0, false, 0 },
  { AArch64::LD3Rv1d, "ld3r", ".1d", 0, false, 0 },
  { AArch64::LD3Rv16b_POST, "ld3r", ".16b", 1, false, 3 },
  { AArch64::LD3Rv8h_POST, "ld3r", ".8h", 1, false, 6 },
  { AArch64::LD3Rv4s_POST, "ld3r", ".4s", 1, false, 12 },
  { AArch64::LD3Rv2d_POST, "ld3r", ".2d", 1, false, 24 },
  { AArch64::LD3Rv8b_POST, "ld3r", ".8b", 1, false, 3 },
  { AArch64::LD3Rv4h_POST, "ld3r", ".4h", 1, false, 6 },
  { AArch64::LD3Rv2s_POST, "ld3r", ".2s", 1, false, 12 },
  { AArch64::LD3Rv1d_POST, "ld3r", ".1d", 1, false, 24 },
  { AArch64::LD3Threev16b, "ld3", ".16b", 0, false, 0 },
  { AArch64::LD3Threev8h, "ld3", ".8h", 0, false, 0 },
  { AArch64::LD3Threev4s, "ld3", ".4s", 0, false, 0 },
  { AArch64::LD3Threev2d, "ld3", ".2d", 0, false, 0 },
  { AArch64::LD3Threev8b, "ld3", ".8b", 0, false, 0 },
  { AArch64::LD3Threev4h, "ld3", ".4h", 0, false, 0 },
  { AArch64::LD3Threev2s, "ld3", ".2s", 0, false, 0 },
  { AArch64::LD3Threev16b_POST, "ld3", ".16b", 1, false, 48 },
  { AArch64::LD3Threev8h_POST, "ld3", ".8h", 1, false, 48 },
  { AArch64::LD3Threev4s_POST, "ld3", ".4s", 1, false, 48 },
  { AArch64::LD3Threev2d_POST, "ld3", ".2d", 1, false, 48 },
  { AArch64::LD3Threev8b_POST, "ld3", ".8b", 1, false, 24 },
  { AArch64::LD3Threev4h_POST, "ld3", ".4h", 1, false, 24 },
  { AArch64::LD3Threev2s_POST, "ld3", ".2s", 1, false, 24 },
  { AArch64::LD4i8, "ld4", ".b", 1, true, 0 },
  { AArch64::LD4i16, "ld4", ".h", 1, true, 0 },
  { AArch64::LD4i32, "ld4", ".s", 1, true, 0 },
  { AArch64::LD4i64, "ld4", ".d", 1, true, 0 },
  { AArch64::LD4i8_POST, "ld4", ".b", 2, true, 4 },
  { AArch64::LD4i16_POST, "ld4", ".h", 2, true, 8 },
  { AArch64::LD4i32_POST, "ld4", ".s", 2, true, 16 },
  { AArch64::LD4i64_POST, "ld4", ".d", 2, true, 32 },
  { AArch64::LD4Rv16b, "ld4r", ".16b", 0, false, 0 },
  { AArch64::LD4Rv8h, "ld4r", ".8h", 0, false, 0 },
  { AArch64::LD4Rv4s, "ld4r", ".4s", 0, false, 0 },
  { AArch64::LD4Rv2d, "ld4r", ".2d", 0, false, 0 },
  { AArch64::LD4Rv8b, "ld4r", ".8b", 0, false, 0 },
  { AArch64::LD4Rv4h, "ld4r", ".4h", 0, false, 0 },
  { AArch64::LD4Rv2s, "ld4r", ".2s", 0, false, 0 },
  { AArch64::LD4Rv1d, "ld4r", ".1d", 0, false, 0 },
  { AArch64::LD4Rv16b_POST, "ld4r", ".16b", 1, false, 4 },
  { AArch64::LD4Rv8h_POST, "ld4r", ".8h", 1, false, 8 },
  { AArch64::LD4Rv4s_POST, "ld4r", ".4s", 1, false, 16 },
  { AArch64::LD4Rv2d_POST, "ld4r", ".2d", 1, false, 32 },
  { AArch64::LD4Rv8b_POST, "ld4r", ".8b", 1, false, 4 },
  { AArch64::LD4Rv4h_POST, "ld4r", ".4h", 1, false, 8 },
  { AArch64::LD4Rv2s_POST, "ld4r", ".2s", 1, false, 16 },
  { AArch64::LD4Rv1d_POST, "ld4r", ".1d", 1, false, 32 },
  { AArch64::LD4Fourv16b, "ld4", ".16b", 0, false, 0 },
  { AArch64::LD4Fourv8h, "ld4", ".8h", 0, false, 0 },
  { AArch64::LD4Fourv4s, "ld4", ".4s", 0, false, 0 },
  { AArch64::LD4Fourv2d, "ld4", ".2d", 0, false, 0 },
  { AArch64::LD4Fourv8b, "ld4", ".8b", 0, false, 0 },
  { AArch64::LD4Fourv4h, "ld4", ".4h", 0, false, 0 },
  { AArch64::LD4Fourv2s, "ld4", ".2s", 0, false, 0 },
  { AArch64::LD4Fourv16b_POST, "ld4", ".16b", 1, false, 64 },
  { AArch64::LD4Fourv8h_POST, "ld4", ".8h", 1, false, 64 },
  { AArch64::LD4Fourv4s_POST, "ld4", ".4s", 1, false, 64 },
  { AArch64::LD4Fourv2d_POST, "ld4", ".2d", 1, false, 64 },
  { AArch64::LD4Fourv8b_POST, "ld4", ".8b", 1, false, 32 },
  { AArch64::LD4Fourv4h_POST, "ld4", ".4h", 1, false, 32 },
  { AArch64::LD4Fourv2s_POST, "ld4", ".2s", 1, false, 32 },
  { AArch64::ST1i8, "st1", ".b", 0, true, 0 },
  { AArch64::ST1i16, "st1", ".h", 0, true, 0 },
  { AArch64::ST1i32, "st1", ".s", 0, true, 0 },
  { AArch64::ST1i64, "st1", ".d", 0, true, 0 },
  { AArch64::ST1i8_POST, "st1", ".b", 1, true, 1 },
  { AArch64::ST1i16_POST, "st1", ".h", 1, true, 2 },
  { AArch64::ST1i32_POST, "st1", ".s", 1, true, 4 },
  { AArch64::ST1i64_POST, "st1", ".d", 1, true, 8 },
  { AArch64::ST1Onev16b, "st1", ".16b", 0, false, 0 },
  { AArch64::ST1Onev8h, "st1", ".8h", 0, false, 0 },
  { AArch64::ST1Onev4s, "st1", ".4s", 0, false, 0 },
  { AArch64::ST1Onev2d, "st1", ".2d", 0, false, 0 },
  { AArch64::ST1Onev8b, "st1", ".8b", 0, false, 0 },
  { AArch64::ST1Onev4h, "st1", ".4h", 0, false, 0 },
  { AArch64::ST1Onev2s, "st1", ".2s", 0, false, 0 },
  { AArch64::ST1Onev1d, "st1", ".1d", 0, false, 0 },
  { AArch64::ST1Onev16b_POST, "st1", ".16b", 1, false, 16 },
  { AArch64::ST1Onev8h_POST, "st1", ".8h", 1, false, 16 },
  { AArch64::ST1Onev4s_POST, "st1", ".4s", 1, false, 16 },
  { AArch64::ST1Onev2d_POST, "st1", ".2d", 1, false, 16 },
  { AArch64::ST1Onev8b_POST, "st1", ".8b", 1, false, 8 },
  { AArch64::ST1Onev4h_POST, "st1", ".4h", 1, false, 8 },
  { AArch64::ST1Onev2s_POST, "st1", ".2s", 1, false, 8 },
  { AArch64::ST1Onev1d_POST, "st1", ".1d", 1, false, 8 },
  { AArch64::ST1Twov16b, "st1", ".16b", 0, false, 0 },
  { AArch64::ST1Twov8h, "st1", ".8h", 0, false, 0 },
  { AArch64::ST1Twov4s, "st1", ".4s", 0, false, 0 },
  { AArch64::ST1Twov2d, "st1", ".2d", 0, false, 0 },
  { AArch64::ST1Twov8b, "st1", ".8b", 0, false, 0 },
  { AArch64::ST1Twov4h, "st1", ".4h", 0, false, 0 },
  { AArch64::ST1Twov2s, "st1", ".2s", 0, false, 0 },
  { AArch64::ST1Twov1d, "st1", ".1d", 0, false, 0 },
  { AArch64::ST1Twov16b_POST, "st1", ".16b", 1, false, 32 },
  { AArch64::ST1Twov8h_POST, "st1", ".8h", 1, false, 32 },
  { AArch64::ST1Twov4s_POST, "st1", ".4s", 1, false, 32 },
  { AArch64::ST1Twov2d_POST, "st1", ".2d", 1, false, 32 },
  { AArch64::ST1Twov8b_POST, "st1", ".8b", 1, false, 16 },
  { AArch64::ST1Twov4h_POST, "st1", ".4h", 1, false, 16 },
  { AArch64::ST1Twov2s_POST, "st1", ".2s", 1, false, 16 },
  { AArch64::ST1Twov1d_POST, "st1", ".1d", 1, false, 16 },
  { AArch64::ST1Threev16b, "st1", ".16b", 0, false, 0 },
  { AArch64::ST1Threev8h, "st1", ".8h", 0, false, 0 },
  { AArch64::ST1Threev4s, "st1", ".4s", 0, false, 0 },
  { AArch64::ST1Threev2d, "st1", ".2d", 0, false, 0 },
  { AArch64::ST1Threev8b, "st1", ".8b", 0, false, 0 },
  { AArch64::ST1Threev4h, "st1", ".4h", 0, false, 0 },
  { AArch64::ST1Threev2s, "st1", ".2s", 0, false, 0 },
  { AArch64::ST1Threev1d, "st1", ".1d", 0, false, 0 },
  { AArch64::ST1Threev16b_POST, "st1", ".16b", 1, false, 48 },
  { AArch64::ST1Threev8h_POST, "st1", ".8h", 1, false, 48 },
  { AArch64::ST1Threev4s_POST, "st1", ".4s", 1, false, 48 },
  { AArch64::ST1Threev2d_POST, "st1", ".2d", 1, false, 48 },
  { AArch64::ST1Threev8b_POST, "st1", ".8b", 1, false, 24 },
  { AArch64::ST1Threev4h_POST, "st1", ".4h", 1, false, 24 },
  { AArch64::ST1Threev2s_POST, "st1", ".2s", 1, false, 24 },
  { AArch64::ST1Threev1d_POST, "st1", ".1d", 1, false, 24 },
  { AArch64::ST1Fourv16b, "st1", ".16b", 0, false, 0 },
  { AArch64::ST1Fourv8h, "st1", ".8h", 0, false, 0 },
  { AArch64::ST1Fourv4s, "st1", ".4s", 0, false, 0 },
  { AArch64::ST1Fourv2d, "st1", ".2d", 0, false, 0 },
  { AArch64::ST1Fourv8b, "st1", ".8b", 0, false, 0 },
  { AArch64::ST1Fourv4h, "st1", ".4h", 0, false, 0 },
  { AArch64::ST1Fourv2s, "st1", ".2s", 0, false, 0 },
  { AArch64::ST1Fourv1d, "st1", ".1d", 0, false, 0 },
  { AArch64::ST1Fourv16b_POST, "st1", ".16b", 1, false, 64 },
  { AArch64::ST1Fourv8h_POST, "st1", ".8h", 1, false, 64 },
  { AArch64::ST1Fourv4s_POST, "st1", ".4s", 1, false, 64 },
  { AArch64::ST1Fourv2d_POST, "st1", ".2d", 1, false, 64 },
  { AArch64::ST1Fourv8b_POST, "st1", ".8b", 1, false, 32 },
  { AArch64::ST1Fourv4h_POST, "st1", ".4h", 1, false, 32 },
  { AArch64::ST1Fourv2s_POST, "st1", ".2s", 1, false, 32 },
  { AArch64::ST1Fourv1d_POST, "st1", ".1d", 1, false, 32 },
  { AArch64::ST2i8, "st2", ".b", 0, true, 0 },
  { AArch64::ST2i16, "st2", ".h", 0, true, 0 },
  { AArch64::ST2i32, "st2", ".s", 0, true, 0 },
  { AArch64::ST2i64, "st2", ".d", 0, true, 0 },
  { AArch64::ST2i8_POST, "st2", ".b", 1, true, 2 },
  { AArch64::ST2i16_POST, "st2", ".h", 1, true, 4 },
  { AArch64::ST2i32_POST, "st2", ".s", 1, true, 8 },
  { AArch64::ST2i64_POST, "st2", ".d", 1, true, 16 },
  { AArch64::ST2Twov16b, "st2", ".16b", 0, false, 0 },
  { AArch64::ST2Twov8h, "st2", ".8h", 0, false, 0 },
  { AArch64::ST2Twov4s, "st2", ".4s", 0, false, 0 },
  { AArch64::ST2Twov2d, "st2", ".2d", 0, false, 0 },
  { AArch64::ST2Twov8b, "st2", ".8b", 0, false, 0 },
  { AArch64::ST2Twov4h, "st2", ".4h", 0, false, 0 },
  { AArch64::ST2Twov2s, "st2", ".2s", 0, false, 0 },
  { AArch64::ST2Twov16b_POST, "st2", ".16b", 1, false, 32 },
  { AArch64::ST2Twov8h_POST, "st2", ".8h", 1, false, 32 },
  { AArch64::ST2Twov4s_POST, "st2", ".4s", 1, false, 32 },
  { AArch64::ST2Twov2d_POST, "st2", ".2d", 1, false, 32 },
  { AArch64::ST2Twov8b_POST, "st2", ".8b", 1, false, 16 },
  { AArch64::ST2Twov4h_POST, "st2", ".4h", 1, false, 16 },
  { AArch64::ST2Twov2s_POST, "st2", ".2s", 1, false, 16 },
  { AArch64::ST3i8, "st3", ".b", 0, true, 0 },
  { AArch64::ST3i16, "st3", ".h", 0, true, 0 },
  { AArch64::ST3i32, "st3", ".s", 0, true, 0 },
  { AArch64::ST3i64, "st3", ".d", 0, true, 0 },
  { AArch64::ST3i8_POST, "st3", ".b", 1, true, 3 },
  { AArch64::ST3i16_POST, "st3", ".h", 1, true, 6 },
  { AArch64::ST3i32_POST, "st3", ".s", 1, true, 12 },
  { AArch64::ST3i64_POST, "st3", ".d", 1, true, 24 },
  { AArch64::ST3Threev16b, "st3", ".16b", 0, false, 0 },
  { AArch64::ST3Threev8h, "st3", ".8h", 0, false, 0 },
  { AArch64::ST3Threev4s, "st3", ".4s", 0, false, 0 },
  { AArch64::ST3Threev2d, "st3", ".2d", 0, false, 0 },
  { AArch64::ST3Threev8b, "st3", ".8b", 0, false, 0 },
  { AArch64::ST3Threev4h, "st3", ".4h", 0, false, 0 },
  { AArch64::ST3Threev2s, "st3", ".2s", 0, false, 0 },
  { AArch64::ST3Threev16b_POST, "st3", ".16b", 1, false, 48 },
  { AArch64::ST3Threev8h_POST, "st3", ".8h", 1, false, 48 },
  { AArch64::ST3Threev4s_POST, "st3", ".4s", 1, false, 48 },
  { AArch64::ST3Threev2d_POST, "st3", ".2d", 1, false, 48 },
  { AArch64::ST3Threev8b_POST, "st3", ".8b", 1, false, 24 },
  { AArch64::ST3Threev4h_POST, "st3", ".4h", 1, false, 24 },
  { AArch64::ST3Threev2s_POST, "st3", ".2s", 1, false, 24 },
  { AArch64::ST4i8, "st4", ".b", 0, true, 0 },
  { AArch64::ST4i16, "st4", ".h", 0, true, 0 },
  { AArch64::ST4i32, "st4", ".s", 0, true, 0 },
  { AArch64::ST4i64, "st4", ".d", 0, true, 0 },
  { AArch64::ST4i8_POST, "st4", ".b", 1, true, 4 },
  { AArch64::ST4i16_POST, "st4", ".h", 1, true, 8 },
  { AArch64::ST4i32_POST, "st4", ".s", 1, true, 16 },
  { AArch64::ST4i64_POST, "st4", ".d", 1, true, 32 },
  { AArch64::ST4Fourv16b, "st4", ".16b", 0, false, 0 },
  { AArch64::ST4Fourv8h, "st4", ".8h", 0, false, 0 },
  { AArch64::ST4Fourv4s, "st4", ".4s", 0, false, 0 },
  { AArch64::ST4Fourv2d, "st4", ".2d", 0, false, 0 },
  { AArch64::ST4Fourv8b, "st4", ".8b", 0, false, 0 },
  { AArch64::ST4Fourv4h, "st4", ".4h", 0, false, 0 },
  { AArch64::ST4Fourv2s, "st4", ".2s", 0, false, 0 },
  { AArch64::ST4Fourv16b_POST, "st4", ".16b", 1, false, 64 },
  { AArch64::ST4Fourv8h_POST, "st4", ".8h", 1, false, 64 },
  { AArch64::ST4Fourv4s_POST, "st4", ".4s", 1, false, 64 },
  { AArch64::ST4Fourv2d_POST, "st4", ".2d", 1, false, 64 },
  { AArch64::ST4Fourv8b_POST, "st4", ".8b", 1, false, 32 },
  { AArch64::ST4Fourv4h_POST, "st4", ".4h", 1, false, 32 },
  { AArch64::ST4Fourv2s_POST, "st4", ".2s", 1, false, 32 },
};
static const LdStNInstrDesc *getLdStNInstrDesc(unsigned Opcode) {
  for (const auto &Info : LdStNInstInfo)
    if (Info.Opcode == Opcode)
      return &Info;

  return nullptr;
}
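// Worked example (illustrative, not from the upstream comments): for
// AArch64::LD1i8_POST the table yields { "ld1", ".b", list operand 2, lane,
// offset 1 }, which the Apple printer below renders as
// "ld1.b { v0 }[0], [x0], #1" when the post-index register is XZR.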
void AArch64AppleInstPrinter::printInst(const MCInst *MI, uint64_t Address,
                                        StringRef Annot,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  unsigned Opcode = MI->getOpcode();
  StringRef Layout;

  bool IsTbx;
  if (isTblTbxInstruction(MI->getOpcode(), Layout, IsTbx)) {
    O << "\t" << (IsTbx ? "tbx" : "tbl") << Layout << '\t';
    printRegName(O, MI->getOperand(0).getReg(), AArch64::vreg);
    O << ", ";

    unsigned ListOpNum = IsTbx ? 2 : 1;
    printVectorList(MI, ListOpNum, STI, O, "");

    O << ", ";
    printRegName(O, MI->getOperand(ListOpNum + 1).getReg(), AArch64::vreg);
    printAnnotation(O, Annot);
    return;
  }

  if (const LdStNInstrDesc *LdStDesc = getLdStNInstrDesc(Opcode)) {
    O << "\t" << LdStDesc->Mnemonic << LdStDesc->Layout << '\t';

    // Now onto the operands: first a vector list with possible lane
    // specifier. E.g. { v0 }[2]
    int OpNum = LdStDesc->ListOperand;
    printVectorList(MI, OpNum++, STI, O, "");

    if (LdStDesc->HasLane)
      O << '[' << MI->getOperand(OpNum++).getImm() << ']';

    // Next the address: [xN]
    MCRegister AddrReg = MI->getOperand(OpNum++).getReg();
    O << ", [";
    printRegName(O, AddrReg);
    O << ']';

    // Finally, there might be a post-indexed offset.
    if (LdStDesc->NaturalOffset != 0) {
      MCRegister Reg = MI->getOperand(OpNum++).getReg();
      if (Reg != AArch64::XZR) {
        O << ", ";
        printRegName(O, Reg);
      } else {
        assert(LdStDesc->NaturalOffset && "no offset on post-inc instruction?");
        O << ", ";
        markup(O, Markup::Immediate) << "#" << LdStDesc->NaturalOffset;
      }
    }

    printAnnotation(O, Annot);
    return;
  }

  AArch64InstPrinter::printInst(MI, Address, Annot, STI, O);
}
StringRef AArch64AppleInstPrinter::getRegName(MCRegister Reg) const {
  return getRegisterName(Reg);
}
bool AArch64InstPrinter::printRangePrefetchAlias(const MCInst *MI,
                                                 const MCSubtargetInfo &STI,
                                                 raw_ostream &O,
                                                 StringRef Annot) {
  unsigned Opcode = MI->getOpcode();

  assert(((Opcode == AArch64::PRFMroX) || (Opcode == AArch64::PRFMroW)) &&
         "Invalid opcode for RPRFM alias!");

  unsigned PRFOp = MI->getOperand(0).getImm();
  unsigned Mask = 0x18; // 0b11000
  if ((PRFOp & Mask) != Mask)
    return false; // Rt != '11xxx', it's a PRFM instruction.

  MCRegister Rm = MI->getOperand(2).getReg();

  // "Rm" must be a 64-bit GPR for RPRFM.
  if (MRI.getRegClass(AArch64::GPR32RegClassID).contains(Rm))
    Rm = MRI.getMatchingSuperReg(Rm, AArch64::sub_32,
                                 &MRI.getRegClass(AArch64::GPR64RegClassID));

  unsigned SignExtend = MI->getOperand(3).getImm(); // encoded in "option<2>".
  unsigned Shift = MI->getOperand(4).getImm();      // encoded in "S".

  assert((SignExtend <= 1) && "sign extend should be a single bit!");
  assert((Shift <= 1) && "Shift should be a single bit!");

  unsigned Option0 = (Opcode == AArch64::PRFMroX) ? 1 : 0;

  // encoded in "option<2>:option<0>:S:Rt<2:0>".
  unsigned RPRFOp =
      (SignExtend << 5) | (Option0 << 4) | (Shift << 3) | (PRFOp & 0x7);

  O << "\trprfm ";
  if (auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(RPRFOp))
    O << RPRFM->Name << ", ";
  else
    O << "#" << formatImm(RPRFOp) << ", ";
  O << getRegisterName(Rm);
  O << ", [";
  printOperand(MI, 1, STI, O); // "Rn".
  O << "]";

  printAnnotation(O, Annot);

  return true;
}
bool AArch64InstPrinter::printSysAlias(const MCInst *MI,
                                       const MCSubtargetInfo &STI,
                                       raw_ostream &O) {
  unsigned Opcode = MI->getOpcode();
  assert(Opcode == AArch64::SYSxt && "Invalid opcode for SYS alias!");

  const MCOperand &Op1 = MI->getOperand(0);
  const MCOperand &Cn = MI->getOperand(1);
  const MCOperand &Cm = MI->getOperand(2);
  const MCOperand &Op2 = MI->getOperand(3);

  unsigned Op1Val = Op1.getImm();
  unsigned CnVal = Cn.getImm();
  unsigned CmVal = Cm.getImm();
  unsigned Op2Val = Op2.getImm();

  uint16_t Encoding = Op2Val;
  Encoding |= CmVal << 3;
  Encoding |= CnVal << 7;
  Encoding |= Op1Val << 11;
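  // The packed encoding mirrors the layout used by the TableGen-erated lookup
  // tables below: op2 in bits [2:0], CRm in bits [6:3], CRn in bits [10:7]
  // and op1 in bits [13:11].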
  bool NeedsReg = true;
  std::string Ins;
  std::string Name;

  if (CnVal == 7) {
    switch (CmVal) {
    default: return false;
    // Maybe IC, maybe Prediction Restriction
    case 1:
      switch (Op1Val) {
      default: return false;
      case 0: goto Search_IC;
      case 3: goto Search_PRCTX;
      }
    // Prediction Restriction aliases
    case 3: {
      Search_PRCTX:
      if (Op1Val != 3 || CnVal != 7 || CmVal != 3)
        return false;

      const auto Requires =
          Op2Val == 6 ? AArch64::FeatureSPECRES2 : AArch64::FeaturePredRes;
      if (!(STI.hasFeature(AArch64::FeatureAll) || STI.hasFeature(Requires)))
        return false;

      NeedsReg = true;
      switch (Op2Val) {
      default: return false;
      case 4: Ins = "cfp\t"; break;
      case 5: Ins = "dvp\t"; break;
      case 6: Ins = "cosp\t"; break;
      case 7: Ins = "cpp\t"; break;
      }

      Name = std::string("RCTX");
    } break;
    // IC aliases
    case 5: {
      Search_IC:
      const AArch64IC::IC *IC = AArch64IC::lookupICByEncoding(Encoding);
      if (!IC || !IC->haveFeatures(STI.getFeatureBits()))
        return false;

      NeedsReg = IC->NeedsReg;
      Ins = "ic\t";
      Name = std::string(IC->Name);
    } break;
    // DC aliases
    case 4: case 6: case 10: case 11: case 12: case 13: case 14:
    {
      const AArch64DC::DC *DC = AArch64DC::lookupDCByEncoding(Encoding);
      if (!DC || !DC->haveFeatures(STI.getFeatureBits()))
        return false;

      NeedsReg = true;
      Ins = "dc\t";
      Name = std::string(DC->Name);
    } break;
    // AT aliases
    case 8: {
      const AArch64AT::AT *AT = AArch64AT::lookupATByEncoding(Encoding);
      if (!AT || !AT->haveFeatures(STI.getFeatureBits()))
        return false;

      NeedsReg = true;
      Ins = "at\t";
      Name = std::string(AT->Name);
    } break;
    case 9: {
      // Overlaps with AT and DC
      const AArch64AT::AT *AT = AArch64AT::lookupATByEncoding(Encoding);
      const AArch64DC::DC *DC = AArch64DC::lookupDCByEncoding(Encoding);
      if (AT && AT->haveFeatures(STI.getFeatureBits())) {
        NeedsReg = true;
        Ins = "at\t";
        Name = std::string(AT->Name);
      } else if (DC && DC->haveFeatures(STI.getFeatureBits())) {
        NeedsReg = true;
        Ins = "dc\t";
        Name = std::string(DC->Name);
      } else {
        return false;
      }
    } break;
    }
  } else if (CnVal == 8 || CnVal == 9) {
    // TLBI aliases
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByEncoding(Encoding);
    if (!TLBI || !TLBI->haveFeatures(STI.getFeatureBits()))
      return false;

    NeedsReg = TLBI->NeedsReg;
    Ins = "tlbi\t";
    Name = std::string(TLBI->Name);
  } else
    return false;

  std::string Str = Ins + Name;
  std::transform(Str.begin(), Str.end(), Str.begin(), ::tolower);

  O << '\t' << Str;
  if (NeedsReg) {
    O << ", ";
    printRegName(O, MI->getOperand(4).getReg());
  }

  return true;
}
bool AArch64InstPrinter::printSyspAlias(const MCInst *MI,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  unsigned Opcode = MI->getOpcode();
  assert((Opcode == AArch64::SYSPxt || Opcode == AArch64::SYSPxt_XZR) &&
         "Invalid opcode for SYSP alias!");

  const MCOperand &Op1 = MI->getOperand(0);
  const MCOperand &Cn = MI->getOperand(1);
  const MCOperand &Cm = MI->getOperand(2);
  const MCOperand &Op2 = MI->getOperand(3);

  unsigned Op1Val = Op1.getImm();
  unsigned CnVal = Cn.getImm();
  unsigned CmVal = Cm.getImm();
  unsigned Op2Val = Op2.getImm();

  uint16_t Encoding = Op2Val;
  Encoding |= CmVal << 3;
  Encoding |= CnVal << 7;
  Encoding |= Op1Val << 11;

  std::string Ins;
  std::string Name;

  if (CnVal == 8 || CnVal == 9) {
    // TLBIP aliases
    if (CnVal == 9) {
      if (!STI.hasFeature(AArch64::FeatureXS))
        return false;
      Encoding &= ~(1 << 7);
    }

    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByEncoding(Encoding);
    if (!TLBI || !TLBI->haveFeatures(STI.getFeatureBits()))
      return false;

    Ins = "tlbip\t";
    Name = std::string(TLBI->Name);
  } else
    return false;

  std::string Str = Ins + Name;
  std::transform(Str.begin(), Str.end(), Str.begin(), ::tolower);

  O << '\t' << Str;
  O << ", ";
  if (MI->getOperand(4).getReg() == AArch64::XZR)
    printSyspXzrPair(MI, 4, STI, O);
  else
    printGPRSeqPairsClassOperand<64>(MI, 4, STI, O);

  return true;
}
template <int EltSize>
void AArch64InstPrinter::printMatrix(const MCInst *MI, unsigned OpNum,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  const MCOperand &RegOp = MI->getOperand(OpNum);
  assert(RegOp.isReg() && "Unexpected operand type!");

  printRegName(O, RegOp.getReg());
  switch (EltSize) {
  case 0:
    break;
  case 8:
    O << ".b";
    break;
  case 16:
    O << ".h";
    break;
  case 32:
    O << ".s";
    break;
  case 64:
    O << ".d";
    break;
  case 128:
    O << ".q";
    break;
  default:
    llvm_unreachable("Unsupported element size");
  }
}
template <bool IsVertical>
void AArch64InstPrinter::printMatrixTileVector(const MCInst *MI, unsigned OpNum,
                                               const MCSubtargetInfo &STI,
                                               raw_ostream &O) {
  const MCOperand &RegOp = MI->getOperand(OpNum);
  assert(RegOp.isReg() && "Unexpected operand type!");
  StringRef RegName = getRegisterName(RegOp.getReg());

  // Insert the horizontal/vertical flag before the suffix.
  StringRef Base, Suffix;
  std::tie(Base, Suffix) = RegName.split('.');
  O << Base << (IsVertical ? "v" : "h") << '.' << Suffix;
}
void AArch64InstPrinter::printMatrixTile(const MCInst *MI, unsigned OpNum,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  const MCOperand &RegOp = MI->getOperand(OpNum);
  assert(RegOp.isReg() && "Unexpected operand type!");
  printRegName(O, RegOp.getReg());
}
void AArch64InstPrinter::printSVCROp(const MCInst *MI, unsigned OpNum,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  const MCOperand &MO = MI->getOperand(OpNum);
  assert(MO.isImm() && "Unexpected operand type!");
  unsigned svcrop = MO.getImm();
  const auto *SVCR = AArch64SVCR::lookupSVCRByEncoding(svcrop);
  assert(SVCR && "Unexpected SVCR operand!");
  O << SVCR->Name;
}
void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(OpNo);
  if (Op.isReg()) {
    printRegName(O, Op.getReg());
  } else if (Op.isImm()) {
    printImm(MI, OpNo, STI, O);
  } else {
    assert(Op.isExpr() && "unknown operand kind in printOperand");
    Op.getExpr()->print(O, &MAI);
  }
}
void AArch64InstPrinter::printImm(const MCInst *MI, unsigned OpNo,
                                  const MCSubtargetInfo &STI,
                                  raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(OpNo);
  markup(O, Markup::Immediate) << "#" << formatImm(Op.getImm());
}
void AArch64InstPrinter::printImmHex(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(OpNo);
  markup(O, Markup::Immediate) << format("#%#llx", Op.getImm());
}
template <int Size>
void AArch64InstPrinter::printSImm(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(OpNo);
  if (Size == 8)
    markup(O, Markup::Immediate) << "#" << formatImm((signed char)Op.getImm());
  else if (Size == 16)
    markup(O, Markup::Immediate) << "#" << formatImm((signed short)Op.getImm());
  else
    markup(O, Markup::Immediate) << "#" << formatImm(Op.getImm());
}
void AArch64InstPrinter::printPostIncOperand(const MCInst *MI, unsigned OpNo,
                                             unsigned Imm, raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(OpNo);
  if (Op.isReg()) {
    MCRegister Reg = Op.getReg();
    if (Reg == AArch64::XZR)
      markup(O, Markup::Immediate) << "#" << Imm;
    else
      printRegName(O, Reg);
  } else
    llvm_unreachable("unknown operand kind in printPostIncOperand64");
}
void AArch64InstPrinter::printVRegOperand(const MCInst *MI, unsigned OpNo,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(OpNo);
  assert(Op.isReg() && "Non-register vreg operand!");
  printRegName(O, Op.getReg(), AArch64::vreg);
}
void AArch64InstPrinter::printSysCROperand(const MCInst *MI, unsigned OpNo,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(OpNo);
  assert(Op.isImm() && "System instruction C[nm] operands must be immediates!");
  O << "c" << Op.getImm();
}
void AArch64InstPrinter::printAddSubImm(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  const MCOperand &MO = MI->getOperand(OpNum);
  if (MO.isImm()) {
    unsigned Val = (MO.getImm() & 0xfff);
    assert(Val == MO.getImm() && "Add/sub immediate out of range!");
    unsigned Shift =
        AArch64_AM::getShiftValue(MI->getOperand(OpNum + 1).getImm());
    markup(O, Markup::Immediate) << '#' << formatImm(Val);
    if (Shift != 0) {
      printShifter(MI, OpNum + 1, STI, O);
      if (CommentStream)
        *CommentStream << '=' << formatImm(Val << Shift) << '\n';
    }
  } else {
    assert(MO.isExpr() && "Unexpected operand type!");
    MO.getExpr()->print(O, &MAI);
    printShifter(MI, OpNum + 1, STI, O);
  }
}
template <typename T>
void AArch64InstPrinter::printLogicalImm(const MCInst *MI, unsigned OpNum,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  uint64_t Val = MI->getOperand(OpNum).getImm();
  WithMarkup M = markup(O, Markup::Immediate);
  O << "#0x";
  O.write_hex(AArch64_AM::decodeLogicalImmediate(Val, 8 * sizeof(T)));
}
void AArch64InstPrinter::printShifter(const MCInst *MI, unsigned OpNum,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  unsigned Val = MI->getOperand(OpNum).getImm();
  // LSL #0 should not be printed.
  if (AArch64_AM::getShiftType(Val) == AArch64_AM::LSL &&
      AArch64_AM::getShiftValue(Val) == 0)
    return;
  O << ", " << AArch64_AM::getShiftExtendName(AArch64_AM::getShiftType(Val))
    << " ";
  markup(O, Markup::Immediate) << "#" << AArch64_AM::getShiftValue(Val);
}
void AArch64InstPrinter::printShiftedRegister(const MCInst *MI, unsigned OpNum,
                                              const MCSubtargetInfo &STI,
                                              raw_ostream &O) {
  printRegName(O, MI->getOperand(OpNum).getReg());
  printShifter(MI, OpNum + 1, STI, O);
}
void AArch64InstPrinter::printExtendedRegister(const MCInst *MI, unsigned OpNum,
                                               const MCSubtargetInfo &STI,
                                               raw_ostream &O) {
  printRegName(O, MI->getOperand(OpNum).getReg());
  printArithExtend(MI, OpNum + 1, STI, O);
}
void AArch64InstPrinter::printArithExtend(const MCInst *MI, unsigned OpNum,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  unsigned Val = MI->getOperand(OpNum).getImm();
  AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getArithExtendType(Val);
  unsigned ShiftVal = AArch64_AM::getArithShiftValue(Val);

  // If the destination or first source register operand is [W]SP, print
  // UXTW/UXTX as LSL, and if the shift amount is also zero, print nothing at
  // all.
  if (ExtType == AArch64_AM::UXTW || ExtType == AArch64_AM::UXTX) {
    MCRegister Dest = MI->getOperand(0).getReg();
    MCRegister Src1 = MI->getOperand(1).getReg();
    if ( ((Dest == AArch64::SP || Src1 == AArch64::SP) &&
          ExtType == AArch64_AM::UXTX) ||
         ((Dest == AArch64::WSP || Src1 == AArch64::WSP) &&
          ExtType == AArch64_AM::UXTW) ) {
      if (ShiftVal != 0) {
        O << ", lsl ";
        markup(O, Markup::Immediate) << "#" << ShiftVal;
      }
      return;
    }
  }
  O << ", " << AArch64_AM::getShiftExtendName(ExtType);
  if (ShiftVal != 0) {
    O << " ";
    markup(O, Markup::Immediate) << "#" << ShiftVal;
  }
}
void AArch64InstPrinter::printMemExtendImpl(bool SignExtend, bool DoShift,
                                            unsigned Width, char SrcRegKind,
                                            raw_ostream &O) {
  // sxtw, sxtx, uxtw or lsl (== uxtx)
  bool IsLSL = !SignExtend && SrcRegKind == 'x';
  if (IsLSL)
    O << "lsl";
  else
    O << (SignExtend ? 's' : 'u') << "xt" << SrcRegKind;

  if (DoShift || IsLSL) {
    O << " ";
    markup(O, Markup::Immediate) << "#" << Log2_32(Width / 8);
  }
}
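// For illustration (not part of the upstream comments): SignExtend == true,
// DoShift == true, Width == 32 and SrcRegKind == 'w' prints "sxtw #2"
// (Log2_32(32 / 8) == 2), while an unsigned x-register source selects the
// "lsl" spelling above.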
void AArch64InstPrinter::printMemExtend(const MCInst *MI, unsigned OpNum,
                                        raw_ostream &O, char SrcRegKind,
                                        unsigned Width) {
  bool SignExtend = MI->getOperand(OpNum).getImm();
  bool DoShift = MI->getOperand(OpNum + 1).getImm();
  printMemExtendImpl(SignExtend, DoShift, Width, SrcRegKind, O);
}
template <bool SignExtend, int ExtWidth, char SrcRegKind, char Suffix>
void AArch64InstPrinter::printRegWithShiftExtend(const MCInst *MI,
                                                 unsigned OpNum,
                                                 const MCSubtargetInfo &STI,
                                                 raw_ostream &O) {
  printOperand(MI, OpNum, STI, O);
  if (Suffix == 's' || Suffix == 'd')
    O << '.' << Suffix;
  else
    assert(Suffix == 0 && "Unsupported suffix size");

  bool DoShift = ExtWidth != 8;
  if (SignExtend || DoShift || SrcRegKind == 'w') {
    O << ", ";
    printMemExtendImpl(SignExtend, DoShift, ExtWidth, SrcRegKind, O);
  }
}
>
1359 void AArch64InstPrinter::printPredicateAsCounter(const MCInst
*MI
,
1361 const MCSubtargetInfo
&STI
,
1363 MCRegister Reg
= MI
->getOperand(OpNum
).getReg();
1364 if (Reg
< AArch64::PN0
|| Reg
> AArch64::PN15
)
1365 llvm_unreachable("Unsupported predicate-as-counter register");
1366 O
<< "pn" << Reg
- AArch64::PN0
;
1384 llvm_unreachable("Unsupported element size");
void AArch64InstPrinter::printCondCode(const MCInst *MI, unsigned OpNum,
                                       const MCSubtargetInfo &STI,
                                       raw_ostream &O) {
  AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(OpNum).getImm();
  O << AArch64CC::getCondCodeName(CC);
}
void AArch64InstPrinter::printInverseCondCode(const MCInst *MI, unsigned OpNum,
                                              const MCSubtargetInfo &STI,
                                              raw_ostream &O) {
  AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(OpNum).getImm();
  O << AArch64CC::getCondCodeName(AArch64CC::getInvertedCondCode(CC));
}
void AArch64InstPrinter::printAMNoIndex(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  O << '[';
  printRegName(O, MI->getOperand(OpNum).getReg());
  O << ']';
}
template <int Scale>
void AArch64InstPrinter::printImmScale(const MCInst *MI, unsigned OpNum,
                                       const MCSubtargetInfo &STI,
                                       raw_ostream &O) {
  markup(O, Markup::Immediate)
      << '#' << formatImm(Scale * MI->getOperand(OpNum).getImm());
}
template <int Scale, int Offset>
void AArch64InstPrinter::printImmRangeScale(const MCInst *MI, unsigned OpNum,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  unsigned FirstImm = Scale * MI->getOperand(OpNum).getImm();
  O << formatImm(FirstImm);
  O << ":" << formatImm(FirstImm + Offset);
}
void AArch64InstPrinter::printUImm12Offset(const MCInst *MI, unsigned OpNum,
                                           unsigned Scale, raw_ostream &O) {
  const MCOperand MO = MI->getOperand(OpNum);
  if (MO.isImm()) {
    markup(O, Markup::Immediate) << '#' << formatImm(MO.getImm() * Scale);
  } else {
    assert(MO.isExpr() && "Unexpected operand type!");
    MO.getExpr()->print(O, &MAI);
  }
}
void AArch64InstPrinter::printAMIndexedWB(const MCInst *MI, unsigned OpNum,
                                          unsigned Scale, raw_ostream &O) {
  const MCOperand MO1 = MI->getOperand(OpNum + 1);
  O << '[';
  printRegName(O, MI->getOperand(OpNum).getReg());
  if (MO1.isImm()) {
    O << ", ";
    markup(O, Markup::Immediate) << "#" << formatImm(MO1.getImm() * Scale);
  } else {
    assert(MO1.isExpr() && "Unexpected operand type!");
    O << ", ";
    MO1.getExpr()->print(O, &MAI);
  }
  O << ']';
}
void AArch64InstPrinter::printRPRFMOperand(const MCInst *MI, unsigned OpNum,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  unsigned prfop = MI->getOperand(OpNum).getImm();
  if (auto PRFM = AArch64RPRFM::lookupRPRFMByEncoding(prfop)) {
    O << PRFM->Name;
    return;
  }

  O << '#' << formatImm(prfop);
}
template <bool IsSVEPrefetch>
void AArch64InstPrinter::printPrefetchOp(const MCInst *MI, unsigned OpNum,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  unsigned prfop = MI->getOperand(OpNum).getImm();
  if (IsSVEPrefetch) {
    if (auto PRFM = AArch64SVEPRFM::lookupSVEPRFMByEncoding(prfop)) {
      O << PRFM->Name;
      return;
    }
  } else {
    auto PRFM = AArch64PRFM::lookupPRFMByEncoding(prfop);
    if (PRFM && PRFM->haveFeatures(STI.getFeatureBits())) {
      O << PRFM->Name;
      return;
    }
  }

  markup(O, Markup::Immediate) << '#' << formatImm(prfop);
}
void AArch64InstPrinter::printPSBHintOp(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  unsigned psbhintop = MI->getOperand(OpNum).getImm();
  auto PSB = AArch64PSBHint::lookupPSBByEncoding(psbhintop);
  if (PSB)
    O << PSB->Name;
  else
    markup(O, Markup::Immediate) << '#' << formatImm(psbhintop);
}
void AArch64InstPrinter::printBTIHintOp(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  unsigned btihintop = MI->getOperand(OpNum).getImm() ^ 32;
  auto BTI = AArch64BTIHint::lookupBTIByEncoding(btihintop);
  if (BTI)
    O << BTI->Name;
  else
    markup(O, Markup::Immediate) << '#' << formatImm(btihintop);
}
void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  const MCOperand &MO = MI->getOperand(OpNum);
  float FPImm = MO.isDFPImm() ? bit_cast<double>(MO.getDFPImm())
                              : AArch64_AM::getFPImmFloat(MO.getImm());

  // 8 decimal places are enough to perfectly represent permitted floats.
  markup(O, Markup::Immediate) << format("#%.8f", FPImm);
}
static MCRegister getNextVectorRegister(MCRegister Reg, unsigned Stride = 1) {
  while (Stride--) {
    switch (Reg) {
    default:
      llvm_unreachable("Vector register expected!");
    case AArch64::Q0: Reg = AArch64::Q1; break;
    case AArch64::Q1: Reg = AArch64::Q2; break;
    case AArch64::Q2: Reg = AArch64::Q3; break;
    case AArch64::Q3: Reg = AArch64::Q4; break;
    case AArch64::Q4: Reg = AArch64::Q5; break;
    case AArch64::Q5: Reg = AArch64::Q6; break;
    case AArch64::Q6: Reg = AArch64::Q7; break;
    case AArch64::Q7: Reg = AArch64::Q8; break;
    case AArch64::Q8: Reg = AArch64::Q9; break;
    case AArch64::Q9: Reg = AArch64::Q10; break;
    case AArch64::Q10: Reg = AArch64::Q11; break;
    case AArch64::Q11: Reg = AArch64::Q12; break;
    case AArch64::Q12: Reg = AArch64::Q13; break;
    case AArch64::Q13: Reg = AArch64::Q14; break;
    case AArch64::Q14: Reg = AArch64::Q15; break;
    case AArch64::Q15: Reg = AArch64::Q16; break;
    case AArch64::Q16: Reg = AArch64::Q17; break;
    case AArch64::Q17: Reg = AArch64::Q18; break;
    case AArch64::Q18: Reg = AArch64::Q19; break;
    case AArch64::Q19: Reg = AArch64::Q20; break;
    case AArch64::Q20: Reg = AArch64::Q21; break;
    case AArch64::Q21: Reg = AArch64::Q22; break;
    case AArch64::Q22: Reg = AArch64::Q23; break;
    case AArch64::Q23: Reg = AArch64::Q24; break;
    case AArch64::Q24: Reg = AArch64::Q25; break;
    case AArch64::Q25: Reg = AArch64::Q26; break;
    case AArch64::Q26: Reg = AArch64::Q27; break;
    case AArch64::Q27: Reg = AArch64::Q28; break;
    case AArch64::Q28: Reg = AArch64::Q29; break;
    case AArch64::Q29: Reg = AArch64::Q30; break;
    case AArch64::Q30: Reg = AArch64::Q31; break;
    // Vector lists can wrap around.
    case AArch64::Q31: Reg = AArch64::Q0; break;

    case AArch64::Z0: Reg = AArch64::Z1; break;
    case AArch64::Z1: Reg = AArch64::Z2; break;
    case AArch64::Z2: Reg = AArch64::Z3; break;
    case AArch64::Z3: Reg = AArch64::Z4; break;
    case AArch64::Z4: Reg = AArch64::Z5; break;
    case AArch64::Z5: Reg = AArch64::Z6; break;
    case AArch64::Z6: Reg = AArch64::Z7; break;
    case AArch64::Z7: Reg = AArch64::Z8; break;
    case AArch64::Z8: Reg = AArch64::Z9; break;
    case AArch64::Z9: Reg = AArch64::Z10; break;
    case AArch64::Z10: Reg = AArch64::Z11; break;
    case AArch64::Z11: Reg = AArch64::Z12; break;
    case AArch64::Z12: Reg = AArch64::Z13; break;
    case AArch64::Z13: Reg = AArch64::Z14; break;
    case AArch64::Z14: Reg = AArch64::Z15; break;
    case AArch64::Z15: Reg = AArch64::Z16; break;
    case AArch64::Z16: Reg = AArch64::Z17; break;
    case AArch64::Z17: Reg = AArch64::Z18; break;
    case AArch64::Z18: Reg = AArch64::Z19; break;
    case AArch64::Z19: Reg = AArch64::Z20; break;
    case AArch64::Z20: Reg = AArch64::Z21; break;
    case AArch64::Z21: Reg = AArch64::Z22; break;
    case AArch64::Z22: Reg = AArch64::Z23; break;
    case AArch64::Z23: Reg = AArch64::Z24; break;
    case AArch64::Z24: Reg = AArch64::Z25; break;
    case AArch64::Z25: Reg = AArch64::Z26; break;
    case AArch64::Z26: Reg = AArch64::Z27; break;
    case AArch64::Z27: Reg = AArch64::Z28; break;
    case AArch64::Z28: Reg = AArch64::Z29; break;
    case AArch64::Z29: Reg = AArch64::Z30; break;
    case AArch64::Z30: Reg = AArch64::Z31; break;
    // Vector lists can wrap around.
    case AArch64::Z31: Reg = AArch64::Z0; break;

    case AArch64::P0: Reg = AArch64::P1; break;
    case AArch64::P1: Reg = AArch64::P2; break;
    case AArch64::P2: Reg = AArch64::P3; break;
    case AArch64::P3: Reg = AArch64::P4; break;
    case AArch64::P4: Reg = AArch64::P5; break;
    case AArch64::P5: Reg = AArch64::P6; break;
    case AArch64::P6: Reg = AArch64::P7; break;
    case AArch64::P7: Reg = AArch64::P8; break;
    case AArch64::P8: Reg = AArch64::P9; break;
    case AArch64::P9: Reg = AArch64::P10; break;
    case AArch64::P10: Reg = AArch64::P11; break;
    case AArch64::P11: Reg = AArch64::P12; break;
    case AArch64::P12: Reg = AArch64::P13; break;
    case AArch64::P13: Reg = AArch64::P14; break;
    case AArch64::P14: Reg = AArch64::P15; break;
    // Vector lists can wrap around.
    case AArch64::P15: Reg = AArch64::P0; break;
    }
  }
  return Reg;
}
template<unsigned size>
void AArch64InstPrinter::printGPRSeqPairsClassOperand(const MCInst *MI,
                                                      unsigned OpNum,
                                                      const MCSubtargetInfo &STI,
                                                      raw_ostream &O) {
  static_assert(size == 64 || size == 32,
                "Template parameter must be either 32 or 64");
  MCRegister Reg = MI->getOperand(OpNum).getReg();

  unsigned Sube = (size == 32) ? AArch64::sube32 : AArch64::sube64;
  unsigned Subo = (size == 32) ? AArch64::subo32 : AArch64::subo64;

  MCRegister Even = MRI.getSubReg(Reg, Sube);
  MCRegister Odd = MRI.getSubReg(Reg, Subo);
  printRegName(O, Even);
  O << ", ";
  printRegName(O, Odd);
}
void AArch64InstPrinter::printMatrixTileList(const MCInst *MI, unsigned OpNum,
                                             const MCSubtargetInfo &STI,
                                             raw_ostream &O) {
  unsigned MaxRegs = 8;
  unsigned RegMask = MI->getOperand(OpNum).getImm();

  unsigned NumRegs = 0;
  for (unsigned I = 0; I < MaxRegs; ++I)
    if ((RegMask & (1 << I)) != 0)
      ++NumRegs;

  O << "{";
  unsigned Printed = 0;
  for (unsigned I = 0; I < MaxRegs; ++I) {
    unsigned Reg = RegMask & (1 << I);
    if (Reg == 0)
      continue;
    printRegName(O, AArch64::ZAD0 + I);
    if (Printed + 1 != NumRegs)
      O << ", ";
    ++Printed;
  }
  O << "}";
}
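// For illustration (not part of the upstream comments): a tile mask with bits
// 0 and 2 set prints as "{za0.d, za2.d}"; each set bit selects one ZAD tile
// and the separator is only emitted between printed registers.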
void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O,
                                         StringRef LayoutSuffix) {
  MCRegister Reg = MI->getOperand(OpNum).getReg();

  O << "{ ";

  // Work out how many registers there are in the list (if there is an actual
  // list).
  unsigned NumRegs = 1;
  if (MRI.getRegClass(AArch64::DDRegClassID).contains(Reg) ||
      MRI.getRegClass(AArch64::ZPR2RegClassID).contains(Reg) ||
      MRI.getRegClass(AArch64::QQRegClassID).contains(Reg) ||
      MRI.getRegClass(AArch64::PPR2RegClassID).contains(Reg) ||
      MRI.getRegClass(AArch64::ZPR2StridedRegClassID).contains(Reg))
    NumRegs = 2;
  else if (MRI.getRegClass(AArch64::DDDRegClassID).contains(Reg) ||
           MRI.getRegClass(AArch64::ZPR3RegClassID).contains(Reg) ||
           MRI.getRegClass(AArch64::QQQRegClassID).contains(Reg))
    NumRegs = 3;
  else if (MRI.getRegClass(AArch64::DDDDRegClassID).contains(Reg) ||
           MRI.getRegClass(AArch64::ZPR4RegClassID).contains(Reg) ||
           MRI.getRegClass(AArch64::QQQQRegClassID).contains(Reg) ||
           MRI.getRegClass(AArch64::ZPR4StridedRegClassID).contains(Reg))
    NumRegs = 4;

  unsigned Stride = 1;
  if (MRI.getRegClass(AArch64::ZPR2StridedRegClassID).contains(Reg))
    Stride = 8;
  else if (MRI.getRegClass(AArch64::ZPR4StridedRegClassID).contains(Reg))
    Stride = 4;

  // Now forget about the list and find out what the first register is.
  if (MCRegister FirstReg = MRI.getSubReg(Reg, AArch64::dsub0))
    Reg = FirstReg;
  else if (MCRegister FirstReg = MRI.getSubReg(Reg, AArch64::qsub0))
    Reg = FirstReg;
  else if (MCRegister FirstReg = MRI.getSubReg(Reg, AArch64::zsub0))
    Reg = FirstReg;
  else if (MCRegister FirstReg = MRI.getSubReg(Reg, AArch64::psub0))
    Reg = FirstReg;

  // If it's a D-reg, we need to promote it to the equivalent Q-reg before
  // printing (otherwise getRegisterName fails).
  if (MRI.getRegClass(AArch64::FPR64RegClassID).contains(Reg)) {
    const MCRegisterClass &FPR128RC =
        MRI.getRegClass(AArch64::FPR128RegClassID);
    Reg = MRI.getMatchingSuperReg(Reg, AArch64::dsub, &FPR128RC);
  }

  if ((MRI.getRegClass(AArch64::ZPRRegClassID).contains(Reg) ||
       MRI.getRegClass(AArch64::PPRRegClassID).contains(Reg)) &&
      NumRegs > 1 && Stride == 1 &&
      // Do not print the range when the last register is lower than the first.
      // Because it is a wrap-around register.
      Reg < getNextVectorRegister(Reg, NumRegs - 1)) {
    printRegName(O, Reg);
    O << LayoutSuffix;

    // Set of two sve registers should be separated by ','
    StringRef split_char = NumRegs == 2 ? ", " : " - ";
    O << split_char;
    printRegName(O, (getNextVectorRegister(Reg, NumRegs - 1)));
    O << LayoutSuffix;
  } else {
    for (unsigned i = 0; i < NumRegs;
         ++i, Reg = getNextVectorRegister(Reg, Stride)) {
      // wrap-around sve register
      if (MRI.getRegClass(AArch64::ZPRRegClassID).contains(Reg) ||
          MRI.getRegClass(AArch64::PPRRegClassID).contains(Reg))
        printRegName(O, Reg);
      else
        printRegName(O, Reg, AArch64::vreg);
      O << LayoutSuffix;
      if (i + 1 != NumRegs)
        O << ", ";
    }
  }

  O << " }";
}

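// Worked examples (illustrative) for the function above: a contiguous
// four-register SVE list starting at z4 with a ".s" suffix takes the range
// form "{ z4.s - z7.s }", while a strided two-register operand whose first
// register is z0 (stride 8) falls through to the loop and prints
// "{ z0.s, z8.s }".
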
void
AArch64InstPrinter::printImplicitlyTypedVectorList(const MCInst *MI,
                                                   unsigned OpNum,
                                                   const MCSubtargetInfo &STI,
                                                   raw_ostream &O) {
  printVectorList(MI, OpNum, STI, O, "");
}

template <unsigned NumLanes, char LaneKind>
void AArch64InstPrinter::printTypedVectorList(const MCInst *MI, unsigned OpNum,
                                              const MCSubtargetInfo &STI,
                                              raw_ostream &O) {
  if (LaneKind == 0) {
    printVectorList(MI, OpNum, STI, O, "");
    return;
  }
  std::string Suffix(".");
  if (NumLanes)
    Suffix += itostr(NumLanes) + LaneKind;
  else
    Suffix += LaneKind;

  printVectorList(MI, OpNum, STI, O, Suffix);
}

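// Illustrative instantiations of the template above: printTypedVectorList<4, 'h'>
// produces lists such as "{ v0.4h, v1.4h }", while printTypedVectorList<0, 'b'>
// (no lane count) produces SVE-style "{ z0.b, z1.b }".
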
template <unsigned Scale>
void AArch64InstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  O << "[" << Scale * MI->getOperand(OpNum).getImm() << "]";
}

template <unsigned Scale>
void AArch64InstPrinter::printMatrixIndex(const MCInst *MI, unsigned OpNum,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  O << Scale * MI->getOperand(OpNum).getImm();
}

void AArch64InstPrinter::printAlignedLabel(const MCInst *MI, uint64_t Address,
                                           unsigned OpNum,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(OpNum);

  // If the label has already been resolved to an immediate offset (say, when
  // we're running the disassembler), just print the immediate.
  if (Op.isImm()) {
    int64_t Offset = Op.getImm() * 4;
    if (PrintBranchImmAsAddress)
      markup(O, Markup::Target) << formatHex(Address + Offset);
    else
      markup(O, Markup::Immediate) << "#" << formatImm(Offset);
    return;
  }

  // If the branch target is simply an address then print it in hex.
  const MCConstantExpr *BranchTarget =
      dyn_cast<MCConstantExpr>(MI->getOperand(OpNum).getExpr());
  int64_t TargetAddress;
  if (BranchTarget && BranchTarget->evaluateAsAbsolute(TargetAddress)) {
    markup(O, Markup::Target) << formatHex((uint64_t)TargetAddress);
  } else {
    // Otherwise, just print the expression.
    MI->getOperand(OpNum).getExpr()->print(O, &MAI);
  }
}

void AArch64InstPrinter::printAdrAdrpLabel(const MCInst *MI, uint64_t Address,
                                           unsigned OpNum,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(OpNum);

  // If the label has already been resolved to an immediate offset (say, when
  // we're running the disassembler), just print the immediate.
  if (Op.isImm()) {
    int64_t Offset = Op.getImm();
    if (MI->getOpcode() == AArch64::ADRP) {
      Offset = Offset * 4096;
      Address = Address & -4096;
    }
    WithMarkup M = markup(O, Markup::Immediate);
    if (PrintBranchImmAsAddress)
      markup(O, Markup::Target) << formatHex(Address + Offset);
    else
      markup(O, Markup::Immediate) << "#" << Offset;
    return;
  }

  // Otherwise, just print the expression.
  MI->getOperand(OpNum).getExpr()->print(O, &MAI);
}

void AArch64InstPrinter::printBarrierOption(const MCInst *MI, unsigned OpNo,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  unsigned Val = MI->getOperand(OpNo).getImm();
  unsigned Opcode = MI->getOpcode();

  StringRef Name;
  if (Opcode == AArch64::ISB) {
    auto ISB = AArch64ISB::lookupISBByEncoding(Val);
    Name = ISB ? ISB->Name : "";
  } else if (Opcode == AArch64::TSB) {
    auto TSB = AArch64TSB::lookupTSBByEncoding(Val);
    Name = TSB ? TSB->Name : "";
  } else {
    auto DB = AArch64DB::lookupDBByEncoding(Val);
    Name = DB ? DB->Name : "";
  }

  if (!Name.empty())
    O << Name;
  else
    markup(O, Markup::Immediate) << "#" << Val;
}

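// Illustrative: a DSB whose option encoding is 0b1111 prints as "sy", while an
// encoding with no named alias (e.g. 0) falls back to the raw "#0" form.
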
void AArch64InstPrinter::printBarriernXSOption(const MCInst *MI, unsigned OpNo,
                                               const MCSubtargetInfo &STI,
                                               raw_ostream &O) {
  unsigned Val = MI->getOperand(OpNo).getImm();
  assert(MI->getOpcode() == AArch64::DSBnXS);

  StringRef Name;
  auto DB = AArch64DBnXS::lookupDBnXSByEncoding(Val);
  Name = DB ? DB->Name : "";

  if (!Name.empty())
    O << Name;
  else
    markup(O, Markup::Immediate) << "#" << Val;
}

static bool isValidSysReg(const AArch64SysReg::SysReg *Reg, bool Read,
                          const MCSubtargetInfo &STI) {
  return (Reg && (Read ? Reg->Readable : Reg->Writeable) &&
          Reg->haveFeatures(STI.getFeatureBits()));
}

// Looks up a system register either by encoding or by name. Some system
// registers share the same encoding between different architectures,
// therefore a tablegen lookup by encoding will return an entry regardless
// of the register's predication on a specific subtarget feature. To work
// around this problem we keep an alternative name for such registers and
// look them up by that name if the first lookup was unsuccessful.
static const AArch64SysReg::SysReg *lookupSysReg(unsigned Val, bool Read,
                                                 const MCSubtargetInfo &STI) {
  const AArch64SysReg::SysReg *Reg = AArch64SysReg::lookupSysRegByEncoding(Val);

  if (Reg && !isValidSysReg(Reg, Read, STI))
    Reg = AArch64SysReg::lookupSysRegByName(Reg->AltName);

  return Reg;
}

void AArch64InstPrinter::printMRSSystemRegister(const MCInst *MI, unsigned OpNo,
                                                const MCSubtargetInfo &STI,
                                                raw_ostream &O) {
  unsigned Val = MI->getOperand(OpNo).getImm();

  // Horrible hack for the one register that has identical encodings but
  // different names in MSR and MRS. Because of this, one of MRS and MSR is
  // going to get the wrong entry
  if (Val == AArch64SysReg::DBGDTRRX_EL0) {
    O << "DBGDTRRX_EL0";
    return;
  }

  // Horrible hack for two different registers having the same encoding.
  if (Val == AArch64SysReg::TRCEXTINSELR) {
    O << "TRCEXTINSELR";
    return;
  }

  const AArch64SysReg::SysReg *Reg = lookupSysReg(Val, true /*Read*/, STI);

  if (isValidSysReg(Reg, true /*Read*/, STI))
    O << Reg->Name;
  else
    O << AArch64SysReg::genericRegisterString(Val);
}

void AArch64InstPrinter::printMSRSystemRegister(const MCInst *MI, unsigned OpNo,
                                                const MCSubtargetInfo &STI,
                                                raw_ostream &O) {
  unsigned Val = MI->getOperand(OpNo).getImm();

  // Horrible hack for the one register that has identical encodings but
  // different names in MSR and MRS. Because of this, one of MRS and MSR is
  // going to get the wrong entry
  if (Val == AArch64SysReg::DBGDTRTX_EL0) {
    O << "DBGDTRTX_EL0";
    return;
  }

  // Horrible hack for two different registers having the same encoding.
  if (Val == AArch64SysReg::TRCEXTINSELR) {
    O << "TRCEXTINSELR";
    return;
  }

  const AArch64SysReg::SysReg *Reg = lookupSysReg(Val, false /*Read*/, STI);

  if (isValidSysReg(Reg, false /*Read*/, STI))
    O << Reg->Name;
  else
    O << AArch64SysReg::genericRegisterString(Val);
}

void AArch64InstPrinter::printSystemPStateField(const MCInst *MI, unsigned OpNo,
                                                const MCSubtargetInfo &STI,
                                                raw_ostream &O) {
  unsigned Val = MI->getOperand(OpNo).getImm();

  auto PStateImm15 = AArch64PState::lookupPStateImm0_15ByEncoding(Val);
  auto PStateImm1 = AArch64PState::lookupPStateImm0_1ByEncoding(Val);
  if (PStateImm15 && PStateImm15->haveFeatures(STI.getFeatureBits()))
    O << PStateImm15->Name;
  else if (PStateImm1 && PStateImm1->haveFeatures(STI.getFeatureBits()))
    O << PStateImm1->Name;
  else
    O << "#" << formatImm(Val);
}

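// Illustrative: an encoding that matches a named PSTATE field (for example
// SPSel) prints its name, giving forms such as "msr SPSel, #1"; an unallocated
// encoding falls back to the raw "#<imm>" form.
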
void AArch64InstPrinter::printSIMDType10Operand(const MCInst *MI, unsigned OpNo,
                                                const MCSubtargetInfo &STI,
                                                raw_ostream &O) {
  unsigned RawVal = MI->getOperand(OpNo).getImm();
  uint64_t Val = AArch64_AM::decodeAdvSIMDModImmType10(RawVal);
  markup(O, Markup::Immediate) << format("#%#016llx", Val);
}

template<int64_t Angle, int64_t Remainder>
void AArch64InstPrinter::printComplexRotationOp(const MCInst *MI, unsigned OpNo,
                                                const MCSubtargetInfo &STI,
                                                raw_ostream &O) {
  unsigned Val = MI->getOperand(OpNo).getImm();
  markup(O, Markup::Immediate) << "#" << (Val * Angle) + Remainder;
}

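// Illustrative instantiations of the template above (assuming the usual FCMLA
// and FCADD rotation operands): printComplexRotationOp<90, 0> prints operand
// value 2 as "#180", and printComplexRotationOp<180, 90> prints operand value
// 1 as "#270".
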
void AArch64InstPrinter::printSVEPattern(const MCInst *MI, unsigned OpNum,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  unsigned Val = MI->getOperand(OpNum).getImm();
  if (auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByEncoding(Val))
    O << Pat->Name;
  else
    markup(O, Markup::Immediate) << '#' << formatImm(Val);
}

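// Illustrative: predicate-pattern encoding 31 prints as "all" and 0 as "pow2";
// encodings without a named pattern fall back to "#<imm>".
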
void AArch64InstPrinter::printSVEVecLenSpecifier(const MCInst *MI,
                                                 unsigned OpNum,
                                                 const MCSubtargetInfo &STI,
                                                 raw_ostream &O) {
  unsigned Val = MI->getOperand(OpNum).getImm();
  // Pattern has only 1 bit
  if (Val > 1)
    llvm_unreachable("Invalid vector length specifier");
  if (auto Pat =
          AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByEncoding(Val))
    O << Pat->Name;
  else
    llvm_unreachable("Invalid vector length specifier");
}

template <char suffix>
void AArch64InstPrinter::printSVERegOp(const MCInst *MI, unsigned OpNum,
                                       const MCSubtargetInfo &STI,
                                       raw_ostream &O) {
  switch (suffix) {
  case 0:
  case 'b':
  case 'h':
  case 's':
  case 'd':
  case 'q':
    break;
  default: llvm_unreachable("Invalid kind specifier.");
  }

  MCRegister Reg = MI->getOperand(OpNum).getReg();
  printRegName(O, Reg);
  if (suffix != 0)
    O << '.' << suffix;
}

template <typename T>
void AArch64InstPrinter::printImmSVE(T Value, raw_ostream &O) {
  std::make_unsigned_t<T> HexValue = Value;

  if (getPrintImmHex())
    markup(O, Markup::Immediate) << '#' << formatHex((uint64_t)HexValue);
  else
    markup(O, Markup::Immediate) << '#' << formatDec(Value);

  if (CommentStream) {
    // Do the opposite to that used for instruction operands.
    if (getPrintImmHex())
      *CommentStream << '=' << formatDec(HexValue) << '\n';
    else
      *CommentStream << '=' << formatHex((uint64_t)Value) << '\n';
  }
}

template <typename T>
void AArch64InstPrinter::printImm8OptLsl(const MCInst *MI, unsigned OpNum,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  unsigned UnscaledVal = MI->getOperand(OpNum).getImm();
  unsigned Shift = MI->getOperand(OpNum + 1).getImm();
  assert(AArch64_AM::getShiftType(Shift) == AArch64_AM::LSL &&
         "Unexpected shift type!");

  // #0 lsl #8 is never pretty printed
  if ((UnscaledVal == 0) && (AArch64_AM::getShiftValue(Shift) != 0)) {
    markup(O, Markup::Immediate) << '#' << formatImm(UnscaledVal);
    printShifter(MI, OpNum + 1, STI, O);
    return;
  }

  T Val;
  if (std::is_signed<T>())
    Val = (int8_t)UnscaledVal * (1 << AArch64_AM::getShiftValue(Shift));
  else
    Val = (uint8_t)UnscaledVal * (1 << AArch64_AM::getShiftValue(Shift));

  printImmSVE(Val, O);
}

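// Worked example (illustrative): an unscaled value of 1 with "lsl #8" is
// printed as the combined immediate "#256", whereas 0 with "lsl #8" keeps the
// explicit "#0, lsl #8" form handled above.
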
template <typename T>
void AArch64InstPrinter::printSVELogicalImm(const MCInst *MI, unsigned OpNum,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  typedef std::make_signed_t<T> SignedT;
  typedef std::make_unsigned_t<T> UnsignedT;

  uint64_t Val = MI->getOperand(OpNum).getImm();
  UnsignedT PrintVal = AArch64_AM::decodeLogicalImmediate(Val, 64);

  // Prefer the default format for 16bit values, hex otherwise.
  if ((int16_t)PrintVal == (SignedT)PrintVal)
    printImmSVE((T)PrintVal, O);
  else if ((uint16_t)PrintVal == PrintVal)
    printImmSVE(PrintVal, O);
  else
    markup(O, Markup::Immediate) << '#' << formatHex((uint64_t)PrintVal);
}

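// Illustrative: a logical immediate decoding to 0xffffffffffffffff fits the
// 16-bit check and typically prints as "#-1" for a signed element type, while
// one decoding to 0x00ff00ff00ff00ff does not fit 16 bits and is printed in
// hex.
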
template <int Width>
void AArch64InstPrinter::printZPRasFPR(const MCInst *MI, unsigned OpNum,
                                       const MCSubtargetInfo &STI,
                                       raw_ostream &O) {
  unsigned Base;
  switch (Width) {
  case 8:   Base = AArch64::B0; break;
  case 16:  Base = AArch64::H0; break;
  case 32:  Base = AArch64::S0; break;
  case 64:  Base = AArch64::D0; break;
  case 128: Base = AArch64::Q0; break;
  default:
    llvm_unreachable("Unsupported width");
  }
  MCRegister Reg = MI->getOperand(OpNum).getReg();
  printRegName(O, Reg - AArch64::Z0 + Base);
}

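// Illustrative: with Width == 64, an operand of Z3 is rebased from Z0 onto D0
// and prints as "d3"; with Width == 32 it would print as "s3".
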
template <unsigned ImmIs0, unsigned ImmIs1>
void AArch64InstPrinter::printExactFPImm(const MCInst *MI, unsigned OpNum,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  auto *Imm0Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmIs0);
  auto *Imm1Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmIs1);
  unsigned Val = MI->getOperand(OpNum).getImm();
  markup(O, Markup::Immediate)
      << "#" << (Val ? Imm1Desc->Repr : Imm0Desc->Repr);
}

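// Illustrative: for the SVE FADD/FSUB immediate forms, the template above is
// instantiated so that operand value 0 prints roughly as "#0.5" and 1 as
// "#1.0", using the Repr strings of the two exact-FP-immediate table entries.
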
void AArch64InstPrinter::printGPR64as32(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  MCRegister Reg = MI->getOperand(OpNum).getReg();
  printRegName(O, getWRegFromXReg(Reg));
}

void AArch64InstPrinter::printGPR64x8(const MCInst *MI, unsigned OpNum,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  MCRegister Reg = MI->getOperand(OpNum).getReg();
  printRegName(O, MRI.getSubReg(Reg, AArch64::x8sub_0));
}

void AArch64InstPrinter::printSyspXzrPair(const MCInst *MI, unsigned OpNum,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  MCRegister Reg = MI->getOperand(OpNum).getReg();
  assert(Reg == AArch64::XZR &&
         "MC representation of SyspXzrPair should be XZR");
  O << getRegisterName(Reg) << ", " << getRegisterName(Reg);
}

void AArch64InstPrinter::printPHintOp(const MCInst *MI, unsigned OpNum,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  unsigned Op = MI->getOperand(OpNum).getImm();
  auto PH = AArch64PHint::lookupPHintByEncoding(Op);
  if (PH)
    O << PH->Name;
  else
    markup(O, Markup::Immediate) << '#' << formatImm(Op);