//===- ARMDisassemblerCore.cpp - ARM disassembler helpers ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is part of the ARM Disassembler.
// It contains code to represent the core concepts of Builder and DisassembleFP
// to solve the problem of disassembling an ARM instr.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "arm-disassembler"

#include "ARMDisassemblerCore.h"
#include "ARMAddressingModes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

//#define DEBUG(X) do { X; } while (0)
/// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
/// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
/// describing the operand info for each ARMInsts[i].
///
/// Together with an instruction's encoding format, we can take advantage of the
/// NumOperands and the OpInfo fields of the target instruction description in
/// the quest to build out the MCOperand list for an MCInst.
///
/// The general guideline is that with a known format, the number of dst and src
/// operands is well-known.  The dst is built first, followed by the src
/// operand(s).  The operands not yet used at this point are for the Implicit
/// Uses and Defs by this instr.  For the Uses part, the pred:$p operand is
/// defined with two components:
///
/// def pred {  // Operand PredicateOperand
///   ValueType Type = OtherVT;
///   string PrintMethod = "printPredicateOperand";
///   string AsmOperandLowerMethod = ?;
///   dag MIOperandInfo = (ops i32imm, CCR);
///   AsmOperandClass ParserMatchClass = ImmAsmOperand;
///   dag DefaultOps = (ops (i32 14), (i32 zero_reg));
/// }
///
/// which is manifested by the TargetOperandInfo[] of:
///
/// { 0, 0|(1<<TOI::Predicate), 0 },
/// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }
///
/// So the first predicate MCOperand corresponds to the immediate part of the
/// ARM condition field (Inst{31-28}), and the second predicate MCOperand
/// corresponds to a register kind of ARM::CPSR.
///
/// For the Defs part, in the simple case of only cc_out:$s, we have:
///
/// def cc_out {  // Operand OptionalDefOperand
///   ValueType Type = OtherVT;
///   string PrintMethod = "printSBitModifierOperand";
///   string AsmOperandLowerMethod = ?;
///   dag MIOperandInfo = (ops CCR);
///   AsmOperandClass ParserMatchClass = ImmAsmOperand;
///   dag DefaultOps = (ops (i32 zero_reg));
/// }
///
/// which is manifested by the one TargetOperandInfo of:
///
/// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }
///
/// And this maps to one MCOperand with the register kind of ARM::CPSR.
#include "ARMGenInstrInfo.inc"
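
// For illustration only: with the descriptions above, a predicated,
// flag-setting data-processing instruction typically ends up with two
// trailing pred operands, Imm(cond) and Reg(ARM::CPSR), plus one optional-def
// cc_out operand that is Reg(ARM::CPSR) when the S bit is set and Reg(0)
// when it is not.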
const char *ARMUtils::OpcodeName(unsigned Opcode) {
  return ARMInsts[Opcode].Name;
}
// Return the register enum based on RegClassID and the raw register number.
static unsigned getRegisterEnum(BO B, unsigned RegClassID, unsigned RawRegister) {
  // For this purpose, we can treat rGPR as if it were GPR.
  if (RegClassID == ARM::rGPRRegClassID) RegClassID = ARM::GPRRegClassID;

  // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
  // A Q register is encoded as an even D register, so halve the raw number.
  unsigned RegNum =
    RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;

  switch (RegNum) {
  case 0:
    switch (RegClassID) {
    case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
    case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
    case ARM::DPR_VFP2RegClassID: return ARM::D0;
    case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
    case ARM::QPR_VFP2RegClassID: return ARM::Q0;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
    }
    break;
  case 1:
    switch (RegClassID) {
    case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
    case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
    case ARM::DPR_VFP2RegClassID: return ARM::D1;
    case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
    case ARM::QPR_VFP2RegClassID: return ARM::Q1;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
    }
    break;
  case 2:
    switch (RegClassID) {
    case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
    case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
    case ARM::DPR_VFP2RegClassID: return ARM::D2;
    case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
    case ARM::QPR_VFP2RegClassID: return ARM::Q2;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
    }
    break;
  case 3:
    switch (RegClassID) {
    case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
    case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
    case ARM::DPR_VFP2RegClassID: return ARM::D3;
    case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
    case ARM::QPR_VFP2RegClassID: return ARM::Q3;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
    }
    break;
  case 4:
    switch (RegClassID) {
    case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
    case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
    case ARM::DPR_VFP2RegClassID: return ARM::D4;
    case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
    }
    break;
  case 5:
    switch (RegClassID) {
    case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
    case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
    case ARM::DPR_VFP2RegClassID: return ARM::D5;
    case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
    }
    break;
  case 6:
    switch (RegClassID) {
    case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
    case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
    case ARM::DPR_VFP2RegClassID: return ARM::D6;
    case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
    }
    break;
  case 7:
    switch (RegClassID) {
    case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
    case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
    case ARM::DPR_VFP2RegClassID: return ARM::D7;
    case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
    }
    break;
  case 8:
    switch (RegClassID) {
    case ARM::GPRRegClassID: return ARM::R8;
    case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
    case ARM::QPRRegClassID: return ARM::Q8;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
    }
    break;
  case 9:
    switch (RegClassID) {
    case ARM::GPRRegClassID: return ARM::R9;
    case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
    case ARM::QPRRegClassID: return ARM::Q9;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
    }
    break;
  case 10:
    switch (RegClassID) {
    case ARM::GPRRegClassID: return ARM::R10;
    case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
    case ARM::QPRRegClassID: return ARM::Q10;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
    }
    break;
  case 11:
    switch (RegClassID) {
    case ARM::GPRRegClassID: return ARM::R11;
    case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
    case ARM::QPRRegClassID: return ARM::Q11;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
    }
    break;
  case 12:
    switch (RegClassID) {
    case ARM::GPRRegClassID: return ARM::R12;
    case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
    case ARM::QPRRegClassID: return ARM::Q12;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
    }
    break;
  case 13:
    switch (RegClassID) {
    case ARM::GPRRegClassID: return ARM::SP;
    case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
    case ARM::QPRRegClassID: return ARM::Q13;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
    }
    break;
  case 14:
    switch (RegClassID) {
    case ARM::GPRRegClassID: return ARM::LR;
    case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
    case ARM::QPRRegClassID: return ARM::Q14;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
    }
    break;
  case 15:
    switch (RegClassID) {
    case ARM::GPRRegClassID: return ARM::PC;
    case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
    case ARM::QPRRegClassID: return ARM::Q15;
    case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
    }
    break;
  case 16:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D16;
    case ARM::SPRRegClassID: return ARM::S16;
    }
    break;
  case 17:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D17;
    case ARM::SPRRegClassID: return ARM::S17;
    }
    break;
  case 18:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D18;
    case ARM::SPRRegClassID: return ARM::S18;
    }
    break;
  case 19:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D19;
    case ARM::SPRRegClassID: return ARM::S19;
    }
    break;
  case 20:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D20;
    case ARM::SPRRegClassID: return ARM::S20;
    }
    break;
  case 21:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D21;
    case ARM::SPRRegClassID: return ARM::S21;
    }
    break;
  case 22:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D22;
    case ARM::SPRRegClassID: return ARM::S22;
    }
    break;
  case 23:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D23;
    case ARM::SPRRegClassID: return ARM::S23;
    }
    break;
  case 24:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D24;
    case ARM::SPRRegClassID: return ARM::S24;
    }
    break;
  case 25:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D25;
    case ARM::SPRRegClassID: return ARM::S25;
    }
    break;
  case 26:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D26;
    case ARM::SPRRegClassID: return ARM::S26;
    }
    break;
  case 27:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D27;
    case ARM::SPRRegClassID: return ARM::S27;
    }
    break;
  case 28:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D28;
    case ARM::SPRRegClassID: return ARM::S28;
    }
    break;
  case 29:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D29;
    case ARM::SPRRegClassID: return ARM::S29;
    }
    break;
  case 30:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D30;
    case ARM::SPRRegClassID: return ARM::S30;
    }
    break;
  case 31:
    switch (RegClassID) {
    case ARM::DPRRegClassID: return ARM::D31;
    case ARM::SPRRegClassID: return ARM::S31;
    }
    break;
  }
  DEBUG(errs() << "Invalid (RegClassID, RawRegister) combination\n");
  // Encoding error.  Mark the builder with error code != 0.
  B->SetErr(-1);
  return 0;
}
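
// For illustration only, a couple of sample mappings:
//   getRegisterEnum(B, ARM::DPRRegClassID, 17) == ARM::D17
//   getRegisterEnum(B, ARM::QPRRegClassID, 4)  == ARM::Q2   (raw 4 >> 1 == 2)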
///////////////////////////////
//                           //
//     Utility Functions     //
//                           //
///////////////////////////////
// Extract/Decode Rd: Inst{15-12}.
static inline unsigned decodeRd(uint32_t insn) {
  return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
}

// Extract/Decode Rn: Inst{19-16}.
static inline unsigned decodeRn(uint32_t insn) {
  return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
}

// Extract/Decode Rm: Inst{3-0}.
static inline unsigned decodeRm(uint32_t insn) {
  return (insn & ARMII::GPRRegMask);
}

// Extract/Decode Rs: Inst{11-8}.
static inline unsigned decodeRs(uint32_t insn) {
  return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
}

static inline unsigned getCondField(uint32_t insn) {
  return (insn >> ARMII::CondShift);
}

static inline unsigned getIBit(uint32_t insn) {
  return (insn >> ARMII::I_BitShift) & 1;
}

static inline unsigned getAM3IBit(uint32_t insn) {
  return (insn >> ARMII::AM3_I_BitShift) & 1;
}

static inline unsigned getPBit(uint32_t insn) {
  return (insn >> ARMII::P_BitShift) & 1;
}

static inline unsigned getUBit(uint32_t insn) {
  return (insn >> ARMII::U_BitShift) & 1;
}

static inline unsigned getPUBits(uint32_t insn) {
  return (insn >> ARMII::U_BitShift) & 3;
}

static inline unsigned getSBit(uint32_t insn) {
  return (insn >> ARMII::S_BitShift) & 1;
}

static inline unsigned getWBit(uint32_t insn) {
  return (insn >> ARMII::W_BitShift) & 1;
}

static inline unsigned getDBit(uint32_t insn) {
  return (insn >> ARMII::D_BitShift) & 1;
}

static inline unsigned getNBit(uint32_t insn) {
  return (insn >> ARMII::N_BitShift) & 1;
}

static inline unsigned getMBit(uint32_t insn) {
  return (insn >> ARMII::M_BitShift) & 1;
}
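
// For illustration only, consider the word 0xE0812003 (add r2, r1, r3):
//   getCondField(0xE0812003) == 0xE  (AL)
//   decodeRn(0xE0812003)     == 1    (Rn)
//   decodeRd(0xE0812003)     == 2    (Rd)
//   decodeRm(0xE0812003)     == 3    (Rm)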
// See A8.4 Shifts applied to a register.
//     A8.4.2 Register controlled shifts.
//
// getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding
// bits into llvm enums for shift opcode.  The API clients should pass in the
// value encoded with two bits, so the assert stays to signal a wrong API usage.
//
// A8-12: DecodeRegShift()
static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
  switch (bits) {
  default: assert(0 && "No such value"); return ARM_AM::no_shift;
  case 0:  return ARM_AM::lsl;
  case 1:  return ARM_AM::lsr;
  case 2:  return ARM_AM::asr;
  case 3:  return ARM_AM::ror;
  }
}
// See A8.4 Shifts applied to a register.
//     A8.4.1 Constant shifts.
//
// getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
// encodings into the intended ShiftOpc and shift amount.
//
// A8-11: DecodeImmShift()
static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
  // An imm5 of 0 has a special meaning for some shift types (A8.4.1).
  if (ShImm != 0)
    return;
  switch (ShOp) {
  case ARM_AM::no_shift:
  case ARM_AM::rrx:
    break;
  case ARM_AM::lsl:
    // LSL #0 means no shift at all.
    ShOp = ARM_AM::no_shift;
    break;
  case ARM_AM::lsr:
  case ARM_AM::asr:
    // LSR/ASR with imm5 == 0 encodes a shift amount of 32.
    ShImm = 32;
    break;
  case ARM_AM::ror:
    // ROR #0 encodes RRX.
    ShOp = ARM_AM::rrx;
    break;
  }
}
// getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
// bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode.  The API
// clients should pass in the value encoded with two bits, so the assert stays
// to signal a wrong API usage.
static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
  switch (bits) {
  default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
  case 1:  return ARM_AM::ia;   // P=0 U=1
  case 3:  return ARM_AM::ib;   // P=1 U=1
  case 0:  return ARM_AM::da;   // P=0 U=0
  case 2:  return ARM_AM::db;   // P=1 U=0
  }
}
////////////////////////////////////////////
//                                        //
//    Disassemble function definitions    //
//                                        //
////////////////////////////////////////////

/// There is a separate Disassemble*Frm function entry for disassembly of an
/// ARM instr into a list of MCOperands in the appropriate order, with possible
/// dst, followed by possible src(s).
///
/// The processing of the predicate, and the 'S' modifier bit, if MI modifies
/// the CPSR, is factored into ARMBasicMCBuilder's method named
/// TryPredicateAndSBitModifier.
static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  assert(0 && "Unexpected pseudo instruction!");
  return false;
}
// Multiply Instructions.
// MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLS:
//     Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
//
// MUL, SMMUL, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT:
//     Rd{19-16} Rn{3-0} Rm{11-8}
//
// SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT:
//     RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
//
// The mapping of the multiply registers to the "regular" ARM registers, where
// there are convenience decoder functions, is:
//
//   Inst{15-12} => decodeRd
//   Inst{19-16} => decodeRn
//   Inst{11-8}  => decodeRs
//   Inst{3-0}   => decodeRm
static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  unsigned short NumDefs = TID.getNumDefs();
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
  assert(NumOps >= 3
         && OpInfo[0].RegClass == ARM::GPRRegClassID
         && OpInfo[1].RegClass == ARM::GPRRegClassID
         && OpInfo[2].RegClass == ARM::GPRRegClassID
         && "Expect three register operands");

  // Instructions with two destination registers have RdLo{15-12} first.
  if (NumDefs == 2) {
    assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
           "Expect 4th register operand");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn))));
    ++OpIdx;
  }

  // The destination register: RdHi{19-16} or Rd{19-16}.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));

  // The two src registers: Rn{3-0}, then Rm{11-8}.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRm(insn))));
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRs(insn))));
  OpIdx += 3;

  // Many multiply instructions (e.g., MLA) have three src registers.
  // The third register operand is Ra{15-12}.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn))));
    ++OpIdx;
  }

  return true;
}
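
// For illustration only, take the word 0xE0243291 (mla r4, r1, r2, r3):
// decodeRn gives 4 (Rd), decodeRm gives 1 (Rn), decodeRs gives 2 (Rm), and
// decodeRd gives 3 (Ra), matching the operand order built above.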
// Helper routines for disassembly of coprocessor instructions.

static bool LdStCopOpcode(unsigned Opcode) {
  if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
      (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
    return true;
  return false;
}

static bool CoprocessorOpcode(unsigned Opcode) {
  if (LdStCopOpcode(Opcode))
    return true;

  switch (Opcode) {
  default:
    return false;
  case ARM::CDP:  case ARM::CDP2:
  case ARM::MCR:  case ARM::MCR2:  case ARM::MRC:  case ARM::MRC2:
  case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
    return true;
  }
}

static inline unsigned GetCoprocessor(uint32_t insn) {
  return slice(insn, 11, 8);
}

static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
  return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
}

static inline unsigned GetCopOpc2(uint32_t insn) {
  return slice(insn, 7, 5);
}

static inline unsigned GetCopOpc(uint32_t insn) {
  return slice(insn, 7, 4);
}
// Most of the operands are in immediate forms, except Rd and Rn, which are ARM
// GPR registers.
//
// CDP, CDP2:                cop opc1 CRd CRn CRm opc2
//
// MCR, MCR2, MRC, MRC2:     cop opc1 Rd CRn CRm opc2
//
// MCRR, MCRR2, MRRC, MRRC2: cop opc Rd Rn CRm
//
// LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn [+/-]imm8:00
//
// STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn [+/-]imm8:00
//
// LDC_OPTION: cop CRd Rn imm8
//
// STC_OPTION: cop CRd Rn imm8
static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  assert(NumOps >= 5 && "Num of operands >= 5 for coprocessor instr");

  unsigned &OpIdx = NumOpsAdded;
  bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
                    Opcode == ARM::MRRC || Opcode == ARM::MRRC2);
  // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
  bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
  bool LdStCop = LdStCopOpcode(Opcode);

  OpIdx = 0;

  MI.addOperand(MCOperand::CreateImm(GetCoprocessor(insn)));

  if (LdStCop) {
    // Unindexed if P:W = 0b00 --> _OPTION variant.
    unsigned PW = getPBit(insn) << 1 | getWBit(insn);

    MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));

    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    if (PW) {
      MI.addOperand(MCOperand::CreateReg(0));
      ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
      unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
                                          ARM_AM::no_shift);
      MI.addOperand(MCOperand::CreateImm(Offset));
      OpIdx = 5;
    } else {
      MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));
      OpIdx = 4;
    }
  } else {
    MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
                                                 : GetCopOpc1(insn, NoGPR)));

    MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
                        : MCOperand::CreateReg(
                            getRegisterEnum(B, ARM::GPRRegClassID,
                                            decodeRd(insn))));

    MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
                                getRegisterEnum(B, ARM::GPRRegClassID,
                                                decodeRn(insn)))
                            : MCOperand::CreateImm(decodeRn(insn)));

    MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));

    OpIdx = 5;

    if (!OneCopOpc) {
      MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
      ++OpIdx;
    }
  }

  return true;
}
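
// For illustration only: in an LDC/STC word with U == 1 and Inst{7-0} == 0x12,
// the offset passed to getAM2Opc() above is 0x12 << 2 == 72 bytes, since the
// coprocessor imm8 is a word offset ([+/-]imm8:00).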
// Branch Instructions.
// BLr9:      SignExtend(Imm24:'00', 32)
// Bcc, BLr9_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
// SMC:       ZeroExtend(imm4, 32)
// SVC:       ZeroExtend(Imm24, 32)
//
// Various coprocessor instructions are assigned BrFrm arbitrarily.
// Delegates to DisassembleCoprocessor() helper function.
//
// MRS/MRSsys:   Rd
// MSR/MSRsys:   Rm mask=Inst{19-16}
// BXJ:          Rm
// MSRi/MSRsysi: so_imm
// SRSW/SRS:     ldstm_mode:$amode mode_imm
// RFEW/RFE:     ldstm_mode:$amode Rn
static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  if (CoprocessorOpcode(Opcode))
    return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B);

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  if (!OpInfo) return false;

  // MRS and MRSsys take one GPR reg Rd.
  if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
    assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn))));
    NumOpsAdded = 1;
    return true;
  }
  // BXJ takes one GPR reg Rm.
  if (Opcode == ARM::BXJ) {
    assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    NumOpsAdded = 1;
    return true;
  }
  // MSR and MSRsys take one GPR reg Rm, followed by the mask.
  if (Opcode == ARM::MSR || Opcode == ARM::MSRsys) {
    assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
    NumOpsAdded = 2;
    return true;
  }
  // MSRi and MSRsysi take one so_imm operand, followed by the mask.
  if (Opcode == ARM::MSRi || Opcode == ARM::MSRsysi) {
    // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
    // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
    // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
    unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
    unsigned Imm = insn & 0xFF;
    MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
    MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
    NumOpsAdded = 2;
    return true;
  }
  if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
      Opcode == ARM::RFEW || Opcode == ARM::RFE) {
    ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
    MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));

    if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
      MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
    else
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                         decodeRn(insn))));
    NumOpsAdded = 2;
    return true;
  }

  assert((Opcode == ARM::Bcc || Opcode == ARM::BLr9 || Opcode == ARM::BLr9_pred
          || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
         "Unexpected Opcode");

  assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");

  int Imm32 = 0;
  if (Opcode == ARM::SMC) {
    // ZeroExtend(imm4, 32) where imm4 = Inst{3-0}.
    Imm32 = slice(insn, 3, 0);
  } else if (Opcode == ARM::SVC) {
    // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
    Imm32 = slice(insn, 23, 0);
  } else {
    // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
    unsigned Imm26 = slice(insn, 23, 0) << 2;
    Imm32 = SignExtend32<26>(Imm26);

    // When executing an ARM instruction, PC reads as the address of the
    // current instruction plus 8.  The assembler subtracts 8 from the
    // difference between the branch instruction and the target address;
    // the disassembler has to add 8 to compensate.
    Imm32 += 8;
  }

  MI.addOperand(MCOperand::CreateImm(Imm32));
  NumOpsAdded = 1;

  return true;
}
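
// For illustration only: the word 0xEAFFFFFE ("b .") has imm24 == 0xFFFFFE,
// so Imm26 == 0x3FFFFF8, SignExtend32<26>() gives -8, and adding 8 yields 0,
// i.e., a branch whose target is the branch instruction itself.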
// Misc. Branch Instructions.
// BR_JTadd, BR_JTr, BR_JTm
static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  if (!OpInfo) return false;

  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  // BX_RET has only two predicate operands, do an early return.
  if (Opcode == ARM::BX_RET)
    return true;

  // BLXr9 and BRIND take one GPR reg.
  if (Opcode == ARM::BLXr9 || Opcode == ARM::BRIND) {
    assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    OpIdx = 1;
    return true;
  }

  // BR_JTadd is an ADD with Rd = PC, (Rn, Rm) as the target and index regs.
  if (Opcode == ARM::BR_JTadd) {
    // InOperandList with GPR:$target and GPR:$idx regs.

    assert(NumOps == 4 && "Expect 4 operands");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));

    // Fill in the two remaining imm operands to signify build completion.
    MI.addOperand(MCOperand::CreateImm(0));
    MI.addOperand(MCOperand::CreateImm(0));

    OpIdx = 4;
    return true;
  }

  // BR_JTr is a MOV with Rd = PC, and Rm as the source register.
  if (Opcode == ARM::BR_JTr) {
    // InOperandList with GPR::$target reg.

    assert(NumOps == 3 && "Expect 3 operands");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));

    // Fill in the two remaining imm operands to signify build completion.
    MI.addOperand(MCOperand::CreateImm(0));
    MI.addOperand(MCOperand::CreateImm(0));

    OpIdx = 3;
    return true;
  }

  // BR_JTm is an LDR with Rt = PC.
  if (Opcode == ARM::BR_JTm) {
    // This is the reg/reg form, with the base reg followed by +/- reg and a
    // shift op with imm.  See also ARMAddressingModes.h (Addressing Mode #2).

    assert(NumOps == 5 && getIBit(insn) == 1 && "Expect 5 operands && I-bit=1");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));

    ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;

    // Disassemble the offset reg (Rm), shift type, and immediate shift length.
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShImm = slice(insn, 11, 7);

    // A8.4.1.  Possible rrx or shift amount of 32...
    getImmShiftSE(ShOp, ShImm);
    MI.addOperand(MCOperand::CreateImm(
                    ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));

    // Fill in the two remaining imm operands to signify build completion.
    MI.addOperand(MCOperand::CreateImm(0));
    MI.addOperand(MCOperand::CreateImm(0));

    OpIdx = 5;
    return true;
  }

  return false;
}
static inline bool getBFCInvMask(uint32_t insn, uint32_t &mask) {
  uint32_t lsb = slice(insn, 11, 7);
  uint32_t msb = slice(insn, 20, 16);
  uint32_t Val = 0;
  if (msb < lsb) {
    DEBUG(errs() << "Encoding error: msb < lsb\n");
    return false;
  }

  for (uint32_t i = lsb; i <= msb; ++i)
    Val |= 1 << i;
  mask = ~Val;
  return true;
}
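
// For illustration only: with lsb == 4 and msb == 7, the loop sets Val to
// 0xF0 and mask becomes 0xFFFFFF0F, so BFC clears bits 7:4 of Rd.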
// A major complication is the fact that some of the saturating add/subtract
// operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
// They are QADD, QDADD, QDSUB, and QSUB.
static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  unsigned short NumDefs = TID.getNumDefs();
  bool isUnary = isUnaryDP(TID.TSFlags);
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  // Disassemble register def if there is one.
  if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn))));
    ++OpIdx;
  }

  // Now disassemble the src operands.
  if (OpIdx >= NumOps)
    return false;

  // Special-case handling of BFC/BFI/SBFX/UBFX.
  if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
    MI.addOperand(MCOperand::CreateReg(0));
    if (Opcode == ARM::BFI) {
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                         decodeRm(insn))));
      ++OpIdx;
    }
    uint32_t mask = 0;
    if (!getBFCInvMask(insn, mask))
      return false;

    MI.addOperand(MCOperand::CreateImm(mask));
    OpIdx += 2;
    return true;
  }
  if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
    MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16) + 1));
    OpIdx += 3;
    return true;
  }

  bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
               Opcode == ARM::QDSUB || Opcode == ARM::QSUB);

  // BinaryDP has an Rn operand.
  if (!isUnary) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(B, ARM::GPRRegClassID,
                                    RmRn ? decodeRm(insn) : decodeRn(insn))));
    ++OpIdx;
  }

  // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
  if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
    MI.addOperand(MCOperand::CreateReg(0));
    ++OpIdx;
  }

  // Now disassemble operand 2.
  if (OpIdx >= NumOps)
    return false;

  if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
    // We have a reg/reg form.
    // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
    // routed here as well.
    // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(B, ARM::GPRRegClassID,
                                    RmRn ? decodeRn(insn) : decodeRm(insn))));
    ++OpIdx;
  } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
    // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
    assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
    unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
    MI.addOperand(MCOperand::CreateImm(Imm16));
    ++OpIdx;
  } else {
    // We have a reg/imm form.
    // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
    // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
    // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
    assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
    unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
    unsigned Imm = insn & 0xFF;
    MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
    ++OpIdx;
  }

  return true;
}
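
// For illustration only: "mov r0, #0x3F0" encodes Imm == 0x3F and a rotate
// field of 0xE, so the value recovered above is rotr32(0x3F, 28) == 0x3F0.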
static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  unsigned short NumDefs = TID.getNumDefs();
  bool isUnary = isUnaryDP(TID.TSFlags);
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  // Disassemble register def if there is one.
  if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn))));
    ++OpIdx;
  }

  // Disassemble the src operands.
  if (OpIdx >= NumOps)
    return false;

  // BinaryDP has an Rn operand.
  if (!isUnary) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
  if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
    MI.addOperand(MCOperand::CreateReg(0));
    ++OpIdx;
  }

  // Disassemble operand 2, which consists of three components.
  if (OpIdx + 2 >= NumOps)
    return false;

  assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
         (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
         (OpInfo[OpIdx+2].RegClass < 0) &&
         "Expect 3 reg operands");

  // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
  unsigned Rs = slice(insn, 4, 4);

  // First the shifted register Rm.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRm(insn))));
  if (Rs) {
    // Register-controlled shifts: [Rm, Rs, shift].
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRs(insn))));
    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
  } else {
    // Constant shifts: [Rm, reg0, shift_imm].
    MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShImm = slice(insn, 11, 7);

    // A8.4.1.  Possible rrx or shift amount of 32...
    getImmShiftSE(ShOp, ShImm);
    MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
  }
  OpIdx += 3;

  return true;
}
static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  bool isPrePost = isPrePostLdSt(TID.TSFlags);
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;

  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  assert(((!isStore && TID.getNumDefs() > 0) ||
          (isStore && (TID.getNumDefs() == 0 || isPrePost)))
         && "Invalid arguments");

  // Operand 0 of a pre- and post-indexed store is the address base writeback.
  if (isPrePost && isStore) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  // Disassemble the dst/src operand.
  if (OpIdx >= NumOps)
    return false;

  assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  ++OpIdx;

  // After dst of a pre- and post-indexed load is the address base writeback.
  if (isPrePost && !isStore) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  // Disassemble the base operand.
  if (OpIdx >= NumOps)
    return false;

  assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
         && "Index mode or tied_to operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));
  ++OpIdx;

  // For the reg/reg form, the base reg is followed by +/- reg and a shift op
  // with imm.  For the immediate form, it is followed by +/- imm12.
  // See also ARMAddressingModes.h (Addressing Mode #2).
  if (OpIdx + 1 >= NumOps)
    return false;

  assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
         (OpInfo[OpIdx+1].RegClass < 0) &&
         "Expect 1 reg operand followed by 1 imm operand");

  ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  if (getIBit(insn) == 0) {
    MI.addOperand(MCOperand::CreateReg(0));

    // Disassemble the 12-bit immediate offset.
    unsigned Imm12 = slice(insn, 11, 0);
    unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift);
    MI.addOperand(MCOperand::CreateImm(Offset));
  } else {
    // Disassemble the offset reg (Rm), shift type, and immediate shift length.
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShImm = slice(insn, 11, 7);

    // A8.4.1.  Possible rrx or shift amount of 32...
    getImmShiftSE(ShOp, ShImm);
    MI.addOperand(MCOperand::CreateImm(
                    ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
  }
  OpIdx += 2;

  return true;
}
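
// For illustration only: "ldr r0, [r1, #-4]" has I == 0, U == 0, and
// imm12 == 4, so the immediate form above packs ARM_AM::sub and 4 into the
// single AM2 offset operand.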
static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false, B);
}

static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
}
static bool HasDualReg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
  case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
    return true;
  }
}
static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  bool isPrePost = isPrePostLdSt(TID.TSFlags);
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;

  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  assert(((!isStore && TID.getNumDefs() > 0) ||
          (isStore && (TID.getNumDefs() == 0 || isPrePost)))
         && "Invalid arguments");

  // Operand 0 of a pre- and post-indexed store is the address base writeback.
  if (isPrePost && isStore) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  bool DualReg = HasDualReg(Opcode);

  // Disassemble the dst/src operand.
  if (OpIdx >= NumOps)
    return false;

  assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  ++OpIdx;

  // Fill in LDRD and STRD's second operand.
  if (DualReg) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn) + 1)));
    ++OpIdx;
  }

  // After dst of a pre- and post-indexed load is the address base writeback.
  if (isPrePost && !isStore) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  // Disassemble the base operand.
  if (OpIdx >= NumOps)
    return false;

  assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
         && "Index mode or tied_to operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));
  ++OpIdx;

  // For the reg/reg form, the base reg is followed by +/- reg.
  // For the immediate form, it is followed by +/- imm8.
  // See also ARMAddressingModes.h (Addressing Mode #3).
  if (OpIdx + 1 >= NumOps)
    return false;

  assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
         (OpInfo[OpIdx+1].RegClass < 0) &&
         "Expect 1 reg operand followed by 1 imm operand");

  ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  if (getAM3IBit(insn) == 1) {
    MI.addOperand(MCOperand::CreateReg(0));

    // Disassemble the 8-bit immediate offset.
    unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
    unsigned Imm4L = insn & 0xF;
    unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L);
    MI.addOperand(MCOperand::CreateImm(Offset));
  } else {
    // Disassemble the offset reg (Rm).
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0);
    MI.addOperand(MCOperand::CreateImm(Offset));
  }
  OpIdx += 2;

  return true;
}
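
// For illustration only: "ldrh r0, [r1, #0x21]" has Imm4H == 2 and
// Imm4L == 1, so the immediate form above packs (2 << 4) | 1 == 0x21 into the
// AM3 offset operand together with ARM_AM::add.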
static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false,
                                B);
}

static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
}
// The algorithm for disassembly of LdStMulFrm is different from the others
// because it explicitly populates the two predicate operands after operand 0
// (the base) and operand 1 (the AM4 mode imm).  After operand 3, we need to
// populate the reglist with each affected register encoded as an MCOperand.
static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  assert(NumOps >= 5 && "LdStMulFrm expects NumOps >= 5");

  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));

  // Writeback to base, if necessary.
  if (Opcode == ARM::LDM_UPD || Opcode == ARM::STM_UPD) {
    MI.addOperand(MCOperand::CreateReg(Base));
    ++OpIdx;
  }

  MI.addOperand(MCOperand::CreateReg(Base));

  ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
  MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));

  // Handling the two predicate operands before the reglist.
  int64_t CondVal = insn >> ARMII::CondShift;
  MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
  MI.addOperand(MCOperand::CreateReg(ARM::CPSR));

  OpIdx += 4;

  // Fill the variadic part of reglist.
  unsigned RegListBits = insn & ((1 << 16) - 1);
  for (unsigned i = 0; i < 16; ++i) {
    if ((RegListBits >> i) & 1) {
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                         i)));
      ++OpIdx;
    }
  }

  return true;
}
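
// For illustration only: "ldmia r0, {r1, r2, lr}" has RegListBits == 0x4006;
// bits 1, 2, and 14 are set, so R1, R2, and LR are appended to the MCInst.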
// LDREX, LDREXB, LDREXH: Rd Rn
// LDREXD:                Rd Rd+1 Rn
// STREX, STREXB, STREXH: Rd Rm Rn
// STREXD:                Rd Rm Rm+1 Rn
//
// SWP, SWPB:             Rd Rm Rn
static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  if (!OpInfo) return false;

  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  assert(NumOps >= 2
         && OpInfo[0].RegClass == ARM::GPRRegClassID
         && OpInfo[1].RegClass == ARM::GPRRegClassID
         && "Expect 2 reg operands");

  bool isStore = slice(insn, 20, 20) == 0;
  bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);

  // Add the destination operand.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  ++OpIdx;

  // Store register Exclusive needs a source operand.
  if (isStore) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    ++OpIdx;

    if (isDW) {
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                         decodeRm(insn)+1)));
      ++OpIdx;
    }
  } else if (isDW) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn)+1)));
    ++OpIdx;
  }

  // Finally add the pointer operand.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));
  ++OpIdx;

  return true;
}
// Misc. Arithmetic Instructions.
// CLZ: Rd Rm
// PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
// RBIT, REV, REV16, REVSH: Rd Rm
static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  assert(NumOps >= 2
         && OpInfo[0].RegClass == ARM::GPRRegClassID
         && OpInfo[1].RegClass == ARM::GPRRegClassID
         && "Expect 2 reg operands");

  bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  ++OpIdx;

  if (ThreeReg) {
    assert(NumOps >= 4 && "Expect >= 4 operands");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRm(insn))));
  ++OpIdx;

  // If there is still an operand info left which is an immediate operand, add
  // an additional imm5 LSL/ASR operand.
  if (ThreeReg && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // Extract the 5-bit immediate field Inst{11-7}.
    unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
    ARM_AM::ShiftOpc Opc = ARM_AM::no_shift;
    if (Opcode == ARM::PKHBT)
      Opc = ARM_AM::lsl;
    else if (Opcode == ARM::PKHTB)
      Opc = ARM_AM::asr;
    getImmShiftSE(Opc, ShiftAmt);
    MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShiftAmt)));
    ++OpIdx;
  }

  return true;
}
/// DisassembleSatFrm - Disassemble saturate instructions:
/// SSAT, SSAT16, USAT, and USAT16.
static bool DisassembleSatFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  NumOpsAdded = TID.getNumOperands() - 2; // ignore predicate operands

  // Disassemble register def.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));

  // The saturate bit position; SSAT/SSAT16 encode sat_imm as position - 1.
  unsigned Pos = slice(insn, 20, 16);
  if (Opcode == ARM::SSAT || Opcode == ARM::SSAT16)
    Pos += 1;
  MI.addOperand(MCOperand::CreateImm(Pos));

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRm(insn))));

  if (NumOpsAdded == 4) {
    ARM_AM::ShiftOpc Opc = (slice(insn, 6, 6) != 0 ? ARM_AM::asr : ARM_AM::lsl);
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShAmt = slice(insn, 11, 7);
    if (ShAmt == 0) {
      // A8.6.183.  Possible ASR shift amount of 32...
      if (Opc == ARM_AM::asr)
        ShAmt = 32;
      else
        Opc = ARM_AM::no_shift;
    }
    MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShAmt)));
  }
  return true;
}
// Extend instructions.
// SXT* and UXT*: Rd [Rn] Rm [rot_imm].
// The 2nd operand register is Rn and the 3rd operand register is Rm for the
// three register operand form.  Otherwise, Rn=0b1111 and only Rm is used.
static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  assert(NumOps >= 2
         && OpInfo[0].RegClass == ARM::GPRRegClassID
         && OpInfo[1].RegClass == ARM::GPRRegClassID
         && "Expect 2 reg operands");

  bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  ++OpIdx;

  if (ThreeReg) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRm(insn))));
  ++OpIdx;

  // If there is still an operand info left which is an immediate operand, add
  // an additional rotate immediate operand.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // Extract the 2-bit rotate field Inst{11-10}.
    unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
    // Rotation by 8, 16, or 24 bits.
    MI.addOperand(MCOperand::CreateImm(rot << 3));
    ++OpIdx;
  }

  return true;
}
/////////////////////////////////////
//                                 //
//    Utility Functions For VFP    //
//                                 //
/////////////////////////////////////

// Extract/Decode Dd/Sd:
//
// SP => d = UInt(Vd:D)
// DP => d = UInt(D:Vd)
static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
  return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
                 : (decodeRd(insn) | getDBit(insn) << 4);
}

// Extract/Decode Dn/Sn:
//
// SP => n = UInt(Vn:N)
// DP => n = UInt(N:Vn)
static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
  return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
                 : (decodeRn(insn) | getNBit(insn) << 4);
}

// Extract/Decode Dm/Sm:
//
// SP => m = UInt(Vm:M)
// DP => m = UInt(M:Vm)
static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
  return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
                 : (decodeRm(insn) | getMBit(insn) << 4);
}
// A7.5.1 VFPExpandImm - expand the modified-immediate byte to a 32- or 64-bit
// floating-point bit pattern.
static APInt VFPExpandImm(unsigned char byte, unsigned N) {
  assert(N == 32 || N == 64);

  uint64_t Result;
  unsigned bit6 = slice(byte, 6, 6);
  if (N == 32) {
    Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
    if (bit6)
      Result |= 0x1f << 25;
    else
      Result |= 0x1 << 30;
  } else {
    Result = (uint64_t)slice(byte, 7, 7) << 63 |
             (uint64_t)slice(byte, 5, 0) << 48;
    if (bit6)
      Result |= 0xffULL << 54;
    else
      Result |= 0x1ULL << 62;
  }
  return APInt(N, Result);
}
1595 // VCMP[E]ZD, VCMP[E]ZS: compares one floating-point register with zero
1596 // VCVTDS, VCVTSD: converts between double-precision and single-precision
1597 // The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
1598 static bool DisassembleVFPUnaryFrm(MCInst
&MI
, unsigned Opcode
, uint32_t insn
,
1599 unsigned short NumOps
, unsigned &NumOpsAdded
, BO B
) {
1601 assert(NumOps
>= 1 && "VFPUnaryFrm expects NumOps >= 1");
1603 const TargetOperandInfo
*OpInfo
= ARMInsts
[Opcode
].OpInfo
;
1604 unsigned &OpIdx
= NumOpsAdded
;
1608 unsigned RegClass
= OpInfo
[OpIdx
].RegClass
;
1609 assert((RegClass
== ARM::SPRRegClassID
|| RegClass
== ARM::DPRRegClassID
) &&
1610 "Reg operand expected");
1611 bool isSP
= (RegClass
== ARM::SPRRegClassID
);
1613 MI
.addOperand(MCOperand::CreateReg(
1614 getRegisterEnum(B
, RegClass
, decodeVFPRd(insn
, isSP
))));
1617 // Early return for compare with zero instructions.
1618 if (Opcode
== ARM::VCMPEZD
|| Opcode
== ARM::VCMPEZS
1619 || Opcode
== ARM::VCMPZD
|| Opcode
== ARM::VCMPZS
)
1622 RegClass
= OpInfo
[OpIdx
].RegClass
;
1623 assert((RegClass
== ARM::SPRRegClassID
|| RegClass
== ARM::DPRRegClassID
) &&
1624 "Reg operand expected");
1625 isSP
= (RegClass
== ARM::SPRRegClassID
);
1627 MI
.addOperand(MCOperand::CreateReg(
1628 getRegisterEnum(B
, RegClass
, decodeVFPRm(insn
, isSP
))));
// All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
// Some of them have operand constraints which tie the first operand in the
// InOperandList to that of the dst.  As far as asm printing is concerned, this
// tied_to operand is simply skipped.
static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  unsigned RegClass = OpInfo[OpIdx].RegClass;
  assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
         "Reg operand expected");
  bool isSP = (RegClass == ARM::SPRRegClassID);

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
  ++OpIdx;

  // Skip tied_to operand constraint.
  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
    assert(NumOps >= 4 && "Expect >=4 operands");
    MI.addOperand(MCOperand::CreateReg(0));
    ++OpIdx;
  }

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, RegClass, decodeVFPRn(insn, isSP))));
  ++OpIdx;

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
  ++OpIdx;

  return true;
}
// A8.6.295 vcvt (floating-point <-> integer)
// Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
// FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
//
// A8.6.297 vcvt (floating-point and fixed-point)
// Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;

  bool SP = slice(insn, 8, 8) == 0;             // A8.6.295 & A8.6.297
  bool fixed_point = slice(insn, 17, 17) == 1;  // A8.6.297
  unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;

  if (fixed_point) {
    // A8.6.297
    assert(NumOps >= 3 && "Expect >= 3 operands");
    int size = slice(insn, 7, 7) == 0 ? 16 : 32;
    int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(B, RegClassID,
                                    decodeVFPRd(insn, SP))));

    assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
           "Tied to operand expected");
    MI.addOperand(MI.getOperand(0));

    assert(OpInfo[2].RegClass < 0 && !OpInfo[2].isPredicate() &&
           !OpInfo[2].isOptionalDef() && "Imm operand expected");
    MI.addOperand(MCOperand::CreateImm(fbits));

    NumOpsAdded = 3;
  } else {
    // A8.6.295
    // The Rd (destination) and Rm (source) bits have different interpretations
    // depending on whether they are single precision.
    unsigned d, m;
    if (slice(insn, 18, 18) == 1) { // to_integer operation
      d = decodeVFPRd(insn, true /* Is Single Precision */);
      MI.addOperand(MCOperand::CreateReg(
                      getRegisterEnum(B, ARM::SPRRegClassID, d)));
      m = decodeVFPRm(insn, SP);
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, m)));
    } else {
      d = decodeVFPRd(insn, SP);
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, d)));
      m = decodeVFPRm(insn, true /* Is Single Precision */);
      MI.addOperand(MCOperand::CreateReg(
                      getRegisterEnum(B, ARM::SPRRegClassID, m)));
    }
    NumOpsAdded = 2;
  }

  return true;
}
// VMOVRS - A8.6.330
// Rt => Rd; Sn => UInt(Vn:N)
static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
                                                     decodeVFPRn(insn, true))));
  NumOpsAdded = 2;
  return true;
}
// VMOVRRD - A8.6.332
// Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
//
// VMOVRRS - A8.6.331
// Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));
  OpIdx = 2;

  if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
    unsigned Sm = decodeVFPRm(insn, true);
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
                                                       Sm)));
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
                                                       Sm+1)));
    OpIdx += 2;
  } else {
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(B, ARM::DPRRegClassID,
                                    decodeVFPRm(insn, false))));
    ++OpIdx;
  }
  return true;
}
// VMOVSR - A8.6.330
// Rt => Rd; Sn => UInt(Vn:N)
static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
                                                     decodeVFPRn(insn, true))));
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  NumOpsAdded = 2;
  return true;
}
// VMOVDRR - A8.6.332
// Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
//
// VMOVSRR - A8.6.331
// Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
    unsigned Sm = decodeVFPRm(insn, true);
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
                                                       Sm)));
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
                                                       Sm+1)));
    OpIdx += 2;
  } else {
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(B, ARM::DPRRegClassID,
                                    decodeVFPRm(insn, false))));
    ++OpIdx;
  }

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));
  OpIdx += 2;
  return true;
}
// VFP Load/Store Instructions.
// VLDRD, VLDRS, VSTRD, VSTRS
static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");

  bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS);
  unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;

  // Extract Dd/Sd for operand 0.
  unsigned RegD = decodeVFPRd(insn, isSPVFP);

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, RegD)));

  unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
  MI.addOperand(MCOperand::CreateReg(Base));

  // Next comes the AM5 Opcode.
  ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  unsigned char Imm8 = insn & 0xFF;
  MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));

  NumOpsAdded = 3;

  return true;
}
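
// For illustration only: "vldr d0, [r1, #8]" has U == 1 and imm8 == 2, since
// the AM5 immediate counts words; getAM5Opc(ARM_AM::add, 2) is what ends up
// in the offset operand above.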
// VFP Load/Store Multiple Instructions.
// This is similar to the algorithm for LDM/STM in that operand 0 (the base)
// and operand 1 (the AM4 mode imm) are followed by two predicate operands.  It
// is followed by a reglist of either DPR(s) or SPR(s).
//
// VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  assert(NumOps >= 5 && "VFPLdStMulFrm expects NumOps >= 5");

  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));

  // Writeback to base, if necessary.
  if (Opcode == ARM::VLDMD_UPD || Opcode == ARM::VLDMS_UPD ||
      Opcode == ARM::VSTMD_UPD || Opcode == ARM::VSTMS_UPD) {
    MI.addOperand(MCOperand::CreateReg(Base));
    ++OpIdx;
  }

  MI.addOperand(MCOperand::CreateReg(Base));

  // Next comes the AM4 Opcode.
  ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
  // Must be either "ia" or "db" submode.
  if (SubMode != ARM_AM::ia && SubMode != ARM_AM::db) {
    DEBUG(errs() << "Illegal addressing mode 4 sub-mode!\n");
    return false;
  }
  MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));

  // Handling the two predicate operands before the reglist.
  int64_t CondVal = insn >> ARMII::CondShift;
  MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
  MI.addOperand(MCOperand::CreateReg(ARM::CPSR));

  OpIdx += 4;

  bool isSPVFP = (Opcode == ARM::VLDMS || Opcode == ARM::VLDMS_UPD ||
                  Opcode == ARM::VSTMS || Opcode == ARM::VSTMS_UPD);
  unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;

  unsigned RegD = decodeVFPRd(insn, isSPVFP);

  // Fill the variadic part of reglist.
  unsigned char Imm8 = insn & 0xFF;
  unsigned Regs = isSPVFP ? Imm8 : Imm8/2;
  for (unsigned i = 0; i < Regs; ++i) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID,
                                                       RegD + i)));
    ++OpIdx;
  }

  return true;
}
// Misc. VFP Instructions.
// FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
// FCONSTD (DPR and a VFPf64Imm operand)
// FCONSTS (SPR and a VFPf32Imm operand)
// VMRS/VMSR (GPR operand)
static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  if (Opcode == ARM::FMSTAT)
    return true;

  assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");

  unsigned RegEnum = 0;
  switch (OpInfo[0].RegClass) {
  case ARM::DPRRegClassID:
    RegEnum = getRegisterEnum(B, ARM::DPRRegClassID, decodeVFPRd(insn, false));
    break;
  case ARM::SPRRegClassID:
    RegEnum = getRegisterEnum(B, ARM::SPRRegClassID, decodeVFPRd(insn, true));
    break;
  case ARM::GPRRegClassID:
    RegEnum = getRegisterEnum(B, ARM::GPRRegClassID, decodeRd(insn));
    break;
  default:
    assert(0 && "Invalid reg class id");
    return false;
  }

  MI.addOperand(MCOperand::CreateReg(RegEnum));
  ++OpIdx;

  // Extract/decode the f64/f32 immediate.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
        && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // The asm syntax specifies the floating point value, not the 8-bit literal.
    APInt immRaw = VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
                                Opcode == ARM::FCONSTD ? 64 : 32);
    APFloat immFP = APFloat(immRaw, true);
    double imm = Opcode == ARM::FCONSTD ? immFP.convertToDouble() :
      immFP.convertToFloat();
    MI.addOperand(MCOperand::CreateFPImm(imm));
    ++OpIdx;
  }

  return true;
}
// DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
#include "ThumbDisassemblerCore.h"

/////////////////////////////////////////////////////
//     Utility Functions For ARM Advanced SIMD     //
/////////////////////////////////////////////////////
// The following NEON namings are based on A8.6.266 VABA, VABAL.  Notice that
// A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.

// A7.3 Register encoding

// Extract/Decode NEON D/Vd:
//
// Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
// doubleword, Dd = UInt(D:Vd).  We compensate for this difference by
// handling it in the getRegisterEnum() utility function.
// D = Inst{22}, Vd = Inst{15-12}
static unsigned decodeNEONRd(uint32_t insn) {
  return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
    | ((insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask);
}

// Extract/Decode NEON N/Vn:
//
// Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
// doubleword, Dn = UInt(N:Vn).  We compensate for this difference by
// handling it in the getRegisterEnum() utility function.
// N = Inst{7}, Vn = Inst{19-16}
static unsigned decodeNEONRn(uint32_t insn) {
  return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
    | ((insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask);
}

// Extract/Decode NEON M/Vm:
//
// Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
// doubleword, Dm = UInt(M:Vm).  We compensate for this difference by
// handling it in the getRegisterEnum() utility function.
// M = Inst{5}, Vm = Inst{3-0}
static unsigned decodeNEONRm(uint32_t insn) {
  return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
    | ((insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask);
}
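// Worked example for the three decoders above: with D = Inst{22} = 1 and
// Vd = Inst{15-12} = 0b0011, decodeNEONRd() returns 0b10011 = 19, i.e. D19;
// for a quadword operand getRegisterEnum() halves this to Q9.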
namespace {
enum ElemSize {
  ESizeNA = 0,
  ESize8 = 8,
  ESize16 = 16,
  ESize32 = 32,
  ESize64 = 64
};
} // End of unnamed namespace
// size        field -> Inst{11-10}
// index_align field -> Inst{7-4}
//
// The Lane Index interpretation depends on the Data Size:
//   8  (encoded as size = 0b00) -> Index = index_align[3:1]
//   16 (encoded as size = 0b01) -> Index = index_align[3:2]
//   32 (encoded as size = 0b10) -> Index = index_align[3]
//
// Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
static unsigned decodeLaneIndex(uint32_t insn) {
  unsigned size = insn >> 10 & 3;
  assert((size == 0 || size == 1 || size == 2) &&
         "Encoding error: size should be either 0, 1, or 2");

  unsigned index_align = insn >> 4 & 0xF;
  return (index_align >> 1) >> size;
}
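// For example, a 16-bit element (size = 0b01) with index_align = 0b0110 has
// Index = index_align[3:2] = 1, which is exactly what (0b0110 >> 1) >> 1
// computes.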
// imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
// op = Inst{5}, cmode = Inst{11-8}
// i = Inst{24} (ARM architecture)
// imm3 = Inst{18-16}, imm4 = Inst{3-0}
// Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
  unsigned char op = (insn >> 5) & 1;
  unsigned char cmode = (insn >> 8) & 0xF;
  unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
                       ((insn >> 16) & 7) << 4 |
                       (insn & 0xF);
  return (op << 12) | (cmode << 8) | Imm8;
}
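// The returned value packs op:cmode:imm8 into bits {12}, {11-8}, and {7-0};
// e.g. op = 1, cmode = 0b1110, Imm8 = 0xFF yields 0x1EFF.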
// A8.6.339 VMUL, VMULL (by scalar)
// ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
// ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
  switch (esize) {
  case ESize16:
    return insn & 7;
  case ESize32:
    return insn & 0xF;
  default:
    assert(0 && "Unreachable code!");
    return 0;
  }
}
// A8.6.339 VMUL, VMULL (by scalar)
// ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
// ESize32 => index = Inst{5} (M) D0-D15
static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
  switch (esize) {
  case ESize16:
    return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
  case ESize32:
    return (insn >> 5) & 1;
  default:
    assert(0 && "Unreachable code!");
    return 0;
  }
}
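// For example, with ESize16 the index is M:Vm<3>; Inst{5} = 1 and Inst{3} = 0
// give index = 0b10 = 2.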
// A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
// (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
static unsigned decodeVCVTFractionBits(uint32_t insn) {
  return 64 - ((insn >> 16) & 0x3F);
}
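// For example, imm6 = Inst{21-16} = 48 encodes 64 - 48 = 16 fraction bits.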
// A8.6.302 VDUP (scalar)
// ESize8  => index = Inst{19-17}
// ESize16 => index = Inst{19-18}
// ESize32 => index = Inst{19}
static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
  switch (esize) {
  case ESize8:
    return (insn >> 17) & 7;
  case ESize16:
    return (insn >> 18) & 3;
  case ESize32:
    return (insn >> 19) & 1;
  default:
    assert(0 && "Unspecified element size!");
    return 0;
  }
}
// A8.6.328 VMOV (ARM core register to scalar)
// A8.6.329 VMOV (scalar to ARM core register)
// ESize8  => index = Inst{21:6-5}
// ESize16 => index = Inst{21:6}
// ESize32 => index = Inst{21}
static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
  switch (esize) {
  case ESize8:
    return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
  case ESize16:
    return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
  case ESize32:
    return ((insn >> 21) & 1);
  default:
    assert(0 && "Unspecified element size!");
    return 0;
  }
}
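// For example, with ESize8, Inst{21} = 1 and Inst{6-5} = 0b10 give
// index = (1 << 2) | 0b10 = 6.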
// Imm6 = Inst{21-16}, L = Inst{7}
//
// LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
//   '0001xxx' => esize = 8;  shift_amount = imm6 - 8
//   '001xxxx' => esize = 16; shift_amount = imm6 - 16
//   '01xxxxx' => esize = 32; shift_amount = imm6 - 32
//   '1xxxxxx' => esize = 64; shift_amount = imm6
//
// LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
//   '0001xxx' => esize = 8;  shift_amount = 16 - imm6
//   '001xxxx' => esize = 16; shift_amount = 32 - imm6
//   '01xxxxx' => esize = 32; shift_amount = 64 - imm6
//   '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
  ElemSize esize = ESizeNA;
  unsigned L = (insn >> 7) & 1;
  unsigned imm6 = (insn >> 16) & 0x3F;
  if (L == 0) {
    if (imm6 >> 3 == 1)
      esize = ESize8;
    else if (imm6 >> 4 == 1)
      esize = ESize16;
    else if (imm6 >> 5 == 1)
      esize = ESize32;
    else
      assert(0 && "Wrong encoding of Inst{7:21-16}!");
  } else
    esize = ESize64;

  if (LeftShift)
    return esize == ESize64 ? imm6 : (imm6 - esize);
  else
    return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
}
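// Worked example: L:imm6 = 0:010100 (imm6 = 20) selects esize = 16, so a left
// shift decodes to 20 - 16 = 4, while a right shift decodes to 2*16 - 20 = 12
// (i.e. 32 - imm6).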
// Imm4 = Inst{11-8}
static unsigned decodeN3VImm(uint32_t insn) {
  return (insn >> 8) & 0xF;
}
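// For example, Inst{11-8} = 0b0011 gives imm4 = 3, the byte position VEXT
// starts extracting from.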
//   D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
//   D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
//   Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
//   Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
//
// Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced,
    BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;

  // At least one DPR register plus addressing mode #6.
  assert(NumOps >= 3 && "Expect >= 3 operands");

  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  // We have homogeneous NEON registers for Load/Store.
  unsigned RegClass = 0;

  // Double-spaced registers have increments of 2.
  unsigned Inc = DblSpaced ? 2 : 1;

  unsigned Rn = decodeRn(insn);
  unsigned Rm = decodeRm(insn);
  unsigned Rd = decodeNEONRd(insn);

  // A7.7.1 Advanced SIMD addressing mode.
  bool WB = Rm != 15;

  // LLVM Addressing Mode #6.
  unsigned RmEnum = 0;
  if (WB && Rm != 13)
    RmEnum = getRegisterEnum(B, ARM::GPRRegClassID, Rm);

  if (Store) {
    // Consume possible WB, AddrMode6, possible increment reg, the DPR/QPR's,
    // then possible lane index.
    assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");

    if (WB) {
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                         Rn)));
      ++OpIdx;
    }

    assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       Rn)));
    MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
    OpIdx += 2;

    if (WB) {
      MI.addOperand(MCOperand::CreateReg(RmEnum));
      ++OpIdx;
    }

    assert(OpIdx < NumOps &&
           (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
            OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
           "Reg operand expected");

    RegClass = OpInfo[OpIdx].RegClass;
    while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
      MI.addOperand(MCOperand::CreateReg(
                      getRegisterEnum(B, RegClass, Rd)));
      Rd += Inc;
      ++OpIdx;
    }

    // Handle possible lane index.
    if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
        && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
      MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
      ++OpIdx;
    }
  } else {
    // Consume the DPR/QPR's, possible WB, AddrMode6, possible increment reg,
    // possible TIED_TO DPR/QPR's (ignored), then possible lane index.
    RegClass = OpInfo[0].RegClass;

    while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
      MI.addOperand(MCOperand::CreateReg(
                      getRegisterEnum(B, RegClass, Rd)));
      Rd += Inc;
      ++OpIdx;
    }

    if (WB) {
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                         Rn)));
      ++OpIdx;
    }

    assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       Rn)));
    MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
    OpIdx += 2;

    if (WB) {
      MI.addOperand(MCOperand::CreateReg(RmEnum));
      ++OpIdx;
    }

    while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
      assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
             "Tied to operand expected");
      MI.addOperand(MCOperand::CreateReg(0));
      ++OpIdx;
    }

    // Handle possible lane index.
    if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
        && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
      MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
      ++OpIdx;
    }
  }

  // Accessing registers past the end of the NEON register file is not
  // defined.
  if (Rd > 32)
    return false;

  return true;
}
// If L (Inst{21}) == 0, store instructions.
// Find out about double-spaced-ness of the Opcode and pass it on to
// DisassembleNLdSt0().
static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const StringRef Name = ARMInsts[Opcode].Name;
  bool DblSpaced = false;

  if (Name.find("LN") != std::string::npos) {
    // To one lane instructions.
    // See, for example, A8.6.317 VLD4 (single 4-element structure to one lane).

    // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
    if (Name.endswith("16") || Name.endswith("16_UPD"))
      DblSpaced = slice(insn, 5, 5) == 1;

    // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
    if (Name.endswith("32") || Name.endswith("32_UPD"))
      DblSpaced = slice(insn, 6, 6) == 1;
  } else {
    // Multiple n-element structures with type encoded as Inst{11-8}.
    // See, for example, A8.6.316 VLD4 (multiple 4-element structures).

    // n == 2 && type == 0b1001 -> DblSpaced = true
    if (Name.startswith("VST2") || Name.startswith("VLD2"))
      DblSpaced = slice(insn, 11, 8) == 9;

    // n == 3 && type == 0b0101 -> DblSpaced = true
    if (Name.startswith("VST3") || Name.startswith("VLD3"))
      DblSpaced = slice(insn, 11, 8) == 5;

    // n == 4 && type == 0b0001 -> DblSpaced = true
    if (Name.startswith("VST4") || Name.startswith("VLD4"))
      DblSpaced = slice(insn, 11, 8) == 1;
  }
  return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
                           slice(insn, 21, 21) == 0, DblSpaced, B);
}
static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;

  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass < 0) &&
         "Expect 1 reg operand followed by 1 imm operand");

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
                                                     decodeNEONRd(insn))));

  ElemSize esize = ESizeNA;
  switch (Opcode) {
  case ARM::VMOVv8i8:
  case ARM::VMOVv16i8:
    esize = ESize8;
    break;
  case ARM::VMOVv4i16:
  case ARM::VMOVv8i16:
  case ARM::VMVNv4i16:
  case ARM::VMVNv8i16:
    esize = ESize16;
    break;
  case ARM::VMOVv2i32:
  case ARM::VMOVv4i32:
  case ARM::VMVNv2i32:
  case ARM::VMVNv4i32:
    esize = ESize32;
    break;
  case ARM::VMOVv1i64:
  case ARM::VMOVv2i64:
    esize = ESize64;
    break;
  default:
    assert(0 && "Unreachable code!");
    return false;
  }

  // One register and a modified immediate value.
  // Add the imm operand.
  MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));

  NumOpsAdded = 2;
  return true;
}
namespace {
enum N2VFlag {
  N2V_None,
  N2V_VectorDupLane,
  N2V_VectorConvert_Between_Float_Fixed
};
} // End of unnamed namespace
// Vector Convert [between floating-point and fixed-point]
//   Qd/Dd Qm/Dm [fbits]
//
// Vector Duplicate Lane (from scalar to all elements) Instructions.
// VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
//   Qd/Dd Dm index
//
// Vector Move Long:
//   Qd Dm
//
// Vector Move Narrow:
//   Dd Qm
static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opc];
  const TargetOperandInfo *OpInfo = TID.OpInfo;

  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 2 operands and first 2 as reg operands");

  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  ElemSize esize = ESizeNA;
  if (Flag == N2V_VectorDupLane) {
    // VDUPLN has its index embedded.  Its size can be inferred from the Opcode.
    assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
           "Unexpected Opcode");
    esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
       : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16
                                                           : ESize32);
  }

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));
  ++OpIdx;

  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
    // TIED_TO operand.
    MI.addOperand(MCOperand::CreateReg(0));
    ++OpIdx;
  }

  // Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRm(insn))));
  ++OpIdx;

  // VZIP and others have two TIED_TO reg operands.
  int Idx;
  while (OpIdx < NumOps &&
         (Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
    // Add TIED_TO operand.
    MI.addOperand(MI.getOperand(Idx));
    ++OpIdx;
  }

  // Add the imm operand, if required.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {

    unsigned imm = 0xFFFFFFFF;

    if (Flag == N2V_VectorDupLane)
      imm = decodeNVLaneDupIndex(insn, esize);
    if (Flag == N2V_VectorConvert_Between_Float_Fixed)
      imm = decodeVCVTFractionBits(insn);

    assert(imm != 0xFFFFFFFF && "Internal error");
    MI.addOperand(MCOperand::CreateImm(imm));
    ++OpIdx;
  }

  return true;
}
static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
                                N2V_None, B);
}

static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
                                N2V_VectorConvert_Between_Float_Fixed, B);
}

static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
                                N2V_VectorDupLane, B);
}
// Vector Shift [Accumulate] Instructions.
// Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
//
// Vector Shift Left Long (with maximum shift count) Instructions.
// VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
//
static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;

  assert(NumOps >= 3 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 3 operands and first 2 as reg operands");

  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));
  ++OpIdx;

  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
    // TIED_TO operand.
    MI.addOperand(MCOperand::CreateReg(0));
    ++OpIdx;
  }

  assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
          OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
         "Reg operand expected");

  // Qm/Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRm(insn))));
  ++OpIdx;

  assert(OpInfo[OpIdx].RegClass < 0 && "Imm operand expected");

  // Add the imm operand.
  //
  // VSHLL has maximum shift count as the imm, inferred from its size.
  unsigned Imm = 0;
  switch (Opcode) {
  default:
    Imm = decodeNVSAmt(insn, LeftShift);
    break;
  case ARM::VSHLLi8:
    Imm = 8;
    break;
  case ARM::VSHLLi16:
    Imm = 16;
    break;
  case ARM::VSHLLi32:
    Imm = 32;
    break;
  }
  MI.addOperand(MCOperand::CreateImm(Imm));
  ++OpIdx;

  return true;
}

// Left shift instructions.
static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true,
                                 B);
}

// Right shift instructions have a different shift amount interpretation.
static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false,
                                 B);
}
namespace {
enum N3VFlag {
  N3V_None,
  N3V_VectorExtract,
  N3V_VectorShift,
  N3V_Multiply_By_Scalar
};
} // End of unnamed namespace
// NEON Three Register Instructions with Optional Immediate Operand
//
// Vector Extract Instructions.
// Qd/Dd Qn/Dn Qm/Dm imm4
//
// Vector Shift (Register) Instructions.
// Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
//
// Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
// Qd/Dd Qn/Dn RestrictedDm index
static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;

  // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
  assert(NumOps >= 3 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 3 operands and first 2 as reg operands");

  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  bool VdVnVm = Flag == N3V_VectorShift ? false : true;
  bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
  bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
  ElemSize esize = ESizeNA;
  if (Flag == N3V_Multiply_By_Scalar) {
    unsigned size = (insn >> 20) & 3;
    if (size == 1) esize = ESize16;
    if (size == 2) esize = ESize32;
    assert(esize == ESize16 || esize == ESize32);
  }

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));
  ++OpIdx;

  // VABA, VABAL, VBSLd, VBSLq, ...
  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
    // TIED_TO operand.
    MI.addOperand(MCOperand::CreateReg(0));
    ++OpIdx;
  }

  // Dn = Inst{7:19-16} => NEON Rn
  // or
  // Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                  VdVnVm ? decodeNEONRn(insn)
                                         : decodeNEONRm(insn))));
  ++OpIdx;

  // Special case handling for VMOVDneon and VMOVQ because they are marked as
  // N3RegFrm.
  if (Opcode == ARM::VMOVDneon || Opcode == ARM::VMOVQ)
    return true;

  // Dm = Inst{5:3-0} => NEON Rm
  // or
  // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
  // or
  // Dn = Inst{7:19-16} => NEON Rn
  unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
                                        : decodeNEONRm(insn))
                      : decodeNEONRn(insn);

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, OpInfo[OpIdx].RegClass, m)));
  ++OpIdx;

  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // Add the imm operand.
    unsigned Imm = 0;
    if (IsImm4)
      Imm = decodeN3VImm(insn);
    else if (IsDmRestricted)
      Imm = decodeRestrictedDmIndex(insn, esize);
    else {
      assert(0 && "Internal error: unreachable code!");
      return false;
    }

    MI.addOperand(MCOperand::CreateImm(Imm));
    ++OpIdx;
  }

  return true;
}
static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
                                  N3V_None, B);
}

static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
                                  N3V_VectorShift, B);
}

static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
                                  N3V_VectorExtract, B);
}

static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
                                  N3V_Multiply_By_Scalar, B);
}
// Vector Table Lookup
//
// VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
// VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
// VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
// VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;

  assert(NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::DPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         OpInfo[2].RegClass == ARM::DPRRegClassID &&
         "Expect >= 3 operands and first 3 as reg operands");

  unsigned &OpIdx = NumOpsAdded;

  OpIdx = 0;

  unsigned Rn = decodeNEONRn(insn);

  // {Dn} encoded as len = 0b00
  // {Dn Dn+1} encoded as len = 0b01
  // {Dn Dn+1 Dn+2 } encoded as len = 0b10
  // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
  unsigned Len = slice(insn, 9, 8) + 1;

  // Dd (the destination vector)
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRd(insn))));
  ++OpIdx;

  // Process tied_to operand constraint.
  int Idx;
  if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
    MI.addOperand(MI.getOperand(Idx));
    ++OpIdx;
  }

  // Do the <list> now.
  for (unsigned i = 0; i < Len; ++i) {
    assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                       Rn + i)));
    ++OpIdx;
  }

  // Dm (the index vector)
  assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
         "Reg operand (index vector) expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRm(insn))));
  ++OpIdx;

  return true;
}
// Vector Get Lane (move scalar to ARM core register) Instructions.
// VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;

  assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::GPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         OpInfo[2].RegClass < 0 &&
         "Expect >= 3 operands with one dst operand");

  ElemSize esize =
    Opcode == ARM::VGETLNi32 ? ESize32
      : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16
                                                                : ESize8);

  // Rt = Inst{15-12} => ARM Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));

  // Dn = Inst{7:19-16} => NEON Rn
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRn(insn))));

  MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));

  NumOpsAdded = 3;
  return true;
}
// Vector Set Lane (move ARM core register to scalar) Instructions.
// VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;

  assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::DPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
         OpInfo[2].RegClass == ARM::GPRRegClassID &&
         OpInfo[3].RegClass < 0 &&
         "Expect >= 3 operands with one dst operand");

  ElemSize esize =
    Opcode == ARM::VSETLNi8 ? ESize8
                            : (Opcode == ARM::VSETLNi16 ? ESize16
                                                        : ESize32);

  // Dd = Inst{7:19-16} => NEON Rn
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRn(insn))));

  // TIED_TO operand.
  MI.addOperand(MCOperand::CreateReg(0));

  // Rt = Inst{15-12} => ARM Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));

  MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));

  NumOpsAdded = 4;
  return true;
}
// Vector Duplicate Instructions (from ARM core register to all elements).
// VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;

  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         OpInfo[1].RegClass == ARM::GPRRegClassID &&
         "Expect >= 2 operands and first 2 as reg operand");

  unsigned RegClass = OpInfo[0].RegClass;

  // Qd/Dd = Inst{7:19-16} => NEON Rn
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClass,
                                                     decodeNEONRn(insn))));

  // Rt = Inst{15-12} => ARM Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));

  NumOpsAdded = 2;
  return true;
}
static inline bool MemBarrierInstr(uint32_t insn) {
  unsigned op7_4 = slice(insn, 7, 4);
  if (slice(insn, 31, 8) == 0xf57ff0 && (op7_4 >= 4 && op7_4 <= 6))
    return true;

  return false;
}
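// For example, DMB ISH encodes as 0xF57FF05B: slice(insn, 31, 8) = 0xF57FF0
// and op7_4 = 5, so this predicate matches; Inst{3-0} = 0xB is the barrier
// option that DisassembleMiscFrm() turns into the imm operand.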
static inline bool PreLoadOpcode(unsigned Opcode) {
  switch (Opcode) {
  case ARM::PLDi12:  case ARM::PLDrs:
  case ARM::PLDWi12: case ARM::PLDWrs:
  case ARM::PLIi12:  case ARM::PLIrs:
    return true;
  default:
    return false;
  }
}
static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  // Preload Data/Instruction requires either 2 or 3 operands.
  // PLDi, PLDWi, PLIi:                addrmode_imm12
  // PLDr[a|m], PLDWr[a|m], PLIr[a|m]: ldst_so_reg
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));

  if (Opcode == ARM::PLDi12 || Opcode == ARM::PLDWi12
      || Opcode == ARM::PLIi12) {
    unsigned Imm12 = slice(insn, 11, 0);
    bool Negative = getUBit(insn) == 0;
    // -0 is represented specially. All other values are as normal.
    if (Imm12 == 0 && Negative)
      Imm12 = INT32_MIN;
    MI.addOperand(MCOperand::CreateImm(Imm12));
    NumOpsAdded = 2;
  } else {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));

    ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;

    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShImm = slice(insn, 11, 7);

    // A8.4.1.  Possible rrx or shift amount of 32...
    getImmShiftSE(ShOp, ShImm);
    MI.addOperand(MCOperand::CreateImm(
                    ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
    NumOpsAdded = 3;
  }

  return true;
}
static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  if (MemBarrierInstr(insn)) {
    // DMBsy, DSBsy, and ISBsy instructions have zero operands and are taken
    // care of within the generic ARMBasicMCBuilder::BuildIt() method.
    //
    // Inst{3-0} encodes the memory barrier option for the variants.
    MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
    NumOpsAdded = 1;
    return true;
  }
  if (Opcode == ARM::SETEND) {
    MI.addOperand(MCOperand::CreateImm(slice(insn, 9, 9)));
    NumOpsAdded = 1;
    return true;
  }
  // CPS has a singleton $opt operand that contains the following information:
  // opt{4-0} = mode from Inst{4-0}
  // opt{5} = changemode from Inst{17}
  // opt{8-6} = AIF from Inst{8-6}
  // opt{10-9} = imod from Inst{19-18} with 0b10 as enable and 0b11 as disable
  if (Opcode == ARM::CPS) {
    unsigned Option = slice(insn, 4, 0) | slice(insn, 17, 17) << 5 |
      slice(insn, 8, 6) << 6 | slice(insn, 19, 18) << 9;
    MI.addOperand(MCOperand::CreateImm(Option));
    NumOpsAdded = 1;
    return true;
  }
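  // Illustrative example of the packing above: CPSIE i (imod = 0b10,
  // A:I:F = 0b010, changemode = 0, mode = 0b00000) gives
  // Option = (0b10 << 9) | (0b010 << 6) = 0x480.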
  // DBG has its option specified in Inst{3-0}.
  if (Opcode == ARM::DBG) {
    MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
    NumOpsAdded = 1;
    return true;
  }
  // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
  if (Opcode == ARM::BKPT) {
    MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
                                       slice(insn, 3, 0)));
    NumOpsAdded = 1;
    return true;
  }
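  // Illustrative example: BKPT #0x1234 has Inst{19-8} = 0x123 and
  // Inst{3-0} = 0x4, so the imm operand is (0x123 << 4) | 0x4 = 0x1234.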
  if (PreLoadOpcode(Opcode))
    return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);

  assert(0 && "Unexpected misc instruction!");
  return false;
}
/// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
/// We divide the disassembly task into different categories, with each one
/// corresponding to a specific instruction encoding format.  There could be
/// exceptions when handling a specific format, and that is why the Opcode is
/// also present in the function prototype.
static const DisassembleFP FuncPtrs[] = {
  &DisassembleBrMiscFrm,
  &DisassembleDPSoRegFrm,
  &DisassembleLdMiscFrm,
  &DisassembleStMiscFrm,
  &DisassembleLdStMulFrm,
  &DisassembleLdStExFrm,
  &DisassembleArithMiscFrm,
  &DisassembleVFPUnaryFrm,
  &DisassembleVFPBinaryFrm,
  &DisassembleVFPConv1Frm,
  &DisassembleVFPConv2Frm,
  &DisassembleVFPConv3Frm,
  &DisassembleVFPConv4Frm,
  &DisassembleVFPConv5Frm,
  &DisassembleVFPLdStFrm,
  &DisassembleVFPLdStMulFrm,
  &DisassembleVFPMiscFrm,
  &DisassembleThumbFrm,
  &DisassembleMiscFrm,
  &DisassembleNGetLnFrm,
  &DisassembleNSetLnFrm,
  &DisassembleNDupFrm,

  // VLD and VST (including one lane) Instructions.
  &DisassembleNLdSt,

  // A7.4.6 One register and a modified immediate value
  // 1-Register Instructions with imm.
  // LLVM only defines VMOVv instructions.
  &DisassembleN1RegModImmFrm,

  // 2-Register Instructions with no imm.
  &DisassembleN2RegFrm,

  // 2-Register Instructions with imm (vector convert float/fixed point).
  &DisassembleNVCVTFrm,

  // 2-Register Instructions with imm (vector dup lane).
  &DisassembleNVecDupLnFrm,

  // Vector Shift Left Instructions.
  &DisassembleN2RegVecShLFrm,

  // Vector Shift Right Instructions, which have a different interpretation of
  // the shift amount from the imm6 field.
  &DisassembleN2RegVecShRFrm,

  // 3-Register Data-Processing Instructions.
  &DisassembleN3RegFrm,

  // Vector Shift (Register) Instructions.
  // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
  &DisassembleN3RegVecShFrm,

  // Vector Extract Instructions.
  &DisassembleNVecExtractFrm,

  // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
  // By Scalar Instructions.
  &DisassembleNVecMulScalarFrm,

  // Vector Table Lookup uses byte indexes in a control vector to look up byte
  // values in a table and generate a new vector.
  &DisassembleNVTBLFrm,

  NULL
};
/// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
/// The general idea is to set the Opcode for the MCInst, followed by adding
/// the appropriate MCOperands to the MCInst.  ARM Basic MC Builder delegates
/// to the Format-specific disassemble function for disassembly, followed by
/// TryPredicateAndSBitModifier() to do PredicateOperand and OptionalDefOperand
/// which follow the Dst/Src Operands.
bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
  // Stage 1 sets the Opcode.
  MI.setOpcode(Opcode);
  // If the number of operands is zero, we're done!
  if (NumOps == 0)
    return true;

  // Stage 2 calls the format-specific disassemble function to build the operand
  // list.
  if (Disasm == NULL)
    return false;
  unsigned NumOpsAdded = 0;
  bool OK = (*Disasm)(MI, Opcode, insn, NumOps, NumOpsAdded, this);

  if (!OK || this->Err != 0) return false;
  if (NumOpsAdded >= NumOps)
    return true;

  // Stage 3 deals with operands unaccounted for after stage 2 is finished.
  // FIXME: Should this be done selectively?
  return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
}
// A8.3 Conditional execution
// A8.3.1 Pseudocode details of conditional execution
// Condition bits '111x' indicate the instruction is always executed.
static uint32_t CondCode(uint32_t CondField) {
  if (CondField == 0xF)
    return ARMCC::AL;
  return CondField;
}
/// DoPredicateOperands - DoPredicateOperands processes the predicate operands
/// of some Thumb instructions which come before the reglist operands.  It
/// returns true if the two predicate operands have been processed.
bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode,
    uint32_t /* insn */, unsigned short NumOpsRemaining) {

  assert(NumOpsRemaining > 0 && "Invalid argument");

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned Idx = MI.getNumOperands();

  // First, we check whether this instr specifies the PredicateOperand through
  // a pair of TargetOperandInfos with isPredicate() property.
  if (NumOpsRemaining >= 2 &&
      OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
      OpInfo[Idx].RegClass < 0 &&
      OpInfo[Idx+1].RegClass == ARM::CCRRegClassID) {
    // If we are inside an IT block, get the IT condition bits maintained via
    // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
    if (InITBlock())
      MI.addOperand(MCOperand::CreateImm(GetITCond()));
    else
      MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
    MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
    return true;
  }

  return false;
}
/// TryPredicateAndSBitModifier - TryPredicateAndSBitModifier tries to process
/// the possible Predicate and SBitModifier, to build the remaining MCOperands.
bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOpsRemaining) {

  assert(NumOpsRemaining > 0 && "Invalid argument");

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  const std::string &Name = ARMInsts[Opcode].Name;
  unsigned Idx = MI.getNumOperands();

  // First, we check whether this instr specifies the PredicateOperand through
  // a pair of TargetOperandInfos with isPredicate() property.
  if (NumOpsRemaining >= 2 &&
      OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
      OpInfo[Idx].RegClass < 0 &&
      OpInfo[Idx+1].RegClass == ARM::CCRRegClassID) {
    // If we are inside an IT block, get the IT condition bits maintained via
    // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
    if (InITBlock())
      MI.addOperand(MCOperand::CreateImm(GetITCond()));
    else {
      if (Name.length() > 1 && Name[0] == 't') {
        // Thumb conditional branch instructions have their cond field embedded,
        // like ARM.
        if (Name == "t2Bcc")
          MI.addOperand(MCOperand::CreateImm(CondCode(slice(insn, 25, 22))));
        else if (Name == "tBcc")
          MI.addOperand(MCOperand::CreateImm(CondCode(slice(insn, 11, 8))));
        else
          MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
      } else {
        // ARM instructions get their condition field from Inst{31-28}.
        MI.addOperand(MCOperand::CreateImm(CondCode(getCondField(insn))));
      }
    }
    MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
    Idx += 2;
    NumOpsRemaining -= 2;
  }

  if (NumOpsRemaining == 0)
    return true;

  // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
  if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass == ARM::CCRRegClassID) {
    MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
    --NumOpsRemaining;
  }

  if (NumOpsRemaining == 0)
    return true;
  else
    return false;
}
/// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
/// after BuildIt is finished.
bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
    uint32_t insn) {

  if (!SP) return Status;

  if (Opcode == ARM::t2IT)
    Status = SP->InitIT(slice(insn, 7, 0)) ? Status : false;
  else if (InITBlock())
    SP->UpdateIT();

  return Status;
}
/// Opcode, Format, and NumOperands make up an ARM Basic MCBuilder.
ARMBasicMCBuilder::ARMBasicMCBuilder(unsigned opc, ARMFormat format,
                                     unsigned short num)
  : Opcode(opc), Format(format), NumOps(num), SP(0), Err(0) {
  unsigned Idx = (unsigned)format;
  assert(Idx < (array_lengthof(FuncPtrs) - 1) && "Unknown format");
  Disasm = FuncPtrs[Idx];
}
/// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
/// infrastructure of an MCInst given the Opcode and Format of the instr.
/// Return NULL if it fails to create/return a proper builder.  API clients
/// are responsible for freeing up of the allocated memory.  Caching can be
/// performed by the API clients to improve performance.
ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
  // For "Unknown format", fail by returning a NULL pointer.
  if ((unsigned)Format >= (array_lengthof(FuncPtrs) - 1)) {
    DEBUG(errs() << "Unknown format\n");
    return 0;
  }

  return new ARMBasicMCBuilder(Opcode, Format,
                               ARMInsts[Opcode].getNumOperands());
}