1 //===- ARMDisassemblerCore.cpp - ARM disassembler helpers -------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is part of the ARM Disassembler.
11 // It contains code to represent the core concepts of Builder and DisassembleFP
12 // to solve the problem of disassembling an ARM instr.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "arm-disassembler"
18 #include "ARMDisassemblerCore.h"
19 #include "ARMAddressingModes.h"
20 #include "ARMMCExpr.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/raw_ostream.h"
24 //#define DEBUG(X) do { X; } while (0)
26 /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
27 /// MCInstrDesc ARMInsts[] definition and the MCOperandInfo[]'s describing the
28 /// operand info for each ARMInsts[i].
29 ///
30 /// Together with an instruction's encoding format, we can take advantage of the
31 /// NumOperands and the OpInfo fields of the target instruction description in
32 /// the quest to build out the MCOperand list for an MCInst.
33 ///
34 /// The general guideline is that with a known format, the number of dst and src
35 /// operands is well known. The dst is built first, followed by the src
36 /// operand(s). The operands not yet used at this point are for the Implicit
37 /// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
38 /// defined with two components:
39 ///
40 /// def pred { // Operand PredicateOperand
41 /// ValueType Type = OtherVT;
42 /// string PrintMethod = "printPredicateOperand";
43 /// string AsmOperandLowerMethod = ?;
44 /// dag MIOperandInfo = (ops i32imm, CCR);
45 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
46 /// dag DefaultOps = (ops (i32 14), (i32 zero_reg));
47 /// }
48 ///
49 /// which is manifested by the MCOperandInfo[] of:
50 ///
51 /// { 0, 0|(1<<MCOI::Predicate), 0 },
52 /// { ARM::CCRRegClassID, 0|(1<<MCOI::Predicate), 0 }
53 ///
54 /// So the first predicate MCOperand corresponds to the immediate part of the
55 /// ARM condition field (Inst{31-28}), and the second predicate MCOperand
56 /// corresponds to a register kind of ARM::CPSR.
57 ///
58 /// For the Defs part, in the simple case of only cc_out:$s, we have:
59 ///
60 /// def cc_out { // Operand OptionalDefOperand
61 /// ValueType Type = OtherVT;
62 /// string PrintMethod = "printSBitModifierOperand";
63 /// string AsmOperandLowerMethod = ?;
64 /// dag MIOperandInfo = (ops CCR);
65 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
66 /// dag DefaultOps = (ops (i32 zero_reg));
67 /// }
68 ///
69 /// which is manifested by the one MCOperandInfo of:
70 ///
71 /// { ARM::CCRRegClassID, 0|(1<<MCOI::OptionalDef), 0 }
72 ///
73 /// And this maps to one MCOperand with the register kind of ARM::CPSR.
74 #define GET_INSTRINFO_MC_DESC
75 #include "ARMGenInstrInfo.inc"
77 using namespace llvm;
79 const char *ARMUtils::OpcodeName(unsigned Opcode) {
80 return ARMInsts[Opcode].Name;
83 // Return the register enum based on RegClass and the raw register number.
84 // FIXME: Auto-gened?
85 static unsigned
86 getRegisterEnum(BO B, unsigned RegClassID, unsigned RawRegister) {
87 if (RegClassID == ARM::rGPRRegClassID) {
88 // Check for the register numbers 13 and 15 that are not permitted for many
89 // Thumb register specifiers.
90 if (RawRegister == 13 || RawRegister == 15) {
91 B->SetErr(-1);
92 return 0;
94 // For this purpose, we can treat rGPR as if it were GPR.
95 RegClassID = ARM::GPRRegClassID;
98 // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
99 // A7.3 register encoding
100 // Qd -> bit[12] == 0
101 // Qn -> bit[16] == 0
102 // Qm -> bit[0] == 0
104 // If one of these bits is 1, the instruction is UNDEFINED.
105 if (RegClassID == ARM::QPRRegClassID && slice(RawRegister, 0, 0) == 1) {
106 B->SetErr(-1);
107 return 0;
109 unsigned RegNum =
110 RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;
112 switch (RegNum) {
113 default:
114 break;
115 case 0:
116 switch (RegClassID) {
117 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
118 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
119 case ARM::DPR_VFP2RegClassID:
120 return ARM::D0;
121 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
122 case ARM::QPR_VFP2RegClassID:
123 return ARM::Q0;
124 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
126 break;
127 case 1:
128 switch (RegClassID) {
129 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
130 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
131 case ARM::DPR_VFP2RegClassID:
132 return ARM::D1;
133 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
134 case ARM::QPR_VFP2RegClassID:
135 return ARM::Q1;
136 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
138 break;
139 case 2:
140 switch (RegClassID) {
141 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
142 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
143 case ARM::DPR_VFP2RegClassID:
144 return ARM::D2;
145 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
146 case ARM::QPR_VFP2RegClassID:
147 return ARM::Q2;
148 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
150 break;
151 case 3:
152 switch (RegClassID) {
153 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
154 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
155 case ARM::DPR_VFP2RegClassID:
156 return ARM::D3;
157 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
158 case ARM::QPR_VFP2RegClassID:
159 return ARM::Q3;
160 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
162 break;
163 case 4:
164 switch (RegClassID) {
165 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
166 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
167 case ARM::DPR_VFP2RegClassID:
168 return ARM::D4;
169 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
170 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
172 break;
173 case 5:
174 switch (RegClassID) {
175 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
176 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
177 case ARM::DPR_VFP2RegClassID:
178 return ARM::D5;
179 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
180 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
182 break;
183 case 6:
184 switch (RegClassID) {
185 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
186 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
187 case ARM::DPR_VFP2RegClassID:
188 return ARM::D6;
189 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
190 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
192 break;
193 case 7:
194 switch (RegClassID) {
195 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
196 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
197 case ARM::DPR_VFP2RegClassID:
198 return ARM::D7;
199 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
200 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
202 break;
203 case 8:
204 switch (RegClassID) {
205 case ARM::GPRRegClassID: return ARM::R8;
206 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
207 case ARM::QPRRegClassID: return ARM::Q8;
208 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
210 break;
211 case 9:
212 switch (RegClassID) {
213 case ARM::GPRRegClassID: return ARM::R9;
214 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
215 case ARM::QPRRegClassID: return ARM::Q9;
216 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
218 break;
219 case 10:
220 switch (RegClassID) {
221 case ARM::GPRRegClassID: return ARM::R10;
222 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
223 case ARM::QPRRegClassID: return ARM::Q10;
224 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
226 break;
227 case 11:
228 switch (RegClassID) {
229 case ARM::GPRRegClassID: return ARM::R11;
230 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
231 case ARM::QPRRegClassID: return ARM::Q11;
232 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
234 break;
235 case 12:
236 switch (RegClassID) {
237 case ARM::GPRRegClassID: return ARM::R12;
238 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
239 case ARM::QPRRegClassID: return ARM::Q12;
240 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
242 break;
243 case 13:
244 switch (RegClassID) {
245 case ARM::GPRRegClassID: return ARM::SP;
246 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
247 case ARM::QPRRegClassID: return ARM::Q13;
248 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
250 break;
251 case 14:
252 switch (RegClassID) {
253 case ARM::GPRRegClassID: return ARM::LR;
254 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
255 case ARM::QPRRegClassID: return ARM::Q14;
256 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
258 break;
259 case 15:
260 switch (RegClassID) {
261 case ARM::GPRRegClassID: return ARM::PC;
262 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
263 case ARM::QPRRegClassID: return ARM::Q15;
264 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
266 break;
267 case 16:
268 switch (RegClassID) {
269 case ARM::DPRRegClassID: return ARM::D16;
270 case ARM::SPRRegClassID: return ARM::S16;
272 break;
273 case 17:
274 switch (RegClassID) {
275 case ARM::DPRRegClassID: return ARM::D17;
276 case ARM::SPRRegClassID: return ARM::S17;
278 break;
279 case 18:
280 switch (RegClassID) {
281 case ARM::DPRRegClassID: return ARM::D18;
282 case ARM::SPRRegClassID: return ARM::S18;
284 break;
285 case 19:
286 switch (RegClassID) {
287 case ARM::DPRRegClassID: return ARM::D19;
288 case ARM::SPRRegClassID: return ARM::S19;
290 break;
291 case 20:
292 switch (RegClassID) {
293 case ARM::DPRRegClassID: return ARM::D20;
294 case ARM::SPRRegClassID: return ARM::S20;
296 break;
297 case 21:
298 switch (RegClassID) {
299 case ARM::DPRRegClassID: return ARM::D21;
300 case ARM::SPRRegClassID: return ARM::S21;
302 break;
303 case 22:
304 switch (RegClassID) {
305 case ARM::DPRRegClassID: return ARM::D22;
306 case ARM::SPRRegClassID: return ARM::S22;
308 break;
309 case 23:
310 switch (RegClassID) {
311 case ARM::DPRRegClassID: return ARM::D23;
312 case ARM::SPRRegClassID: return ARM::S23;
314 break;
315 case 24:
316 switch (RegClassID) {
317 case ARM::DPRRegClassID: return ARM::D24;
318 case ARM::SPRRegClassID: return ARM::S24;
320 break;
321 case 25:
322 switch (RegClassID) {
323 case ARM::DPRRegClassID: return ARM::D25;
324 case ARM::SPRRegClassID: return ARM::S25;
326 break;
327 case 26:
328 switch (RegClassID) {
329 case ARM::DPRRegClassID: return ARM::D26;
330 case ARM::SPRRegClassID: return ARM::S26;
332 break;
333 case 27:
334 switch (RegClassID) {
335 case ARM::DPRRegClassID: return ARM::D27;
336 case ARM::SPRRegClassID: return ARM::S27;
338 break;
339 case 28:
340 switch (RegClassID) {
341 case ARM::DPRRegClassID: return ARM::D28;
342 case ARM::SPRRegClassID: return ARM::S28;
344 break;
345 case 29:
346 switch (RegClassID) {
347 case ARM::DPRRegClassID: return ARM::D29;
348 case ARM::SPRRegClassID: return ARM::S29;
350 break;
351 case 30:
352 switch (RegClassID) {
353 case ARM::DPRRegClassID: return ARM::D30;
354 case ARM::SPRRegClassID: return ARM::S30;
356 break;
357 case 31:
358 switch (RegClassID) {
359 case ARM::DPRRegClassID: return ARM::D31;
360 case ARM::SPRRegClassID: return ARM::S31;
362 break;
364 DEBUG(errs() << "Invalid (RegClassID, RawRegister) combination\n");
365 // Encoding error. Mark the builder with error code != 0.
366 B->SetErr(-1);
367 return 0;
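// Illustrative examples of the mapping above: (ARM::GPRRegClassID, raw 5)
// yields ARM::R5 and (ARM::DPRRegClassID, raw 20) yields ARM::D20, while a
// QPR raw register number of 6 is first halved (RawRegister >> 1) to yield
// ARM::Q3; GPR raw numbers 13/14/15 map to ARM::SP/LR/PC.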
370 ///////////////////////////////
371 // //
372 // Utility Functions //
373 // //
374 ///////////////////////////////
376 // Extract/Decode Rd: Inst{15-12}.
377 static inline unsigned decodeRd(uint32_t insn) {
378 return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
381 // Extract/Decode Rn: Inst{19-16}.
382 static inline unsigned decodeRn(uint32_t insn) {
383 return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
386 // Extract/Decode Rm: Inst{3-0}.
387 static inline unsigned decodeRm(uint32_t insn) {
388 return (insn & ARMII::GPRRegMask);
391 // Extract/Decode Rs: Inst{11-8}.
392 static inline unsigned decodeRs(uint32_t insn) {
393 return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
396 static inline unsigned getCondField(uint32_t insn) {
397 return (insn >> ARMII::CondShift);
400 static inline unsigned getIBit(uint32_t insn) {
401 return (insn >> ARMII::I_BitShift) & 1;
404 static inline unsigned getAM3IBit(uint32_t insn) {
405 return (insn >> ARMII::AM3_I_BitShift) & 1;
408 static inline unsigned getPBit(uint32_t insn) {
409 return (insn >> ARMII::P_BitShift) & 1;
412 static inline unsigned getUBit(uint32_t insn) {
413 return (insn >> ARMII::U_BitShift) & 1;
416 static inline unsigned getPUBits(uint32_t insn) {
417 return (insn >> ARMII::U_BitShift) & 3;
420 static inline unsigned getSBit(uint32_t insn) {
421 return (insn >> ARMII::S_BitShift) & 1;
424 static inline unsigned getWBit(uint32_t insn) {
425 return (insn >> ARMII::W_BitShift) & 1;
428 static inline unsigned getDBit(uint32_t insn) {
429 return (insn >> ARMII::D_BitShift) & 1;
432 static inline unsigned getNBit(uint32_t insn) {
433 return (insn >> ARMII::N_BitShift) & 1;
436 static inline unsigned getMBit(uint32_t insn) {
437 return (insn >> ARMII::M_BitShift) & 1;
440 // See A8.4 Shifts applied to a register.
441 // A8.4.2 Register controlled shifts.
443 // getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
444 // into llvm enums for shift opcode. The API clients should pass in the value
445 // encoded with two bits, so the assert stays to signal a wrong API usage.
447 // A8-12: DecodeRegShift()
448 static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
449 switch (bits) {
450 default: assert(0 && "No such value"); return ARM_AM::no_shift;
451 case 0: return ARM_AM::lsl;
452 case 1: return ARM_AM::lsr;
453 case 2: return ARM_AM::asr;
454 case 3: return ARM_AM::ror;
458 // See A8.4 Shifts applied to a register.
459 // A8.4.1 Constant shifts.
461 // getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
462 // encodings into the intended ShiftOpc and shift amount.
464 // A8-11: DecodeImmShift()
465 static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
466 if (ShImm != 0)
467 return;
468 switch (ShOp) {
469 case ARM_AM::no_shift:
470 case ARM_AM::rrx:
471 break;
472 case ARM_AM::lsl:
473 ShOp = ARM_AM::no_shift;
474 break;
475 case ARM_AM::lsr:
476 case ARM_AM::asr:
477 ShImm = 32;
478 break;
479 case ARM_AM::ror:
480 ShOp = ARM_AM::rrx;
481 break;
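// For example, getImmShiftSE() canonicalizes a raw (lsr, imm5 == 0) pair to
// LSR #32 and a raw (ror, imm5 == 0) pair to RRX, while (lsl, 0) collapses to
// no_shift, mirroring the A8-11 DecodeImmShift() pseudocode.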
485 // getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
486 // bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode. The API
487 // clients should pass in the value encoded with two bits, so the assert stays
488 // to signal a wrong API usage.
489 static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
490 switch (bits) {
491 default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
492 case 1: return ARM_AM::ia; // P=0 U=1
493 case 3: return ARM_AM::ib; // P=1 U=1
494 case 0: return ARM_AM::da; // P=0 U=0
495 case 2: return ARM_AM::db; // P=1 U=0
499 ////////////////////////////////////////////
500 // //
501 // Disassemble function definitions //
502 // //
503 ////////////////////////////////////////////
505 /// There is a separate Disassemble*Frm function entry for disassembly of an ARM
506 /// instr into a list of MCOperands in the appropriate order, with possible dst,
507 /// followed by possible src(s).
509 /// The processing of the predicate, and the 'S' modifier bit, if MI modifies
510 /// the CPSR, is factored into ARMBasicMCBuilder's method named
511 /// TryPredicateAndSBitModifier.
513 static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
514 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
516 assert(0 && "Unexpected pseudo instruction!");
517 return false;
520 // A8.6.94 MLA
521 // if d == 15 || n == 15 || m == 15 || a == 15 then UNPREDICTABLE;
523 // A8.6.105 MUL
524 // if d == 15 || n == 15 || m == 15 then UNPREDICTABLE;
526 // A8.6.246 UMULL
527 // if dLo == 15 || dHi == 15 || n == 15 || m == 15 then UNPREDICTABLE;
528 // if dHi == dLo then UNPREDICTABLE;
529 static bool BadRegsMulFrm(unsigned Opcode, uint32_t insn) {
530 unsigned R19_16 = slice(insn, 19, 16);
531 unsigned R15_12 = slice(insn, 15, 12);
532 unsigned R11_8 = slice(insn, 11, 8);
533 unsigned R3_0 = slice(insn, 3, 0);
534 switch (Opcode) {
535 default:
536 // Did we miss an opcode?
537 DEBUG(errs() << "BadRegsMulFrm: unexpected opcode!");
538 return false;
539 case ARM::MLA: case ARM::MLS: case ARM::SMLABB: case ARM::SMLABT:
540 case ARM::SMLATB: case ARM::SMLATT: case ARM::SMLAWB: case ARM::SMLAWT:
541 case ARM::SMMLA: case ARM::SMMLAR: case ARM::SMMLS: case ARM::SMMLSR:
542 case ARM::USADA8:
543 if (R19_16 == 15 || R15_12 == 15 || R11_8 == 15 || R3_0 == 15)
544 return true;
545 return false;
546 case ARM::MUL: case ARM::SMMUL: case ARM::SMMULR:
547 case ARM::SMULBB: case ARM::SMULBT: case ARM::SMULTB: case ARM::SMULTT:
548 case ARM::SMULWB: case ARM::SMULWT: case ARM::SMUAD: case ARM::SMUADX:
549 // A8.6.167 SMLAD & A8.6.172 SMLSD
550 case ARM::SMLAD: case ARM::SMLADX: case ARM::SMLSD: case ARM::SMLSDX:
551 case ARM::USAD8:
552 if (R19_16 == 15 || R11_8 == 15 || R3_0 == 15)
553 return true;
554 return false;
555 case ARM::SMLAL: case ARM::SMULL: case ARM::UMAAL: case ARM::UMLAL:
556 case ARM::UMULL:
557 case ARM::SMLALBB: case ARM::SMLALBT: case ARM::SMLALTB: case ARM::SMLALTT:
558 case ARM::SMLALD: case ARM::SMLALDX: case ARM::SMLSLD: case ARM::SMLSLDX:
559 if (R19_16 == 15 || R15_12 == 15 || R11_8 == 15 || R3_0 == 15)
560 return true;
561 if (R19_16 == R15_12)
562 return true;
563 return false;
567 // Multiply Instructions.
568 // MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLAR,
569 // SMMLS, SMMLSR, SMLAD, SMLADX, SMLSD, SMLSDX, and USADA8 (for convenience):
570 // Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
571 // But note that register checking for {SMLAD, SMLADX, SMLSD, SMLSDX} is
572 // only for {d, n, m}.
574 // MUL, SMMUL, SMMULR, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT, SMUAD,
575 // SMUADX, and USAD8 (for convenience):
576 // Rd{19-16} Rn{3-0} Rm{11-8}
578 // SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT,
579 // SMLALD, SMLALDX, SMLSLD, SMLSLDX:
580 // RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
582 // The mapping of the multiply registers to the "regular" ARM registers, where
583 // there are convenience decoder functions, is:
585 // Inst{15-12} => Rd
586 // Inst{19-16} => Rn
587 // Inst{3-0} => Rm
588 // Inst{11-8} => Rs
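// For example, for MLA (A8.6.94) the operands are therefore built as
// Rd = decodeRn(insn) (Inst{19-16}), Rn = decodeRm(insn) (Inst{3-0}),
// Rm = decodeRs(insn) (Inst{11-8}), and Ra = decodeRd(insn) (Inst{15-12}).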
589 static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
590 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
592 const MCInstrDesc &MCID = ARMInsts[Opcode];
593 unsigned short NumDefs = MCID.getNumDefs();
594 const MCOperandInfo *OpInfo = MCID.OpInfo;
595 unsigned &OpIdx = NumOpsAdded;
597 OpIdx = 0;
599 assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
600 assert(NumOps >= 3
601 && OpInfo[0].RegClass == ARM::GPRRegClassID
602 && OpInfo[1].RegClass == ARM::GPRRegClassID
603 && OpInfo[2].RegClass == ARM::GPRRegClassID
604 && "Expect three register operands");
606 // Sanity check for the register encodings.
607 if (BadRegsMulFrm(Opcode, insn))
608 return false;
610 // Instructions with two destination registers have RdLo{15-12} first.
611 if (NumDefs == 2) {
612 assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
613 "Expect 4th register operand");
614 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
615 decodeRd(insn))));
616 ++OpIdx;
619 // The destination register: RdHi{19-16} or Rd{19-16}.
620 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
621 decodeRn(insn))));
623 // The two src registers: Rn{3-0}, then Rm{11-8}.
624 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
625 decodeRm(insn))));
626 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
627 decodeRs(insn))));
628 OpIdx += 3;
630 // Many multiply instructions (e.g., MLA) have three src registers.
631 // The third register operand is Ra{15-12}.
632 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
633 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
634 decodeRd(insn))));
635 ++OpIdx;
638 return true;
641 // Helper routines for disassembly of coprocessor instructions.
643 static bool LdStCopOpcode(unsigned Opcode) {
644 if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
645 (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
646 return true;
647 return false;
649 static bool CoprocessorOpcode(unsigned Opcode) {
650 if (LdStCopOpcode(Opcode))
651 return true;
653 switch (Opcode) {
654 default:
655 return false;
656 case ARM::CDP: case ARM::CDP2:
657 case ARM::MCR: case ARM::MCR2: case ARM::MRC: case ARM::MRC2:
658 case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
659 return true;
662 static inline unsigned GetCoprocessor(uint32_t insn) {
663 return slice(insn, 11, 8);
665 static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
666 return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
668 static inline unsigned GetCopOpc2(uint32_t insn) {
669 return slice(insn, 7, 5);
671 static inline unsigned GetCopOpc(uint32_t insn) {
672 return slice(insn, 7, 4);
674 // Most of the operands are in immediate forms, except Rd and Rn, which are ARM
675 // core registers.
677 // CDP, CDP2: cop opc1 CRd CRn CRm opc2
679 // MCR, MCR2, MRC, MRC2: cop opc1 Rd CRn CRm opc2
681 // MCRR, MCRR2, MRRC, MRRC2: cop opc Rd Rn CRm
683 // LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn R0 [+/-]imm8:00
684 // and friends
685 // STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn R0 [+/-]imm8:00
686 // and friends
687 // <-- addrmode2 -->
689 // LDC_OPTION: cop CRd Rn imm8
690 // and friends
691 // STC_OPTION: cop CRd Rn imm8
692 // and friends
694 static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
695 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
697 assert(NumOps >= 4 && "Num of operands >= 4 for coprocessor instr");
699 unsigned &OpIdx = NumOpsAdded;
700 // A8.6.92
701 // if coproc == '101x' then SEE "Advanced SIMD and VFP"
702 // But since the special instructions have more explicit encoding bits
703 // specified, if coproc == 10 or 11, we should reject it as invalid.
704 unsigned coproc = GetCoprocessor(insn);
705 if ((Opcode == ARM::MCR || Opcode == ARM::MCRR ||
706 Opcode == ARM::MRC || Opcode == ARM::MRRC) &&
707 (coproc == 10 || coproc == 11)) {
708 DEBUG(errs() << "Encoding error: coproc == 10 or 11 for MCR[R]/MR[R]C\n");
709 return false;
712 bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
713 Opcode == ARM::MRRC || Opcode == ARM::MRRC2);
715 // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
716 bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
717 bool LdStCop = LdStCopOpcode(Opcode);
718 bool RtOut = (Opcode == ARM::MRC || Opcode == ARM::MRC2);
720 OpIdx = 0;
722 if (RtOut) {
723 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
724 decodeRd(insn))));
725 ++OpIdx;
727 MI.addOperand(MCOperand::CreateImm(coproc));
728 ++OpIdx;
730 if (LdStCop) {
731 // Unindexed if P:W = 0b00 --> _OPTION variant
732 unsigned PW = getPBit(insn) << 1 | getWBit(insn);
734 MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));
736 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
737 decodeRn(insn))));
738 OpIdx += 2;
740 if (PW) {
741 MI.addOperand(MCOperand::CreateReg(0));
742 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
743 const MCInstrDesc &MCID = ARMInsts[Opcode];
744 unsigned IndexMode =
745 (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
746 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
747 ARM_AM::no_shift, IndexMode);
748 MI.addOperand(MCOperand::CreateImm(Offset));
749 OpIdx += 2;
750 } else {
751 MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));
752 ++OpIdx;
754 } else {
755 MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
756 : GetCopOpc1(insn, NoGPR)));
757 ++OpIdx;
759 if (!RtOut) {
760 MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
761 : MCOperand::CreateReg(
762 getRegisterEnum(B, ARM::GPRRegClassID,
763 decodeRd(insn))));
764 ++OpIdx;
767 MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
768 getRegisterEnum(B, ARM::GPRRegClassID,
769 decodeRn(insn)))
770 : MCOperand::CreateImm(decodeRn(insn)));
772 MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));
774 OpIdx += 2;
776 if (!OneCopOpc) {
777 MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
778 ++OpIdx;
782 return true;
785 // Branch Instructions.
786 // BL: SignExtend(Imm24:'00', 32)
787 // Bcc, BL_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
788 // SMC: ZeroExtend(imm4, 32)
789 // SVC: ZeroExtend(Imm24, 32)
791 // Various coprocessor instructions are assigned BrFrm arbitrarily.
792 // Delegates to DisassembleCoprocessor() helper function.
794 // MRS/MRSsys: Rd
795 // MSR/MSRsys: Rm mask=Inst{19-16}
796 // BXJ: Rm
797 // MSRi/MSRsysi: so_imm
798 // SRSW/SRS: ldstm_mode:$amode mode_imm
799 // RFEW/RFE: ldstm_mode:$amode Rn
800 static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
801 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
803 if (CoprocessorOpcode(Opcode))
804 return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B);
806 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
807 if (!OpInfo) return false;
809 // MRS and MRSsys take one GPR reg Rd.
810 if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
811 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
812 "Reg operand expected");
813 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
814 decodeRd(insn))));
815 NumOpsAdded = 1;
816 return true;
818 // BXJ takes one GPR reg Rm.
819 if (Opcode == ARM::BXJ) {
820 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
821 "Reg operand expected");
822 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
823 decodeRm(insn))));
824 NumOpsAdded = 1;
825 return true;
827 // MSR takes a mask, followed by one GPR reg Rm. The mask contains the R Bit in
828 // bit 4, and the special register fields in bits 3-0.
829 if (Opcode == ARM::MSR) {
830 assert(NumOps >= 1 && OpInfo[1].RegClass == ARM::GPRRegClassID &&
831 "Reg operand expected");
832 MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
833 slice(insn, 19, 16) /* Special Reg */ ));
834 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
835 decodeRm(insn))));
836 NumOpsAdded = 2;
837 return true;
839 // MSRi takes a mask, followed by one so_imm operand. The mask contains the
840 // R Bit in bit 4, and the special register fields in bits 3-0.
841 if (Opcode == ARM::MSRi) {
842 // A5.2.11 MSR (immediate), and hints & B6.1.6 MSR (immediate)
843 // The hints instructions have more specific encodings, so if mask == 0,
844 // we should reject this as an invalid instruction.
845 if (slice(insn, 19, 16) == 0)
846 return false;
847 MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
848 slice(insn, 19, 16) /* Special Reg */ ));
849 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
850 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
851 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
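// For example, Inst{11-8} == 1 with Inst{7-0} == 0xFF decodes to
// rotr32(0xFF, 2) == 0xC000003F.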
852 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
853 unsigned Imm = insn & 0xFF;
854 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
855 NumOpsAdded = 2;
856 return true;
858 if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
859 Opcode == ARM::RFEW || Opcode == ARM::RFE) {
860 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
861 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
863 if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
864 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
865 else
866 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
867 decodeRn(insn))));
868 NumOpsAdded = 3;
869 return true;
872 assert((Opcode == ARM::Bcc || Opcode == ARM::BL || Opcode == ARM::BL_pred
873 || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
874 "Unexpected Opcode");
876 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
878 int Imm32 = 0;
879 if (Opcode == ARM::SMC) {
880 // ZeroExtend(imm4, 32) where imm4 = Inst{3-0}.
881 Imm32 = slice(insn, 3, 0);
882 } else if (Opcode == ARM::SVC) {
883 // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
884 Imm32 = slice(insn, 23, 0);
885 } else {
886 // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
887 unsigned Imm26 = slice(insn, 23, 0) << 2;
888 //Imm32 = signextend<signed int, 26>(Imm26);
889 Imm32 = SignExtend32<26>(Imm26);
892 MI.addOperand(MCOperand::CreateImm(Imm32));
893 NumOpsAdded = 1;
895 return true;
898 // Misc. Branch Instructions.
899 // BX_RET, MOVPCLR
900 // BLX, BLX_pred, BX, BX_pred
901 // BLXi
902 static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
903 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
905 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
906 if (!OpInfo) return false;
908 unsigned &OpIdx = NumOpsAdded;
910 OpIdx = 0;
912 // BX_RET and MOVPCLR have only two predicate operands; do an early return.
913 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR)
914 return true;
916 // BLX and BX take one GPR reg.
917 if (Opcode == ARM::BLX || Opcode == ARM::BLX_pred ||
918 Opcode == ARM::BX || Opcode == ARM::BX_pred) {
919 assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
920 "Reg operand expected");
921 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
922 decodeRm(insn))));
923 OpIdx = 1;
924 return true;
927 // BLXi takes imm32 (the PC offset).
928 if (Opcode == ARM::BLXi) {
929 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
930 // SignExtend(imm24:H:'0', 32) where imm24 = Inst{23-0} and H = Inst{24}.
931 unsigned Imm26 = slice(insn, 23, 0) << 2 | slice(insn, 24, 24) << 1;
932 int Imm32 = SignExtend32<26>(Imm26);
933 MI.addOperand(MCOperand::CreateImm(Imm32));
934 OpIdx = 1;
935 return true;
938 return false;
941 static inline bool getBFCInvMask(uint32_t insn, uint32_t &mask) {
942 uint32_t lsb = slice(insn, 11, 7);
943 uint32_t msb = slice(insn, 20, 16);
944 uint32_t Val = 0;
945 if (msb < lsb) {
946 DEBUG(errs() << "Encoding error: msb < lsb\n");
947 return false;
950 for (uint32_t i = lsb; i <= msb; ++i)
951 Val |= (1 << i);
952 mask = ~Val;
953 return true;
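// For example, lsb == 4 and msb == 7 set Val to 0xF0, so the returned
// inverted mask is 0xFFFFFF0F.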
956 // Standard data-processing instructions allow PC as a register specifier,
957 // but we should reject other DPFrm instructions with PC as registers.
958 static bool BadRegsDPFrm(unsigned Opcode, uint32_t insn) {
959 switch (Opcode) {
960 default:
961 // Did we miss an opcode?
962 if (decodeRd(insn) == 15 || decodeRn(insn) == 15 || decodeRm(insn) == 15) {
963 DEBUG(errs() << "DPFrm with bad reg specifier(s)\n");
964 return true;
966 case ARM::ADCrr: case ARM::ADDSrr: case ARM::ADDrr: case ARM::ANDrr:
967 case ARM::BICrr: case ARM::CMNzrr: case ARM::CMPrr: case ARM::EORrr:
968 case ARM::ORRrr: case ARM::RSBrr: case ARM::RSCrr: case ARM::SBCrr:
969 case ARM::SUBSrr: case ARM::SUBrr: case ARM::TEQrr: case ARM::TSTrr:
970 return false;
974 // A major complication is the fact that some of the saturating add/subtract
975 // operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
976 // They are QADD, QDADD, QDSUB, and QSUB.
977 static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
978 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
980 const MCInstrDesc &MCID = ARMInsts[Opcode];
981 unsigned short NumDefs = MCID.getNumDefs();
982 bool isUnary = isUnaryDP(MCID.TSFlags);
983 const MCOperandInfo *OpInfo = MCID.OpInfo;
984 unsigned &OpIdx = NumOpsAdded;
986 OpIdx = 0;
988 // Disassemble register def if there is one.
989 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
990 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
991 decodeRd(insn))));
992 ++OpIdx;
995 // Now disassemble the src operands.
996 if (OpIdx >= NumOps)
997 return false;
999 // Special-case handling of BFC/BFI/SBFX/UBFX.
1000 if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
1001 // A8.6.17 BFC & A8.6.18 BFI
1002 // Sanity check Rd.
1003 if (decodeRd(insn) == 15)
1004 return false;
1005 MI.addOperand(MCOperand::CreateReg(0));
1006 if (Opcode == ARM::BFI) {
1007 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1008 decodeRm(insn))));
1009 ++OpIdx;
1011 uint32_t mask = 0;
1012 if (!getBFCInvMask(insn, mask))
1013 return false;
1015 MI.addOperand(MCOperand::CreateImm(mask));
1016 OpIdx += 2;
1017 return true;
1019 if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
1020 // Sanity check Rd and Rm.
1021 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1022 return false;
1023 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1024 decodeRm(insn))));
1025 MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
1026 MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16) + 1));
1027 OpIdx += 3;
1028 return true;
1031 bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
1032 Opcode == ARM::QDSUB || Opcode == ARM::QSUB);
1034 // BinaryDP has an Rn operand.
1035 if (!isUnary) {
1036 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1037 "Reg operand expected");
1038 MI.addOperand(MCOperand::CreateReg(
1039 getRegisterEnum(B, ARM::GPRRegClassID,
1040 RmRn ? decodeRm(insn) : decodeRn(insn))));
1041 ++OpIdx;
1044 // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
1045 if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) {
1046 MI.addOperand(MCOperand::CreateReg(0));
1047 ++OpIdx;
1050 // Now disassemble operand 2.
1051 if (OpIdx >= NumOps)
1052 return false;
1054 if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
1055 // We have a reg/reg form.
1056 // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
1057 // routed here as well.
1058 // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
1059 if (BadRegsDPFrm(Opcode, insn))
1060 return false;
1061 MI.addOperand(MCOperand::CreateReg(
1062 getRegisterEnum(B, ARM::GPRRegClassID,
1063 RmRn? decodeRn(insn) : decodeRm(insn))));
1064 ++OpIdx;
1065 } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
1066 // These two instructions don't allow d as 15.
1067 if (decodeRd(insn) == 15)
1068 return false;
1069 // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
1070 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1071 unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
1072 if (!B->tryAddingSymbolicOperand(Imm16, 4, MI))
1073 MI.addOperand(MCOperand::CreateImm(Imm16));
1074 ++OpIdx;
1075 } else {
1076 // We have a reg/imm form.
1077 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
1078 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
1079 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
1080 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1081 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
1082 unsigned Imm = insn & 0xFF;
1083 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
1084 ++OpIdx;
1087 return true;
1090 static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1091 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1093 const MCInstrDesc &MCID = ARMInsts[Opcode];
1094 unsigned short NumDefs = MCID.getNumDefs();
1095 bool isUnary = isUnaryDP(MCID.TSFlags);
1096 const MCOperandInfo *OpInfo = MCID.OpInfo;
1097 unsigned &OpIdx = NumOpsAdded;
1099 OpIdx = 0;
1101 // Disassemble register def if there is one.
1102 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
1103 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1104 decodeRd(insn))));
1105 ++OpIdx;
1108 // Disassemble the src operands.
1109 if (OpIdx >= NumOps)
1110 return false;
1112 // BinaryDP has an Rn operand.
1113 if (!isUnary) {
1114 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1115 "Reg operand expected");
1116 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1117 decodeRn(insn))));
1118 ++OpIdx;
1121 // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
1122 if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) {
1123 MI.addOperand(MCOperand::CreateReg(0));
1124 ++OpIdx;
1127 // Disassemble operand 2, which consists of three components.
1128 if (OpIdx + 2 >= NumOps)
1129 return false;
1131 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1132 (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
1133 (OpInfo[OpIdx+2].RegClass < 0) &&
1134 "Expect 3 reg operands");
1136 // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
1137 unsigned Rs = slice(insn, 4, 4);
1139 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1140 decodeRm(insn))));
1141 if (Rs) {
1142 // If Inst{7} != 0, we should reject this insn as an invalid encoding.
1143 if (slice(insn, 7, 7))
1144 return false;
1146 // A8.6.3 ADC (register-shifted register)
1147 // if d == 15 || n == 15 || m == 15 || s == 15 then UNPREDICTABLE;
1149 // This also accounts for shift instructions (register) where, fortunately,
1150 // Inst{19-16} = 0b0000.
1151 // A8.6.89 LSL (register)
1152 // if d == 15 || n == 15 || m == 15 then UNPREDICTABLE;
1153 if (decodeRd(insn) == 15 || decodeRn(insn) == 15 ||
1154 decodeRm(insn) == 15 || decodeRs(insn) == 15)
1155 return false;
1157 // Register-controlled shifts: [Rm, Rs, shift].
1158 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1159 decodeRs(insn))));
1160 // Inst{6-5} encodes the shift opcode.
1161 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1162 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
1163 } else {
1164 // Constant shifts: [Rm, reg0, shift_imm].
1165 MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
1166 // Inst{6-5} encodes the shift opcode.
1167 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1168 // Inst{11-7} encodes the imm5 shift amount.
1169 unsigned ShImm = slice(insn, 11, 7);
1171 // A8.4.1. Possible rrx or shift amount of 32...
1172 getImmShiftSE(ShOp, ShImm);
1173 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
1175 OpIdx += 3;
1177 return true;
1180 static bool BadRegsLdStFrm(unsigned Opcode, uint32_t insn, bool Store, bool WBack,
1181 bool Imm) {
1182 const StringRef Name = ARMInsts[Opcode].Name;
1183 unsigned Rt = decodeRd(insn);
1184 unsigned Rn = decodeRn(insn);
1185 unsigned Rm = decodeRm(insn);
1186 unsigned P = getPBit(insn);
1187 unsigned W = getWBit(insn);
1189 if (Store) {
1190 // Only STR (immediate, register) allows PC as the source.
1191 if (Name.startswith("STRB") && Rt == 15) {
1192 DEBUG(errs() << "if t == 15 then UNPREDICTABLE\n");
1193 return true;
1195 if (WBack && (Rn == 15 || Rn == Rt)) {
1196 DEBUG(errs() << "if wback && (n == 15 || n == t) then UNPREDICTABLE\n");
1197 return true;
1199 if (!Imm && Rm == 15) {
1200 DEBUG(errs() << "if m == 15 then UNPREDICTABLE\n");
1201 return true;
1203 } else {
1204 // Only LDR (immediate, register) allows PC as the destination.
1205 if (Name.startswith("LDRB") && Rt == 15) {
1206 DEBUG(errs() << "if t == 15 then UNPREDICTABLE\n");
1207 return true;
1209 if (Imm) {
1210 // Immediate
1211 if (Rn == 15) {
1212 // The literal form must be in offset mode; it's an encoding error
1213 // otherwise.
1214 if (!(P == 1 && W == 0)) {
1215 DEBUG(errs() << "Ld literal form with !(P == 1 && W == 0)\n");
1216 return true;
1218 // LDRB (literal) does not allow PC as the destination.
1219 if (Opcode != ARM::LDRi12 && Rt == 15) {
1220 DEBUG(errs() << "if t == 15 then UNPREDICTABLE\n");
1221 return true;
1223 } else {
1224 // Write back while Rn == Rt does not make sense.
1225 if (WBack && (Rn == Rt)) {
1226 DEBUG(errs() << "if wback && n == t then UNPREDICTABLE\n");
1227 return true;
1230 } else {
1231 // Register
1232 if (Rm == 15) {
1233 DEBUG(errs() << "if m == 15 then UNPREDICTABLE\n");
1234 return true;
1236 if (WBack && (Rn == 15 || Rn == Rt)) {
1237 DEBUG(errs() << "if wback && (n == 15 || n == t) then UNPREDICTABLE\n");
1238 return true;
1242 return false;
1245 static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1246 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1248 const MCInstrDesc &MCID = ARMInsts[Opcode];
1249 bool isPrePost = isPrePostLdSt(MCID.TSFlags);
1250 const MCOperandInfo *OpInfo = MCID.OpInfo;
1251 if (!OpInfo) return false;
1253 unsigned &OpIdx = NumOpsAdded;
1255 OpIdx = 0;
1257 assert(((!isStore && MCID.getNumDefs() > 0) ||
1258 (isStore && (MCID.getNumDefs() == 0 || isPrePost)))
1259 && "Invalid arguments");
1261 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1262 if (isPrePost && isStore) {
1263 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1264 "Reg operand expected");
1265 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1266 decodeRn(insn))));
1267 ++OpIdx;
1270 // Disassemble the dst/src operand.
1271 if (OpIdx >= NumOps)
1272 return false;
1274 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1275 "Reg operand expected");
1276 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1277 decodeRd(insn))));
1278 ++OpIdx;
1280 // After dst of a pre- and post-indexed load is the address base writeback.
1281 if (isPrePost && !isStore) {
1282 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1283 "Reg operand expected");
1284 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1285 decodeRn(insn))));
1286 ++OpIdx;
1289 // Disassemble the base operand.
1290 if (OpIdx >= NumOps)
1291 return false;
1293 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1294 "Reg operand expected");
1295 assert((!isPrePost || (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1))
1296 && "Index mode or tied_to operand expected");
1297 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1298 decodeRn(insn))));
1299 ++OpIdx;
1301 // For reg/reg form, base reg is followed by +/- reg shop imm.
1302 // For immediate form, it is followed by +/- imm12.
1303 // See also ARMAddressingModes.h (Addressing Mode #2).
1304 if (OpIdx + 1 >= NumOps)
1305 return false;
1307 if (BadRegsLdStFrm(Opcode, insn, isStore, isPrePost, getIBit(insn)==0))
1308 return false;
1310 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1311 unsigned IndexMode =
1312 (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
1313 if (getIBit(insn) == 0) {
1314 // For pre- and post-indexed case, add a reg0 operand (Addressing Mode #2).
1315 // Otherwise, skip the reg operand since for addrmode_imm12, Rn has already
1316 // been populated.
1317 if (isPrePost) {
1318 MI.addOperand(MCOperand::CreateReg(0));
1319 OpIdx += 1;
1322 unsigned Imm12 = slice(insn, 11, 0);
1323 if (Opcode == ARM::LDRBi12 || Opcode == ARM::LDRi12 ||
1324 Opcode == ARM::STRBi12 || Opcode == ARM::STRi12) {
1325 // Disassemble the 12-bit immediate offset, which is the second operand in
1326 // $addrmode_imm12 => (ops GPR:$base, i32imm:$offsimm).
1327 int Offset = AddrOpcode == ARM_AM::add ? 1 * Imm12 : -1 * Imm12;
1328 MI.addOperand(MCOperand::CreateImm(Offset));
1329 } else {
1330 // Disassemble the 12-bit immediate offset, which is the second operand in
1331 // $am2offset => (ops GPR, i32imm).
1332 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift,
1333 IndexMode);
1334 MI.addOperand(MCOperand::CreateImm(Offset));
1336 OpIdx += 1;
1337 } else {
1338 // If Inst{25} = 1 and Inst{4} != 0, we should reject this as invalid.
1339 if (slice(insn,4,4) == 1)
1340 return false;
1342 // Disassemble the offset reg (Rm), shift type, and immediate shift length.
1343 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1344 decodeRm(insn))));
1345 // Inst{6-5} encodes the shift opcode.
1346 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1347 // Inst{11-7} encodes the imm5 shift amount.
1348 unsigned ShImm = slice(insn, 11, 7);
1350 // A8.4.1. Possible rrx or shift amount of 32...
1351 getImmShiftSE(ShOp, ShImm);
1352 MI.addOperand(MCOperand::CreateImm(
1353 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp, IndexMode)));
1354 OpIdx += 2;
1357 return true;
1360 static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1361 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1362 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false, B);
1365 static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1366 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1367 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
1370 static bool HasDualReg(unsigned Opcode) {
1371 switch (Opcode) {
1372 default:
1373 return false;
1374 case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
1375 case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
1376 return true;
1380 static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1381 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1383 const MCInstrDesc &MCID = ARMInsts[Opcode];
1384 bool isPrePost = isPrePostLdSt(MCID.TSFlags);
1385 const MCOperandInfo *OpInfo = MCID.OpInfo;
1386 if (!OpInfo) return false;
1388 unsigned &OpIdx = NumOpsAdded;
1390 OpIdx = 0;
1392 assert(((!isStore && MCID.getNumDefs() > 0) ||
1393 (isStore && (MCID.getNumDefs() == 0 || isPrePost)))
1394 && "Invalid arguments");
1396 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1397 if (isPrePost && isStore) {
1398 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1399 "Reg operand expected");
1400 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1401 decodeRn(insn))));
1402 ++OpIdx;
1405 // Disassemble the dst/src operand.
1406 if (OpIdx >= NumOps)
1407 return false;
1409 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1410 "Reg operand expected");
1411 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1412 decodeRd(insn))));
1413 ++OpIdx;
1415 // Fill in the second register operand (Rt+1) for LDRD and STRD.
1416 if (HasDualReg(Opcode)) {
1417 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1418 decodeRd(insn) + 1)));
1419 ++OpIdx;
1422 // After dst of a pre- and post-indexed load is the address base writeback.
1423 if (isPrePost && !isStore) {
1424 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1425 "Reg operand expected");
1426 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1427 decodeRn(insn))));
1428 ++OpIdx;
1431 // Disassemble the base operand.
1432 if (OpIdx >= NumOps)
1433 return false;
1435 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1436 "Reg operand expected");
1437 assert((!isPrePost || (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1))
1438 && "Offset mode or tied_to operand expected");
1439 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1440 decodeRn(insn))));
1441 ++OpIdx;
1443 // For reg/reg form, base reg is followed by +/- reg.
1444 // For immediate form, it is followed by +/- imm8.
1445 // See also ARMAddressingModes.h (Addressing Mode #3).
1446 if (OpIdx + 1 >= NumOps)
1447 return false;
1449 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1450 (OpInfo[OpIdx+1].RegClass < 0) &&
1451 "Expect 1 reg operand followed by 1 imm operand");
1453 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1454 unsigned IndexMode =
1455 (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
1456 if (getAM3IBit(insn) == 1) {
1457 MI.addOperand(MCOperand::CreateReg(0));
1459 // Disassemble the 8-bit immediate offset.
1460 unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
1461 unsigned Imm4L = insn & 0xF;
1462 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L,
1463 IndexMode);
1464 MI.addOperand(MCOperand::CreateImm(Offset));
1465 } else {
1466 // Disassemble the offset reg (Rm).
1467 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1468 decodeRm(insn))));
1469 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0, IndexMode);
1470 MI.addOperand(MCOperand::CreateImm(Offset));
1472 OpIdx += 2;
1474 return true;
1477 static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1478 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1479 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false,
1483 static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1484 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1485 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
1488 // The algorithm for disassembly of LdStMulFrm is different from others because
1489 // it explicitly populates the two predicate operands after the base register.
1490 // After that, we need to populate the reglist with each affected register
1491 // encoded as an MCOperand.
1492 static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1493 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1495 assert(NumOps >= 4 && "LdStMulFrm expects NumOps >= 4");
1496 NumOpsAdded = 0;
1498 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1500 // Writeback to base, if necessary.
1501 if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::STMIA_UPD ||
1502 Opcode == ARM::LDMDA_UPD || Opcode == ARM::STMDA_UPD ||
1503 Opcode == ARM::LDMDB_UPD || Opcode == ARM::STMDB_UPD ||
1504 Opcode == ARM::LDMIB_UPD || Opcode == ARM::STMIB_UPD) {
1505 MI.addOperand(MCOperand::CreateReg(Base));
1506 ++NumOpsAdded;
1509 // Add the base register operand.
1510 MI.addOperand(MCOperand::CreateReg(Base));
1512 // Handling the two predicate operands before the reglist.
1513 int64_t CondVal = getCondField(insn);
1514 if (CondVal == 0xF)
1515 return false;
1516 MI.addOperand(MCOperand::CreateImm(CondVal));
1517 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1519 NumOpsAdded += 3;
1521 // Fill the variadic part of reglist.
1522 unsigned RegListBits = insn & ((1 << 16) - 1);
1523 for (unsigned i = 0; i < 16; ++i) {
1524 if ((RegListBits >> i) & 1) {
1525 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1526 i)));
1527 ++NumOpsAdded;
1531 return true;
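// For example, RegListBits == 0x4001 (bits 0 and 14) appends ARM::R0 and
// ARM::LR to the reglist.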
1534 // LDREX, LDREXB, LDREXH: Rd Rn
1535 // LDREXD: Rd Rd+1 Rn
1536 // STREX, STREXB, STREXH: Rd Rm Rn
1537 // STREXD: Rd Rm Rm+1 Rn
1539 // SWP, SWPB: Rd Rm Rn
1540 static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1541 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1543 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1544 if (!OpInfo) return false;
1546 unsigned &OpIdx = NumOpsAdded;
1548 OpIdx = 0;
1550 assert(NumOps >= 2
1551 && OpInfo[0].RegClass == ARM::GPRRegClassID
1552 && OpInfo[1].RegClass == ARM::GPRRegClassID
1553 && "Expect 2 reg operands");
1555 bool isStore = slice(insn, 20, 20) == 0;
1556 bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);
1558 // Add the destination operand.
1559 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1560 decodeRd(insn))));
1561 ++OpIdx;
1563 // Store register Exclusive needs a source operand.
1564 if (isStore) {
1565 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1566 decodeRm(insn))));
1567 ++OpIdx;
1569 if (isDW) {
1570 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1571 decodeRm(insn)+1)));
1572 ++OpIdx;
1574 } else if (isDW) {
1575 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1576 decodeRd(insn)+1)));
1577 ++OpIdx;
1580 // Finally add the pointer operand.
1581 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1582 decodeRn(insn))));
1583 ++OpIdx;
1585 return true;
1588 // Misc. Arithmetic Instructions.
1589 // CLZ: Rd Rm
1590 // PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
1591 // RBIT, REV, REV16, REVSH: Rd Rm
1592 static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1593 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1595 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1596 unsigned &OpIdx = NumOpsAdded;
1598 OpIdx = 0;
1600 assert(NumOps >= 2
1601 && OpInfo[0].RegClass == ARM::GPRRegClassID
1602 && OpInfo[1].RegClass == ARM::GPRRegClassID
1603 && "Expect 2 reg operands");
1605 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1607 // Sanity check the registers, which should not be 15.
1608 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1609 return false;
1610 if (ThreeReg && decodeRn(insn) == 15)
1611 return false;
1613 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1614 decodeRd(insn))));
1615 ++OpIdx;
1617 if (ThreeReg) {
1618 assert(NumOps >= 4 && "Expect >= 4 operands");
1619 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1620 decodeRn(insn))));
1621 ++OpIdx;
1624 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1625 decodeRm(insn))));
1626 ++OpIdx;
1628 // If there is still an operand info left which is an immediate operand, add
1629 // an additional imm5 LSL/ASR operand.
1630 if (ThreeReg && OpInfo[OpIdx].RegClass < 0
1631 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1632 // Extract the 5-bit immediate field Inst{11-7}.
1633 unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
1634 ARM_AM::ShiftOpc Opc = ARM_AM::no_shift;
1635 if (Opcode == ARM::PKHBT)
1636 Opc = ARM_AM::lsl;
1637 else if (Opcode == ARM::PKHTB)
1638 Opc = ARM_AM::asr;
1639 getImmShiftSE(Opc, ShiftAmt);
1640 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShiftAmt)));
1641 ++OpIdx;
1644 return true;
1647 /// DisassembleSatFrm - Disassemble saturate instructions:
1648 /// SSAT, SSAT16, USAT, and USAT16.
1649 static bool DisassembleSatFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1650 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1652 // A8.6.183 SSAT
1653 // if d == 15 || n == 15 then UNPREDICTABLE;
1654 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1655 return false;
1657 const MCInstrDesc &MCID = ARMInsts[Opcode];
1658 NumOpsAdded = MCID.getNumOperands() - 2; // ignore predicate operands
1660 // Disassemble register def.
1661 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1662 decodeRd(insn))));
1664 unsigned Pos = slice(insn, 20, 16);
1665 if (Opcode == ARM::SSAT || Opcode == ARM::SSAT16)
1666 Pos += 1;
1667 MI.addOperand(MCOperand::CreateImm(Pos));
1669 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1670 decodeRm(insn))));
1672 if (NumOpsAdded == 4) {
1673 ARM_AM::ShiftOpc Opc = (slice(insn, 6, 6) != 0 ? ARM_AM::asr : ARM_AM::lsl);
1674 // Inst{11-7} encodes the imm5 shift amount.
1675 unsigned ShAmt = slice(insn, 11, 7);
1676 if (ShAmt == 0) {
1677 // A8.6.183. Possible ASR shift amount of 32...
1678 if (Opc == ARM_AM::asr)
1679 ShAmt = 32;
1680 else
1681 Opc = ARM_AM::no_shift;
1683 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShAmt)));
1685 return true;
1688 // Extend instructions.
1689 // SXT* and UXT*: Rd [Rn] Rm [rot_imm].
1690 // The 2nd operand register is Rn and the 3rd operand register is Rm for the
1691 // three register operand form. Otherwise, Rn=0b1111 and only Rm is used.
1692 static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1693 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1695 // A8.6.220 SXTAB
1696 // if d == 15 || m == 15 then UNPREDICTABLE;
1697 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1698 return false;
1700 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1701 unsigned &OpIdx = NumOpsAdded;
1703 OpIdx = 0;
1705 assert(NumOps >= 2
1706 && OpInfo[0].RegClass == ARM::GPRRegClassID
1707 && OpInfo[1].RegClass == ARM::GPRRegClassID
1708 && "Expect 2 reg operands");
1710 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1712 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1713 decodeRd(insn))));
1714 ++OpIdx;
1716 if (ThreeReg) {
1717 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1718 decodeRn(insn))));
1719 ++OpIdx;
1722 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1723 decodeRm(insn))));
1724 ++OpIdx;
1726 // If there is still an operand info left which is an immediate operand, add
1727 // an additional rotate immediate operand.
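  // Illustrative example (derived from the code below): Inst{11-10} = 0b10
  // gives rot = 2, so the immediate operand added is 2 << 3 = 16, i.e. the
  // source register is rotated right by 16 bits before extension
  // (e.g. SXTB ..., ROR #16).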
1728 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
1729 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1730 // Extract the 2-bit rotate field Inst{11-10}.
1731 unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
1732 // Rotation by 8, 16, or 24 bits.
1733 MI.addOperand(MCOperand::CreateImm(rot << 3));
1734 ++OpIdx;
1737 return true;
1740 /////////////////////////////////////
1741 // //
1742 // Utility Functions For VFP //
1743 // //
1744 /////////////////////////////////////
1746 // Extract/Decode Dd/Sd:
1748 // SP => d = UInt(Vd:D)
1749 // DP => d = UInt(D:Vd)
1750 static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
1751 return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
1752 : (decodeRd(insn) | getDBit(insn) << 4);
1755 // Extract/Decode Dn/Sn:
1757 // SP => n = UInt(Vn:N)
1758 // DP => n = UInt(N:Vn)
1759 static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
1760 return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
1761 : (decodeRn(insn) | getNBit(insn) << 4);
1764 // Extract/Decode Dm/Sm:
1766 // SP => m = UInt(Vm:M)
1767 // DP => m = UInt(M:Vm)
1768 static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
1769 return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
1770 : (decodeRm(insn) | getMBit(insn) << 4);
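// Illustrative example for the three decoders above: with Vd (Inst{15-12}) =
// 0b0101 and D (Inst{22}) = 1, a single-precision operand decodes to
// Vd:D = 0b01011 = 11 (S11), while a double-precision operand decodes to
// D:Vd = 0b10101 = 21 (D21).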
1773 // A7.5.1
1774 static APInt VFPExpandImm(unsigned char byte, unsigned N) {
1775 assert(N == 32 || N == 64);
1777 uint64_t Result;
1778 unsigned bit6 = slice(byte, 6, 6);
1779 if (N == 32) {
1780 Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
1781 if (bit6)
1782 Result |= 0x1f << 25;
1783 else
1784 Result |= 0x1 << 30;
1785 } else {
1786 Result = (uint64_t)slice(byte, 7, 7) << 63 |
1787 (uint64_t)slice(byte, 5, 0) << 48;
1788 if (bit6)
1789 Result |= 0xffULL << 54;
1790 else
1791 Result |= 0x1ULL << 62;
1793 return APInt(N, Result);
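// Illustrative example for VFPExpandImm above: byte = 0x70 (sign 0, bit6 = 1,
// bits 5-0 = 0b110000) yields 0x3F800000 for N == 32 and 0x3FF0000000000000
// for N == 64, i.e. the floating-point value 1.0.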
1796 // VFP Unary Format Instructions:
1798 // VCMP[E]ZD, VCMP[E]ZS: compare one floating-point register with zero
1799 // VCVTDS, VCVTSD: convert between double-precision and single-precision
1800 // The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
1801 static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1802 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1804 assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
1806 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1807 unsigned &OpIdx = NumOpsAdded;
1809 OpIdx = 0;
1811 unsigned RegClass = OpInfo[OpIdx].RegClass;
1812 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1813 "Reg operand expected");
1814 bool isSP = (RegClass == ARM::SPRRegClassID);
1816 MI.addOperand(MCOperand::CreateReg(
1817 getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1818 ++OpIdx;
1820 // Early return for compare with zero instructions.
1821 if (Opcode == ARM::VCMPEZD || Opcode == ARM::VCMPEZS
1822 || Opcode == ARM::VCMPZD || Opcode == ARM::VCMPZS)
1823 return true;
1825 RegClass = OpInfo[OpIdx].RegClass;
1826 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1827 "Reg operand expected");
1828 isSP = (RegClass == ARM::SPRRegClassID);
1830 MI.addOperand(MCOperand::CreateReg(
1831 getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1832 ++OpIdx;
1834 return true;
1837 // All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
1838 // Some of them have operand constraints which tie the first operand in the
1839 // InOperandList to that of the dst. As far as asm printing is concerned, this
1840 // tied_to operand is simply skipped.
1841 static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1842 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1844 assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
1846 const MCInstrDesc &MCID = ARMInsts[Opcode];
1847 const MCOperandInfo *OpInfo = MCID.OpInfo;
1848 unsigned &OpIdx = NumOpsAdded;
1850 OpIdx = 0;
1852 unsigned RegClass = OpInfo[OpIdx].RegClass;
1853 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1854 "Reg operand expected");
1855 bool isSP = (RegClass == ARM::SPRRegClassID);
1857 MI.addOperand(MCOperand::CreateReg(
1858 getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1859 ++OpIdx;
1861 // Skip tied_to operand constraint.
1862 if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
1863 assert(NumOps >= 4 && "Expect >=4 operands");
1864 MI.addOperand(MCOperand::CreateReg(0));
1865 ++OpIdx;
1868 MI.addOperand(MCOperand::CreateReg(
1869 getRegisterEnum(B, RegClass, decodeVFPRn(insn, isSP))));
1870 ++OpIdx;
1872 MI.addOperand(MCOperand::CreateReg(
1873 getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1874 ++OpIdx;
1876 return true;
1879 // A8.6.295 vcvt (floating-point <-> integer)
1880 // Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
1881 // FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
1883 // A8.6.297 vcvt (floating-point and fixed-point)
1884 // Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
1885 static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1886 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1888 assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
1890 const MCInstrDesc &MCID = ARMInsts[Opcode];
1891 const MCOperandInfo *OpInfo = MCID.OpInfo;
1892 if (!OpInfo) return false;
1894 bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
1895 bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
1896 unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1898 if (fixed_point) {
1899 // A8.6.297
1900 assert(NumOps >= 3 && "Expect >= 3 operands");
1901 int size = slice(insn, 7, 7) == 0 ? 16 : 32;
1902 int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
1903 MI.addOperand(MCOperand::CreateReg(
1904 getRegisterEnum(B, RegClassID,
1905 decodeVFPRd(insn, SP))));
1907 assert(MCID.getOperandConstraint(1, MCOI::TIED_TO) != -1 &&
1908 "Tied to operand expected");
1909 MI.addOperand(MI.getOperand(0));
1911 assert(OpInfo[2].RegClass < 0 && !OpInfo[2].isPredicate() &&
1912 !OpInfo[2].isOptionalDef() && "Imm operand expected");
1913 MI.addOperand(MCOperand::CreateImm(fbits));
1915 NumOpsAdded = 3;
1916 } else {
1917 // A8.6.295
1918 // The Rd (destination) and Rm (source) bits have different interpretations
1919     // depending on whether they are single precision.
1920 unsigned d, m;
1921 if (slice(insn, 18, 18) == 1) { // to_integer operation
1922 d = decodeVFPRd(insn, true /* Is Single Precision */);
1923 MI.addOperand(MCOperand::CreateReg(
1924 getRegisterEnum(B, ARM::SPRRegClassID, d)));
1925 m = decodeVFPRm(insn, SP);
1926 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, m)));
1927 } else {
1928 d = decodeVFPRd(insn, SP);
1929 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, d)));
1930 m = decodeVFPRm(insn, true /* Is Single Precision */);
1931 MI.addOperand(MCOperand::CreateReg(
1932 getRegisterEnum(B, ARM::SPRRegClassID, m)));
1934 NumOpsAdded = 2;
1937 return true;
1940 // VMOVRS - A8.6.330
1941 // Rt => Rd; Sn => UInt(Vn:N)
1942 static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1943 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1945 assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");
1947 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1948 decodeRd(insn))));
1949 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1950 decodeVFPRn(insn, true))));
1951 NumOpsAdded = 2;
1952 return true;
1955 // VMOVRRD - A8.6.332
1956 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1958 // VMOVRRS - A8.6.331
1959 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
1960 static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1961 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1963 assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
1965 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1966 unsigned &OpIdx = NumOpsAdded;
1968 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1969 decodeRd(insn))));
1970 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1971 decodeRn(insn))));
1972 OpIdx = 2;
1974 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1975 unsigned Sm = decodeVFPRm(insn, true);
1976 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1977 Sm)));
1978 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1979 Sm+1)));
1980 OpIdx += 2;
1981 } else {
1982 MI.addOperand(MCOperand::CreateReg(
1983 getRegisterEnum(B, ARM::DPRRegClassID,
1984 decodeVFPRm(insn, false))));
1985 ++OpIdx;
1987 return true;
1990 // VMOVSR - A8.6.330
1991 // Rt => Rd; Sn => UInt(Vn:N)
1992 static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1993 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1995 assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");
1997 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1998 decodeVFPRn(insn, true))));
1999 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2000 decodeRd(insn))));
2001 NumOpsAdded = 2;
2002 return true;
2005 // VMOVDRR - A8.6.332
2006 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
2008 // VMOVSRR - A8.6.331
2009 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
2010 static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
2011 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2013 assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
2015 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2016 unsigned &OpIdx = NumOpsAdded;
2018 OpIdx = 0;
2020 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
2021 unsigned Sm = decodeVFPRm(insn, true);
2022 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
2023 Sm)));
2024 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
2025 Sm+1)));
2026 OpIdx += 2;
2027 } else {
2028 MI.addOperand(MCOperand::CreateReg(
2029 getRegisterEnum(B, ARM::DPRRegClassID,
2030 decodeVFPRm(insn, false))));
2031 ++OpIdx;
2034 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2035 decodeRd(insn))));
2036 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2037 decodeRn(insn))));
2038 OpIdx += 2;
2039 return true;
2042 // VFP Load/Store Instructions.
2043 // VLDRD, VLDRS, VSTRD, VSTRS
2044 static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2045 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2047 assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");
2049 bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS);
2050 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
2052 // Extract Dd/Sd for operand 0.
2053 unsigned RegD = decodeVFPRd(insn, isSPVFP);
2055 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, RegD)));
2057 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
2058 MI.addOperand(MCOperand::CreateReg(Base));
2060 // Next comes the AM5 Opcode.
2061 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
2062 unsigned char Imm8 = insn & 0xFF;
2063 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));
2065 NumOpsAdded = 3;
2067 return true;
2070 // VFP Load/Store Multiple Instructions.
2071 // We have an optional write back reg, the base, and two predicate operands.
2072 // It is then followed by a reglist of either DPR(s) or SPR(s).
2074 // VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
2075 static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2076 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2078 assert(NumOps >= 4 && "VFPLdStMulFrm expects NumOps >= 4");
2080 unsigned &OpIdx = NumOpsAdded;
2082 OpIdx = 0;
2084 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
2086 // Writeback to base, if necessary.
2087 if (Opcode == ARM::VLDMDIA_UPD || Opcode == ARM::VLDMSIA_UPD ||
2088 Opcode == ARM::VLDMDDB_UPD || Opcode == ARM::VLDMSDB_UPD ||
2089 Opcode == ARM::VSTMDIA_UPD || Opcode == ARM::VSTMSIA_UPD ||
2090 Opcode == ARM::VSTMDDB_UPD || Opcode == ARM::VSTMSDB_UPD) {
2091 MI.addOperand(MCOperand::CreateReg(Base));
2092 ++OpIdx;
2095 MI.addOperand(MCOperand::CreateReg(Base));
2097 // Handling the two predicate operands before the reglist.
2098 int64_t CondVal = getCondField(insn);
2099 if (CondVal == 0xF)
2100 return false;
2101 MI.addOperand(MCOperand::CreateImm(CondVal));
2102 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
2104 OpIdx += 3;
2106 bool isSPVFP = (Opcode == ARM::VLDMSIA ||
2107 Opcode == ARM::VLDMSIA_UPD || Opcode == ARM::VLDMSDB_UPD ||
2108 Opcode == ARM::VSTMSIA ||
2109 Opcode == ARM::VSTMSIA_UPD || Opcode == ARM::VSTMSDB_UPD);
2110 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
2112 // Extract Dd/Sd.
2113 unsigned RegD = decodeVFPRd(insn, isSPVFP);
2115 // Fill the variadic part of reglist.
2116 unsigned char Imm8 = insn & 0xFF;
2117 unsigned Regs = isSPVFP ? Imm8 : Imm8/2;
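  // Illustrative example: for VLDMDIA with Imm8 = 8, Regs = 8/2 = 4, so the
  // reglist filled in below is D[RegD] .. D[RegD+3]; for VLDMSIA with
  // Imm8 = 4 it is S[RegD] .. S[RegD+3].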
2119 // Apply some sanity checks before proceeding.
2120 if (Regs == 0 || (RegD + Regs) > 32 || (!isSPVFP && Regs > 16))
2121 return false;
2123 for (unsigned i = 0; i < Regs; ++i) {
2124 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID,
2125 RegD + i)));
2126 ++OpIdx;
2129 return true;
2132 // Misc. VFP Instructions.
2133 // FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
2134 // FCONSTD (DPR and a VFPf64Imm operand)
2135 // FCONSTS (SPR and a VFPf32Imm operand)
2136 // VMRS/VMSR (GPR operand)
2137 static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2138 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2140 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2141 unsigned &OpIdx = NumOpsAdded;
2143 OpIdx = 0;
2145 if (Opcode == ARM::FMSTAT)
2146 return true;
2148 assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");
2150 unsigned RegEnum = 0;
2151 switch (OpInfo[0].RegClass) {
2152 case ARM::DPRRegClassID:
2153 RegEnum = getRegisterEnum(B, ARM::DPRRegClassID, decodeVFPRd(insn, false));
2154 break;
2155 case ARM::SPRRegClassID:
2156 RegEnum = getRegisterEnum(B, ARM::SPRRegClassID, decodeVFPRd(insn, true));
2157 break;
2158 case ARM::GPRRegClassID:
2159 RegEnum = getRegisterEnum(B, ARM::GPRRegClassID, decodeRd(insn));
2160 break;
2161 default:
2162 assert(0 && "Invalid reg class id");
2163 return false;
2166 MI.addOperand(MCOperand::CreateReg(RegEnum));
2167 ++OpIdx;
2169 // Extract/decode the f64/f32 immediate.
2170 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2171 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2172 // The asm syntax specifies the floating point value, not the 8-bit literal.
2173 APInt immRaw = VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
2174 Opcode == ARM::FCONSTD ? 64 : 32);
2175 APFloat immFP = APFloat(immRaw, true);
2176 double imm = Opcode == ARM::FCONSTD ? immFP.convertToDouble() :
2177 immFP.convertToFloat();
2178 MI.addOperand(MCOperand::CreateFPImm(imm));
2180 ++OpIdx;
2183 return true;
2186 // DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
2187 #include "ThumbDisassemblerCore.h"
2189 /////////////////////////////////////////////////////
2190 // //
2191 // Utility Functions For ARM Advanced SIMD //
2192 // //
2193 /////////////////////////////////////////////////////
2195 // The following NEON namings are based on A8.6.266 VABA, VABAL. Notice that
2196 // A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.
2198 // A7.3 Register encoding
2200 // Extract/Decode NEON D/Vd:
2202 // Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
2203 // doubleword, Dd = UInt(D:Vd). We compensate for this difference by
2204 // handling it in the getRegisterEnum() utility function.
2205 // D = Inst{22}, Vd = Inst{15-12}
2206 static unsigned decodeNEONRd(uint32_t insn) {
2207 return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
2208 | ((insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask);
2211 // Extract/Decode NEON N/Vn:
2213 // Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
2214 // doubleword, Dn = UInt(N:Vn). We compensate for this difference by
2215 // handling it in the getRegisterEnum() utility function.
2216 // N = Inst{7}, Vn = Inst{19-16}
2217 static unsigned decodeNEONRn(uint32_t insn) {
2218 return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
2219 | ((insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask);
2222 // Extract/Decode NEON M/Vm:
2224 // Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
2225 // doubleword, Dm = UInt(M:Vm). We compensate for this difference by
2226 // handling it in the getRegisterEnum() utility function.
2227 // M = Inst{5}, Vm = Inst{3-0}
2228 static unsigned decodeNEONRm(uint32_t insn) {
2229 return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
2230 | ((insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask);
2233 namespace {
2234 enum ElemSize {
2235 ESizeNA = 0,
2236 ESize8 = 8,
2237 ESize16 = 16,
2238 ESize32 = 32,
2239 ESize64 = 64
2241 } // End of unnamed namespace
2243 // size field -> Inst{11-10}
2244 // index_align field -> Inst{7-4}
2246 // The Lane Index interpretation depends on the Data Size:
2247 // 8 (encoded as size = 0b00) -> Index = index_align[3:1]
2248 // 16 (encoded as size = 0b01) -> Index = index_align[3:2]
2249 // 32 (encoded as size = 0b10) -> Index = index_align[3]
2251 // Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
2252 static unsigned decodeLaneIndex(uint32_t insn) {
2253 unsigned size = insn >> 10 & 3;
2254 assert((size == 0 || size == 1 || size == 2) &&
2255 "Encoding error: size should be either 0, 1, or 2");
2257 unsigned index_align = insn >> 4 & 0xF;
2258 return (index_align >> 1) >> size;
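// Illustrative example for decodeLaneIndex above: size = 1 (16-bit elements)
// and index_align = 0b0110 give (0b0110 >> 1) >> 1 = 1, i.e. index_align[3:2]
// as required by the table above.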
2261 // imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
2262 // op = Inst{5}, cmode = Inst{11-8}
2263 // i = Inst{24} (ARM architecture)
2264 // imm3 = Inst{18-16}, imm4 = Inst{3-0}
2265 // Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
2266 static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
2267 unsigned char op = (insn >> 5) & 1;
2268 unsigned char cmode = (insn >> 8) & 0xF;
2269 unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
2270 ((insn >> 16) & 7) << 4 |
2271 (insn & 0xF);
2272 return (op << 12) | (cmode << 8) | Imm8;
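// Illustrative example for decodeN1VImm above (note that esize is currently
// unused by the computation): i = 1, imm3 = 0b010, imm4 = 0b0011, op = 0,
// cmode = 0b1110 give Imm8 = 0b10100011 = 0xA3 and a packed return value of
// 0x0EA3 (op:cmode:imm8).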
2275 // A8.6.339 VMUL, VMULL (by scalar)
2276 // ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
2277 // ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
2278 static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
2279 switch (esize) {
2280 case ESize16:
2281 return insn & 7;
2282 case ESize32:
2283 return insn & 0xF;
2284 default:
2285 assert(0 && "Unreachable code!");
2286 return 0;
2290 // A8.6.339 VMUL, VMULL (by scalar)
2291 // ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
2292 // ESize32 => index = Inst{5} (M) D0-D15
2293 static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
2294 switch (esize) {
2295 case ESize16:
2296 return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
2297 case ESize32:
2298 return (insn >> 5) & 1;
2299 default:
2300 assert(0 && "Unreachable code!");
2301 return 0;
2305 // A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
2306 // (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
2307 static unsigned decodeVCVTFractionBits(uint32_t insn) {
2308 return 64 - ((insn >> 16) & 0x3F);
2311 // A8.6.302 VDUP (scalar)
2312 // ESize8 => index = Inst{19-17}
2313 // ESize16 => index = Inst{19-18}
2314 // ESize32 => index = Inst{19}
2315 static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
2316 switch (esize) {
2317 case ESize8:
2318 return (insn >> 17) & 7;
2319 case ESize16:
2320 return (insn >> 18) & 3;
2321 case ESize32:
2322 return (insn >> 19) & 1;
2323 default:
2324 assert(0 && "Unspecified element size!");
2325 return 0;
2329 // A8.6.328 VMOV (ARM core register to scalar)
2330 // A8.6.329 VMOV (scalar to ARM core register)
2331 // ESize8 => index = Inst{21:6-5}
2332 // ESize16 => index = Inst{21:6}
2333 // ESize32 => index = Inst{21}
2334 static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
2335 switch (esize) {
2336 case ESize8:
2337 return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
2338 case ESize16:
2339 return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
2340 case ESize32:
2341 return ((insn >> 21) & 1);
2342 default:
2343 assert(0 && "Unspecified element size!");
2344 return 0;
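// Illustrative example for decodeNVLaneOpIndex above: for ESize8 with
// Inst{21} = 1 and Inst{6-5} = 0b10 the lane index is (1 << 2) | 2 = 6; for
// ESize16 with Inst{21} = 1 and Inst{6} = 0 it is (1 << 1) | 0 = 2.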
2348 // Imm6 = Inst{21-16}, L = Inst{7}
2350 // LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
2351 // case L:imm6 of
2352 // '0001xxx' => esize = 8; shift_amount = imm6 - 8
2353 // '001xxxx' => esize = 16; shift_amount = imm6 - 16
2354 // '01xxxxx' => esize = 32; shift_amount = imm6 - 32
2355 // '1xxxxxx' => esize = 64; shift_amount = imm6
2357 // LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
2358 // case L:imm6 of
2359 // '0001xxx' => esize = 8; shift_amount = 16 - imm6
2360 // '001xxxx' => esize = 16; shift_amount = 32 - imm6
2361 // '01xxxxx' => esize = 32; shift_amount = 64 - imm6
2362 // '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
2364 static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
2365 ElemSize esize = ESizeNA;
2366 unsigned L = (insn >> 7) & 1;
2367 unsigned imm6 = (insn >> 16) & 0x3F;
2368 if (L == 0) {
2369 if (imm6 >> 3 == 1)
2370 esize = ESize8;
2371 else if (imm6 >> 4 == 1)
2372 esize = ESize16;
2373 else if (imm6 >> 5 == 1)
2374 esize = ESize32;
2375 else
2376 assert(0 && "Wrong encoding of Inst{7:21-16}!");
2377 } else
2378 esize = ESize64;
2380 if (LeftShift)
2381 return esize == ESize64 ? imm6 : (imm6 - esize);
2382 else
2383 return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
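// Illustrative example for decodeNVSAmt above: L = 0 and imm6 = 0b001101 (13)
// select esize = 8, so a left shift decodes to 13 - 8 = 5 while a right shift
// decodes to 2*8 - 13 = 3, matching the tables above.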
2386 // A8.6.305 VEXT
2387 // Imm4 = Inst{11-8}
2388 static unsigned decodeN3VImm(uint32_t insn) {
2389 return (insn >> 8) & 0xF;
2392 // VLD*
2393 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
2394 // VLD*LN*
2395 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
2396 // VST*
2397 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
2398 // VST*LN*
2399 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
2401 // Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
2402 static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
2403 unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced,
2404 unsigned alignment, BO B) {
2406 const MCInstrDesc &MCID = ARMInsts[Opcode];
2407 const MCOperandInfo *OpInfo = MCID.OpInfo;
2409 // At least one DPR register plus addressing mode #6.
2410 assert(NumOps >= 3 && "Expect >= 3 operands");
2412 unsigned &OpIdx = NumOpsAdded;
2414 OpIdx = 0;
2416 // We have homogeneous NEON registers for Load/Store.
2417 unsigned RegClass = 0;
2419 // Double-spaced registers have increments of 2.
2420 unsigned Inc = DblSpaced ? 2 : 1;
2422 unsigned Rn = decodeRn(insn);
2423 unsigned Rm = decodeRm(insn);
2424 unsigned Rd = decodeNEONRd(insn);
2426 // A7.7.1 Advanced SIMD addressing mode.
2427 bool WB = Rm != 15;
2429 // LLVM Addressing Mode #6.
2430 unsigned RmEnum = 0;
2431 if (WB && Rm != 13)
2432 RmEnum = getRegisterEnum(B, ARM::GPRRegClassID, Rm);
2434 if (Store) {
2435 // Consume possible WB, AddrMode6, possible increment reg, the DPR/QPR's,
2436 // then possible lane index.
2437 assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
2438 "Reg operand expected");
2440 if (WB) {
2441 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2442 Rn)));
2443 ++OpIdx;
2446 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2447 OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
2448 // addrmode6 := (ops GPR:$addr, i32imm)
2449 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2450 Rn)));
2451 MI.addOperand(MCOperand::CreateImm(alignment)); // Alignment
2452 OpIdx += 2;
2454 if (WB) {
2455 MI.addOperand(MCOperand::CreateReg(RmEnum));
2456 ++OpIdx;
2459 assert(OpIdx < NumOps &&
2460 (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2461 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2462 "Reg operand expected");
2464 RegClass = OpInfo[OpIdx].RegClass;
2465 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2466 MI.addOperand(MCOperand::CreateReg(
2467 getRegisterEnum(B, RegClass, Rd)));
2468 Rd += Inc;
2469 ++OpIdx;
2472 // Handle possible lane index.
2473 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2474 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2475 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
2476 ++OpIdx;
2479 } else {
2480     // Consume the DPR/QPR's, possible WB, AddrMode6, possible increment reg,
2481 // possible TIED_TO DPR/QPR's (ignored), then possible lane index.
2482 RegClass = OpInfo[0].RegClass;
2484 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2485 MI.addOperand(MCOperand::CreateReg(
2486 getRegisterEnum(B, RegClass, Rd)));
2487 Rd += Inc;
2488 ++OpIdx;
2491 if (WB) {
2492 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2493 Rn)));
2494 ++OpIdx;
2497 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2498 OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
2499 // addrmode6 := (ops GPR:$addr, i32imm)
2500 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2501 Rn)));
2502 MI.addOperand(MCOperand::CreateImm(alignment)); // Alignment
2503 OpIdx += 2;
2505 if (WB) {
2506 MI.addOperand(MCOperand::CreateReg(RmEnum));
2507 ++OpIdx;
2510 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2511 assert(MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1 &&
2512 "Tied to operand expected");
2513 MI.addOperand(MCOperand::CreateReg(0));
2514 ++OpIdx;
2517 // Handle possible lane index.
2518 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2519 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2520 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
2521 ++OpIdx;
2525 // Accessing registers past the end of the NEON register file is not
2526 // defined.
2527 if (Rd > 32)
2528 return false;
2530 return true;
2533 // A8.6.308, A8.6.311, A8.6.314, A8.6.317.
2534 static bool Align4OneLaneInst(unsigned elem, unsigned size,
2535 unsigned index_align, unsigned & alignment) {
2536 unsigned bits = 0;
2537 switch (elem) {
2538 default:
2539 return false;
2540 case 1:
2541 // A8.6.308
2542 if (size == 0)
2543 return slice(index_align, 0, 0) == 0;
2544 else if (size == 1) {
2545 bits = slice(index_align, 1, 0);
2546 if (bits != 0 && bits != 1)
2547 return false;
2548 if (bits == 1)
2549 alignment = 16;
2550 return true;
2551 } else if (size == 2) {
2552 bits = slice(index_align, 2, 0);
2553 if (bits != 0 && bits != 3)
2554 return false;
2555 if (bits == 3)
2556 alignment = 32;
2557         return true;
2559 return true;
2560 case 2:
2561 // A8.6.311
2562 if (size == 0) {
2563 if (slice(index_align, 0, 0) == 1)
2564 alignment = 16;
2565 return true;
2566     } else if (size == 1) {
2567 if (slice(index_align, 0, 0) == 1)
2568 alignment = 32;
2569 return true;
2570 } else if (size == 2) {
2571 if (slice(index_align, 1, 1) != 0)
2572 return false;
2573 if (slice(index_align, 0, 0) == 1)
2574 alignment = 64;
2575       return true;
2577 return true;
2578 case 3:
2579 // A8.6.314
2580 if (size == 0) {
2581 if (slice(index_align, 0, 0) != 0)
2582 return false;
2583 return true;
2584     } else if (size == 1) {
2585 if (slice(index_align, 0, 0) != 0)
2586 return false;
2587       return true;
2589 } else if (size == 2) {
2590 if (slice(index_align, 1, 0) != 0)
2591 return false;
2592       return true;
2594 return true;
2595 case 4:
2596 // A8.6.317
2597 if (size == 0) {
2598 if (slice(index_align, 0, 0) == 1)
2599 alignment = 32;
2600 return true;
2601     } else if (size == 1) {
2602 if (slice(index_align, 0, 0) == 1)
2603 alignment = 64;
2604 return true;
2605 } else if (size == 2) {
2606 bits = slice(index_align, 1, 0);
2607 if (bits == 3)
2608 return false;
2609 if (bits == 1)
2610 alignment = 64;
2611 else if (bits == 2)
2612 alignment = 128;
2613       return true;
2615 return true;
2619 // A7.7
2620 // If L (Inst{21}) == 0, store instructions.
2621 // Find out about double-spaced-ness of the Opcode and pass it on to
2622 // DisassembleNLdSt0().
2623 static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
2624 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2626 const StringRef Name = ARMInsts[Opcode].Name;
2627 bool DblSpaced = false;
2628 // 0 represents standard alignment, i.e., unaligned data access.
2629 unsigned alignment = 0;
2631 unsigned elem = 0; // legal values: {1, 2, 3, 4}
2632 if (Name.startswith("VST1") || Name.startswith("VLD1"))
2633 elem = 1;
2635 if (Name.startswith("VST2") || Name.startswith("VLD2"))
2636 elem = 2;
2638 if (Name.startswith("VST3") || Name.startswith("VLD3"))
2639 elem = 3;
2641 if (Name.startswith("VST4") || Name.startswith("VLD4"))
2642 elem = 4;
2644 if (Name.find("LN") != std::string::npos) {
2645     // Single element (or structure) to one lane instructions.
2646     // See, for example, A8.6.317 VLD4 (single 4-element structure to one lane).
2648 // Utility function takes number of elements, size, and index_align.
2649 if (!Align4OneLaneInst(elem,
2650 slice(insn, 11, 10),
2651 slice(insn, 7, 4),
2652 alignment))
2653 return false;
2655 // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
2656 if (Name.endswith("16") || Name.endswith("16_UPD"))
2657 DblSpaced = slice(insn, 5, 5) == 1;
2659 // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
2660 if (Name.endswith("32") || Name.endswith("32_UPD"))
2661 DblSpaced = slice(insn, 6, 6) == 1;
2662 } else if (Name.find("DUP") != std::string::npos) {
2663 // Single element (or structure) to all lanes.
2664 // Inst{9-8} encodes the number of element(s) in the structure, with:
2665     //   0b00 (VLD1DUP) (for this, the a bit makes sense only for data sizes 16 and 32)
2666     //   0b01 (VLD2DUP)
2667     //   0b10 (VLD3DUP) (for this, the a bit must be encoded as 0)
2668 // 0b11 (VLD4DUP)
2670 // Inst{7-6} encodes the data size, with:
2671 // 0b00 => 8, 0b01 => 16, 0b10 => 32
2673 // Inst{4} (the a bit) encodes the align action (0: standard alignment)
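    // Illustrative example (per the computation below): VLD2DUP (Inst{9-8} =
    // 0b01, elem = 2) with Inst{7-6} = 0b01 (16-bit data) and a = 1 yields
    // alignment = 2 * 16 = 32 bits.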
2674 unsigned elem = slice(insn, 9, 8) + 1;
2675 unsigned a = slice(insn, 4, 4);
2676 if (elem != 3) {
2677 // 0b11 is not a valid encoding for Inst{7-6}.
2678 if (slice(insn, 7, 6) == 3)
2679 return false;
2680 unsigned data_size = 8 << slice(insn, 7, 6);
2681       // For VLD1DUP, the a bit makes sense only for data sizes of 16 and 32.
2682 if (a && data_size == 8)
2683 return false;
2685 // Now we can calculate the alignment!
2686 if (a)
2687 alignment = elem * data_size;
2688 } else {
2689 if (a) {
2690 // A8.6.315 VLD3 (single 3-element structure to all lanes)
2691 // The a bit must be encoded as 0.
2692 return false;
2695 } else {
2696 // Multiple n-element structures with type encoded as Inst{11-8}.
2697 // See, for example, A8.6.316 VLD4 (multiple 4-element structures).
2699 // Inst{5-4} encodes alignment.
2700 unsigned align = slice(insn, 5, 4);
2701 switch (align) {
2702 default:
2703 break;
2704 case 1:
2705 alignment = 64; break;
2706 case 2:
2707 alignment = 128; break;
2708 case 3:
2709 alignment = 256; break;
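    // Illustrative example: Inst{5-4} = 0b10 selects 128-bit alignment, which
    // reaches DisassembleNLdSt0() below as alignment/8 = 16 (bytes) in the
    // addrmode6 operand.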
2712 unsigned type = slice(insn, 11, 8);
2713 // Reject UNDEFINED instructions based on type and align.
2714 // Plus set DblSpaced flag where appropriate.
2715 switch (elem) {
2716 default:
2717 break;
2718 case 1:
2719 // n == 1
2720 // A8.6.307 & A8.6.391
2721 if ((type == 7 && slice(align, 1, 1) == 1) ||
2722 (type == 10 && align == 3) ||
2723 (type == 6 && slice(align, 1, 1) == 1))
2724 return false;
2725 break;
2726 case 2:
2727 // n == 2 && type == 0b1001 -> DblSpaced = true
2728 // A8.6.310 & A8.6.393
2729 if ((type == 8 || type == 9) && align == 3)
2730 return false;
2731 DblSpaced = (type == 9);
2732 break;
2733 case 3:
2734 // n == 3 && type == 0b0101 -> DblSpaced = true
2735 // A8.6.313 & A8.6.395
2736 if (slice(insn, 7, 6) == 3 || slice(align, 1, 1) == 1)
2737 return false;
2738 DblSpaced = (type == 5);
2739 break;
2740 case 4:
2741 // n == 4 && type == 0b0001 -> DblSpaced = true
2742 // A8.6.316 & A8.6.397
2743 if (slice(insn, 7, 6) == 3)
2744 return false;
2745 DblSpaced = (type == 1);
2746 break;
2749 return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
2750 slice(insn, 21, 21) == 0, DblSpaced, alignment/8, B);
2753 // VMOV (immediate)
2754 // Qd/Dd imm
2755 // VBIC (immediate)
2756 // VORR (immediate)
2757 // Qd/Dd imm src(=Qd/Dd)
2758 static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
2759 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2761 const MCInstrDesc &MCID = ARMInsts[Opcode];
2762 const MCOperandInfo *OpInfo = MCID.OpInfo;
2764 assert(NumOps >= 2 &&
2765 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2766 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2767 (OpInfo[1].RegClass < 0) &&
2768 "Expect 1 reg operand followed by 1 imm operand");
2770 // Qd/Dd = Inst{22:15-12} => NEON Rd
2771 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
2772 decodeNEONRd(insn))));
2774 ElemSize esize = ESizeNA;
2775 switch (Opcode) {
2776 case ARM::VMOVv8i8:
2777 case ARM::VMOVv16i8:
2778 esize = ESize8;
2779 break;
2780 case ARM::VMOVv4i16:
2781 case ARM::VMOVv8i16:
2782 case ARM::VMVNv4i16:
2783 case ARM::VMVNv8i16:
2784 case ARM::VBICiv4i16:
2785 case ARM::VBICiv8i16:
2786 case ARM::VORRiv4i16:
2787 case ARM::VORRiv8i16:
2788 esize = ESize16;
2789 break;
2790 case ARM::VMOVv2i32:
2791 case ARM::VMOVv4i32:
2792 case ARM::VMVNv2i32:
2793 case ARM::VMVNv4i32:
2794 case ARM::VBICiv2i32:
2795 case ARM::VBICiv4i32:
2796 case ARM::VORRiv2i32:
2797 case ARM::VORRiv4i32:
2798 esize = ESize32;
2799 break;
2800 case ARM::VMOVv1i64:
2801 case ARM::VMOVv2i64:
2802 esize = ESize64;
2803 break;
2804 default:
2805 assert(0 && "Unexpected opcode!");
2806 return false;
2809 // One register and a modified immediate value.
2810 // Add the imm operand.
2811 MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));
2813 NumOpsAdded = 2;
2815 // VBIC/VORRiv*i* variants have an extra $src = $Vd to be filled in.
2816 if (NumOps >= 3 &&
2817 (OpInfo[2].RegClass == ARM::DPRRegClassID ||
2818 OpInfo[2].RegClass == ARM::QPRRegClassID)) {
2819 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
2820 decodeNEONRd(insn))));
2821 NumOpsAdded += 1;
2824 return true;
2827 namespace {
2828 enum N2VFlag {
2829 N2V_None,
2830 N2V_VectorDupLane,
2831 N2V_VectorConvert_Between_Float_Fixed
2833 } // End of unnamed namespace
2835 // Vector Convert [between floating-point and fixed-point]
2836 // Qd/Dd Qm/Dm [fbits]
2838 // Vector Duplicate Lane (from scalar to all elements) Instructions.
2839 // VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
2840 // Qd/Dd Dm index
2842 // Vector Move Long:
2843 // Qd Dm
2845 // Vector Move Narrow:
2846 // Dd Qm
2848 // Others
2849 static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
2850 unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) {
2852 const MCInstrDesc &MCID = ARMInsts[Opc];
2853 const MCOperandInfo *OpInfo = MCID.OpInfo;
2855 assert(NumOps >= 2 &&
2856 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2857 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2858 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2859 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2860 "Expect >= 2 operands and first 2 as reg operands");
2862 unsigned &OpIdx = NumOpsAdded;
2864 OpIdx = 0;
2866 ElemSize esize = ESizeNA;
2867 if (Flag == N2V_VectorDupLane) {
2868 // VDUPLN has its index embedded. Its size can be inferred from the Opcode.
2869 assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
2870 "Unexpected Opcode");
2871 esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
2872 : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16
2873 : ESize32);
2876 // Qd/Dd = Inst{22:15-12} => NEON Rd
2877 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2878 decodeNEONRd(insn))));
2879 ++OpIdx;
2881 // VPADAL...
2882 if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
2883 // TIED_TO operand.
2884 MI.addOperand(MCOperand::CreateReg(0));
2885 ++OpIdx;
2888 // Dm = Inst{5:3-0} => NEON Rm
2889 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2890 decodeNEONRm(insn))));
2891 ++OpIdx;
2893 // VZIP and others have two TIED_TO reg operands.
2894 int Idx;
2895 while (OpIdx < NumOps &&
2896 (Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
2897 // Add TIED_TO operand.
2898 MI.addOperand(MI.getOperand(Idx));
2899 ++OpIdx;
2902 // Add the imm operand, if required.
2903 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2904 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2906 unsigned imm = 0xFFFFFFFF;
2908 if (Flag == N2V_VectorDupLane)
2909 imm = decodeNVLaneDupIndex(insn, esize);
2910 if (Flag == N2V_VectorConvert_Between_Float_Fixed)
2911 imm = decodeVCVTFractionBits(insn);
2913 assert(imm != 0xFFFFFFFF && "Internal error");
2914 MI.addOperand(MCOperand::CreateImm(imm));
2915 ++OpIdx;
2918 return true;
2921 static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2922 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2924 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2925 N2V_None, B);
2927 static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2928 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2930 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2931 N2V_VectorConvert_Between_Float_Fixed, B);
2933 static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2934 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2936 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2937 N2V_VectorDupLane, B);
2940 // Vector Shift [Accumulate] Instructions.
2941 // Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
2943 // Vector Shift Left Long (with maximum shift count) Instructions.
2944 // VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
2946 static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
2947 unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) {
2949 const MCInstrDesc &MCID = ARMInsts[Opcode];
2950 const MCOperandInfo *OpInfo = MCID.OpInfo;
2952 assert(NumOps >= 3 &&
2953 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2954 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2955 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2956 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2957 "Expect >= 3 operands and first 2 as reg operands");
2959 unsigned &OpIdx = NumOpsAdded;
2961 OpIdx = 0;
2963 // Qd/Dd = Inst{22:15-12} => NEON Rd
2964 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2965 decodeNEONRd(insn))));
2966 ++OpIdx;
2968 if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
2969 // TIED_TO operand.
2970 MI.addOperand(MCOperand::CreateReg(0));
2971 ++OpIdx;
2974 assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2975 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2976 "Reg operand expected");
2978 // Qm/Dm = Inst{5:3-0} => NEON Rm
2979 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2980 decodeNEONRm(insn))));
2981 ++OpIdx;
2983 assert(OpInfo[OpIdx].RegClass < 0 && "Imm operand expected");
2985 // Add the imm operand.
2987 // VSHLL has maximum shift count as the imm, inferred from its size.
2988 unsigned Imm;
2989 switch (Opcode) {
2990 default:
2991 Imm = decodeNVSAmt(insn, LeftShift);
2992 break;
2993 case ARM::VSHLLi8:
2994 Imm = 8;
2995 break;
2996 case ARM::VSHLLi16:
2997 Imm = 16;
2998 break;
2999 case ARM::VSHLLi32:
3000 Imm = 32;
3001 break;
3003 MI.addOperand(MCOperand::CreateImm(Imm));
3004 ++OpIdx;
3006 return true;
3009 // Left shift instructions.
3010 static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
3011 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3013   return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
3016 // Right shift instructions have different shift amount interpretation.
3017 static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
3018 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3020   return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false, B);
3024 namespace {
3025 enum N3VFlag {
3026 N3V_None,
3027 N3V_VectorExtract,
3028 N3V_VectorShift,
3029 N3V_Multiply_By_Scalar
3031 } // End of unnamed namespace
3033 // NEON Three Register Instructions with Optional Immediate Operand
3035 // Vector Extract Instructions.
3036 // Qd/Dd Qn/Dn Qm/Dm imm4
3038 // Vector Shift (Register) Instructions.
3039 // Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
3041 // Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
3042 // Qd/Dd Qn/Dn RestrictedDm index
3044 // Others
3045 static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
3046 unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) {
3048 const MCInstrDesc &MCID = ARMInsts[Opcode];
3049 const MCOperandInfo *OpInfo = MCID.OpInfo;
3051 // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
3052 assert(NumOps >= 3 &&
3053 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
3054 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
3055 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
3056 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
3057 "Expect >= 3 operands and first 2 as reg operands");
3059 unsigned &OpIdx = NumOpsAdded;
3061 OpIdx = 0;
3063 bool VdVnVm = Flag == N3V_VectorShift ? false : true;
3064 bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
3065 bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
3066 ElemSize esize = ESizeNA;
3067 if (Flag == N3V_Multiply_By_Scalar) {
3068 unsigned size = (insn >> 20) & 3;
3069 if (size == 1) esize = ESize16;
3070 if (size == 2) esize = ESize32;
3071 assert (esize == ESize16 || esize == ESize32);
3074 // Qd/Dd = Inst{22:15-12} => NEON Rd
3075 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
3076 decodeNEONRd(insn))));
3077 ++OpIdx;
3079 // VABA, VABAL, VBSLd, VBSLq, ...
3080 if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
3081 // TIED_TO operand.
3082 MI.addOperand(MCOperand::CreateReg(0));
3083 ++OpIdx;
3086 // Dn = Inst{7:19-16} => NEON Rn
3087 // or
3088 // Dm = Inst{5:3-0} => NEON Rm
3089 MI.addOperand(MCOperand::CreateReg(
3090 getRegisterEnum(B, OpInfo[OpIdx].RegClass,
3091 VdVnVm ? decodeNEONRn(insn)
3092 : decodeNEONRm(insn))));
3093 ++OpIdx;
3095 // Special case handling for VMOVDneon and VMOVQ because they are marked as
3096 // N3RegFrm.
3097 if (Opcode == ARM::VMOVDneon || Opcode == ARM::VMOVQ)
3098 return true;
3100 // Dm = Inst{5:3-0} => NEON Rm
3101 // or
3102 // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
3103 // or
3104 // Dn = Inst{7:19-16} => NEON Rn
3105 unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
3106 : decodeNEONRm(insn))
3107 : decodeNEONRn(insn);
3109 MI.addOperand(MCOperand::CreateReg(
3110 getRegisterEnum(B, OpInfo[OpIdx].RegClass, m)));
3111 ++OpIdx;
3113 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
3114 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
3115 // Add the imm operand.
3116 unsigned Imm = 0;
3117 if (IsImm4)
3118 Imm = decodeN3VImm(insn);
3119 else if (IsDmRestricted)
3120 Imm = decodeRestrictedDmIndex(insn, esize);
3121 else {
3122 assert(0 && "Internal error: unreachable code!");
3123 return false;
3126 MI.addOperand(MCOperand::CreateImm(Imm));
3127 ++OpIdx;
3130 return true;
3133 static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3134 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3136 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3137 N3V_None, B);
3139 static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
3140 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3142 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3143 N3V_VectorShift, B);
3145 static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode,
3146 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3148 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3149 N3V_VectorExtract, B);
3151 static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
3152 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3154 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3155 N3V_Multiply_By_Scalar, B);
3158 // Vector Table Lookup
3160 // VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
3161 // VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
3162 // VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
3163 // VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
3164 static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3165 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3167 const MCInstrDesc &MCID = ARMInsts[Opcode];
3168 const MCOperandInfo *OpInfo = MCID.OpInfo;
3169 if (!OpInfo) return false;
3171 assert(NumOps >= 3 &&
3172 OpInfo[0].RegClass == ARM::DPRRegClassID &&
3173 OpInfo[1].RegClass == ARM::DPRRegClassID &&
3174 OpInfo[2].RegClass == ARM::DPRRegClassID &&
3175 "Expect >= 3 operands and first 3 as reg operands");
3177 unsigned &OpIdx = NumOpsAdded;
3179 OpIdx = 0;
3181 unsigned Rn = decodeNEONRn(insn);
3183 // {Dn} encoded as len = 0b00
3184 // {Dn Dn+1} encoded as len = 0b01
3185 // {Dn Dn+1 Dn+2 } encoded as len = 0b10
3186 // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
3187 unsigned Len = slice(insn, 9, 8) + 1;
3189 // Dd (the destination vector)
3190 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3191 decodeNEONRd(insn))));
3192 ++OpIdx;
3194 // Process tied_to operand constraint.
3195 int Idx;
3196 if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
3197 MI.addOperand(MI.getOperand(Idx));
3198 ++OpIdx;
3201 // Do the <list> now.
3202 for (unsigned i = 0; i < Len; ++i) {
3203 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
3204 "Reg operand expected");
3205 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3206 Rn + i)));
3207 ++OpIdx;
3210 // Dm (the index vector)
3211 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
3212 "Reg operand (index vector) expected");
3213 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3214 decodeNEONRm(insn))));
3215 ++OpIdx;
3217 return true;
3220 // Vector Get Lane (move scalar to ARM core register) Instructions.
3221 // VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
3222 static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3223 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3225 const MCInstrDesc &MCID = ARMInsts[Opcode];
3226 const MCOperandInfo *OpInfo = MCID.OpInfo;
3227 if (!OpInfo) return false;
3229 assert(MCID.getNumDefs() == 1 && NumOps >= 3 &&
3230 OpInfo[0].RegClass == ARM::GPRRegClassID &&
3231 OpInfo[1].RegClass == ARM::DPRRegClassID &&
3232 OpInfo[2].RegClass < 0 &&
3233 "Expect >= 3 operands with one dst operand");
3235 ElemSize esize =
3236 Opcode == ARM::VGETLNi32 ? ESize32
3237 : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16
3238 : ESize8);
3240 // Rt = Inst{15-12} => ARM Rd
3241 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3242 decodeRd(insn))));
3244 // Dn = Inst{7:19-16} => NEON Rn
3245 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3246 decodeNEONRn(insn))));
3248 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
3250 NumOpsAdded = 3;
3251 return true;
3254 // Vector Set Lane (move ARM core register to scalar) Instructions.
3255 // VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
3256 static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3257 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3259 const MCInstrDesc &MCID = ARMInsts[Opcode];
3260 const MCOperandInfo *OpInfo = MCID.OpInfo;
3261 if (!OpInfo) return false;
3263 assert(MCID.getNumDefs() == 1 && NumOps >= 3 &&
3264 OpInfo[0].RegClass == ARM::DPRRegClassID &&
3265 OpInfo[1].RegClass == ARM::DPRRegClassID &&
3266 MCID.getOperandConstraint(1, MCOI::TIED_TO) != -1 &&
3267 OpInfo[2].RegClass == ARM::GPRRegClassID &&
3268 OpInfo[3].RegClass < 0 &&
3269 "Expect >= 3 operands with one dst operand");
3271 ElemSize esize =
3272 Opcode == ARM::VSETLNi8 ? ESize8
3273 : (Opcode == ARM::VSETLNi16 ? ESize16
3274 : ESize32);
3276 // Dd = Inst{7:19-16} => NEON Rn
3277 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3278 decodeNEONRn(insn))));
3280 // TIED_TO operand.
3281 MI.addOperand(MCOperand::CreateReg(0));
3283 // Rt = Inst{15-12} => ARM Rd
3284 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3285 decodeRd(insn))));
3287 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
3289 NumOpsAdded = 4;
3290 return true;
3293 // Vector Duplicate Instructions (from ARM core register to all elements).
3294 // VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
3295 static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3296 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3298 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
3300 assert(NumOps >= 2 &&
3301 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
3302 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
3303 OpInfo[1].RegClass == ARM::GPRRegClassID &&
3304 "Expect >= 2 operands and first 2 as reg operand");
3306 unsigned RegClass = OpInfo[0].RegClass;
3308 // Qd/Dd = Inst{7:19-16} => NEON Rn
3309 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClass,
3310 decodeNEONRn(insn))));
3312 // Rt = Inst{15-12} => ARM Rd
3313 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3314 decodeRd(insn))));
3316 NumOpsAdded = 2;
3317 return true;
3320 static inline bool PreLoadOpcode(unsigned Opcode) {
3321 switch(Opcode) {
3322 case ARM::PLDi12: case ARM::PLDrs:
3323 case ARM::PLDWi12: case ARM::PLDWrs:
3324 case ARM::PLIi12: case ARM::PLIrs:
3325 return true;
3326 default:
3327 return false;
3331 static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3332 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3334 // Preload Data/Instruction requires either 2 or 3 operands.
3335 // PLDi12, PLDWi12, PLIi12: addrmode_imm12
3336 // PLDrs, PLDWrs, PLIrs: ldst_so_reg
3338 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3339 decodeRn(insn))));
3341 if (Opcode == ARM::PLDi12 || Opcode == ARM::PLDWi12
3342 || Opcode == ARM::PLIi12) {
3343 unsigned Imm12 = slice(insn, 11, 0);
3344 bool Negative = getUBit(insn) == 0;
3346     // A8.6.118 PLD (literal): PLDWi12 with Rn=PC is transformed to PLDi12.
3347 if (Opcode == ARM::PLDWi12 && slice(insn, 19, 16) == 0xF) {
3348 DEBUG(errs() << "Rn == '1111': PLDWi12 morphed to PLDi12\n");
3349 MI.setOpcode(ARM::PLDi12);
3352 // -0 is represented specially. All other values are as normal.
3353 int Offset = Negative ? -1 * Imm12 : Imm12;
3354 if (Imm12 == 0 && Negative)
3355 Offset = INT32_MIN;
3357 MI.addOperand(MCOperand::CreateImm(Offset));
3358 NumOpsAdded = 2;
3359 } else {
3360 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3361 decodeRm(insn))));
3363 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
3365 // Inst{6-5} encodes the shift opcode.
3366 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
3367 // Inst{11-7} encodes the imm5 shift amount.
3368 unsigned ShImm = slice(insn, 11, 7);
3370 // A8.4.1. Possible rrx or shift amount of 32...
3371 getImmShiftSE(ShOp, ShImm);
3372 MI.addOperand(MCOperand::CreateImm(
3373 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
3374 NumOpsAdded = 3;
3377 return true;
3380 static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3381 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3383 if (Opcode == ARM::DMB || Opcode == ARM::DSB) {
3384 // Inst{3-0} encodes the memory barrier option for the variants.
3385 unsigned opt = slice(insn, 3, 0);
3386 switch (opt) {
3387 case ARM_MB::SY: case ARM_MB::ST:
3388 case ARM_MB::ISH: case ARM_MB::ISHST:
3389 case ARM_MB::NSH: case ARM_MB::NSHST:
3390 case ARM_MB::OSH: case ARM_MB::OSHST:
3391 MI.addOperand(MCOperand::CreateImm(opt));
3392 NumOpsAdded = 1;
3393 return true;
3394 default:
3395 return false;
3399 switch (Opcode) {
3400 case ARM::CLREX:
3401 case ARM::NOP:
3402 case ARM::TRAP:
3403 case ARM::YIELD:
3404 case ARM::WFE:
3405 case ARM::WFI:
3406 case ARM::SEV:
3407 return true;
3408 case ARM::SWP:
3409 case ARM::SWPB:
3410 // SWP, SWPB: Rd Rm Rn
3411 // Delegate to DisassembleLdStExFrm()....
3412 return DisassembleLdStExFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
3413 default:
3414 break;
3417 if (Opcode == ARM::SETEND) {
3418 NumOpsAdded = 1;
3419 MI.addOperand(MCOperand::CreateImm(slice(insn, 9, 9)));
3420 return true;
3423 // FIXME: To enable correct asm parsing and disasm of CPS we need 3 different
3424 // opcodes which match the same real instruction. This is needed since there's
3425 // no current handling of optional arguments. Fix here when a better handling
3426 // of optional arguments is implemented.
3427 if (Opcode == ARM::CPS3p) { // M = 1
3428 // Let's reject these impossible imod values by returning false:
3429 // 1. (imod=0b01)
3431 // AsmPrinter cannot handle imod=0b00, plus (imod=0b00,M=1,iflags!=0) is an
3432 // invalid combination, so we just check for imod=0b00 here.
3433 if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)
3434 return false;
3435 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
3436 MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6))); // iflags
3437 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
3438 NumOpsAdded = 3;
3439 return true;
3441 if (Opcode == ARM::CPS2p) { // mode = 0, M = 0
3442 // Let's reject these impossible imod values by returning false:
3443 // 1. (imod=0b00,M=0)
3444 // 2. (imod=0b01)
3445 if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)
3446 return false;
3447 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
3448 MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6))); // iflags
3449 NumOpsAdded = 2;
3450 return true;
3452 if (Opcode == ARM::CPS1p) { // imod = 0, iflags = 0, M = 1
3453 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
3454 NumOpsAdded = 1;
3455 return true;
3458 // DBG has its option specified in Inst{3-0}.
3459 if (Opcode == ARM::DBG) {
3460 MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
3461 NumOpsAdded = 1;
3462 return true;
3465 // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
3466 if (Opcode == ARM::BKPT) {
3467 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
3468 slice(insn, 3, 0)));
3469 NumOpsAdded = 1;
3470 return true;
3473 if (PreLoadOpcode(Opcode))
3474 return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
3476 assert(0 && "Unexpected misc instruction!");
3477 return false;
3480 /// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
3481 /// We divide the disassembly task into different categories, with each one
3482 /// corresponding to a specific instruction encoding format. There could be
3483 /// exceptions when handling a specific format, and that is why the Opcode is
3484 /// also present in the function prototype.
3485 static const DisassembleFP FuncPtrs[] = {
3486 &DisassemblePseudo,
3487 &DisassembleMulFrm,
3488 &DisassembleBrFrm,
3489 &DisassembleBrMiscFrm,
3490 &DisassembleDPFrm,
3491 &DisassembleDPSoRegFrm,
3492 &DisassembleLdFrm,
3493 &DisassembleStFrm,
3494 &DisassembleLdMiscFrm,
3495 &DisassembleStMiscFrm,
3496 &DisassembleLdStMulFrm,
3497 &DisassembleLdStExFrm,
3498 &DisassembleArithMiscFrm,
3499 &DisassembleSatFrm,
3500 &DisassembleExtFrm,
3501 &DisassembleVFPUnaryFrm,
3502 &DisassembleVFPBinaryFrm,
3503 &DisassembleVFPConv1Frm,
3504 &DisassembleVFPConv2Frm,
3505 &DisassembleVFPConv3Frm,
3506 &DisassembleVFPConv4Frm,
3507 &DisassembleVFPConv5Frm,
3508 &DisassembleVFPLdStFrm,
3509 &DisassembleVFPLdStMulFrm,
3510 &DisassembleVFPMiscFrm,
3511 &DisassembleThumbFrm,
3512 &DisassembleMiscFrm,
3513 &DisassembleNGetLnFrm,
3514 &DisassembleNSetLnFrm,
3515 &DisassembleNDupFrm,
3517 // VLD and VST (including one lane) Instructions.
3518 &DisassembleNLdSt,
3520 // A7.4.6 One register and a modified immediate value
3521 // 1-Register Instructions with imm.
3522 // LLVM only defines VMOVv instructions.
3523 &DisassembleN1RegModImmFrm,
3525 // 2-Register Instructions with no imm.
3526 &DisassembleN2RegFrm,
3528 // 2-Register Instructions with imm (vector convert float/fixed point).
3529 &DisassembleNVCVTFrm,
3531 // 2-Register Instructions with imm (vector dup lane).
3532 &DisassembleNVecDupLnFrm,
3534 // Vector Shift Left Instructions.
3535 &DisassembleN2RegVecShLFrm,
3537 // Vector Shift Right Instructions, which have a different interpretation of the
3538 // shift amount from the imm6 field.
3539 &DisassembleN2RegVecShRFrm,
3541 // 3-Register Data-Processing Instructions.
3542 &DisassembleN3RegFrm,
3544 // Vector Shift (Register) Instructions.
3545 // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
3546 &DisassembleN3RegVecShFrm,
3548 // Vector Extract Instructions.
3549 &DisassembleNVecExtractFrm,
3551 // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
3552 // By Scalar Instructions.
3553 &DisassembleNVecMulScalarFrm,
3555 // Vector Table Lookup uses byte indexes in a control vector to look up byte
3556 // values in a table and generate a new vector.
3557 &DisassembleNVTBLFrm,
3559 NULL
3560 };
3562 /// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
3563 /// The general idea is to set the Opcode for the MCInst, followed by adding
3564 /// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
3565 /// to the Format-specific disassemble function for disassembly, followed by
3566 /// TryPredicateAndSBitModifier() to handle the PredicateOperand and
3567 /// OptionalDefOperand that follow the Dst/Src Operands.
3568 bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
3569 // Stage 1 sets the Opcode.
3570 MI.setOpcode(Opcode);
3571 // If the number of operands is zero, we're done!
3572 if (NumOps == 0)
3573 return true;
3575 // Stage 2 calls the format-specific disassemble function to build the operand
3576 // list.
3577 if (Disasm == NULL)
3578 return false;
3579 unsigned NumOpsAdded = 0;
3580 bool OK = (*Disasm)(MI, Opcode, insn, NumOps, NumOpsAdded, this);
3582 if (!OK || this->Err != 0) return false;
3583 if (NumOpsAdded >= NumOps)
3584 return true;
3586 // Stage 3 deals with operands unaccounted for after stage 2 is finished.
3587 // FIXME: Should this be done selectively?
3588 return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
3589 }
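// A worked example of the stage split (numbers are illustrative only): if the
// format-specific handler reports NumOpsAdded = 3 while the MCInstrDesc lists
// NumOps = 6, the remaining 3 operands are assumed to be the predicate and
// optional-def operands and are handed to TryPredicateAndSBitModifier().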
3591 // A8.3 Conditional execution
3592 // A8.3.1 Pseudocode details of conditional execution
3593 // Condition bits '111x' indicate the instruction is always executed.
3594 static uint32_t CondCode(uint32_t CondField) {
3595 if (CondField == 0xF)
3596 return ARMCC::AL;
3597 return CondField;
3598 }
3600 /// DoPredicateOperands - DoPredicateOperands processes the predicate operands
3601 /// of some Thumb instructions which come before the reglist operands. It
3602 /// returns true if the two predicate operands have been processed.
3603 bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode,
3604 uint32_t /* insn */, unsigned short NumOpsRemaining) {
3606 assert(NumOpsRemaining > 0 && "Invalid argument");
3608 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
3609 unsigned Idx = MI.getNumOperands();
3611 // First, we check whether this instr specifies the PredicateOperand through
3612 // a pair of MCOperandInfos with isPredicate() property.
3613 if (NumOpsRemaining >= 2 &&
3614 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3615 OpInfo[Idx].RegClass < 0 &&
3616 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3617 {
3618 // If we are inside an IT block, get the IT condition bits maintained via
3619 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3620 // See also A2.5.2.
3621 if (InITBlock())
3622 MI.addOperand(MCOperand::CreateImm(GetITCond()));
3623 else
3624 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3625 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3626 return true;
3627 }
3629 return false;
3630 }
3632 /// TryPredicateAndSBitModifier - TryPredicateAndSBitModifier tries to process
3633 /// the possible Predicate and SBitModifier, to build the remaining MCOperand
3634 /// constituents.
3635 bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
3636 uint32_t insn, unsigned short NumOpsRemaining) {
3638 assert(NumOpsRemaining > 0 && "Invalid argument");
3640 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
3641 const std::string &Name = ARMInsts[Opcode].Name;
3642 unsigned Idx = MI.getNumOperands();
3643 uint64_t TSFlags = ARMInsts[Opcode].TSFlags;
3645 // First, we check whether this instr specifies the PredicateOperand through
3646 // a pair of MCOperandInfos with isPredicate() property.
3647 if (NumOpsRemaining >= 2 &&
3648 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3649 OpInfo[Idx].RegClass < 0 &&
3650 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3651 {
3652 // If we are inside an IT block, get the IT condition bits maintained via
3653 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3654 // See also A2.5.2.
3655 if (InITBlock())
3656 MI.addOperand(MCOperand::CreateImm(GetITCond()));
3657 else {
3658 if (Name.length() > 1 && Name[0] == 't') {
3659 // Thumb conditional branch instructions have their cond field embedded,
3660 // like ARM.
3662 // A8.6.16 B
3663 // Check for undefined encodings.
3664 unsigned cond;
3665 if (Name == "t2Bcc") {
3666 if ((cond = slice(insn, 25, 22)) >= 14)
3667 return false;
3668 MI.addOperand(MCOperand::CreateImm(CondCode(cond)));
3669 } else if (Name == "tBcc") {
3670 if ((cond = slice(insn, 11, 8)) == 14)
3671 return false;
3672 MI.addOperand(MCOperand::CreateImm(CondCode(cond)));
3673 } else
3674 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3675 } else {
3676 // ARM instructions get their condition field from Inst{31-28}.
3677 // We should reject Inst{31-28} = 0b1111 as invalid encoding.
3678 if (!isNEONDomain(TSFlags) && getCondField(insn) == 0xF)
3679 return false;
3680 MI.addOperand(MCOperand::CreateImm(CondCode(getCondField(insn))));
3681 }
3682 }
3683 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3684 Idx += 2;
3685 NumOpsRemaining -= 2;
3686 }
3688 if (NumOpsRemaining == 0)
3689 return true;
3691 // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
3692 if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
3693 MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
3694 --NumOpsRemaining;
3695 }
3697 if (NumOpsRemaining == 0)
3698 return true;
3699 else
3700 return false;
3701 }
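// Illustrative effect of the OptionalDef handling above: an encoding with the
// S bit set (e.g. an ADDS-style data-processing instruction) gets ARM::CPSR in
// that slot, while one with S clear gets register 0, i.e. no CPSR def.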
3703 /// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
3704 /// after BuildIt is finished.
3705 bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
3706 uint32_t insn) {
3708 if (!SP) return Status;
3710 if (Opcode == ARM::t2IT)
3711 Status = SP->InitIT(slice(insn, 7, 0)) ? Status : false;
3712 else if (InITBlock())
3713 SP->UpdateIT();
3715 return Status;
3716 }
3718 /// Opcode, Format, and NumOperands make up an ARM Basic MCBuilder.
3719 ARMBasicMCBuilder::ARMBasicMCBuilder(unsigned opc, ARMFormat format,
3720 unsigned short num)
3721 : Opcode(opc), Format(format), NumOps(num), SP(0), Err(0) {
3722 unsigned Idx = (unsigned)format;
3723 assert(Idx < (array_lengthof(FuncPtrs) - 1) && "Unknown format");
3724 Disasm = FuncPtrs[Idx];
3725 }
3727 /// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
3728 /// infrastructure of an MCInst given the Opcode and Format of the instr.
3729 /// Return NULL if it fails to create/return a proper builder. API clients
3730 /// are responsible for freeing the allocated memory. Caching can be
3731 /// performed by the API clients to improve performance.
3732 ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
3733 // For "Unknown format", fail by returning a NULL pointer.
3734 if ((unsigned)Format >= (array_lengthof(FuncPtrs) - 1)) {
3735 DEBUG(errs() << "Unknown format\n");
3736 return 0;
3737 }
3739 return new ARMBasicMCBuilder(Opcode, Format,
3740 ARMInsts[Opcode].getNumOperands());
3741 }
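// A minimal usage sketch, assuming the caller has already determined Opcode,
// Format, and the 32-bit instruction word insn (names below are illustrative,
// not taken from the driver code):
//
//   ARMBasicMCBuilder *Builder = CreateMCBuilder(Opcode, Format);
//   if (Builder) {
//     MCInst MI;
//     bool OK = Builder->BuildIt(MI, insn);
//     OK = Builder->RunBuildAfterHook(OK, MI, insn);
//     delete Builder;   // The client owns the returned builder.
//   }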
3743 /// tryAddingSymbolicOperand - tryAddingSymbolicOperand tries to add a symbolic
3744 /// operand in place of the immediate Value in the MCInst. The immediate
3745 /// Value has had any PC adjustment made by the caller. If the getOpInfo()
3746 /// function was set as part of the setupBuilderForSymbolicDisassembly() call
3747 /// then that function is called to get any symbolic information at the
3748 /// builder's Address for this instruction. If that returns non-zero then the
3749 /// symbolic information it returns is used to create an MCExpr and that is
3750 /// added as an operand to the MCInst. This function returns true if it adds
3751 /// an operand to the MCInst and false otherwise.
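/// As a rough illustration of the expressions built below (symbol names are
/// hypothetical): with an AddSymbol of "_foo", no SubtractSymbol, and a
/// residual Value of 4, the operand added is the expression "_foo + 4"; with
/// both symbols present it becomes "(_foo - _bar) + 4"; and a VariantKind of
/// ARM_LO16 or ARM_HI16 further wraps the result as :lower16:/:upper16:.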
3752 bool ARMBasicMCBuilder::tryAddingSymbolicOperand(uint64_t Value,
3753 uint64_t InstSize,
3754 MCInst &MI) {
3755 if (!GetOpInfo)
3756 return false;
3758 struct LLVMOpInfo1 SymbolicOp;
3759 SymbolicOp.Value = Value;
3760 if (!GetOpInfo(DisInfo, Address, 0 /* Offset */, InstSize, 1, &SymbolicOp))
3761 return false;
3763 const MCExpr *Add = NULL;
3764 if (SymbolicOp.AddSymbol.Present) {
3765 if (SymbolicOp.AddSymbol.Name) {
3766 StringRef Name(SymbolicOp.AddSymbol.Name);
3767 MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
3768 Add = MCSymbolRefExpr::Create(Sym, *Ctx);
3769 } else {
3770 Add = MCConstantExpr::Create(SymbolicOp.AddSymbol.Value, *Ctx);
3771 }
3772 }
3774 const MCExpr *Sub = NULL;
3775 if (SymbolicOp.SubtractSymbol.Present) {
3776 if (SymbolicOp.SubtractSymbol.Name) {
3777 StringRef Name(SymbolicOp.SubtractSymbol.Name);
3778 MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
3779 Sub = MCSymbolRefExpr::Create(Sym, *Ctx);
3780 } else {
3781 Sub = MCConstantExpr::Create(SymbolicOp.SubtractSymbol.Value, *Ctx);
3782 }
3783 }
3785 const MCExpr *Off = NULL;
3786 if (SymbolicOp.Value != 0)
3787 Off = MCConstantExpr::Create(SymbolicOp.Value, *Ctx);
3789 const MCExpr *Expr;
3790 if (Sub) {
3791 const MCExpr *LHS;
3792 if (Add)
3793 LHS = MCBinaryExpr::CreateSub(Add, Sub, *Ctx);
3794 else
3795 LHS = MCUnaryExpr::CreateMinus(Sub, *Ctx);
3796 if (Off != 0)
3797 Expr = MCBinaryExpr::CreateAdd(LHS, Off, *Ctx);
3798 else
3799 Expr = LHS;
3800 } else if (Add) {
3801 if (Off != 0)
3802 Expr = MCBinaryExpr::CreateAdd(Add, Off, *Ctx);
3803 else
3804 Expr = Add;
3805 } else {
3806 if (Off != 0)
3807 Expr = Off;
3808 else
3809 Expr = MCConstantExpr::Create(0, *Ctx);
3810 }
3812 if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_ARM_HI16)
3813 MI.addOperand(MCOperand::CreateExpr(ARMMCExpr::CreateUpper16(Expr, *Ctx)));
3814 else if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_ARM_LO16)
3815 MI.addOperand(MCOperand::CreateExpr(ARMMCExpr::CreateLower16(Expr, *Ctx)));
3816 else if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_None)
3817 MI.addOperand(MCOperand::CreateExpr(Expr));
3818 else
3819 assert(0 && "bad SymbolicOp.VariantKind");
3821 return true;