1 /* ----------------------------------------------------------------------- *
3 * Copyright 1996-2012 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
35 * disasm.c where all the _work_ gets done in the Netwide Disassembler
/*
 * Bounds-check a fetch from the input stream: if fewer than _need
 * bytes remain between _ptr and the end of the _size-byte buffer
 * that starts at _start, execute _op (typically an error return).
 *
 * NOTE(review): the `>=` rejects the case where exactly _need bytes
 * remain, which looks one position too conservative — confirm this
 * is intentional before relaxing it to `>`.
 */
#define fetch_safe(_start, _ptr, _size, _need, _op)         \
    do {                                                    \
        if (((_ptr) - (_start)) >= ((_size) - (_need)))     \
            _op;                                            \
    } while (0)

/* Convenience wrapper: bail out of the enclosing function with 0. */
#define fetch_or_return(_start, _ptr, _size, _need) \
    fetch_safe(_start, _ptr, _size, _need, return 0)
59 * Flags that go into the `segment' field of `insn' structures
62 #define SEG_RELATIVE 1
69 #define SEG_SIGNED 128
76 uint8_t osize
; /* Operand size */
77 uint8_t asize
; /* Address size */
78 uint8_t osp
; /* Operand size prefix present */
79 uint8_t asp
; /* Address size prefix present */
80 uint8_t rep
; /* Rep prefix present */
81 uint8_t seg
; /* Segment override prefix present */
82 uint8_t wait
; /* WAIT "prefix" present */
83 uint8_t lock
; /* Lock prefix present */
84 uint8_t vex
[3]; /* VEX prefix present */
85 uint8_t vex_c
; /* VEX "class" (VEX, XOP, ...) */
86 uint8_t vex_m
; /* VEX.M field */
88 uint8_t vex_lp
; /* VEX.LP fields */
89 uint32_t rex
; /* REX prefix present */
90 uint8_t evex
[3]; /* EVEX prefix present */
93 #define getu8(x) (*(uint8_t *)(x))
95 /* Littleendian CPU which can handle unaligned references */
96 #define getu16(x) (*(uint16_t *)(x))
97 #define getu32(x) (*(uint32_t *)(x))
98 #define getu64(x) (*(uint64_t *)(x))
/*
 * Byte-at-a-time little-endian loads, used when the direct-cast
 * macros above are unavailable (host cannot do unaligned references,
 * per the comment on the macro path).
 */
static uint16_t getu16(uint8_t *data)
{
    uint16_t lo = data[0];
    uint16_t hi = data[1];
    return (uint16_t)(lo | (hi << 8));
}

static uint32_t getu32(uint8_t *data)
{
    uint32_t lo = getu16(data);
    uint32_t hi = getu16(data + 2);
    return lo | (hi << 16);
}

static uint64_t getu64(uint8_t *data)
{
    uint64_t lo = getu32(data);
    uint64_t hi = getu32(data + 4);
    return lo | (hi << 32);
}
114 #define gets8(x) ((int8_t)getu8(x))
115 #define gets16(x) ((int16_t)getu16(x))
116 #define gets32(x) ((int32_t)getu32(x))
117 #define gets64(x) ((int64_t)getu64(x))
119 /* Important: regval must already have been adjusted for rex extensions */
120 static enum reg_enum
whichreg(opflags_t regflags
, int regval
, int rex
)
124 static const struct {
127 } specific_registers
[] = {
153 if (!(regflags
& (REGISTER
|REGMEM
)))
154 return 0; /* Registers not permissible?! */
156 regflags
|= REGISTER
;
158 for (i
= 0; i
< ARRAY_SIZE(specific_registers
); i
++)
159 if (!(specific_registers
[i
].flags
& ~regflags
))
160 return specific_registers
[i
].reg
;
162 /* All the entries below look up regval in a 16-entry array */
163 if (regval
< 0 || regval
> (rex
& REX_EV
? 31 : 15))
166 #define GET_REGISTER(__array, __index) \
167 ((size_t)(__index) < (size_t)ARRAY_SIZE(__array) ? __array[(__index)] : 0)
169 if (!(REG8
& ~regflags
)) {
170 if (rex
& (REX_P
|REX_NH
))
171 return GET_REGISTER(nasm_rd_reg8_rex
, regval
);
173 return GET_REGISTER(nasm_rd_reg8
, regval
);
175 if (!(REG16
& ~regflags
))
176 return GET_REGISTER(nasm_rd_reg16
, regval
);
177 if (!(REG32
& ~regflags
))
178 return GET_REGISTER(nasm_rd_reg32
, regval
);
179 if (!(REG64
& ~regflags
))
180 return GET_REGISTER(nasm_rd_reg64
, regval
);
181 if (!(REG_SREG
& ~regflags
))
182 return GET_REGISTER(nasm_rd_sreg
, regval
& 7); /* Ignore REX */
183 if (!(REG_CREG
& ~regflags
))
184 return GET_REGISTER(nasm_rd_creg
, regval
);
185 if (!(REG_DREG
& ~regflags
))
186 return GET_REGISTER(nasm_rd_dreg
, regval
);
187 if (!(REG_TREG
& ~regflags
)) {
189 return 0; /* TR registers are ill-defined with rex */
190 return GET_REGISTER(nasm_rd_treg
, regval
);
192 if (!(FPUREG
& ~regflags
))
193 return GET_REGISTER(nasm_rd_fpureg
, regval
& 7); /* Ignore REX */
194 if (!(MMXREG
& ~regflags
))
195 return GET_REGISTER(nasm_rd_mmxreg
, regval
& 7); /* Ignore REX */
196 if (!(XMMREG
& ~regflags
))
197 return GET_REGISTER(nasm_rd_xmmreg
, regval
);
198 if (!(YMMREG
& ~regflags
))
199 return GET_REGISTER(nasm_rd_ymmreg
, regval
);
200 if (!(ZMMREG
& ~regflags
))
201 return GET_REGISTER(nasm_rd_zmmreg
, regval
);
202 if (!(OPMASKREG
& ~regflags
))
203 return GET_REGISTER(nasm_rd_opmaskreg
, regval
);
204 if (!(BNDREG
& ~regflags
))
205 return GET_REGISTER(nasm_rd_bndreg
, regval
);
211 static uint32_t append_evex_reg_deco(char *buf
, uint32_t num
,
212 decoflags_t deco
, uint8_t *evex
)
214 const char * const er_names
[] = {"rn-sae", "rd-sae", "ru-sae", "rz-sae"};
215 uint32_t num_chars
= 0;
217 if ((deco
& MASK
) && (evex
[2] & EVEX_P2AAA
)) {
218 enum reg_enum opmasknum
= nasm_rd_opmaskreg
[evex
[2] & EVEX_P2AAA
];
219 const char * regname
= nasm_reg_names
[opmasknum
- EXPR_REG_START
];
221 num_chars
+= snprintf(buf
+ num_chars
, num
- num_chars
,
224 if ((deco
& Z
) && (evex
[2] & EVEX_P2Z
)) {
225 num_chars
+= snprintf(buf
+ num_chars
, num
- num_chars
,
230 if (evex
[2] & EVEX_P2B
) {
232 uint8_t er_type
= (evex
[2] & EVEX_P2LL
) >> 5;
233 num_chars
+= snprintf(buf
+ num_chars
, num
- num_chars
,
234 ",{%s}", er_names
[er_type
]);
235 } else if (deco
& SAE
) {
236 num_chars
+= snprintf(buf
+ num_chars
, num
- num_chars
,
244 static uint32_t append_evex_mem_deco(char *buf
, uint32_t num
, opflags_t type
,
245 decoflags_t deco
, uint8_t *evex
)
247 uint32_t num_chars
= 0;
249 if ((evex
[2] & EVEX_P2B
) && (deco
& BRDCAST_MASK
)) {
250 decoflags_t deco_brsize
= deco
& BRSIZE_MASK
;
251 opflags_t template_opsize
= (deco_brsize
== BR_BITS32
? BITS32
: BITS64
);
252 uint8_t br_num
= (type
& SIZE_MASK
) / BITS128
*
253 BITS64
/ template_opsize
* 2;
255 num_chars
+= snprintf(buf
+ num_chars
, num
- num_chars
,
259 if ((deco
& MASK
) && (evex
[2] & EVEX_P2AAA
)) {
260 enum reg_enum opmasknum
= nasm_rd_opmaskreg
[evex
[2] & EVEX_P2AAA
];
261 const char * regname
= nasm_reg_names
[opmasknum
- EXPR_REG_START
];
263 num_chars
+= snprintf(buf
+ num_chars
, num
- num_chars
,
266 if ((deco
& Z
) && (evex
[2] & EVEX_P2Z
)) {
267 num_chars
+= snprintf(buf
+ num_chars
, num
- num_chars
,
277 * Process an effective address (ModRM) specification.
279 static uint8_t *do_ea(uint8_t *data
, int modrm
, int asize
,
280 int segsize
, enum ea_type type
,
281 operand
*op
, insn
*ins
)
283 int mod
, rm
, scale
, index
, base
;
287 bool is_evex
= !!(ins
->rex
& REX_EV
);
289 mod
= (modrm
>> 6) & 03;
292 if (mod
!= 3 && asize
!= 16 && rm
== 4)
298 if (mod
== 3) { /* pure register version */
299 op
->basereg
= rm
+(rex
& REX_B
? 8 : 0);
300 op
->segment
|= SEG_RMREG
;
301 if (is_evex
&& segsize
== 64) {
302 op
->basereg
+= (evex
[0] & EVEX_P0X
? 0 : 16);
312 * <mod> specifies the displacement size (none, byte or
313 * word), and <rm> specifies the register combination.
314 * Exception: mod=0,rm=6 does not specify [BP] as one might
315 * expect, but instead specifies [disp16].
318 if (type
!= EA_SCALAR
)
321 op
->indexreg
= op
->basereg
= -1;
322 op
->scale
= 1; /* always, in 16 bits */
353 if (rm
== 6 && mod
== 0) { /* special case */
357 mod
= 2; /* fake disp16 */
361 op
->segment
|= SEG_NODISP
;
364 op
->segment
|= SEG_DISP8
;
365 if (ins
->evex_tuple
!= 0) {
366 op
->offset
= gets8(data
) * get_disp8N(ins
);
368 op
->offset
= gets8(data
);
373 op
->segment
|= SEG_DISP16
;
374 op
->offset
= *data
++;
375 op
->offset
|= ((unsigned)*data
++) << 8;
381 * Once again, <mod> specifies displacement size (this time
382 * none, byte or *dword*), while <rm> specifies the base
383 * register. Again, [EBP] is missing, replaced by a pure
384 * disp32 (this time that's mod=0,rm=*5*) in 32-bit mode,
385 * and RIP-relative addressing in 64-bit mode.
388 * indicates not a single base register, but instead the
389 * presence of a SIB byte...
391 int a64
= asize
== 64;
396 op
->basereg
= nasm_rd_reg64
[rm
| ((rex
& REX_B
) ? 8 : 0)];
398 op
->basereg
= nasm_rd_reg32
[rm
| ((rex
& REX_B
) ? 8 : 0)];
400 if (rm
== 5 && mod
== 0) {
402 op
->eaflags
|= EAF_REL
;
403 op
->segment
|= SEG_RELATIVE
;
407 op
->disp_size
= asize
;
410 mod
= 2; /* fake disp32 */
414 if (rm
== 4) { /* process SIB */
416 scale
= (sib
>> 6) & 03;
417 index
= (sib
>> 3) & 07;
420 op
->scale
= 1 << scale
;
423 vsib_hi
= (rex
& REX_X
? 8 : 0) |
424 (evex
[2] & EVEX_P2VP
? 0 : 16);
427 if (type
== EA_XMMVSIB
)
428 op
->indexreg
= nasm_rd_xmmreg
[index
| vsib_hi
];
429 else if (type
== EA_YMMVSIB
)
430 op
->indexreg
= nasm_rd_ymmreg
[index
| vsib_hi
];
431 else if (type
== EA_ZMMVSIB
)
432 op
->indexreg
= nasm_rd_zmmreg
[index
| vsib_hi
];
433 else if (index
== 4 && !(rex
& REX_X
))
434 op
->indexreg
= -1; /* ESP/RSP cannot be an index */
436 op
->indexreg
= nasm_rd_reg64
[index
| ((rex
& REX_X
) ? 8 : 0)];
438 op
->indexreg
= nasm_rd_reg32
[index
| ((rex
& REX_X
) ? 8 : 0)];
440 if (base
== 5 && mod
== 0) {
442 mod
= 2; /* Fake disp32 */
444 op
->basereg
= nasm_rd_reg64
[base
| ((rex
& REX_B
) ? 8 : 0)];
446 op
->basereg
= nasm_rd_reg32
[base
| ((rex
& REX_B
) ? 8 : 0)];
450 } else if (type
!= EA_SCALAR
) {
451 /* Can't have VSIB without SIB */
457 op
->segment
|= SEG_NODISP
;
460 op
->segment
|= SEG_DISP8
;
461 if (ins
->evex_tuple
!= 0) {
462 op
->offset
= gets8(data
) * get_disp8N(ins
);
464 op
->offset
= gets8(data
);
469 op
->segment
|= SEG_DISP32
;
470 op
->offset
= gets32(data
);
479 * Determine whether the instruction template in t corresponds to the data
480 * stream in data. Return the number of bytes matched if so.
482 #define case4(x) case (x): case (x)+1: case (x)+2: case (x)+3
484 static int matches(const struct itemplate
*t
, uint8_t *data
,
485 const struct prefix_info
*prefix
, int segsize
, insn
*ins
)
487 uint8_t *r
= (uint8_t *)(t
->code
);
488 uint8_t *origdata
= data
;
489 bool a_used
= false, o_used
= false;
490 enum prefixes drep
= 0;
491 enum prefixes dwait
= 0;
492 uint8_t lock
= prefix
->lock
;
493 int osize
= prefix
->osize
;
494 int asize
= prefix
->asize
;
497 struct operand
*opx
, *opy
;
500 int regmask
= (segsize
== 64) ? 15 : 7;
501 enum ea_type eat
= EA_SCALAR
;
503 for (i
= 0; i
< MAX_OPERANDS
; i
++) {
504 ins
->oprs
[i
].segment
= ins
->oprs
[i
].disp_size
=
505 (segsize
== 64 ? SEG_64BIT
: segsize
== 32 ? SEG_32BIT
: 0);
509 ins
->rex
= prefix
->rex
;
510 memset(ins
->prefixes
, 0, sizeof ins
->prefixes
);
512 if (itemp_has(t
, (segsize
== 64 ? IF_NOLONG
: IF_LONG
)))
515 if (prefix
->rep
== 0xF2)
516 drep
= (itemp_has(t
, IF_BND
) ? P_BND
: P_REPNE
);
517 else if (prefix
->rep
== 0xF3)
520 dwait
= prefix
->wait
? P_WAIT
: 0;
522 while ((c
= *r
++) != 0) {
523 op1
= (c
& 3) + ((opex
& 1) << 2);
524 op2
= ((c
>> 3) & 3) + ((opex
& 2) << 1);
525 opx
= &ins
->oprs
[op1
];
526 opy
= &ins
->oprs
[op2
];
547 int t
= *r
++, d
= *data
++;
548 if (d
< t
|| d
> t
+ 7)
551 opx
->basereg
= (d
-t
)+
552 (ins
->rex
& REX_B
? 8 : 0);
553 opx
->segment
|= SEG_RMREG
;
559 /* this is a separate index reg position of MIB operand (ICC) */
560 /* Disassembler uses NASM's split EA form only */
564 opx
->offset
= (int8_t)*data
++;
565 opx
->segment
|= SEG_SIGNED
;
569 opx
->offset
= *data
++;
573 opx
->offset
= *data
++;
577 opx
->offset
= getu16(data
);
583 opx
->offset
= getu32(data
);
586 opx
->offset
= getu16(data
);
589 if (segsize
!= asize
)
590 opx
->disp_size
= asize
;
594 opx
->offset
= getu32(data
);
599 opx
->offset
= gets32(data
);
606 opx
->offset
= getu16(data
);
612 opx
->offset
= getu32(data
);
618 opx
->offset
= getu64(data
);
626 opx
->offset
= gets8(data
++);
627 opx
->segment
|= SEG_RELATIVE
;
631 opx
->offset
= getu64(data
);
636 opx
->offset
= gets16(data
);
638 opx
->segment
|= SEG_RELATIVE
;
639 opx
->segment
&= ~SEG_32BIT
;
642 case4(064): /* rel */
643 opx
->segment
|= SEG_RELATIVE
;
644 /* In long mode rel is always 32 bits, sign extended. */
645 if (segsize
== 64 || osize
== 32) {
646 opx
->offset
= gets32(data
);
649 opx
->segment
|= SEG_32BIT
;
650 opx
->type
= (opx
->type
& ~SIZE_MASK
)
651 | (segsize
== 64 ? BITS64
: BITS32
);
653 opx
->offset
= gets16(data
);
655 opx
->segment
&= ~SEG_32BIT
;
656 opx
->type
= (opx
->type
& ~SIZE_MASK
) | BITS16
;
661 opx
->offset
= gets32(data
);
663 opx
->segment
|= SEG_32BIT
| SEG_RELATIVE
;
672 opx
->segment
|= SEG_RMREG
;
673 data
= do_ea(data
, modrm
, asize
, segsize
, eat
, opy
, ins
);
676 opx
->basereg
= ((modrm
>> 3) & 7) + (ins
->rex
& REX_R
? 8 : 0);
677 if ((ins
->rex
& REX_EV
) && (segsize
== 64))
678 opx
->basereg
+= (ins
->evex_p
[0] & EVEX_P0RP
? 0 : 16);
684 uint8_t ximm
= *data
++;
686 ins
->oprs
[c
>> 3].basereg
= (ximm
>> 4) & regmask
;
687 ins
->oprs
[c
>> 3].segment
|= SEG_RMREG
;
688 ins
->oprs
[c
& 7].offset
= ximm
& 15;
694 uint8_t ximm
= *data
++;
700 ins
->oprs
[c
>> 4].basereg
= (ximm
>> 4) & regmask
;
701 ins
->oprs
[c
>> 4].segment
|= SEG_RMREG
;
707 uint8_t ximm
= *data
++;
709 opx
->basereg
= (ximm
>> 4) & regmask
;
710 opx
->segment
|= SEG_RMREG
;
724 if (((modrm
>> 3) & 07) != (c
& 07))
725 return 0; /* spare field doesn't match up */
726 data
= do_ea(data
, modrm
, asize
, segsize
, eat
, opy
, ins
);
735 uint8_t evexm
= *r
++;
736 uint8_t evexwlp
= *r
++;
737 uint8_t modrm
, valid_mask
;
738 ins
->evex_tuple
= *r
++ - 0300;
739 modrm
= *(origdata
+ 1);
742 if ((prefix
->rex
& (REX_EV
|REX_V
|REX_P
)) != REX_EV
)
745 if ((evexm
& 0x1f) != prefix
->vex_m
)
748 switch (evexwlp
& 060) {
750 if (prefix
->rex
& REX_W
)
754 if (!(prefix
->rex
& REX_W
))
758 case 040: /* VEX.W is a don't care */
765 /* If EVEX.b is set with reg-reg op,
766 * EVEX.L'L contains embedded rounding control info
768 if ((prefix
->evex
[2] & EVEX_P2B
) && ((modrm
>> 6) == 3)) {
769 valid_mask
= 0x3; /* prefix only */
771 valid_mask
= 0xf; /* vector length and prefix */
773 if ((evexwlp
^ prefix
->vex_lp
) & valid_mask
)
777 if ((prefix
->vex_v
!= 0) ||
778 (!(prefix
->evex
[2] & EVEX_P2VP
) &&
779 ((eat
< EA_XMMVSIB
) || (eat
> EA_ZMMVSIB
))))
782 opx
->segment
|= SEG_RMREG
;
783 opx
->basereg
= ((~prefix
->evex
[2] & EVEX_P2VP
) << (4 - 3) ) |
787 memcpy(ins
->evex_p
, prefix
->evex
, 3);
798 if ((prefix
->rex
& (REX_V
|REX_P
)) != REX_V
)
801 if ((vexm
& 0x1f) != prefix
->vex_m
)
804 switch (vexwlp
& 060) {
806 if (prefix
->rex
& REX_W
)
810 if (!(prefix
->rex
& REX_W
))
814 case 040: /* VEX.W is a don't care */
821 /* The 010 bit of vexwlp is set if VEX.L is ignored */
822 if ((vexwlp
^ prefix
->vex_lp
) & ((vexwlp
& 010) ? 03 : 07))
826 if (prefix
->vex_v
!= 0)
829 opx
->segment
|= SEG_RMREG
;
830 opx
->basereg
= prefix
->vex_v
;
837 if (prefix
->rep
== 0xF3)
842 if (prefix
->rep
== 0xF2)
844 else if (prefix
->rep
== 0xF3)
849 if (prefix
->lock
== 0xF0) {
850 if (prefix
->rep
== 0xF2)
852 else if (prefix
->rep
== 0xF3)
872 if (asize
!= segsize
)
886 if (prefix
->rex
& REX_B
)
891 if (prefix
->rex
& REX_X
)
896 if (prefix
->rex
& REX_R
)
901 if (prefix
->rex
& REX_W
)
920 if (osize
!= (segsize
== 16 ? 16 : 32))
927 ins
->rex
|= REX_W
; /* 64-bit only instruction */
944 int t
= *r
++, d
= *data
++;
945 if (d
< t
|| d
> t
+ 15)
948 ins
->condition
= d
- t
;
953 if (prefix
->rep
== 0xF3)
963 if (prefix
->rep
!= 0xF2)
969 if (prefix
->rep
!= 0xF3)
994 if (prefix
->wait
!= 0x9B)
1000 if (prefix
->osp
|| prefix
->rep
)
1005 if (!prefix
->osp
|| prefix
->rep
)
1049 return 0; /* Unknown code */
1053 if (!vex_ok
&& (ins
->rex
& (REX_V
| REX_EV
)))
1056 /* REX cannot be combined with VEX */
1057 if ((ins
->rex
& REX_V
) && (prefix
->rex
& REX_P
))
1061 * Check for unused rep or a/o prefixes.
1063 for (i
= 0; i
< t
->operands
; i
++) {
1064 if (ins
->oprs
[i
].segment
!= SEG_RMREG
)
1069 if (ins
->prefixes
[PPS_LOCK
])
1071 ins
->prefixes
[PPS_LOCK
] = P_LOCK
;
1074 if (ins
->prefixes
[PPS_REP
])
1076 ins
->prefixes
[PPS_REP
] = drep
;
1078 ins
->prefixes
[PPS_WAIT
] = dwait
;
1080 if (osize
!= ((segsize
== 16) ? 16 : 32)) {
1081 enum prefixes pfx
= 0;
1095 if (ins
->prefixes
[PPS_OSIZE
])
1097 ins
->prefixes
[PPS_OSIZE
] = pfx
;
1100 if (!a_used
&& asize
!= segsize
) {
1101 if (ins
->prefixes
[PPS_ASIZE
])
1103 ins
->prefixes
[PPS_ASIZE
] = asize
== 16 ? P_A16
: P_A32
;
1106 /* Fix: check for redundant REX prefixes */
1108 return data
- origdata
;
/*
 * Condition-code mnemonic suffixes for disassembly, indexed by the
 * 4-bit x86 condition code (0x0 through 0xf).
 */
static const char * const condition_name[16] = {
    "o", "no", "c", "nc", "z", "nz", "na", "a",     /* codes 0-7 */
    "s", "ns", "pe", "po", "l", "nl", "ng", "g"     /* codes 8-15 */
};
1117 int32_t disasm(uint8_t *data
, int32_t data_size
, char *output
, int outbufsize
, int segsize
,
1118 int64_t offset
, int autosync
, iflag_t
*prefer
)
1120 const struct itemplate
* const *p
, * const *best_p
;
1121 const struct disasm_index
*ix
;
1123 int length
, best_length
= 0;
1125 int i
, slen
, colon
, n
;
1129 iflag_t goodness
, best
;
1131 struct prefix_info prefix
;
1135 memset(&ins
, 0, sizeof ins
);
1138 * Scan for prefixes.
1140 memset(&prefix
, 0, sizeof prefix
);
1141 prefix
.asize
= segsize
;
1142 prefix
.osize
= (segsize
== 64) ? 32 : segsize
;
1149 while (!end_prefix
) {
1153 fetch_or_return(origdata
, data
, data_size
, 1);
1154 prefix
.rep
= *data
++;
1158 fetch_or_return(origdata
, data
, data_size
, 1);
1159 prefix
.wait
= *data
++;
1163 fetch_or_return(origdata
, data
, data_size
, 1);
1164 prefix
.lock
= *data
++;
1168 fetch_or_return(origdata
, data
, data_size
, 1);
1169 segover
= "cs", prefix
.seg
= *data
++;
1172 fetch_or_return(origdata
, data
, data_size
, 1);
1173 segover
= "ss", prefix
.seg
= *data
++;
1176 fetch_or_return(origdata
, data
, data_size
, 1);
1177 segover
= "ds", prefix
.seg
= *data
++;
1180 fetch_or_return(origdata
, data
, data_size
, 1);
1181 segover
= "es", prefix
.seg
= *data
++;
1184 fetch_or_return(origdata
, data
, data_size
, 1);
1185 segover
= "fs", prefix
.seg
= *data
++;
1188 fetch_or_return(origdata
, data
, data_size
, 1);
1189 segover
= "gs", prefix
.seg
= *data
++;
1193 fetch_or_return(origdata
, data
, data_size
, 1);
1194 prefix
.osize
= (segsize
== 16) ? 32 : 16;
1195 prefix
.osp
= *data
++;
1198 fetch_or_return(origdata
, data
, data_size
, 1);
1199 prefix
.asize
= (segsize
== 32) ? 16 : 32;
1200 prefix
.asp
= *data
++;
1205 if (segsize
== 64 || (data
[1] & 0xc0) == 0xc0) {
1206 fetch_or_return(origdata
, data
, data_size
, 2);
1207 prefix
.vex
[0] = *data
++;
1208 prefix
.vex
[1] = *data
++;
1211 prefix
.vex_c
= RV_VEX
;
1213 if (prefix
.vex
[0] == 0xc4) {
1214 fetch_or_return(origdata
, data
, data_size
, 1);
1215 prefix
.vex
[2] = *data
++;
1216 prefix
.rex
|= (~prefix
.vex
[1] >> 5) & 7; /* REX_RXB */
1217 prefix
.rex
|= (prefix
.vex
[2] >> (7-3)) & REX_W
;
1218 prefix
.vex_m
= prefix
.vex
[1] & 0x1f;
1219 prefix
.vex_v
= (~prefix
.vex
[2] >> 3) & 15;
1220 prefix
.vex_lp
= prefix
.vex
[2] & 7;
1222 prefix
.rex
|= (~prefix
.vex
[1] >> (7-2)) & REX_R
;
1224 prefix
.vex_v
= (~prefix
.vex
[1] >> 3) & 15;
1225 prefix
.vex_lp
= prefix
.vex
[1] & 7;
1228 ix
= itable_vex
[RV_VEX
][prefix
.vex_m
][prefix
.vex_lp
& 3];
1235 if (segsize
== 64 || ((data
[1] & 0xc0) == 0xc0)) {
1236 fetch_or_return(origdata
, data
, data_size
, 4);
1237 data
++; /* 62h EVEX prefix */
1238 prefix
.evex
[0] = *data
++;
1239 prefix
.evex
[1] = *data
++;
1240 prefix
.evex
[2] = *data
++;
1242 prefix
.rex
= REX_EV
;
1243 prefix
.vex_c
= RV_EVEX
;
1244 prefix
.rex
|= (~prefix
.evex
[0] >> 5) & 7; /* REX_RXB */
1245 prefix
.rex
|= (prefix
.evex
[1] >> (7-3)) & REX_W
;
1246 prefix
.vex_m
= prefix
.evex
[0] & EVEX_P0MM
;
1247 prefix
.vex_v
= (~prefix
.evex
[1] & EVEX_P1VVVV
) >> 3;
1248 prefix
.vex_lp
= ((prefix
.evex
[2] & EVEX_P2LL
) >> (5-2)) |
1249 (prefix
.evex
[1] & EVEX_P1PP
);
1251 ix
= itable_vex
[prefix
.vex_c
][prefix
.vex_m
][prefix
.vex_lp
& 3];
1258 if ((data
[1] & 030) != 0 &&
1259 (segsize
== 64 || (data
[1] & 0xc0) == 0xc0)) {
1260 fetch_or_return(origdata
, data
, data_size
, 3);
1261 prefix
.vex
[0] = *data
++;
1262 prefix
.vex
[1] = *data
++;
1263 prefix
.vex
[2] = *data
++;
1266 prefix
.vex_c
= RV_XOP
;
1268 prefix
.rex
|= (~prefix
.vex
[1] >> 5) & 7; /* REX_RXB */
1269 prefix
.rex
|= (prefix
.vex
[2] >> (7-3)) & REX_W
;
1270 prefix
.vex_m
= prefix
.vex
[1] & 0x1f;
1271 prefix
.vex_v
= (~prefix
.vex
[2] >> 3) & 15;
1272 prefix
.vex_lp
= prefix
.vex
[2] & 7;
1274 ix
= itable_vex
[RV_XOP
][prefix
.vex_m
][prefix
.vex_lp
& 3];
1295 if (segsize
== 64) {
1296 fetch_or_return(origdata
, data
, data_size
, 1);
1297 prefix
.rex
= *data
++;
1298 if (prefix
.rex
& REX_W
)
1310 iflag_set_all(&best
); /* Worst possible */
1312 best_pref
= INT_MAX
;
1315 return 0; /* No instruction table at all... */
1318 fetch_or_return(origdata
, dp
, data_size
, 1);
1320 while (ix
->n
== -1) {
1321 fetch_or_return(origdata
, dp
, data_size
, 1);
1322 ix
= (const struct disasm_index
*)ix
->p
+ *dp
++;
1325 p
= (const struct itemplate
* const *)ix
->p
;
1326 for (n
= ix
->n
; n
; n
--, p
++) {
1327 if ((length
= matches(*p
, data
, &prefix
, segsize
, &tmp_ins
))) {
1330 * Final check to make sure the types of r/m match up.
1331 * XXX: Need to make sure this is actually correct.
1333 for (i
= 0; i
< (*p
)->operands
; i
++) {
1335 /* If it's a mem-only EA but we have a
1337 ((tmp_ins
.oprs
[i
].segment
& SEG_RMREG
) &&
1338 is_class(MEMORY
, (*p
)->opd
[i
])) ||
1339 /* If it's a reg-only EA but we have a memory
1341 (!(tmp_ins
.oprs
[i
].segment
& SEG_RMREG
) &&
1342 !(REG_EA
& ~(*p
)->opd
[i
]) &&
1343 !((*p
)->opd
[i
] & REG_SMASK
)) ||
1344 /* Register type mismatch (eg FS vs REG_DESS):
1346 ((((*p
)->opd
[i
] & (REGISTER
| FPUREG
)) ||
1347 (tmp_ins
.oprs
[i
].segment
& SEG_RMREG
)) &&
1348 !whichreg((*p
)->opd
[i
],
1349 tmp_ins
.oprs
[i
].basereg
, tmp_ins
.rex
))
1357 * Note: we always prefer instructions which incorporate
1358 * prefixes in the instructions themselves. This is to allow
1359 * e.g. PAUSE to be preferred to REP NOP, and deal with
1360 * MMX/SSE instructions where prefixes are used to select
1361 * between MMX and SSE register sets or outright opcode
1366 goodness
= iflag_pfmask(*p
);
1367 goodness
= iflag_xor(&goodness
, prefer
);
1369 for (i
= 0; i
< MAXPREFIX
; i
++)
1370 if (tmp_ins
.prefixes
[i
])
1372 if (nprefix
< best_pref
||
1373 (nprefix
== best_pref
&&
1374 iflag_cmp(&goodness
, &best
) < 0)) {
1375 /* This is the best one found so far */
1378 best_pref
= nprefix
;
1379 best_length
= length
;
1387 return 0; /* no instruction was matched */
1389 /* Pick the best match */
1391 length
= best_length
;
1395 /* TODO: snprintf returns the value that the string would have if
1396 * the buffer were long enough, and not the actual length of
1397 * the returned string, so each instance of using the return
1398 * value of snprintf should actually be checked to assure that
1399 * the return value is "sane." Maybe a macro wrapper could
1400 * be used for that purpose.
1402 for (i
= 0; i
< MAXPREFIX
; i
++) {
1403 const char *prefix
= prefix_name(ins
.prefixes
[i
]);
1405 slen
+= snprintf(output
+slen
, outbufsize
-slen
, "%s ", prefix
);
1409 if (i
>= FIRST_COND_OPCODE
)
1410 slen
+= snprintf(output
+ slen
, outbufsize
- slen
, "%s%s",
1411 nasm_insn_names
[i
], condition_name
[ins
.condition
]);
1413 slen
+= snprintf(output
+ slen
, outbufsize
- slen
, "%s",
1414 nasm_insn_names
[i
]);
1417 is_evex
= !!(ins
.rex
& REX_EV
);
1418 length
+= data
- origdata
; /* fix up for prefixes */
1419 for (i
= 0; i
< (*p
)->operands
; i
++) {
1420 opflags_t t
= (*p
)->opd
[i
];
1421 decoflags_t deco
= (*p
)->deco
[i
];
1422 const operand
*o
= &ins
.oprs
[i
];
1425 output
[slen
++] = (colon
? ':' : i
== 0 ? ' ' : ',');
1428 if (o
->segment
& SEG_RELATIVE
) {
1429 offs
+= offset
+ length
;
1431 * sort out wraparound
1433 if (!(o
->segment
& (SEG_32BIT
|SEG_64BIT
)))
1435 else if (segsize
!= 64)
1439 * add sync marker, if autosync is on
1450 if ((t
& (REGISTER
| FPUREG
)) ||
1451 (o
->segment
& SEG_RMREG
)) {
1453 reg
= whichreg(t
, o
->basereg
, ins
.rex
);
1455 slen
+= snprintf(output
+ slen
, outbufsize
- slen
, "to ");
1456 slen
+= snprintf(output
+ slen
, outbufsize
- slen
, "%s",
1457 nasm_reg_names
[reg
-EXPR_REG_START
]);
1458 if (t
& REGSET_MASK
)
1459 slen
+= snprintf(output
+ slen
, outbufsize
- slen
, "+%d",
1460 (int)((t
& REGSET_MASK
) >> (REGSET_SHIFT
-1))-1);
1461 if (is_evex
&& deco
)
1462 slen
+= append_evex_reg_deco(output
+ slen
, outbufsize
- slen
,
1464 } else if (!(UNITY
& ~t
)) {
1465 output
[slen
++] = '1';
1466 } else if (t
& IMMEDIATE
) {
1469 snprintf(output
+ slen
, outbufsize
- slen
, "byte ");
1470 if (o
->segment
& SEG_SIGNED
) {
1473 output
[slen
++] = '-';
1475 output
[slen
++] = '+';
1477 } else if (t
& BITS16
) {
1479 snprintf(output
+ slen
, outbufsize
- slen
, "word ");
1480 } else if (t
& BITS32
) {
1482 snprintf(output
+ slen
, outbufsize
- slen
, "dword ");
1483 } else if (t
& BITS64
) {
1485 snprintf(output
+ slen
, outbufsize
- slen
, "qword ");
1486 } else if (t
& NEAR
) {
1488 snprintf(output
+ slen
, outbufsize
- slen
, "near ");
1489 } else if (t
& SHORT
) {
1491 snprintf(output
+ slen
, outbufsize
- slen
, "short ");
1494 snprintf(output
+ slen
, outbufsize
- slen
, "0x%"PRIx64
"",
1496 } else if (!(MEM_OFFS
& ~t
)) {
1498 snprintf(output
+ slen
, outbufsize
- slen
,
1499 "[%s%s%s0x%"PRIx64
"]",
1500 (segover
? segover
: ""),
1501 (segover
? ":" : ""),
1502 (o
->disp_size
== 64 ? "qword " :
1503 o
->disp_size
== 32 ? "dword " :
1504 o
->disp_size
== 16 ? "word " : ""), offs
);
1506 } else if (is_class(REGMEM
, t
)) {
1507 int started
= false;
1510 snprintf(output
+ slen
, outbufsize
- slen
, "byte ");
1513 snprintf(output
+ slen
, outbufsize
- slen
, "word ");
1516 snprintf(output
+ slen
, outbufsize
- slen
, "dword ");
1519 snprintf(output
+ slen
, outbufsize
- slen
, "qword ");
1522 snprintf(output
+ slen
, outbufsize
- slen
, "tword ");
1523 if ((ins
.evex_p
[2] & EVEX_P2B
) && (deco
& BRDCAST_MASK
)) {
1524 /* when broadcasting, each element size should be used */
1525 if (deco
& BR_BITS32
)
1527 snprintf(output
+ slen
, outbufsize
- slen
, "dword ");
1528 else if (deco
& BR_BITS64
)
1530 snprintf(output
+ slen
, outbufsize
- slen
, "qword ");
1534 snprintf(output
+ slen
, outbufsize
- slen
, "oword ");
1537 snprintf(output
+ slen
, outbufsize
- slen
, "yword ");
1540 snprintf(output
+ slen
, outbufsize
- slen
, "zword ");
1543 slen
+= snprintf(output
+ slen
, outbufsize
- slen
, "far ");
1546 snprintf(output
+ slen
, outbufsize
- slen
, "near ");
1547 output
[slen
++] = '[';
1549 slen
+= snprintf(output
+ slen
, outbufsize
- slen
, "%s",
1550 (o
->disp_size
== 64 ? "qword " :
1551 o
->disp_size
== 32 ? "dword " :
1552 o
->disp_size
== 16 ? "word " :
1554 if (o
->eaflags
& EAF_REL
)
1555 slen
+= snprintf(output
+ slen
, outbufsize
- slen
, "rel ");
1558 snprintf(output
+ slen
, outbufsize
- slen
, "%s:",
1562 if (o
->basereg
!= -1) {
1563 slen
+= snprintf(output
+ slen
, outbufsize
- slen
, "%s",
1564 nasm_reg_names
[(o
->basereg
-EXPR_REG_START
)]);
1567 if (o
->indexreg
!= -1 && !itemp_has(*best_p
, IF_MIB
)) {
1569 output
[slen
++] = '+';
1570 slen
+= snprintf(output
+ slen
, outbufsize
- slen
, "%s",
1571 nasm_reg_names
[(o
->indexreg
-EXPR_REG_START
)]);
1574 snprintf(output
+ slen
, outbufsize
- slen
, "*%d",
1580 if (o
->segment
& SEG_DISP8
) {
1583 uint32_t offset
= offs
;
1584 if ((int32_t)offset
< 0) {
1591 snprintf(output
+ slen
, outbufsize
- slen
, "%s0x%"PRIx32
"",
1595 uint8_t offset
= offs
;
1596 if ((int8_t)offset
< 0) {
1603 snprintf(output
+ slen
, outbufsize
- slen
, "%s0x%"PRIx8
"",
1606 } else if (o
->segment
& SEG_DISP16
) {
1608 uint16_t offset
= offs
;
1609 if ((int16_t)offset
< 0 && started
) {
1613 prefix
= started
? "+" : "";
1616 snprintf(output
+ slen
, outbufsize
- slen
,
1617 "%s0x%"PRIx16
"", prefix
, offset
);
1618 } else if (o
->segment
& SEG_DISP32
) {
1619 if (prefix
.asize
== 64) {
1621 uint64_t offset
= offs
;
1622 if ((int32_t)offs
< 0 && started
) {
1626 prefix
= started
? "+" : "";
1629 snprintf(output
+ slen
, outbufsize
- slen
,
1630 "%s0x%"PRIx64
"", prefix
, offset
);
1633 uint32_t offset
= offs
;
1634 if ((int32_t) offset
< 0 && started
) {
1638 prefix
= started
? "+" : "";
1641 snprintf(output
+ slen
, outbufsize
- slen
,
1642 "%s0x%"PRIx32
"", prefix
, offset
);
1646 if (o
->indexreg
!= -1 && itemp_has(*best_p
, IF_MIB
)) {
1647 output
[slen
++] = ',';
1648 slen
+= snprintf(output
+ slen
, outbufsize
- slen
, "%s",
1649 nasm_reg_names
[(o
->indexreg
-EXPR_REG_START
)]);
1652 snprintf(output
+ slen
, outbufsize
- slen
, "*%d",
1657 output
[slen
++] = ']';
1659 if (is_evex
&& deco
)
1660 slen
+= append_evex_mem_deco(output
+ slen
, outbufsize
- slen
,
1661 t
, deco
, ins
.evex_p
);
1664 snprintf(output
+ slen
, outbufsize
- slen
, "<operand%d>",
1668 output
[slen
] = '\0';
1669 if (segover
) { /* unused segment override */
1671 int count
= slen
+ 1;
1673 p
[count
+ 3] = p
[count
];
1674 strncpy(output
, segover
, 2);
1681 * This is called when we don't have a complete instruction. If it
1682 * is a standalone *single-byte* prefix show it as such, otherwise
1683 * print it as a literal.
1685 int32_t eatbyte(uint8_t *data
, char *output
, int outbufsize
, int segsize
)
1687 uint8_t byte
= *data
;
1688 const char *str
= NULL
;
1722 str
= (segsize
== 16) ? "o32" : "o16";
1725 str
= (segsize
== 32) ? "a16" : "a32";
1743 if (segsize
== 64) {
1744 snprintf(output
, outbufsize
, "rex%s%s%s%s%s",
1745 (byte
== REX_P
) ? "" : ".",
1746 (byte
& REX_W
) ? "w" : "",
1747 (byte
& REX_R
) ? "r" : "",
1748 (byte
& REX_X
) ? "x" : "",
1749 (byte
& REX_B
) ? "b" : "");
1752 /* else fall through */
1754 snprintf(output
, outbufsize
, "db 0x%02x", byte
);
1759 snprintf(output
, outbufsize
, "%s", str
);