// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * x86 instruction analysis
 *
 * Copyright (C) IBM Corporation, 2002, 2004, 2009
 */

#include <linux/kernel.h>
#include <linux/string.h>

#include <asm/inat.h> /* __ignore_sync_check__ */
#include <asm/insn.h> /* __ignore_sync_check__ */
#include <asm/unaligned.h> /* __ignore_sync_check__ */

#include <linux/errno.h>
#include <linux/kconfig.h>

#include <asm/emulate_prefix.h> /* __ignore_sync_check__ */
#define leXX_to_cpu(t, r)				\
({							\
	__typeof__(t) v;				\
	switch (sizeof(t)) {				\
	case 4: v = le32_to_cpu(r); break;		\
	case 2: v = le16_to_cpu(r); break;		\
	case 1:	v = r; break;				\
	default:					\
		BUILD_BUG(); break;			\
	}						\
	v;						\
})
/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n)	\
	((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)

#define __get_next(t, insn)	\
	({ t r = get_unaligned((t *)(insn)->next_byte); (insn)->next_byte += sizeof(t); leXX_to_cpu(t, r); })

#define __peek_nbyte_next(t, insn, n)	\
	({ t r = get_unaligned((t *)(insn)->next_byte + n); leXX_to_cpu(t, r); })

#define get_next(t, insn)	\
	({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })

#define peek_nbyte_next(t, insn, n)	\
	({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })

#define peek_next(t, insn)	peek_nbyte_next(t, insn, 0)
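
/*
 * Illustrative sketch (not part of the original decoder): get_next() and
 * peek_nbyte_next() expand to statement expressions that jump to a local
 * "err_out" label when the read would run past insn->end_kaddr, so every
 * caller must provide that label.  The hypothetical helper below only
 * demonstrates that calling convention; the real parsing is done by the
 * insn_get_*() functions further down.
 */
static int __maybe_unused insn_peek_two_bytes_example(struct insn *insn,
						      insn_byte_t *b0,
						      insn_byte_t *b1)
{
	*b0 = peek_nbyte_next(insn_byte_t, insn, 0);	/* may goto err_out */
	*b1 = peek_nbyte_next(insn_byte_t, insn, 1);	/* may goto err_out */

	return 0;

err_out:
	return -ENODATA;
}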
/**
 * insn_init() - initialize struct insn
 * @insn:	&struct insn to be initialized
 * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
 * @buf_len:	length of the insn buffer at @kaddr
 * @x86_64:	!0 for 64-bit kernel or 64-bit app
 */
void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
{
	/*
	 * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
	 * even if the input buffer is long enough to hold them.
	 */
	if (buf_len > MAX_INSN_SIZE)
		buf_len = MAX_INSN_SIZE;

	memset(insn, 0, sizeof(*insn));
	insn->kaddr = kaddr;
	insn->end_kaddr = kaddr + buf_len;
	insn->next_byte = kaddr;
	insn->x86_64 = x86_64;
	insn->opnd_bytes = 4;
	if (x86_64)
		insn->addr_bytes = 8;
	else
		insn->addr_bytes = 4;
}
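
/*
 * Usage sketch (illustrative only, not part of the original file): decode a
 * single instruction from a caller-provided copy of kernel text.  The helper
 * name and its parameters are assumptions made for the example.
 */
static int __maybe_unused insn_init_example(const u8 *buf, int len, bool is_64bit)
{
	struct insn insn;

	insn_init(&insn, buf, len, is_64bit);
	if (insn_get_length(&insn))
		return -EINVAL;		/* buffer too short for the insn */

	return insn.length;		/* decoded length in bytes */
}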
static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };

static int __insn_get_emulate_prefix(struct insn *insn,
				     const insn_byte_t *prefix, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
			goto err_out;
	}

	insn->emulate_prefix_size = len;
	insn->next_byte += len;

	return 1;

err_out:
	return 0;
}

static void insn_get_emulate_prefix(struct insn *insn)
{
	if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
		return;

	__insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
}
/**
 * insn_get_prefixes - scan x86 instruction prefix bytes
 * @insn:	&struct insn containing instruction
 *
 * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
 * to point to the (first) opcode.  No effect if @insn->prefixes.got
 * is already set.
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_prefixes(struct insn *insn)
{
	struct insn_field *prefixes = &insn->prefixes;
	insn_attr_t attr;
	insn_byte_t b, lb;
	int i, nb;

	if (prefixes->got)
		return 0;

	insn_get_emulate_prefix(insn);

	nb = 0;
	lb = 0;
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	while (inat_is_legacy_prefix(attr)) {
		/* Skip if same prefix */
		for (i = 0; i < nb; i++)
			if (prefixes->bytes[i] == b)
				goto found;
		if (nb == 4)
			/* Invalid instruction */
			break;
		prefixes->bytes[nb++] = b;
		if (inat_is_address_size_prefix(attr)) {
			/* address size switches 2/4 or 4/8 */
			if (insn->x86_64)
				insn->addr_bytes ^= 12;
			else
				insn->addr_bytes ^= 6;
		} else if (inat_is_operand_size_prefix(attr)) {
			/* operand size switches 2/4 */
			insn->opnd_bytes ^= 6;
		}
found:
		prefixes->nbytes++;
		insn->next_byte++;
		lb = b;
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
	}
	/* Set the last prefix */
	if (lb && lb != insn->prefixes.bytes[3]) {
		if (unlikely(insn->prefixes.bytes[3])) {
			/* Swap the last prefix */
			b = insn->prefixes.bytes[3];
			for (i = 0; i < nb; i++)
				if (prefixes->bytes[i] == lb)
					insn_set_byte(prefixes, i, b);
		}
		insn_set_byte(&insn->prefixes, 3, lb);
	}

	/* Decode REX prefix */
	if (insn->x86_64) {
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
		if (inat_is_rex_prefix(attr)) {
			insn_field_set(&insn->rex_prefix, b, 1);
			insn->next_byte++;
			if (X86_REX_W(b))
				/* REX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else if (inat_is_rex2_prefix(attr)) {
			insn_set_byte(&insn->rex_prefix, 0, b);
			b = peek_nbyte_next(insn_byte_t, insn, 1);
			insn_set_byte(&insn->rex_prefix, 1, b);
			insn->rex_prefix.nbytes = 2;
			insn->next_byte += 2;
			if (X86_REX_W(b))
				/* REX.W overrides opnd_size */
				insn->opnd_bytes = 8;
			insn->rex_prefix.got = 1;
			goto vex_end;
		}
	}
	insn->rex_prefix.got = 1;

	/* Decode VEX prefix */
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	if (inat_is_vex_prefix(attr)) {
		insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);

		if (!insn->x86_64) {
			/*
			 * In 32-bit mode, if the [7:6] bits (mod bits of
			 * ModRM) on the second byte are not 11b, it is
			 * LDS or LES or BOUND.
			 */
			if (X86_MODRM_MOD(b2) != 3)
				goto vex_end;
		}
		insn_set_byte(&insn->vex_prefix, 0, b);
		insn_set_byte(&insn->vex_prefix, 1, b2);
		if (inat_is_evex_prefix(attr)) {
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn_set_byte(&insn->vex_prefix, 2, b2);
			b2 = peek_nbyte_next(insn_byte_t, insn, 3);
			insn_set_byte(&insn->vex_prefix, 3, b2);
			insn->vex_prefix.nbytes = 4;
			insn->next_byte += 4;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else if (inat_is_vex3_prefix(attr)) {
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn_set_byte(&insn->vex_prefix, 2, b2);
			insn->vex_prefix.nbytes = 3;
			insn->next_byte += 3;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else {
			/*
			 * For VEX2, fake VEX3-like byte #2.
			 * Makes it easier to decode vex.W, vex.vvvv,
			 * vex.L and vex.pp.  Masking with 0x7f sets vex.W == 0.
			 */
			insn_set_byte(&insn->vex_prefix, 2, b2 & 0x7f);
			insn->vex_prefix.nbytes = 2;
			insn->next_byte += 2;
		}
	}
vex_end:
	insn->vex_prefix.got = 1;

	prefixes->got = 1;

	return 0;

err_out:
	return -ENODATA;
}
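
/*
 * Illustrative sketch (not in the original file): after insn_get_prefixes(),
 * insn->opnd_bytes and insn->addr_bytes already reflect 0x66/0x67 overrides
 * and REX.W/REX2/VEX.W.  The buffer below (0x66 prefix + "mov $0x1234, %ax")
 * is an assumption made up for the example.
 */
static int __maybe_unused insn_prefix_example(void)
{
	const u8 buf[] = { 0x66, 0xb8, 0x34, 0x12 };	/* mov $0x1234, %ax */
	struct insn insn;

	insn_init(&insn, buf, sizeof(buf), 1);
	if (insn_get_prefixes(&insn))
		return -EINVAL;

	/* 0x66 toggles the default operand size 4 -> 2 */
	return insn.opnd_bytes;		/* 2 for this buffer */
}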
/**
 * insn_get_opcode - collect opcode(s)
 * @insn:	&struct insn containing instruction
 *
 * Populates @insn->opcode, updates @insn->next_byte to point past the
 * opcode byte(s), and sets @insn->attr (except for groups).
 * If necessary, first collects any preceding (prefix) bytes.
 * Sets @insn->opcode.value = opcode1.  No effect if @insn->opcode.got
 * is already 1.
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_opcode(struct insn *insn)
{
	struct insn_field *opcode = &insn->opcode;
	int pfx_id, ret;
	insn_byte_t op;

	if (opcode->got)
		return 0;

	ret = insn_get_prefixes(insn);
	if (ret)
		return ret;

	/* Get first opcode */
	op = get_next(insn_byte_t, insn);
	insn_set_byte(opcode, 0, op);
	opcode->nbytes = 1;

	/* Check if there is VEX prefix or not */
	if (insn_is_avx(insn)) {
		insn_byte_t m, p;

		m = insn_vex_m_bits(insn);
		p = insn_vex_p_bits(insn);
		insn->attr = inat_get_avx_attribute(op, m, p);
		/* SCALABLE EVEX uses p bits to encode operand size */
		if (inat_evex_scalable(insn->attr) && !insn_vex_w_bit(insn) &&
		    p == INAT_PFX_OPNDSZ)
			insn->opnd_bytes = 2;
		if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
		    (!inat_accept_vex(insn->attr) &&
		     !inat_is_group(insn->attr))) {
			/* This instruction is bad */
			insn->attr = 0;
			return -EINVAL;
		}
		/* VEX has only 1 byte for opcode */
		goto end;
	}

	/* Check if there is REX2 prefix or not */
	if (insn_is_rex2(insn)) {
		if (insn_rex2_m_bit(insn)) {
			/* map 1 is escape 0x0f */
			insn_attr_t esc_attr = inat_get_opcode_attribute(0x0f);

			pfx_id = insn_last_prefix_id(insn);
			insn->attr = inat_get_escape_attribute(op, pfx_id, esc_attr);
		} else {
			insn->attr = inat_get_opcode_attribute(op);
		}
		goto end;
	}

	insn->attr = inat_get_opcode_attribute(op);
	while (inat_is_escape(insn->attr)) {
		/* Get escaped opcode */
		op = get_next(insn_byte_t, insn);
		opcode->bytes[opcode->nbytes++] = op;
		pfx_id = insn_last_prefix_id(insn);
		insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
	}

	if (inat_must_vex(insn->attr)) {
		/* This instruction is bad */
		insn->attr = 0;
		return -EINVAL;
	}
end:
	opcode->got = 1;
	return 0;

err_out:
	return -ENODATA;
}
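
/*
 * Illustrative sketch (not in the original file): for an escaped opcode such
 * as ENDBR64 (f3 0f 1e fa), insn_get_opcode() accumulates both opcode bytes
 * and leaves the 0xf3 prefix in insn->prefixes.  The buffer is an assumption
 * made up for the example.
 */
static int __maybe_unused insn_opcode_example(void)
{
	const u8 buf[] = { 0xf3, 0x0f, 0x1e, 0xfa };	/* endbr64 */
	struct insn insn;

	insn_init(&insn, buf, sizeof(buf), 1);
	if (insn_get_opcode(&insn))
		return -EINVAL;

	/* opcode.bytes[0] == 0x0f, opcode.bytes[1] == 0x1e */
	return insn.opcode.nbytes;	/* 2 for this buffer */
}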
/**
 * insn_get_modrm - collect ModRM byte, if any
 * @insn:	&struct insn containing instruction
 *
 * Populates @insn->modrm and updates @insn->next_byte to point past the
 * ModRM byte, if any.  If necessary, first collects the preceding bytes
 * (prefixes and opcode(s)).  No effect if @insn->modrm.got is already 1.
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_modrm(struct insn *insn)
{
	struct insn_field *modrm = &insn->modrm;
	insn_byte_t pfx_id, mod;
	int ret;

	if (modrm->got)
		return 0;

	ret = insn_get_opcode(insn);
	if (ret)
		return ret;

	if (inat_has_modrm(insn->attr)) {
		mod = get_next(insn_byte_t, insn);
		insn_field_set(modrm, mod, 1);
		if (inat_is_group(insn->attr)) {
			pfx_id = insn_last_prefix_id(insn);
			insn->attr = inat_get_group_attribute(mod, pfx_id,
							      insn->attr);
			if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
				/* Bad insn */
				insn->attr = 0;
				return -EINVAL;
			}
		}
	}

	if (insn->x86_64 && inat_is_force64(insn->attr))
		insn->opnd_bytes = 8;

	modrm->got = 1;
	return 0;

err_out:
	return -ENODATA;
}
/**
 * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * ModRM byte.  No effect if @insn->x86_64 is 0.
 */
int insn_rip_relative(struct insn *insn)
{
	struct insn_field *modrm = &insn->modrm;
	int ret;

	if (!insn->x86_64)
		return 0;

	ret = insn_get_modrm(insn);
	if (ret)
		return 0;
	/*
	 * For rip-relative instructions, the mod field (top 2 bits)
	 * is zero and the r/m field (bottom 3 bits) is 0x5.
	 */
	return (modrm->nbytes && (modrm->bytes[0] & 0xc7) == 0x5);
}
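
/*
 * Illustrative sketch (not in the original file): "lea 0x0(%rip), %rax" is
 * encoded as 48 8d 05 00 00 00 00; its ModRM byte 0x05 has mod == 00 and
 * r/m == 101, so insn_rip_relative() returns true.  The buffer is an
 * assumption made up for the example.
 */
static int __maybe_unused insn_rip_relative_example(void)
{
	const u8 buf[] = { 0x48, 0x8d, 0x05, 0x00, 0x00, 0x00, 0x00 };
	struct insn insn;

	insn_init(&insn, buf, sizeof(buf), 1);
	return insn_rip_relative(&insn);	/* 1: RIP-relative */
}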
/**
 * insn_get_sib() - Get the SIB byte of instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * ModRM byte.
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_get_sib(struct insn *insn)
{
	insn_byte_t modrm;
	int ret;

	if (insn->sib.got)
		return 0;

	ret = insn_get_modrm(insn);
	if (ret)
		return ret;

	if (insn->modrm.nbytes) {
		modrm = insn->modrm.bytes[0];
		if (insn->addr_bytes != 2 &&
		    X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
			insn_field_set(&insn->sib,
				       get_next(insn_byte_t, insn), 1);
		}
	}
	insn->sib.got = 1;

	return 0;

err_out:
	return -ENODATA;
}
/**
 * insn_get_displacement() - Get the displacement of instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * SIB byte.
 * Displacement value is sign-expanded.
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_get_displacement(struct insn *insn)
{
	insn_byte_t mod, rm, base;
	int ret;

	if (insn->displacement.got)
		return 0;

	ret = insn_get_sib(insn);
	if (ret)
		return ret;

	if (insn->modrm.nbytes) {
		/*
		 * Interpreting the modrm byte:
		 * mod = 00 - no displacement fields (exceptions below)
		 * mod = 01 - 1-byte displacement field
		 * mod = 10 - displacement field is 4 bytes, or 2 bytes if
		 *	address size = 2 (0x67 prefix in 32-bit mode)
		 * mod = 11 - no memory operand
		 *
		 * If address size = 2...
		 * mod = 00, r/m = 110 - displacement field is 2 bytes
		 *
		 * If address size != 2...
		 * mod != 11, r/m = 100 - SIB byte exists
		 * mod = 00, SIB base = 101 - displacement field is 4 bytes
		 * mod = 00, r/m = 101 - rip-relative addressing, displacement
		 *	field is 4 bytes
		 */
		mod = X86_MODRM_MOD(insn->modrm.value);
		rm = X86_MODRM_RM(insn->modrm.value);
		base = X86_SIB_BASE(insn->sib.value);
		if (mod == 3)
			goto out;
		if (mod == 1) {
			insn_field_set(&insn->displacement,
				       get_next(signed char, insn), 1);
		} else if (insn->addr_bytes == 2) {
			if ((mod == 0 && rm == 6) || mod == 2) {
				insn_field_set(&insn->displacement,
					       get_next(short, insn), 2);
			}
		} else {
			if ((mod == 0 && rm == 5) || mod == 2 ||
			    (mod == 0 && base == 5)) {
				insn_field_set(&insn->displacement,
					       get_next(int, insn), 4);
			}
		}
	}
out:
	insn->displacement.got = 1;

	return 0;

err_out:
	return -ENODATA;
}
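
/*
 * Illustrative sketch (not in the original file): "mov 0x10(%rax), %rbx"
 * (48 8b 58 10) has mod == 01, so a 1-byte displacement follows the ModRM
 * byte and is sign-expanded into insn->displacement.value.  The buffer is
 * an assumption made up for the example.
 */
static int __maybe_unused insn_displacement_example(void)
{
	const u8 buf[] = { 0x48, 0x8b, 0x58, 0x10 };
	struct insn insn;

	insn_init(&insn, buf, sizeof(buf), 1);
	if (insn_get_displacement(&insn))
		return -EINVAL;

	return insn.displacement.value;		/* 0x10 */
}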
/* Decode moffset16/32/64. Return 0 if failed */
static int __get_moffset(struct insn *insn)
{
	switch (insn->addr_bytes) {
	case 2:
		insn_field_set(&insn->moffset1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->moffset1, get_next(int, insn), 4);
		break;
	case 8:
		insn_field_set(&insn->moffset1, get_next(int, insn), 4);
		insn_field_set(&insn->moffset2, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	insn->moffset1.got = insn->moffset2.got = 1;

	return 1;

err_out:
	return 0;
}
/* Decode imm v32(Iz). Return 0 if failed */
static int __get_immv32(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate, get_next(short, insn), 2);
		break;
	case 4:
	case 8:
		insn_field_set(&insn->immediate, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}

	return 1;

err_out:
	return 0;
}
/* Decode imm v64(Iv/Ov). Return 0 if failed */
static int __get_immv(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn->immediate1.nbytes = 4;
		break;
	case 8:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn_field_set(&insn->immediate2, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	insn->immediate1.got = insn->immediate2.got = 1;

	return 1;

err_out:
	return 0;
}
/* Decode ptr16:16/32(Ap) */
static int __get_immptr(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		break;
	case 8:
		/* ptr16:64 does not exist (no segment) */
		return 0;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	insn_field_set(&insn->immediate2, get_next(unsigned short, insn), 2);
	insn->immediate1.got = insn->immediate2.got = 1;

	return 1;

err_out:
	return 0;
}
/**
 * insn_get_immediate() - Get the immediate in an instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * displacement bytes.
 * Most immediates are sign-expanded; the unsigned value can be computed
 * by masking with ((1 << (nbytes * 8)) - 1).
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_immediate(struct insn *insn)
{
	int ret;

	if (insn->immediate.got)
		return 0;

	ret = insn_get_displacement(insn);
	if (ret)
		return ret;

	if (inat_has_moffset(insn->attr)) {
		if (!__get_moffset(insn))
			goto err_out;
		goto done;
	}

	if (!inat_has_immediate(insn->attr))
		/* no immediates */
		goto done;

	switch (inat_immediate_size(insn->attr)) {
	case INAT_IMM_BYTE:
		insn_field_set(&insn->immediate, get_next(signed char, insn), 1);
		break;
	case INAT_IMM_WORD:
		insn_field_set(&insn->immediate, get_next(short, insn), 2);
		break;
	case INAT_IMM_DWORD:
		insn_field_set(&insn->immediate, get_next(int, insn), 4);
		break;
	case INAT_IMM_QWORD:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn_field_set(&insn->immediate2, get_next(int, insn), 4);
		break;
	case INAT_IMM_PTR:
		if (!__get_immptr(insn))
			goto err_out;
		break;
	case INAT_IMM_VWORD32:
		if (!__get_immv32(insn))
			goto err_out;
		break;
	case INAT_IMM_VWORD:
		if (!__get_immv(insn))
			goto err_out;
		break;
	default:
		/* Here, insn must have an immediate, but failed */
		goto err_out;
	}
	if (inat_has_second_immediate(insn->attr)) {
		insn_field_set(&insn->immediate2, get_next(signed char, insn), 1);
	}
done:
	insn->immediate.got = 1;

	return 0;

err_out:
	return -ENODATA;
}
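
/*
 * Illustrative sketch (not in the original file): "mov $-1, %eax"
 * (b8 ff ff ff ff) carries a 4-byte immediate; as the comment above notes,
 * the value is sign-expanded, so masking recovers the unsigned form.  The
 * buffer is an assumption made up for the example.
 */
static unsigned int __maybe_unused insn_immediate_example(void)
{
	const u8 buf[] = { 0xb8, 0xff, 0xff, 0xff, 0xff };
	struct insn insn;

	insn_init(&insn, buf, sizeof(buf), 1);
	if (insn_get_immediate(&insn))
		return 0;

	/* insn.immediate.value == -1; mask to recover 0xffffffff */
	return (unsigned int)insn.immediate.value &
	       ((1ULL << (insn.immediate.nbytes * 8)) - 1);
}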
/**
 * insn_get_length() - Get the length of instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * immediate bytes.
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_length(struct insn *insn)
{
	int ret;

	if (insn->length)
		return 0;

	ret = insn_get_immediate(insn);
	if (ret)
		return ret;

	insn->length = (unsigned char)((unsigned long)insn->next_byte
				     - (unsigned long)insn->kaddr);

	return 0;
}

/* Ensure this instruction is decoded completely */
static inline int insn_complete(struct insn *insn)
{
	return insn->opcode.got && insn->modrm.got && insn->sib.got &&
		insn->displacement.got && insn->immediate.got;
}
/**
 * insn_decode() - Decode an x86 instruction
 * @insn:	&struct insn to be initialized
 * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
 * @buf_len:	length of the insn buffer at @kaddr
 * @m:		insn mode, see enum insn_mode
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
{
	int ret;

/* #define INSN_MODE_KERN	-1 __ignore_sync_check__ mode is only valid in the kernel */

	if (m == INSN_MODE_KERN)
		insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
	else
		insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);

	ret = insn_get_length(insn);
	if (ret)
		return ret;

	if (insn_complete(insn))
		return 0;

	return -EINVAL;
}
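
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * typical caller copies at most MAX_INSN_SIZE bytes of text into a local
 * buffer and then decodes it in one call.  The helper name and the
 * caller-provided buffer are assumptions made for the example.
 */
static int __maybe_unused insn_decode_example(const void *text, int copied)
{
	struct insn insn;
	int ret;

	ret = insn_decode(&insn, text, copied, INSN_MODE_KERN);
	if (ret < 0)
		return ret;	/* truncated or invalid instruction */

	return insn.length;	/* total decoded length, <= MAX_INSN_SIZE */
}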