/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_INSN_H
#define _ASM_X86_INSN_H
/*
 * x86 instruction analysis
 *
 * Copyright (C) IBM Corporation, 2009
 */

/* insn_attr_t is defined in inat.h */
#include <asm/inat.h>

struct insn_field {
	union {
		insn_value_t value;
		insn_byte_t bytes[4];
	};
	/* !0 if we've run insn_get_xxx() for this field */
	unsigned char got;
	unsigned char nbytes;
};
struct insn {
	struct insn_field prefixes;	/*
					 * prefixes.bytes[3]: last prefix
					 */
	struct insn_field rex_prefix;	/* REX prefix */
	struct insn_field vex_prefix;	/* VEX prefix */
	struct insn_field opcode;	/*
					 * opcode.bytes[0]: opcode1
					 * opcode.bytes[1]: opcode2
					 * opcode.bytes[2]: opcode3
					 */
	struct insn_field modrm;
	struct insn_field sib;
	struct insn_field displacement;
	union {
		struct insn_field immediate;
		struct insn_field moffset1;	/* for 64bit MOV */
		struct insn_field immediate1;	/* for 64bit imm or off16/32 */
	};
	union {
		struct insn_field moffset2;	/* for 64bit MOV */
		struct insn_field immediate2;	/* for 64bit imm or seg16 */
	};

	int emulate_prefix_size;
	insn_attr_t attr;
	unsigned char opnd_bytes;
	unsigned char addr_bytes;
	unsigned char length;
	unsigned char x86_64;

	const insn_byte_t *kaddr;	/* kernel address of insn to analyze */
	const insn_byte_t *end_kaddr;	/* kernel address of last insn in buffer */
	const insn_byte_t *next_byte;
};
#define MAX_INSN_SIZE	15

#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
#define X86_MODRM_RM(modrm) ((modrm) & 0x07)

#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
#define X86_SIB_BASE(sib) ((sib) & 0x07)
#define X86_REX_W(rex) ((rex) & 8)
#define X86_REX_R(rex) ((rex) & 4)
#define X86_REX_X(rex) ((rex) & 2)
#define X86_REX_B(rex) ((rex) & 1)
#define X86_VEX_W(vex)	((vex) & 0x80)	/* VEX3 Byte2 */
#define X86_VEX_R(vex)	((vex) & 0x80)	/* VEX2/3 Byte1 */
#define X86_VEX_X(vex)	((vex) & 0x40)	/* VEX3 Byte1 */
#define X86_VEX_B(vex)	((vex) & 0x20)	/* VEX3 Byte1 */
#define X86_VEX_L(vex)	((vex) & 0x04)	/* VEX3 Byte2, VEX2 Byte1 */

#define X86_EVEX_M(vex)	((vex) & 0x03)		/* EVEX Byte1 */
#define X86_VEX3_M(vex)	((vex) & 0x1f)		/* VEX3 Byte1 */
#define X86_VEX2_M	1			/* VEX2.M always 1 */
#define X86_VEX_V(vex)	(((vex) & 0x78) >> 3)	/* VEX3 Byte2, VEX2 Byte1 */
#define X86_VEX_P(vex)	((vex) & 0x03)		/* VEX3 Byte2, VEX2 Byte1 */
#define X86_VEX_M_MAX	0x1f			/* VEX3.M Maximum value */
extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
extern void insn_get_prefixes(struct insn *insn);
extern void insn_get_opcode(struct insn *insn);
extern void insn_get_modrm(struct insn *insn);
extern void insn_get_sib(struct insn *insn);
extern void insn_get_displacement(struct insn *insn);
extern void insn_get_immediate(struct insn *insn);
extern void insn_get_length(struct insn *insn);
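
/*
 * Illustrative sketch of the usual call sequence (the helper name below is
 * hypothetical, not part of this API): initialize the decoder state, then
 * request a field.  Each insn_get_*() above also decodes the fields that
 * precede the one requested.
 */
static inline void insn_example_decode64(struct insn *insn,
					 const void *kaddr, int buf_len)
{
	insn_init(insn, kaddr, buf_len, 1);	/* 1: decode as 64-bit code */
	insn_get_length(insn);	/* pulls in prefixes, opcode, ModRM, SIB, ... */
}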
/* Attribute will be determined after getting ModRM (for opcode groups) */
static inline void insn_get_attribute(struct insn *insn)
{
	insn_get_modrm(insn);
}
/* Instruction uses RIP-relative addressing */
extern int insn_rip_relative(struct insn *insn);
/* Init insn for kernel text */
static inline void kernel_insn_init(struct insn *insn,
				    const void *kaddr, int buf_len)
{
#ifdef CONFIG_X86_64
	insn_init(insn, kaddr, buf_len, 1);
#else /* CONFIG_X86_32 */
	insn_init(insn, kaddr, buf_len, 0);
#endif
}
static inline int insn_is_avx(struct insn *insn)
{
	if (!insn->prefixes.got)
		insn_get_prefixes(insn);
	return (insn->vex_prefix.value != 0);
}
static inline int insn_is_evex(struct insn *insn)
{
	if (!insn->prefixes.got)
		insn_get_prefixes(insn);
	return (insn->vex_prefix.nbytes == 4);
}
static inline int insn_has_emulate_prefix(struct insn *insn)
{
	return !!insn->emulate_prefix_size;
}
/* Ensure this instruction is decoded completely */
static inline int insn_complete(struct insn *insn)
{
	return insn->opcode.got && insn->modrm.got && insn->sib.got &&
		insn->displacement.got && insn->immediate.got;
}
static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
{
	if (insn->vex_prefix.nbytes == 2)	/* 2 bytes VEX */
		return X86_VEX2_M;
	else if (insn->vex_prefix.nbytes == 3)	/* 3 bytes VEX */
		return X86_VEX3_M(insn->vex_prefix.bytes[1]);
	else					/* EVEX */
		return X86_EVEX_M(insn->vex_prefix.bytes[1]);
}
static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
{
	if (insn->vex_prefix.nbytes == 2)	/* 2 bytes VEX */
		return X86_VEX_P(insn->vex_prefix.bytes[1]);
	else
		return X86_VEX_P(insn->vex_prefix.bytes[2]);
}
/* Get the last prefix id from last prefix or VEX prefix */
static inline int insn_last_prefix_id(struct insn *insn)
{
	if (insn_is_avx(insn))
		return insn_vex_p_bits(insn);	/* VEX_p is a SIMD prefix id */

	if (insn->prefixes.bytes[3])
		return inat_get_last_prefix_id(insn->prefixes.bytes[3]);

	return 0;
}
/* Offset of each field from kaddr */
static inline int insn_offset_rex_prefix(struct insn *insn)
{
	return insn->prefixes.nbytes;
}
static inline int insn_offset_vex_prefix(struct insn *insn)
{
	return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
}
static inline int insn_offset_opcode(struct insn *insn)
{
	return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
}
static inline int insn_offset_modrm(struct insn *insn)
{
	return insn_offset_opcode(insn) + insn->opcode.nbytes;
}
static inline int insn_offset_sib(struct insn *insn)
{
	return insn_offset_modrm(insn) + insn->modrm.nbytes;
}
static inline int insn_offset_displacement(struct insn *insn)
{
	return insn_offset_sib(insn) + insn->sib.nbytes;
}
static inline int insn_offset_immediate(struct insn *insn)
{
	return insn_offset_displacement(insn) + insn->displacement.nbytes;
}
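
/*
 * Illustrative sketch (hypothetical helper): the offsets above are relative
 * to kaddr, so the raw bytes of a decoded field can be located like this.
 */
static inline const insn_byte_t *insn_example_immediate_ptr(struct insn *insn)
{
	if (!insn->immediate.got)
		insn_get_immediate(insn);
	return insn->kaddr + insn_offset_immediate(insn);
}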
/**
 * for_each_insn_prefix() -- Iterate prefixes in the instruction
 * @insn: Pointer to struct insn.
 * @idx:  Index storage.
 * @prefix: Prefix byte.
 *
 * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
 * and the index is stored in @idx (note that this @idx is just for a cursor,
 * do not change it.)
 * Since prefixes.nbytes can be bigger than 4 if some prefixes
 * are repeated, it cannot be used for looping over the prefixes.
 */
#define for_each_insn_prefix(insn, idx, prefix)	\
	for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
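
/*
 * Illustrative sketch (hypothetical helper): walking the stored prefix
 * bytes with for_each_insn_prefix(), here to look for the 0x66
 * operand-size override prefix.
 */
static inline int insn_example_has_opnd_override(struct insn *insn)
{
	insn_byte_t prefix;
	int idx;

	if (!insn->prefixes.got)
		insn_get_prefixes(insn);

	for_each_insn_prefix(insn, idx, prefix) {
		if (prefix == 0x66)
			return 1;
	}
	return 0;
}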
#define POP_SS_OPCODE 0x1f
#define MOV_SREG_OPCODE 0x8e
/*
 * Intel SDM Vol.3A 6.8.3 states;
 * "Any single-step trap that would be delivered following the MOV to SS
 * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
 * suppressed."
 * This function returns true if @insn is MOV SS or POP SS. On these
 * instructions, single stepping is suppressed.
 */
static inline int insn_masking_exception(struct insn *insn)
{
	return insn->opcode.bytes[0] == POP_SS_OPCODE ||
		(insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
		 X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
}
#endif /* _ASM_X86_INSN_H */