1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
13 * Avi Kivity <avi@qumranet.com>
14 * Yaniv Kamay <yaniv@qumranet.com>
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
19 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
25 #include <public/xen.h>
26 #define DPRINTF(_f, _a ...) printf(_f , ## _a)
28 #include <linux/kvm_host.h>
29 #include "kvm_cache_regs.h"
30 #define DPRINTF(x...) do {} while (0)
32 #include <linux/module.h>
33 #include <asm/kvm_emulate.h>
35 #include "mmu.h" /* for is_long_mode() */
38 * Opcode effective-address decode tables.
39 * Note that we only emulate instructions that have at least one memory
40 * operand (excluding implicit stack references). We assume that stack
41 * references and instruction fetches will never occur in special memory
42 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
46 /* Operand sizes: 8-bit operands or specified/overridden size. */
47 #define ByteOp (1<<0) /* 8-bit operands. */
48 /* Destination operand type. */
49 #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
50 #define DstReg (2<<1) /* Register operand. */
51 #define DstMem (3<<1) /* Memory operand. */
52 #define DstAcc (4<<1) /* Destination Accumulator */
53 #define DstMask (7<<1)
54 /* Source operand type. */
55 #define SrcNone (0<<4) /* No source operand. */
56 #define SrcImplicit (0<<4) /* Source operand is implicit in the opcode. */
57 #define SrcReg (1<<4) /* Register operand. */
58 #define SrcMem (2<<4) /* Memory operand. */
59 #define SrcMem16 (3<<4) /* Memory operand (16-bit). */
60 #define SrcMem32 (4<<4) /* Memory operand (32-bit). */
61 #define SrcImm (5<<4) /* Immediate operand. */
62 #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
63 #define SrcOne (7<<4) /* Implied '1' */
64 #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
65 #define SrcImmU (9<<4) /* Immediate operand, unsigned */
66 #define SrcMask (0xf<<4)
67 /* Generic ModRM decode. */
69 /* Destination is only written; never read. */
72 #define MemAbs (1<<11) /* Memory operand is absolute displacement */
73 #define String (1<<12) /* String instruction (rep capable) */
74 #define Stack (1<<13) /* Stack instruction (push/pop) */
75 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
76 #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
77 #define GroupMask 0xff /* Group number stored in bits 0:7 */
78 /* Source 2 operand type */
79 #define Src2None (0<<29)
80 #define Src2CL (1<<29)
81 #define Src2ImmByte (2<<29)
82 #define Src2One (3<<29)
83 #define Src2Imm16 (4<<29)
84 #define Src2Mask (7<<29)
87 Group1_80
, Group1_81
, Group1_82
, Group1_83
,
88 Group1A
, Group3_Byte
, Group3
, Group4
, Group5
, Group7
,
91 static u32 opcode_table
[256] = {
93 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
94 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
95 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
, 0, 0,
97 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
98 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
101 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
102 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
103 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
, 0, 0,
105 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
106 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
107 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
, 0, 0,
109 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
110 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
111 DstAcc
| SrcImmByte
, DstAcc
| SrcImm
, 0, 0,
113 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
114 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
117 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
118 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
121 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
122 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
123 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
,
126 DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
,
128 DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
,
130 SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
,
131 SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
,
133 DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
,
134 DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
,
136 0, 0, 0, DstReg
| SrcMem32
| ModRM
| Mov
/* movsxd (x86/64) */ ,
139 SrcImm
| Mov
| Stack
, 0, SrcImmByte
| Mov
| Stack
, 0,
140 SrcNone
| ByteOp
| ImplicitOps
, SrcNone
| ImplicitOps
, /* insb, insw/insd */
141 SrcNone
| ByteOp
| ImplicitOps
, SrcNone
| ImplicitOps
, /* outsb, outsw/outsd */
143 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
144 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
146 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
147 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
149 Group
| Group1_80
, Group
| Group1_81
,
150 Group
| Group1_82
, Group
| Group1_83
,
151 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
152 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
154 ByteOp
| DstMem
| SrcReg
| ModRM
| Mov
, DstMem
| SrcReg
| ModRM
| Mov
,
155 ByteOp
| DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
156 DstMem
| SrcReg
| ModRM
| Mov
, ModRM
| DstReg
,
157 DstReg
| SrcMem
| ModRM
| Mov
, Group
| Group1A
,
159 DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
,
161 0, 0, SrcImm
| Src2Imm16
, 0,
162 ImplicitOps
| Stack
, ImplicitOps
| Stack
, 0, 0,
164 ByteOp
| DstReg
| SrcMem
| Mov
| MemAbs
, DstReg
| SrcMem
| Mov
| MemAbs
,
165 ByteOp
| DstMem
| SrcReg
| Mov
| MemAbs
, DstMem
| SrcReg
| Mov
| MemAbs
,
166 ByteOp
| ImplicitOps
| Mov
| String
, ImplicitOps
| Mov
| String
,
167 ByteOp
| ImplicitOps
| String
, ImplicitOps
| String
,
169 0, 0, ByteOp
| ImplicitOps
| Mov
| String
, ImplicitOps
| Mov
| String
,
170 ByteOp
| ImplicitOps
| Mov
| String
, ImplicitOps
| Mov
| String
,
171 ByteOp
| ImplicitOps
| String
, ImplicitOps
| String
,
173 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
174 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
175 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
176 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
178 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
179 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
180 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
181 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
183 ByteOp
| DstMem
| SrcImm
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
184 0, ImplicitOps
| Stack
, 0, 0,
185 ByteOp
| DstMem
| SrcImm
| ModRM
| Mov
, DstMem
| SrcImm
| ModRM
| Mov
,
187 0, 0, 0, ImplicitOps
| Stack
,
188 ImplicitOps
, SrcImmByte
, ImplicitOps
, ImplicitOps
,
190 ByteOp
| DstMem
| SrcImplicit
| ModRM
, DstMem
| SrcImplicit
| ModRM
,
191 ByteOp
| DstMem
| SrcImplicit
| ModRM
, DstMem
| SrcImplicit
| ModRM
,
194 0, 0, 0, 0, 0, 0, 0, 0,
197 ByteOp
| SrcImmUByte
, SrcImmUByte
,
198 ByteOp
| SrcImmUByte
, SrcImmUByte
,
200 SrcImm
| Stack
, SrcImm
| ImplicitOps
,
201 SrcImmU
| Src2Imm16
, SrcImmByte
| ImplicitOps
,
202 SrcNone
| ByteOp
| ImplicitOps
, SrcNone
| ImplicitOps
,
203 SrcNone
| ByteOp
| ImplicitOps
, SrcNone
| ImplicitOps
,
206 ImplicitOps
, ImplicitOps
, Group
| Group3_Byte
, Group
| Group3
,
208 ImplicitOps
, 0, ImplicitOps
, ImplicitOps
,
209 ImplicitOps
, ImplicitOps
, Group
| Group4
, Group
| Group5
,
212 static u32 twobyte_table
[256] = {
214 0, Group
| GroupDual
| Group7
, 0, 0, 0, ImplicitOps
, ImplicitOps
, 0,
215 ImplicitOps
, ImplicitOps
, 0, 0, 0, ImplicitOps
| ModRM
, 0, 0,
217 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps
| ModRM
, 0, 0, 0, 0, 0, 0, 0,
219 ModRM
| ImplicitOps
, ModRM
, ModRM
| ImplicitOps
, ModRM
, 0, 0, 0, 0,
220 0, 0, 0, 0, 0, 0, 0, 0,
222 ImplicitOps
, 0, ImplicitOps
, 0,
223 ImplicitOps
, ImplicitOps
, 0, 0,
224 0, 0, 0, 0, 0, 0, 0, 0,
226 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
227 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
228 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
229 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
231 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
232 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
233 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
234 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
236 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
238 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
240 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
242 SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
,
243 SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
,
245 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
247 0, 0, 0, DstMem
| SrcReg
| ModRM
| BitOp
,
248 DstMem
| SrcReg
| Src2ImmByte
| ModRM
,
249 DstMem
| SrcReg
| Src2CL
| ModRM
, 0, 0,
251 0, 0, 0, DstMem
| SrcReg
| ModRM
| BitOp
,
252 DstMem
| SrcReg
| Src2ImmByte
| ModRM
,
253 DstMem
| SrcReg
| Src2CL
| ModRM
,
256 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
, 0,
257 DstMem
| SrcReg
| ModRM
| BitOp
,
258 0, 0, ByteOp
| DstReg
| SrcMem
| ModRM
| Mov
,
259 DstReg
| SrcMem16
| ModRM
| Mov
,
261 0, 0, DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcReg
| ModRM
| BitOp
,
262 0, 0, ByteOp
| DstReg
| SrcMem
| ModRM
| Mov
,
263 DstReg
| SrcMem16
| ModRM
| Mov
,
265 0, 0, 0, DstMem
| SrcReg
| ModRM
| Mov
, 0, 0, 0, ImplicitOps
| ModRM
,
266 0, 0, 0, 0, 0, 0, 0, 0,
268 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
270 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
272 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
275 static u32 group_table
[] = {
277 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
278 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
279 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
280 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
282 DstMem
| SrcImm
| ModRM
, DstMem
| SrcImm
| ModRM
,
283 DstMem
| SrcImm
| ModRM
, DstMem
| SrcImm
| ModRM
,
284 DstMem
| SrcImm
| ModRM
, DstMem
| SrcImm
| ModRM
,
285 DstMem
| SrcImm
| ModRM
, DstMem
| SrcImm
| ModRM
,
287 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
288 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
289 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
290 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
292 DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
293 DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
294 DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
295 DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
297 DstMem
| SrcNone
| ModRM
| Mov
| Stack
, 0, 0, 0, 0, 0, 0, 0,
299 ByteOp
| SrcImm
| DstMem
| ModRM
, 0,
300 ByteOp
| DstMem
| SrcNone
| ModRM
, ByteOp
| DstMem
| SrcNone
| ModRM
,
303 DstMem
| SrcImm
| ModRM
, 0,
304 DstMem
| SrcNone
| ModRM
, DstMem
| SrcNone
| ModRM
,
307 ByteOp
| DstMem
| SrcNone
| ModRM
, ByteOp
| DstMem
| SrcNone
| ModRM
,
310 DstMem
| SrcNone
| ModRM
, DstMem
| SrcNone
| ModRM
,
311 SrcMem
| ModRM
| Stack
, 0,
312 SrcMem
| ModRM
| Stack
, 0, SrcMem
| ModRM
| Stack
, 0,
314 0, 0, ModRM
| SrcMem
, ModRM
| SrcMem
,
315 SrcNone
| ModRM
| DstMem
| Mov
, 0,
316 SrcMem16
| ModRM
| Mov
, SrcMem
| ModRM
| ByteOp
,
319 static u32 group2_table
[] = {
321 SrcNone
| ModRM
, 0, 0, SrcNone
| ModRM
,
322 SrcNone
| ModRM
| DstMem
| Mov
, 0,
323 SrcMem16
| ModRM
| Mov
, 0,
326 /* EFLAGS bit definitions. */
327 #define EFLG_VM (1<<17)
328 #define EFLG_RF (1<<16)
329 #define EFLG_OF (1<<11)
330 #define EFLG_DF (1<<10)
331 #define EFLG_IF (1<<9)
332 #define EFLG_SF (1<<7)
333 #define EFLG_ZF (1<<6)
334 #define EFLG_AF (1<<4)
335 #define EFLG_PF (1<<2)
336 #define EFLG_CF (1<<0)
339 * Instruction emulation:
340 * Most instructions are emulated directly via a fragment of inline assembly
341 * code. This allows us to save/restore EFLAGS and thus very easily pick up
342 * any modified flags.
345 #if defined(CONFIG_X86_64)
346 #define _LO32 "k" /* force 32-bit operand */
347 #define _STK "%%rsp" /* stack pointer */
348 #elif defined(__i386__)
349 #define _LO32 "" /* force 32-bit operand */
350 #define _STK "%%esp" /* stack pointer */
354 * These EFLAGS bits are restored from saved value during emulation, and
355 * any changes are written back to the saved value after emulation.
357 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
359 /* Before executing instruction: restore necessary bits in EFLAGS. */
360 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
361 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
362 "movl %"_sav",%"_LO32 _tmp"; " \
365 "movl %"_msk",%"_LO32 _tmp"; " \
366 "andl %"_LO32 _tmp",("_STK"); " \
368 "notl %"_LO32 _tmp"; " \
369 "andl %"_LO32 _tmp",("_STK"); " \
370 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
372 "orl %"_LO32 _tmp",("_STK"); " \
376 /* After executing instruction: write-back necessary bits in EFLAGS. */
377 #define _POST_EFLAGS(_sav, _msk, _tmp) \
378 /* _sav |= EFLAGS & _msk; */ \
381 "andl %"_msk",%"_LO32 _tmp"; " \
382 "orl %"_LO32 _tmp",%"_sav"; "
390 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
392 __asm__ __volatile__ ( \
393 _PRE_EFLAGS("0", "4", "2") \
394 _op _suffix " %"_x"3,%1; " \
395 _POST_EFLAGS("0", "4", "2") \
396 : "=m" (_eflags), "=m" ((_dst).val), \
398 : _y ((_src).val), "i" (EFLAGS_MASK)); \
402 /* Raw emulation: instruction has two explicit operands. */
403 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
405 unsigned long _tmp; \
407 switch ((_dst).bytes) { \
409 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
412 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
415 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
420 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
422 unsigned long _tmp; \
423 switch ((_dst).bytes) { \
425 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
428 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
429 _wx, _wy, _lx, _ly, _qx, _qy); \
434 /* Source operand is byte-sized and may be restricted to just %cl. */
435 #define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
436 __emulate_2op(_op, _src, _dst, _eflags, \
437 "b", "c", "b", "c", "b", "c", "b", "c")
439 /* Source operand is byte, word, long or quad sized. */
440 #define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
441 __emulate_2op(_op, _src, _dst, _eflags, \
442 "b", "q", "w", "r", _LO32, "r", "", "r")
444 /* Source operand is word, long or quad sized. */
445 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
446 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
447 "w", "r", _LO32, "r", "", "r")
449 /* Instruction has three operands and one operand is stored in ECX register */
450 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
452 unsigned long _tmp; \
453 _type _clv = (_cl).val; \
454 _type _srcv = (_src).val; \
455 _type _dstv = (_dst).val; \
457 __asm__ __volatile__ ( \
458 _PRE_EFLAGS("0", "5", "2") \
459 _op _suffix " %4,%1 \n" \
460 _POST_EFLAGS("0", "5", "2") \
461 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
462 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
465 (_cl).val = (unsigned long) _clv; \
466 (_src).val = (unsigned long) _srcv; \
467 (_dst).val = (unsigned long) _dstv; \
470 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
472 switch ((_dst).bytes) { \
474 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
475 "w", unsigned short); \
478 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
479 "l", unsigned int); \
482 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
483 "q", unsigned long)); \
488 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
490 unsigned long _tmp; \
492 __asm__ __volatile__ ( \
493 _PRE_EFLAGS("0", "3", "2") \
494 _op _suffix " %1; " \
495 _POST_EFLAGS("0", "3", "2") \
496 : "=m" (_eflags), "+m" ((_dst).val), \
498 : "i" (EFLAGS_MASK)); \
501 /* Instruction has only one explicit operand (no source operand). */
502 #define emulate_1op(_op, _dst, _eflags) \
504 switch ((_dst).bytes) { \
505 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
506 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
507 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
508 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
512 /* Fetch next part of the instruction being emulated. */
513 #define insn_fetch(_type, _size, _eip) \
514 ({ unsigned long _x; \
515 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
522 static inline unsigned long ad_mask(struct decode_cache
*c
)
524 return (1UL << (c
->ad_bytes
<< 3)) - 1;
527 /* Access/update address held in a register, based on addressing mode. */
528 static inline unsigned long
529 address_mask(struct decode_cache
*c
, unsigned long reg
)
531 if (c
->ad_bytes
== sizeof(unsigned long))
534 return reg
& ad_mask(c
);
/*
 * Compute a linear address: segment @base plus register value @reg
 * truncated to the effective address size.
 */
static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
        return base + address_mask(c, reg);
}
544 register_address_increment(struct decode_cache
*c
, unsigned long *reg
, int inc
)
546 if (c
->ad_bytes
== sizeof(unsigned long))
549 *reg
= (*reg
& ~ad_mask(c
)) | ((*reg
+ inc
) & ad_mask(c
));
552 static inline void jmp_rel(struct decode_cache
*c
, int rel
)
554 register_address_increment(c
, &c
->eip
, rel
);
557 static void set_seg_override(struct decode_cache
*c
, int seg
)
559 c
->has_seg_override
= true;
560 c
->seg_override
= seg
;
563 static unsigned long seg_base(struct x86_emulate_ctxt
*ctxt
, int seg
)
565 if (ctxt
->mode
== X86EMUL_MODE_PROT64
&& seg
< VCPU_SREG_FS
)
568 return kvm_x86_ops
->get_segment_base(ctxt
->vcpu
, seg
);
571 static unsigned long seg_override_base(struct x86_emulate_ctxt
*ctxt
,
572 struct decode_cache
*c
)
574 if (!c
->has_seg_override
)
577 return seg_base(ctxt
, c
->seg_override
);
580 static unsigned long es_base(struct x86_emulate_ctxt
*ctxt
)
582 return seg_base(ctxt
, VCPU_SREG_ES
);
585 static unsigned long ss_base(struct x86_emulate_ctxt
*ctxt
)
587 return seg_base(ctxt
, VCPU_SREG_SS
);
590 static int do_fetch_insn_byte(struct x86_emulate_ctxt
*ctxt
,
591 struct x86_emulate_ops
*ops
,
592 unsigned long linear
, u8
*dest
)
594 struct fetch_cache
*fc
= &ctxt
->decode
.fetch
;
598 if (linear
< fc
->start
|| linear
>= fc
->end
) {
599 size
= min(15UL, PAGE_SIZE
- offset_in_page(linear
));
600 rc
= ops
->read_std(linear
, fc
->data
, size
, ctxt
->vcpu
);
604 fc
->end
= linear
+ size
;
606 *dest
= fc
->data
[linear
- fc
->start
];
610 static int do_insn_fetch(struct x86_emulate_ctxt
*ctxt
,
611 struct x86_emulate_ops
*ops
,
612 unsigned long eip
, void *dest
, unsigned size
)
616 eip
+= ctxt
->cs_base
;
618 rc
= do_fetch_insn_byte(ctxt
, ops
, eip
++, dest
++);
626 * Given the 'reg' portion of a ModRM byte, and a register block, return a
627 * pointer into the block that addresses the relevant register.
628 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
630 static void *decode_register(u8 modrm_reg
, unsigned long *regs
,
635 p
= ®s
[modrm_reg
];
636 if (highbyte_regs
&& modrm_reg
>= 4 && modrm_reg
< 8)
637 p
= (unsigned char *)®s
[modrm_reg
& 3] + 1;
641 static int read_descriptor(struct x86_emulate_ctxt
*ctxt
,
642 struct x86_emulate_ops
*ops
,
644 u16
*size
, unsigned long *address
, int op_bytes
)
651 rc
= ops
->read_std((unsigned long)ptr
, (unsigned long *)size
, 2,
655 rc
= ops
->read_std((unsigned long)ptr
+ 2, address
, op_bytes
,
660 static int test_cc(unsigned int condition
, unsigned int flags
)
664 switch ((condition
& 15) >> 1) {
666 rc
|= (flags
& EFLG_OF
);
668 case 1: /* b/c/nae */
669 rc
|= (flags
& EFLG_CF
);
672 rc
|= (flags
& EFLG_ZF
);
675 rc
|= (flags
& (EFLG_CF
|EFLG_ZF
));
678 rc
|= (flags
& EFLG_SF
);
681 rc
|= (flags
& EFLG_PF
);
684 rc
|= (flags
& EFLG_ZF
);
687 rc
|= (!(flags
& EFLG_SF
) != !(flags
& EFLG_OF
));
691 /* Odd condition identifiers (lsb == 1) have inverted sense. */
692 return (!!rc
^ (condition
& 1));
695 static void decode_register_operand(struct operand
*op
,
696 struct decode_cache
*c
,
699 unsigned reg
= c
->modrm_reg
;
700 int highbyte_regs
= c
->rex_prefix
== 0;
703 reg
= (c
->b
& 7) | ((c
->rex_prefix
& 1) << 3);
705 if ((c
->d
& ByteOp
) && !inhibit_bytereg
) {
706 op
->ptr
= decode_register(reg
, c
->regs
, highbyte_regs
);
707 op
->val
= *(u8
*)op
->ptr
;
710 op
->ptr
= decode_register(reg
, c
->regs
, 0);
711 op
->bytes
= c
->op_bytes
;
714 op
->val
= *(u16
*)op
->ptr
;
717 op
->val
= *(u32
*)op
->ptr
;
720 op
->val
= *(u64
*) op
->ptr
;
724 op
->orig_val
= op
->val
;
727 static int decode_modrm(struct x86_emulate_ctxt
*ctxt
,
728 struct x86_emulate_ops
*ops
)
730 struct decode_cache
*c
= &ctxt
->decode
;
732 int index_reg
= 0, base_reg
= 0, scale
;
736 c
->modrm_reg
= (c
->rex_prefix
& 4) << 1; /* REX.R */
737 index_reg
= (c
->rex_prefix
& 2) << 2; /* REX.X */
738 c
->modrm_rm
= base_reg
= (c
->rex_prefix
& 1) << 3; /* REG.B */
741 c
->modrm
= insn_fetch(u8
, 1, c
->eip
);
742 c
->modrm_mod
|= (c
->modrm
& 0xc0) >> 6;
743 c
->modrm_reg
|= (c
->modrm
& 0x38) >> 3;
744 c
->modrm_rm
|= (c
->modrm
& 0x07);
748 if (c
->modrm_mod
== 3) {
749 c
->modrm_ptr
= decode_register(c
->modrm_rm
,
750 c
->regs
, c
->d
& ByteOp
);
751 c
->modrm_val
= *(unsigned long *)c
->modrm_ptr
;
755 if (c
->ad_bytes
== 2) {
756 unsigned bx
= c
->regs
[VCPU_REGS_RBX
];
757 unsigned bp
= c
->regs
[VCPU_REGS_RBP
];
758 unsigned si
= c
->regs
[VCPU_REGS_RSI
];
759 unsigned di
= c
->regs
[VCPU_REGS_RDI
];
761 /* 16-bit ModR/M decode. */
762 switch (c
->modrm_mod
) {
764 if (c
->modrm_rm
== 6)
765 c
->modrm_ea
+= insn_fetch(u16
, 2, c
->eip
);
768 c
->modrm_ea
+= insn_fetch(s8
, 1, c
->eip
);
771 c
->modrm_ea
+= insn_fetch(u16
, 2, c
->eip
);
774 switch (c
->modrm_rm
) {
776 c
->modrm_ea
+= bx
+ si
;
779 c
->modrm_ea
+= bx
+ di
;
782 c
->modrm_ea
+= bp
+ si
;
785 c
->modrm_ea
+= bp
+ di
;
794 if (c
->modrm_mod
!= 0)
801 if (c
->modrm_rm
== 2 || c
->modrm_rm
== 3 ||
802 (c
->modrm_rm
== 6 && c
->modrm_mod
!= 0))
803 if (!c
->has_seg_override
)
804 set_seg_override(c
, VCPU_SREG_SS
);
805 c
->modrm_ea
= (u16
)c
->modrm_ea
;
807 /* 32/64-bit ModR/M decode. */
808 if ((c
->modrm_rm
& 7) == 4) {
809 sib
= insn_fetch(u8
, 1, c
->eip
);
810 index_reg
|= (sib
>> 3) & 7;
814 if ((base_reg
& 7) == 5 && c
->modrm_mod
== 0)
815 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
817 c
->modrm_ea
+= c
->regs
[base_reg
];
819 c
->modrm_ea
+= c
->regs
[index_reg
] << scale
;
820 } else if ((c
->modrm_rm
& 7) == 5 && c
->modrm_mod
== 0) {
821 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
824 c
->modrm_ea
+= c
->regs
[c
->modrm_rm
];
825 switch (c
->modrm_mod
) {
827 if (c
->modrm_rm
== 5)
828 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
831 c
->modrm_ea
+= insn_fetch(s8
, 1, c
->eip
);
834 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
842 static int decode_abs(struct x86_emulate_ctxt
*ctxt
,
843 struct x86_emulate_ops
*ops
)
845 struct decode_cache
*c
= &ctxt
->decode
;
848 switch (c
->ad_bytes
) {
850 c
->modrm_ea
= insn_fetch(u16
, 2, c
->eip
);
853 c
->modrm_ea
= insn_fetch(u32
, 4, c
->eip
);
856 c
->modrm_ea
= insn_fetch(u64
, 8, c
->eip
);
864 x86_decode_insn(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
866 struct decode_cache
*c
= &ctxt
->decode
;
868 int mode
= ctxt
->mode
;
869 int def_op_bytes
, def_ad_bytes
, group
;
871 /* Shadow copy of register state. Committed on successful emulation. */
873 memset(c
, 0, sizeof(struct decode_cache
));
874 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
875 ctxt
->cs_base
= seg_base(ctxt
, VCPU_SREG_CS
);
876 memcpy(c
->regs
, ctxt
->vcpu
->arch
.regs
, sizeof c
->regs
);
879 case X86EMUL_MODE_REAL
:
880 case X86EMUL_MODE_PROT16
:
881 def_op_bytes
= def_ad_bytes
= 2;
883 case X86EMUL_MODE_PROT32
:
884 def_op_bytes
= def_ad_bytes
= 4;
887 case X86EMUL_MODE_PROT64
:
896 c
->op_bytes
= def_op_bytes
;
897 c
->ad_bytes
= def_ad_bytes
;
899 /* Legacy prefixes. */
901 switch (c
->b
= insn_fetch(u8
, 1, c
->eip
)) {
902 case 0x66: /* operand-size override */
903 /* switch between 2/4 bytes */
904 c
->op_bytes
= def_op_bytes
^ 6;
906 case 0x67: /* address-size override */
907 if (mode
== X86EMUL_MODE_PROT64
)
908 /* switch between 4/8 bytes */
909 c
->ad_bytes
= def_ad_bytes
^ 12;
911 /* switch between 2/4 bytes */
912 c
->ad_bytes
= def_ad_bytes
^ 6;
914 case 0x26: /* ES override */
915 case 0x2e: /* CS override */
916 case 0x36: /* SS override */
917 case 0x3e: /* DS override */
918 set_seg_override(c
, (c
->b
>> 3) & 3);
920 case 0x64: /* FS override */
921 case 0x65: /* GS override */
922 set_seg_override(c
, c
->b
& 7);
924 case 0x40 ... 0x4f: /* REX */
925 if (mode
!= X86EMUL_MODE_PROT64
)
927 c
->rex_prefix
= c
->b
;
929 case 0xf0: /* LOCK */
932 case 0xf2: /* REPNE/REPNZ */
933 c
->rep_prefix
= REPNE_PREFIX
;
935 case 0xf3: /* REP/REPE/REPZ */
936 c
->rep_prefix
= REPE_PREFIX
;
942 /* Any legacy prefix after a REX prefix nullifies its effect. */
951 if (c
->rex_prefix
& 8)
952 c
->op_bytes
= 8; /* REX.W */
954 /* Opcode byte(s). */
955 c
->d
= opcode_table
[c
->b
];
957 /* Two-byte opcode? */
960 c
->b
= insn_fetch(u8
, 1, c
->eip
);
961 c
->d
= twobyte_table
[c
->b
];
966 group
= c
->d
& GroupMask
;
967 c
->modrm
= insn_fetch(u8
, 1, c
->eip
);
970 group
= (group
<< 3) + ((c
->modrm
>> 3) & 7);
971 if ((c
->d
& GroupDual
) && (c
->modrm
>> 6) == 3)
972 c
->d
= group2_table
[group
];
974 c
->d
= group_table
[group
];
979 DPRINTF("Cannot emulate %02x\n", c
->b
);
983 if (mode
== X86EMUL_MODE_PROT64
&& (c
->d
& Stack
))
986 /* ModRM and SIB bytes. */
988 rc
= decode_modrm(ctxt
, ops
);
989 else if (c
->d
& MemAbs
)
990 rc
= decode_abs(ctxt
, ops
);
994 if (!c
->has_seg_override
)
995 set_seg_override(c
, VCPU_SREG_DS
);
997 if (!(!c
->twobyte
&& c
->b
== 0x8d))
998 c
->modrm_ea
+= seg_override_base(ctxt
, c
);
1000 if (c
->ad_bytes
!= 8)
1001 c
->modrm_ea
= (u32
)c
->modrm_ea
;
1003 * Decode and fetch the source operand: register, memory
1006 switch (c
->d
& SrcMask
) {
1010 decode_register_operand(&c
->src
, c
, 0);
1019 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 :
1021 /* Don't fetch the address for invlpg: it could be unmapped. */
1022 if (c
->twobyte
&& c
->b
== 0x01 && c
->modrm_reg
== 7)
1026 * For instructions with a ModR/M byte, switch to register
1027 * access if Mod = 3.
1029 if ((c
->d
& ModRM
) && c
->modrm_mod
== 3) {
1030 c
->src
.type
= OP_REG
;
1031 c
->src
.val
= c
->modrm_val
;
1032 c
->src
.ptr
= c
->modrm_ptr
;
1035 c
->src
.type
= OP_MEM
;
1039 c
->src
.type
= OP_IMM
;
1040 c
->src
.ptr
= (unsigned long *)c
->eip
;
1041 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1042 if (c
->src
.bytes
== 8)
1044 /* NB. Immediates are sign-extended as necessary. */
1045 switch (c
->src
.bytes
) {
1047 c
->src
.val
= insn_fetch(s8
, 1, c
->eip
);
1050 c
->src
.val
= insn_fetch(s16
, 2, c
->eip
);
1053 c
->src
.val
= insn_fetch(s32
, 4, c
->eip
);
1056 if ((c
->d
& SrcMask
) == SrcImmU
) {
1057 switch (c
->src
.bytes
) {
1062 c
->src
.val
&= 0xffff;
1065 c
->src
.val
&= 0xffffffff;
1072 c
->src
.type
= OP_IMM
;
1073 c
->src
.ptr
= (unsigned long *)c
->eip
;
1075 if ((c
->d
& SrcMask
) == SrcImmByte
)
1076 c
->src
.val
= insn_fetch(s8
, 1, c
->eip
);
1078 c
->src
.val
= insn_fetch(u8
, 1, c
->eip
);
1087 * Decode and fetch the second source operand: register, memory
1090 switch (c
->d
& Src2Mask
) {
1095 c
->src2
.val
= c
->regs
[VCPU_REGS_RCX
] & 0x8;
1098 c
->src2
.type
= OP_IMM
;
1099 c
->src2
.ptr
= (unsigned long *)c
->eip
;
1101 c
->src2
.val
= insn_fetch(u8
, 1, c
->eip
);
1104 c
->src2
.type
= OP_IMM
;
1105 c
->src2
.ptr
= (unsigned long *)c
->eip
;
1107 c
->src2
.val
= insn_fetch(u16
, 2, c
->eip
);
1115 /* Decode and fetch the destination operand: register or memory. */
1116 switch (c
->d
& DstMask
) {
1118 /* Special instructions do their own operand decoding. */
1121 decode_register_operand(&c
->dst
, c
,
1122 c
->twobyte
&& (c
->b
== 0xb6 || c
->b
== 0xb7));
1125 if ((c
->d
& ModRM
) && c
->modrm_mod
== 3) {
1126 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1127 c
->dst
.type
= OP_REG
;
1128 c
->dst
.val
= c
->dst
.orig_val
= c
->modrm_val
;
1129 c
->dst
.ptr
= c
->modrm_ptr
;
1132 c
->dst
.type
= OP_MEM
;
1135 c
->dst
.type
= OP_REG
;
1136 c
->dst
.bytes
= c
->op_bytes
;
1137 c
->dst
.ptr
= &c
->regs
[VCPU_REGS_RAX
];
1138 switch (c
->op_bytes
) {
1140 c
->dst
.val
= *(u8
*)c
->dst
.ptr
;
1143 c
->dst
.val
= *(u16
*)c
->dst
.ptr
;
1146 c
->dst
.val
= *(u32
*)c
->dst
.ptr
;
1149 c
->dst
.orig_val
= c
->dst
.val
;
1153 if (c
->rip_relative
)
1154 c
->modrm_ea
+= c
->eip
;
1157 return (rc
== X86EMUL_UNHANDLEABLE
) ? -1 : 0;
1160 static inline void emulate_push(struct x86_emulate_ctxt
*ctxt
)
1162 struct decode_cache
*c
= &ctxt
->decode
;
1164 c
->dst
.type
= OP_MEM
;
1165 c
->dst
.bytes
= c
->op_bytes
;
1166 c
->dst
.val
= c
->src
.val
;
1167 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
], -c
->op_bytes
);
1168 c
->dst
.ptr
= (void *) register_address(c
, ss_base(ctxt
),
1169 c
->regs
[VCPU_REGS_RSP
]);
1172 static int emulate_pop(struct x86_emulate_ctxt
*ctxt
,
1173 struct x86_emulate_ops
*ops
,
1174 void *dest
, int len
)
1176 struct decode_cache
*c
= &ctxt
->decode
;
1179 rc
= ops
->read_emulated(register_address(c
, ss_base(ctxt
),
1180 c
->regs
[VCPU_REGS_RSP
]),
1181 dest
, len
, ctxt
->vcpu
);
1185 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
], len
);
1189 static inline int emulate_grp1a(struct x86_emulate_ctxt
*ctxt
,
1190 struct x86_emulate_ops
*ops
)
1192 struct decode_cache
*c
= &ctxt
->decode
;
1195 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->dst
.bytes
);
1201 static inline void emulate_grp2(struct x86_emulate_ctxt
*ctxt
)
1203 struct decode_cache
*c
= &ctxt
->decode
;
1204 switch (c
->modrm_reg
) {
1206 emulate_2op_SrcB("rol", c
->src
, c
->dst
, ctxt
->eflags
);
1209 emulate_2op_SrcB("ror", c
->src
, c
->dst
, ctxt
->eflags
);
1212 emulate_2op_SrcB("rcl", c
->src
, c
->dst
, ctxt
->eflags
);
1215 emulate_2op_SrcB("rcr", c
->src
, c
->dst
, ctxt
->eflags
);
1217 case 4: /* sal/shl */
1218 case 6: /* sal/shl */
1219 emulate_2op_SrcB("sal", c
->src
, c
->dst
, ctxt
->eflags
);
1222 emulate_2op_SrcB("shr", c
->src
, c
->dst
, ctxt
->eflags
);
1225 emulate_2op_SrcB("sar", c
->src
, c
->dst
, ctxt
->eflags
);
1230 static inline int emulate_grp3(struct x86_emulate_ctxt
*ctxt
,
1231 struct x86_emulate_ops
*ops
)
1233 struct decode_cache
*c
= &ctxt
->decode
;
1236 switch (c
->modrm_reg
) {
1237 case 0 ... 1: /* test */
1238 emulate_2op_SrcV("test", c
->src
, c
->dst
, ctxt
->eflags
);
1241 c
->dst
.val
= ~c
->dst
.val
;
1244 emulate_1op("neg", c
->dst
, ctxt
->eflags
);
1247 DPRINTF("Cannot emulate %02x\n", c
->b
);
1248 rc
= X86EMUL_UNHANDLEABLE
;
1254 static inline int emulate_grp45(struct x86_emulate_ctxt
*ctxt
,
1255 struct x86_emulate_ops
*ops
)
1257 struct decode_cache
*c
= &ctxt
->decode
;
1259 switch (c
->modrm_reg
) {
1261 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
1264 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
1266 case 2: /* call near abs */ {
1269 c
->eip
= c
->src
.val
;
1270 c
->src
.val
= old_eip
;
1274 case 4: /* jmp abs */
1275 c
->eip
= c
->src
.val
;
1284 static inline int emulate_grp9(struct x86_emulate_ctxt
*ctxt
,
1285 struct x86_emulate_ops
*ops
,
1286 unsigned long memop
)
1288 struct decode_cache
*c
= &ctxt
->decode
;
1292 rc
= ops
->read_emulated(memop
, &old
, 8, ctxt
->vcpu
);
1296 if (((u32
) (old
>> 0) != (u32
) c
->regs
[VCPU_REGS_RAX
]) ||
1297 ((u32
) (old
>> 32) != (u32
) c
->regs
[VCPU_REGS_RDX
])) {
1299 c
->regs
[VCPU_REGS_RAX
] = (u32
) (old
>> 0);
1300 c
->regs
[VCPU_REGS_RDX
] = (u32
) (old
>> 32);
1301 ctxt
->eflags
&= ~EFLG_ZF
;
1304 new = ((u64
)c
->regs
[VCPU_REGS_RCX
] << 32) |
1305 (u32
) c
->regs
[VCPU_REGS_RBX
];
1307 rc
= ops
->cmpxchg_emulated(memop
, &old
, &new, 8, ctxt
->vcpu
);
1310 ctxt
->eflags
|= EFLG_ZF
;
1315 static int emulate_ret_far(struct x86_emulate_ctxt
*ctxt
,
1316 struct x86_emulate_ops
*ops
)
1318 struct decode_cache
*c
= &ctxt
->decode
;
1322 rc
= emulate_pop(ctxt
, ops
, &c
->eip
, c
->op_bytes
);
1325 if (c
->op_bytes
== 4)
1326 c
->eip
= (u32
)c
->eip
;
1327 rc
= emulate_pop(ctxt
, ops
, &cs
, c
->op_bytes
);
1330 rc
= kvm_load_segment_descriptor(ctxt
->vcpu
, (u16
)cs
, 1, VCPU_SREG_CS
);
1334 static inline int writeback(struct x86_emulate_ctxt
*ctxt
,
1335 struct x86_emulate_ops
*ops
)
1338 struct decode_cache
*c
= &ctxt
->decode
;
1340 switch (c
->dst
.type
) {
1342 /* The 4-byte case *is* correct:
1343 * in 64-bit mode we zero-extend.
1345 switch (c
->dst
.bytes
) {
1347 *(u8
*)c
->dst
.ptr
= (u8
)c
->dst
.val
;
1350 *(u16
*)c
->dst
.ptr
= (u16
)c
->dst
.val
;
1353 *c
->dst
.ptr
= (u32
)c
->dst
.val
;
1354 break; /* 64b: zero-ext */
1356 *c
->dst
.ptr
= c
->dst
.val
;
1362 rc
= ops
->cmpxchg_emulated(
1363 (unsigned long)c
->dst
.ptr
,
1369 rc
= ops
->write_emulated(
1370 (unsigned long)c
->dst
.ptr
,
1386 static void toggle_interruptibility(struct x86_emulate_ctxt
*ctxt
, u32 mask
)
1388 u32 int_shadow
= kvm_x86_ops
->get_interrupt_shadow(ctxt
->vcpu
, mask
);
1390 * an sti; sti; sequence only disable interrupts for the first
1391 * instruction. So, if the last instruction, be it emulated or
1392 * not, left the system with the INT_STI flag enabled, it
1393 * means that the last instruction is an sti. We should not
1394 * leave the flag on in this case. The same goes for mov ss
1396 if (!(int_shadow
& mask
))
1397 ctxt
->interruptibility
= mask
;
1401 setup_syscalls_segments(struct x86_emulate_ctxt
*ctxt
,
1402 struct kvm_segment
*cs
, struct kvm_segment
*ss
)
1404 memset(cs
, 0, sizeof(struct kvm_segment
));
1405 kvm_x86_ops
->get_segment(ctxt
->vcpu
, cs
, VCPU_SREG_CS
);
1406 memset(ss
, 0, sizeof(struct kvm_segment
));
1408 cs
->l
= 0; /* will be adjusted later */
1409 cs
->base
= 0; /* flat segment */
1410 cs
->g
= 1; /* 4kb granularity */
1411 cs
->limit
= 0xffffffff; /* 4GB limit */
1412 cs
->type
= 0x0b; /* Read, Execute, Accessed */
1414 cs
->dpl
= 0; /* will be adjusted later */
1419 ss
->base
= 0; /* flat segment */
1420 ss
->limit
= 0xffffffff; /* 4GB limit */
1421 ss
->g
= 1; /* 4kb granularity */
1423 ss
->type
= 0x03; /* Read/Write, Accessed */
1424 ss
->db
= 1; /* 32bit stack segment */
1430 emulate_syscall(struct x86_emulate_ctxt
*ctxt
)
1432 struct decode_cache
*c
= &ctxt
->decode
;
1433 struct kvm_segment cs
, ss
;
1436 /* syscall is not available in real mode */
1437 if (c
->lock_prefix
|| ctxt
->mode
== X86EMUL_MODE_REAL
1438 || !(ctxt
->vcpu
->arch
.cr0
& X86_CR0_PE
))
1441 setup_syscalls_segments(ctxt
, &cs
, &ss
);
1443 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_STAR
, &msr_data
);
1445 cs
.selector
= (u16
)(msr_data
& 0xfffc);
1446 ss
.selector
= (u16
)(msr_data
+ 8);
1448 if (is_long_mode(ctxt
->vcpu
)) {
1452 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &cs
, VCPU_SREG_CS
);
1453 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &ss
, VCPU_SREG_SS
);
1455 c
->regs
[VCPU_REGS_RCX
] = c
->eip
;
1456 if (is_long_mode(ctxt
->vcpu
)) {
1457 #ifdef CONFIG_X86_64
1458 c
->regs
[VCPU_REGS_R11
] = ctxt
->eflags
& ~EFLG_RF
;
1460 kvm_x86_ops
->get_msr(ctxt
->vcpu
,
1461 ctxt
->mode
== X86EMUL_MODE_PROT64
?
1462 MSR_LSTAR
: MSR_CSTAR
, &msr_data
);
1465 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_SYSCALL_MASK
, &msr_data
);
1466 ctxt
->eflags
&= ~(msr_data
| EFLG_RF
);
1470 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_STAR
, &msr_data
);
1471 c
->eip
= (u32
)msr_data
;
1473 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
1480 emulate_sysenter(struct x86_emulate_ctxt
*ctxt
)
1482 struct decode_cache
*c
= &ctxt
->decode
;
1483 struct kvm_segment cs
, ss
;
1486 /* inject #UD if LOCK prefix is used */
1490 /* inject #GP if in real mode or paging is disabled */
1491 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
1492 !(ctxt
->vcpu
->arch
.cr0
& X86_CR0_PE
)) {
1493 kvm_inject_gp(ctxt
->vcpu
, 0);
1497 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1498 * Therefore, we inject an #UD.
1500 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
1503 setup_syscalls_segments(ctxt
, &cs
, &ss
);
1505 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_CS
, &msr_data
);
1506 switch (ctxt
->mode
) {
1507 case X86EMUL_MODE_PROT32
:
1508 if ((msr_data
& 0xfffc) == 0x0) {
1509 kvm_inject_gp(ctxt
->vcpu
, 0);
1513 case X86EMUL_MODE_PROT64
:
1514 if (msr_data
== 0x0) {
1515 kvm_inject_gp(ctxt
->vcpu
, 0);
1521 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
1522 cs
.selector
= (u16
)msr_data
;
1523 cs
.selector
&= ~SELECTOR_RPL_MASK
;
1524 ss
.selector
= cs
.selector
+ 8;
1525 ss
.selector
&= ~SELECTOR_RPL_MASK
;
1526 if (ctxt
->mode
== X86EMUL_MODE_PROT64
1527 || is_long_mode(ctxt
->vcpu
)) {
1532 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &cs
, VCPU_SREG_CS
);
1533 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &ss
, VCPU_SREG_SS
);
1535 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_EIP
, &msr_data
);
1538 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_ESP
, &msr_data
);
1539 c
->regs
[VCPU_REGS_RSP
] = msr_data
;
1545 emulate_sysexit(struct x86_emulate_ctxt
*ctxt
)
1547 struct decode_cache
*c
= &ctxt
->decode
;
1548 struct kvm_segment cs
, ss
;
1552 /* inject #UD if LOCK prefix is used */
1556 /* inject #GP if in real mode or paging is disabled */
1557 if (ctxt
->mode
== X86EMUL_MODE_REAL
1558 || !(ctxt
->vcpu
->arch
.cr0
& X86_CR0_PE
)) {
1559 kvm_inject_gp(ctxt
->vcpu
, 0);
1563 /* sysexit must be called from CPL 0 */
1564 if (kvm_x86_ops
->get_cpl(ctxt
->vcpu
) != 0) {
1565 kvm_inject_gp(ctxt
->vcpu
, 0);
1569 setup_syscalls_segments(ctxt
, &cs
, &ss
);
1571 if ((c
->rex_prefix
& 0x8) != 0x0)
1572 usermode
= X86EMUL_MODE_PROT64
;
1574 usermode
= X86EMUL_MODE_PROT32
;
1578 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_CS
, &msr_data
);
1580 case X86EMUL_MODE_PROT32
:
1581 cs
.selector
= (u16
)(msr_data
+ 16);
1582 if ((msr_data
& 0xfffc) == 0x0) {
1583 kvm_inject_gp(ctxt
->vcpu
, 0);
1586 ss
.selector
= (u16
)(msr_data
+ 24);
1588 case X86EMUL_MODE_PROT64
:
1589 cs
.selector
= (u16
)(msr_data
+ 32);
1590 if (msr_data
== 0x0) {
1591 kvm_inject_gp(ctxt
->vcpu
, 0);
1594 ss
.selector
= cs
.selector
+ 8;
1599 cs
.selector
|= SELECTOR_RPL_MASK
;
1600 ss
.selector
|= SELECTOR_RPL_MASK
;
1602 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &cs
, VCPU_SREG_CS
);
1603 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &ss
, VCPU_SREG_SS
);
1605 c
->eip
= ctxt
->vcpu
->arch
.regs
[VCPU_REGS_RDX
];
1606 c
->regs
[VCPU_REGS_RSP
] = ctxt
->vcpu
->arch
.regs
[VCPU_REGS_RCX
];
1612 x86_emulate_insn(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
1614 unsigned long memop
= 0;
1616 unsigned long saved_eip
= 0;
1617 struct decode_cache
*c
= &ctxt
->decode
;
1622 ctxt
->interruptibility
= 0;
1624 /* Shadow copy of register state. Committed on successful emulation.
1625 * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
1629 memcpy(c
->regs
, ctxt
->vcpu
->arch
.regs
, sizeof c
->regs
);
1632 if (((c
->d
& ModRM
) && (c
->modrm_mod
!= 3)) || (c
->d
& MemAbs
))
1633 memop
= c
->modrm_ea
;
1635 if (c
->rep_prefix
&& (c
->d
& String
)) {
1636 /* All REP prefixes have the same first termination condition */
1637 if (c
->regs
[VCPU_REGS_RCX
] == 0) {
1638 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1641 /* The second termination condition only applies for REPE
1642 * and REPNE. Test if the repeat string operation prefix is
1643 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
1644 * corresponding termination condition according to:
1645 * - if REPE/REPZ and ZF = 0 then done
1646 * - if REPNE/REPNZ and ZF = 1 then done
1648 if ((c
->b
== 0xa6) || (c
->b
== 0xa7) ||
1649 (c
->b
== 0xae) || (c
->b
== 0xaf)) {
1650 if ((c
->rep_prefix
== REPE_PREFIX
) &&
1651 ((ctxt
->eflags
& EFLG_ZF
) == 0)) {
1652 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1655 if ((c
->rep_prefix
== REPNE_PREFIX
) &&
1656 ((ctxt
->eflags
& EFLG_ZF
) == EFLG_ZF
)) {
1657 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1661 c
->regs
[VCPU_REGS_RCX
]--;
1662 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
1665 if (c
->src
.type
== OP_MEM
) {
1666 c
->src
.ptr
= (unsigned long *)memop
;
1668 rc
= ops
->read_emulated((unsigned long)c
->src
.ptr
,
1674 c
->src
.orig_val
= c
->src
.val
;
1677 if ((c
->d
& DstMask
) == ImplicitOps
)
1681 if (c
->dst
.type
== OP_MEM
) {
1682 c
->dst
.ptr
= (unsigned long *)memop
;
1683 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1686 unsigned long mask
= ~(c
->dst
.bytes
* 8 - 1);
1688 c
->dst
.ptr
= (void *)c
->dst
.ptr
+
1689 (c
->src
.val
& mask
) / 8;
1691 if (!(c
->d
& Mov
) &&
1692 /* optimisation - avoid slow emulated read */
1693 ((rc
= ops
->read_emulated((unsigned long)c
->dst
.ptr
,
1695 c
->dst
.bytes
, ctxt
->vcpu
)) != 0))
1698 c
->dst
.orig_val
= c
->dst
.val
;
1708 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
1712 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
1716 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
1720 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
1724 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
1728 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
1732 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
1736 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
1738 case 0x40 ... 0x47: /* inc r16/r32 */
1739 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
1741 case 0x48 ... 0x4f: /* dec r16/r32 */
1742 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
1744 case 0x50 ... 0x57: /* push reg */
1747 case 0x58 ... 0x5f: /* pop reg */
1749 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
1753 case 0x63: /* movsxd */
1754 if (ctxt
->mode
!= X86EMUL_MODE_PROT64
)
1755 goto cannot_emulate
;
1756 c
->dst
.val
= (s32
) c
->src
.val
;
1758 case 0x68: /* push imm */
1759 case 0x6a: /* push imm8 */
1762 case 0x6c: /* insb */
1763 case 0x6d: /* insw/insd */
1764 if (kvm_emulate_pio_string(ctxt
->vcpu
, NULL
,
1766 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
1768 address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) : 1,
1769 (ctxt
->eflags
& EFLG_DF
),
1770 register_address(c
, es_base(ctxt
),
1771 c
->regs
[VCPU_REGS_RDI
]),
1773 c
->regs
[VCPU_REGS_RDX
]) == 0) {
1778 case 0x6e: /* outsb */
1779 case 0x6f: /* outsw/outsd */
1780 if (kvm_emulate_pio_string(ctxt
->vcpu
, NULL
,
1782 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
1784 address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) : 1,
1785 (ctxt
->eflags
& EFLG_DF
),
1787 seg_override_base(ctxt
, c
),
1788 c
->regs
[VCPU_REGS_RSI
]),
1790 c
->regs
[VCPU_REGS_RDX
]) == 0) {
1795 case 0x70 ... 0x7f: /* jcc (short) */
1796 if (test_cc(c
->b
, ctxt
->eflags
))
1797 jmp_rel(c
, c
->src
.val
);
1799 case 0x80 ... 0x83: /* Grp1 */
1800 switch (c
->modrm_reg
) {
1820 emulate_2op_SrcV("test", c
->src
, c
->dst
, ctxt
->eflags
);
1822 case 0x86 ... 0x87: /* xchg */
1824 /* Write back the register source. */
1825 switch (c
->dst
.bytes
) {
1827 *(u8
*) c
->src
.ptr
= (u8
) c
->dst
.val
;
1830 *(u16
*) c
->src
.ptr
= (u16
) c
->dst
.val
;
1833 *c
->src
.ptr
= (u32
) c
->dst
.val
;
1834 break; /* 64b reg: zero-extend */
1836 *c
->src
.ptr
= c
->dst
.val
;
1840 * Write back the memory destination with implicit LOCK
1843 c
->dst
.val
= c
->src
.val
;
1846 case 0x88 ... 0x8b: /* mov */
1848 case 0x8c: { /* mov r/m, sreg */
1849 struct kvm_segment segreg
;
1851 if (c
->modrm_reg
<= 5)
1852 kvm_get_segment(ctxt
->vcpu
, &segreg
, c
->modrm_reg
);
1854 printk(KERN_INFO
"0x8c: Invalid segreg in modrm byte 0x%02x\n",
1856 goto cannot_emulate
;
1858 c
->dst
.val
= segreg
.selector
;
1861 case 0x8d: /* lea r16/r32, m */
1862 c
->dst
.val
= c
->modrm_ea
;
1864 case 0x8e: { /* mov seg, r/m16 */
1870 if (c
->modrm_reg
== VCPU_SREG_SS
)
1871 toggle_interruptibility(ctxt
, X86_SHADOW_INT_MOV_SS
);
1873 if (c
->modrm_reg
<= 5) {
1874 type_bits
= (c
->modrm_reg
== 1) ? 9 : 1;
1875 err
= kvm_load_segment_descriptor(ctxt
->vcpu
, sel
,
1876 type_bits
, c
->modrm_reg
);
1878 printk(KERN_INFO
"Invalid segreg in modrm byte 0x%02x\n",
1880 goto cannot_emulate
;
1884 goto cannot_emulate
;
1886 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
1889 case 0x8f: /* pop (sole member of Grp1a) */
1890 rc
= emulate_grp1a(ctxt
, ops
);
1894 case 0x90: /* nop / xchg r8,rax */
1895 if (!(c
->rex_prefix
& 1)) { /* nop */
1896 c
->dst
.type
= OP_NONE
;
1899 case 0x91 ... 0x97: /* xchg reg,rax */
1900 c
->src
.type
= c
->dst
.type
= OP_REG
;
1901 c
->src
.bytes
= c
->dst
.bytes
= c
->op_bytes
;
1902 c
->src
.ptr
= (unsigned long *) &c
->regs
[VCPU_REGS_RAX
];
1903 c
->src
.val
= *(c
->src
.ptr
);
1905 case 0x9c: /* pushf */
1906 c
->src
.val
= (unsigned long) ctxt
->eflags
;
1909 case 0x9d: /* popf */
1910 c
->dst
.type
= OP_REG
;
1911 c
->dst
.ptr
= (unsigned long *) &ctxt
->eflags
;
1912 c
->dst
.bytes
= c
->op_bytes
;
1913 goto pop_instruction
;
1914 case 0xa0 ... 0xa1: /* mov */
1915 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
1916 c
->dst
.val
= c
->src
.val
;
1918 case 0xa2 ... 0xa3: /* mov */
1919 c
->dst
.val
= (unsigned long)c
->regs
[VCPU_REGS_RAX
];
1921 case 0xa4 ... 0xa5: /* movs */
1922 c
->dst
.type
= OP_MEM
;
1923 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1924 c
->dst
.ptr
= (unsigned long *)register_address(c
,
1926 c
->regs
[VCPU_REGS_RDI
]);
1927 if ((rc
= ops
->read_emulated(register_address(c
,
1928 seg_override_base(ctxt
, c
),
1929 c
->regs
[VCPU_REGS_RSI
]),
1931 c
->dst
.bytes
, ctxt
->vcpu
)) != 0)
1933 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
1934 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
1936 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
1937 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
1940 case 0xa6 ... 0xa7: /* cmps */
1941 c
->src
.type
= OP_NONE
; /* Disable writeback. */
1942 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1943 c
->src
.ptr
= (unsigned long *)register_address(c
,
1944 seg_override_base(ctxt
, c
),
1945 c
->regs
[VCPU_REGS_RSI
]);
1946 if ((rc
= ops
->read_emulated((unsigned long)c
->src
.ptr
,
1952 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
1953 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1954 c
->dst
.ptr
= (unsigned long *)register_address(c
,
1956 c
->regs
[VCPU_REGS_RDI
]);
1957 if ((rc
= ops
->read_emulated((unsigned long)c
->dst
.ptr
,
1963 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c
->src
.ptr
, c
->dst
.ptr
);
1965 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
1967 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
1968 (ctxt
->eflags
& EFLG_DF
) ? -c
->src
.bytes
1970 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
1971 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
1975 case 0xaa ... 0xab: /* stos */
1976 c
->dst
.type
= OP_MEM
;
1977 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1978 c
->dst
.ptr
= (unsigned long *)register_address(c
,
1980 c
->regs
[VCPU_REGS_RDI
]);
1981 c
->dst
.val
= c
->regs
[VCPU_REGS_RAX
];
1982 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
1983 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
1986 case 0xac ... 0xad: /* lods */
1987 c
->dst
.type
= OP_REG
;
1988 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1989 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
1990 if ((rc
= ops
->read_emulated(register_address(c
,
1991 seg_override_base(ctxt
, c
),
1992 c
->regs
[VCPU_REGS_RSI
]),
1997 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
1998 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2001 case 0xae ... 0xaf: /* scas */
2002 DPRINTF("Urk! I don't handle SCAS.\n");
2003 goto cannot_emulate
;
2004 case 0xb0 ... 0xbf: /* mov r, imm */
2009 case 0xc3: /* ret */
2010 c
->dst
.type
= OP_REG
;
2011 c
->dst
.ptr
= &c
->eip
;
2012 c
->dst
.bytes
= c
->op_bytes
;
2013 goto pop_instruction
;
2014 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
2016 c
->dst
.val
= c
->src
.val
;
2018 case 0xcb: /* ret far */
2019 rc
= emulate_ret_far(ctxt
, ops
);
2023 case 0xd0 ... 0xd1: /* Grp2 */
2027 case 0xd2 ... 0xd3: /* Grp2 */
2028 c
->src
.val
= c
->regs
[VCPU_REGS_RCX
];
2031 case 0xe4: /* inb */
2036 case 0xe6: /* outb */
2037 case 0xe7: /* out */
2041 case 0xe8: /* call (near) */ {
2042 long int rel
= c
->src
.val
;
2043 c
->src
.val
= (unsigned long) c
->eip
;
2048 case 0xe9: /* jmp rel */
2050 case 0xea: /* jmp far */
2051 if (kvm_load_segment_descriptor(ctxt
->vcpu
, c
->src2
.val
, 9,
2052 VCPU_SREG_CS
) < 0) {
2053 DPRINTF("jmp far: Failed to load CS descriptor\n");
2054 goto cannot_emulate
;
2057 c
->eip
= c
->src
.val
;
2060 jmp
: /* jmp rel short */
2061 jmp_rel(c
, c
->src
.val
);
2062 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2064 case 0xec: /* in al,dx */
2065 case 0xed: /* in (e/r)ax,dx */
2066 port
= c
->regs
[VCPU_REGS_RDX
];
2069 case 0xee: /* out al,dx */
2070 case 0xef: /* out (e/r)ax,dx */
2071 port
= c
->regs
[VCPU_REGS_RDX
];
2073 do_io
: if (kvm_emulate_pio(ctxt
->vcpu
, NULL
, io_dir_in
,
2074 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
2077 goto cannot_emulate
;
2080 case 0xf4: /* hlt */
2081 ctxt
->vcpu
->arch
.halt_request
= 1;
2083 case 0xf5: /* cmc */
2084 /* complement carry flag from eflags reg */
2085 ctxt
->eflags
^= EFLG_CF
;
2086 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2088 case 0xf6 ... 0xf7: /* Grp3 */
2089 rc
= emulate_grp3(ctxt
, ops
);
2093 case 0xf8: /* clc */
2094 ctxt
->eflags
&= ~EFLG_CF
;
2095 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2097 case 0xfa: /* cli */
2098 ctxt
->eflags
&= ~X86_EFLAGS_IF
;
2099 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2101 case 0xfb: /* sti */
2102 toggle_interruptibility(ctxt
, X86_SHADOW_INT_STI
);
2103 ctxt
->eflags
|= X86_EFLAGS_IF
;
2104 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2106 case 0xfc: /* cld */
2107 ctxt
->eflags
&= ~EFLG_DF
;
2108 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2110 case 0xfd: /* std */
2111 ctxt
->eflags
|= EFLG_DF
;
2112 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2114 case 0xfe ... 0xff: /* Grp4/Grp5 */
2115 rc
= emulate_grp45(ctxt
, ops
);
2122 rc
= writeback(ctxt
, ops
);
2126 /* Commit shadow register state. */
2127 memcpy(ctxt
->vcpu
->arch
.regs
, c
->regs
, sizeof c
->regs
);
2128 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
2131 if (rc
== X86EMUL_UNHANDLEABLE
) {
2139 case 0x01: /* lgdt, lidt, lmsw */
2140 switch (c
->modrm_reg
) {
2142 unsigned long address
;
2144 case 0: /* vmcall */
2145 if (c
->modrm_mod
!= 3 || c
->modrm_rm
!= 1)
2146 goto cannot_emulate
;
2148 rc
= kvm_fix_hypercall(ctxt
->vcpu
);
2152 /* Let the processor re-execute the fixed hypercall */
2153 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
2154 /* Disable writeback. */
2155 c
->dst
.type
= OP_NONE
;
2158 rc
= read_descriptor(ctxt
, ops
, c
->src
.ptr
,
2159 &size
, &address
, c
->op_bytes
);
2162 realmode_lgdt(ctxt
->vcpu
, size
, address
);
2163 /* Disable writeback. */
2164 c
->dst
.type
= OP_NONE
;
2166 case 3: /* lidt/vmmcall */
2167 if (c
->modrm_mod
== 3) {
2168 switch (c
->modrm_rm
) {
2170 rc
= kvm_fix_hypercall(ctxt
->vcpu
);
2175 goto cannot_emulate
;
2178 rc
= read_descriptor(ctxt
, ops
, c
->src
.ptr
,
2183 realmode_lidt(ctxt
->vcpu
, size
, address
);
2185 /* Disable writeback. */
2186 c
->dst
.type
= OP_NONE
;
2190 c
->dst
.val
= realmode_get_cr(ctxt
->vcpu
, 0);
2193 realmode_lmsw(ctxt
->vcpu
, (u16
)c
->src
.val
,
2195 c
->dst
.type
= OP_NONE
;
2198 emulate_invlpg(ctxt
->vcpu
, memop
);
2199 /* Disable writeback. */
2200 c
->dst
.type
= OP_NONE
;
2203 goto cannot_emulate
;
2206 case 0x05: /* syscall */
2207 if (emulate_syscall(ctxt
) == -1)
2208 goto cannot_emulate
;
2213 emulate_clts(ctxt
->vcpu
);
2214 c
->dst
.type
= OP_NONE
;
2216 case 0x08: /* invd */
2217 case 0x09: /* wbinvd */
2218 case 0x0d: /* GrpP (prefetch) */
2219 case 0x18: /* Grp16 (prefetch/nop) */
2220 c
->dst
.type
= OP_NONE
;
2222 case 0x20: /* mov cr, reg */
2223 if (c
->modrm_mod
!= 3)
2224 goto cannot_emulate
;
2225 c
->regs
[c
->modrm_rm
] =
2226 realmode_get_cr(ctxt
->vcpu
, c
->modrm_reg
);
2227 c
->dst
.type
= OP_NONE
; /* no writeback */
2229 case 0x21: /* mov from dr to reg */
2230 if (c
->modrm_mod
!= 3)
2231 goto cannot_emulate
;
2232 rc
= emulator_get_dr(ctxt
, c
->modrm_reg
, &c
->regs
[c
->modrm_rm
]);
2234 goto cannot_emulate
;
2235 c
->dst
.type
= OP_NONE
; /* no writeback */
2237 case 0x22: /* mov reg, cr */
2238 if (c
->modrm_mod
!= 3)
2239 goto cannot_emulate
;
2240 realmode_set_cr(ctxt
->vcpu
,
2241 c
->modrm_reg
, c
->modrm_val
, &ctxt
->eflags
);
2242 c
->dst
.type
= OP_NONE
;
2244 case 0x23: /* mov from reg to dr */
2245 if (c
->modrm_mod
!= 3)
2246 goto cannot_emulate
;
2247 rc
= emulator_set_dr(ctxt
, c
->modrm_reg
,
2248 c
->regs
[c
->modrm_rm
]);
2250 goto cannot_emulate
;
2251 c
->dst
.type
= OP_NONE
; /* no writeback */
2255 msr_data
= (u32
)c
->regs
[VCPU_REGS_RAX
]
2256 | ((u64
)c
->regs
[VCPU_REGS_RDX
] << 32);
2257 rc
= kvm_set_msr(ctxt
->vcpu
, c
->regs
[VCPU_REGS_RCX
], msr_data
);
2259 kvm_inject_gp(ctxt
->vcpu
, 0);
2260 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
2262 rc
= X86EMUL_CONTINUE
;
2263 c
->dst
.type
= OP_NONE
;
2267 rc
= kvm_get_msr(ctxt
->vcpu
, c
->regs
[VCPU_REGS_RCX
], &msr_data
);
2269 kvm_inject_gp(ctxt
->vcpu
, 0);
2270 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
2272 c
->regs
[VCPU_REGS_RAX
] = (u32
)msr_data
;
2273 c
->regs
[VCPU_REGS_RDX
] = msr_data
>> 32;
2275 rc
= X86EMUL_CONTINUE
;
2276 c
->dst
.type
= OP_NONE
;
2278 case 0x34: /* sysenter */
2279 if (emulate_sysenter(ctxt
) == -1)
2280 goto cannot_emulate
;
2284 case 0x35: /* sysexit */
2285 if (emulate_sysexit(ctxt
) == -1)
2286 goto cannot_emulate
;
2290 case 0x40 ... 0x4f: /* cmov */
2291 c
->dst
.val
= c
->dst
.orig_val
= c
->src
.val
;
2292 if (!test_cc(c
->b
, ctxt
->eflags
))
2293 c
->dst
.type
= OP_NONE
; /* no writeback */
2295 case 0x80 ... 0x8f: /* jnz rel, etc*/
2296 if (test_cc(c
->b
, ctxt
->eflags
))
2297 jmp_rel(c
, c
->src
.val
);
2298 c
->dst
.type
= OP_NONE
;
2302 c
->dst
.type
= OP_NONE
;
2303 /* only subword offset */
2304 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2305 emulate_2op_SrcV_nobyte("bt", c
->src
, c
->dst
, ctxt
->eflags
);
2307 case 0xa4: /* shld imm8, r, r/m */
2308 case 0xa5: /* shld cl, r, r/m */
2309 emulate_2op_cl("shld", c
->src2
, c
->src
, c
->dst
, ctxt
->eflags
);
2313 /* only subword offset */
2314 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2315 emulate_2op_SrcV_nobyte("bts", c
->src
, c
->dst
, ctxt
->eflags
);
2317 case 0xac: /* shrd imm8, r, r/m */
2318 case 0xad: /* shrd cl, r, r/m */
2319 emulate_2op_cl("shrd", c
->src2
, c
->src
, c
->dst
, ctxt
->eflags
);
2321 case 0xae: /* clflush */
2323 case 0xb0 ... 0xb1: /* cmpxchg */
2325 * Save real source value, then compare EAX against
2328 c
->src
.orig_val
= c
->src
.val
;
2329 c
->src
.val
= c
->regs
[VCPU_REGS_RAX
];
2330 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
2331 if (ctxt
->eflags
& EFLG_ZF
) {
2332 /* Success: write back to memory. */
2333 c
->dst
.val
= c
->src
.orig_val
;
2335 /* Failure: write the value we saw to EAX. */
2336 c
->dst
.type
= OP_REG
;
2337 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
2342 /* only subword offset */
2343 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2344 emulate_2op_SrcV_nobyte("btr", c
->src
, c
->dst
, ctxt
->eflags
);
2346 case 0xb6 ... 0xb7: /* movzx */
2347 c
->dst
.bytes
= c
->op_bytes
;
2348 c
->dst
.val
= (c
->d
& ByteOp
) ? (u8
) c
->src
.val
2351 case 0xba: /* Grp8 */
2352 switch (c
->modrm_reg
& 3) {
2365 /* only subword offset */
2366 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2367 emulate_2op_SrcV_nobyte("btc", c
->src
, c
->dst
, ctxt
->eflags
);
2369 case 0xbe ... 0xbf: /* movsx */
2370 c
->dst
.bytes
= c
->op_bytes
;
2371 c
->dst
.val
= (c
->d
& ByteOp
) ? (s8
) c
->src
.val
:
2374 case 0xc3: /* movnti */
2375 c
->dst
.bytes
= c
->op_bytes
;
2376 c
->dst
.val
= (c
->op_bytes
== 4) ? (u32
) c
->src
.val
:
2379 case 0xc7: /* Grp9 (cmpxchg8b) */
2380 rc
= emulate_grp9(ctxt
, ops
, memop
);
2383 c
->dst
.type
= OP_NONE
;
2389 DPRINTF("Cannot emulate %02x\n", c
->b
);