/******************************************************************************
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#ifndef __KERNEL__
#include <stdio.h>
#include <stdint.h>
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "mmu.h"		/* for is_long_mode() */
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcImplicit (0<<4)	/* Source operand is implicit in the opcode. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define GroupMask   0xff	/* Group number stored in bits 0:7 */
#define No64	    (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm16   (4<<29)
#define Src2Mask    (7<<29)

enum {
	Group1_80, Group1_81, Group1_82, Group1_83,
	Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
};
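/*
 * Each entry in the tables below is the bitwise OR of the decode flags above
 * for one opcode byte, indexed by that byte.  For example, opcode 0x00
 * (add r/m8, r8) decodes as ByteOp | DstMem | SrcReg | ModRM: a byte-sized
 * operation whose destination comes from the ModRM r/m field and whose
 * source is a register.  A value of 0 means the opcode is not emulated.
 */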
static u32 opcode_table[256] = {
	/* 0x00 - 0x07 */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x08 - 0x0F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, 0,
	/* 0x10 - 0x17 */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x18 - 0x1F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x20 - 0x27 */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x28 - 0x2F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x30 - 0x37 */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x38 - 0x3F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	0, 0,
	/* 0x40 - 0x47 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x48 - 0x4F */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x50 - 0x57 */
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	/* 0x58 - 0x5F */
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	/* 0x60 - 0x67 */
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
	0, 0, 0, 0,
	/* 0x68 - 0x6F */
	SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* insb, insw/insd */
	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* outsb, outsw/outsd */
	/* 0x70 - 0x77 */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x78 - 0x7F */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x80 - 0x87 */
	Group | Group1_80, Group | Group1_81,
	Group | Group1_82, Group | Group1_83,
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	/* 0x88 - 0x8F */
	ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
	ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
	DstReg | SrcMem | ModRM | Mov, Group | Group1A,
	/* 0x90 - 0x97 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x98 - 0x9F */
	0, 0, SrcImm | Src2Imm16 | No64, 0,
	ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
	/* 0xA0 - 0xA7 */
	ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
	ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
	ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
	ByteOp | ImplicitOps | String, ImplicitOps | String,
	/* 0xA8 - 0xAF */
	0, 0, ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
	ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
	ByteOp | ImplicitOps | String, ImplicitOps | String,
	/* 0xB0 - 0xB7 */
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	/* 0xB8 - 0xBF */
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	/* 0xC0 - 0xC7 */
	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
	0, ImplicitOps | Stack, 0, 0,
	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
	/* 0xC8 - 0xCF */
	0, 0, 0, ImplicitOps | Stack,
	ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
	/* 0xD0 - 0xD7 */
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	0, 0, 0, 0,
	/* 0xD8 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xE7 */
	0, 0, 0, 0,
	ByteOp | SrcImmUByte, SrcImmUByte,
	ByteOp | SrcImmUByte, SrcImmUByte,
	/* 0xE8 - 0xEF */
	SrcImm | Stack, SrcImm | ImplicitOps,
	SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps,
	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
	/* 0xF0 - 0xF7 */
	0, 0, 0, 0,
	ImplicitOps, ImplicitOps, Group | Group3_Byte, Group | Group3,
	/* 0xF8 - 0xFF */
	ImplicitOps, 0, ImplicitOps, ImplicitOps,
	ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
};
static u32 twobyte_table[256] = {
	/* 0x00 - 0x0F */
	0, Group | GroupDual | Group7, 0, 0, 0, ImplicitOps, ImplicitOps, 0,
	ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F */
	ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x30 - 0x3F */
	ImplicitOps, 0, ImplicitOps, 0,
	ImplicitOps, ImplicitOps, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x47 */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x48 - 0x4F */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F */
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	/* 0x90 - 0x9F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xA7 */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM, 0, 0,
	/* 0xA8 - 0xAF */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM,
	ModRM, 0,
	/* 0xB0 - 0xB7 */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0,
	DstMem | SrcReg | ModRM | BitOp,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xB8 - 0xBF */
	0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM | BitOp,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xC0 - 0xCF */
	0, 0, 0, DstMem | SrcReg | ModRM | Mov, 0, 0, 0, ImplicitOps | ModRM,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static u32 group_table[] = {
	[Group1_80*8] =
	ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
	ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
	ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
	ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
	[Group1_81*8] =
	DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
	DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
	DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
	DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
	[Group1_82*8] =
	ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
	ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
	ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
	ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
	[Group1_83*8] =
	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
	[Group1A*8] =
	DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
	[Group3_Byte*8] =
	ByteOp | SrcImm | DstMem | ModRM, 0,
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group3*8] =
	DstMem | SrcImm | ModRM, 0,
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group4*8] =
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0, 0, 0,
	[Group5*8] =
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	SrcMem | ModRM | Stack, 0,
	SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0,
	[Group7*8] =
	0, 0, ModRM | SrcMem, ModRM | SrcMem,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp,
};

static u32 group2_table[] = {
	[Group7*8] =
	SrcNone | ModRM, 0, 0, SrcNone | ModRM,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov, 0,
};
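/*
 * x86_decode_insn() indexes these tables with (group << 3) + ModRM.reg, so
 * each group occupies eight consecutive slots, one per value of the ModRM
 * reg field that extends the opcode.  group2_table[] is consulted instead of
 * group_table[] when GroupDual is set and ModRM.mod == 3.
 */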
/* EFLAGS bit definitions. */
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_LO32 _tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "=m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)

/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								     \
		unsigned long _tmp;					     \
		switch ((_dst).bytes) {					     \
		case 1:							     \
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b");  \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")
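/*
 * A typical use from the execute loop is
 *	emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
 * which picks the b/w/l/q variant from c->dst.bytes, runs a real "add" on
 * the host with the guest's arithmetic flags loaded, and copies the
 * resulting OF/SF/ZF/AF/PF/CF back into ctxt->eflags via EFLAGS_MASK.
 */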
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type)	\
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (_cl).val;				\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(_cl).val  = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
	} while (0)

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
	do {								\
		switch ((_dst).bytes) {					\
		case 2:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "w", unsigned short);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "l", unsigned int);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
			break;						\
		}							\
	} while (0)

#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
	do {								\
		switch ((_dst).bytes) {					\
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != 0)							\
		goto done;						\
	(_eip) += (_size);						\
	(_type)_x;							\
})
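/*
 * insn_fetch() is used from the decoder with 'rc', 'ctxt' and 'ops' in
 * scope; e.g. "c->b = insn_fetch(u8, 1, c->eip)" pulls one opcode byte,
 * bumps c->eip past it, and bails out to the 'done' label on a fetch error.
 */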
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}

static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}
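/*
 * With 16-bit addressing (ad_bytes == 2) only the low word of the register
 * participates: address_mask(c, 0x12345678) yields 0x5678, and
 * register_address_increment() wraps within that word while leaving the
 * upper bits untouched, matching 16-bit wrap-around semantics.
 */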
static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}
static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return kvm_x86_ops->get_segment_base(ctxt->vcpu, seg);
}

static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, c->seg_override);
}

static unsigned long es_base(struct x86_emulate_ctxt *ctxt)
{
	return seg_base(ctxt, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
{
	return seg_base(ctxt, VCPU_SREG_SS);
}
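/*
 * In 64-bit mode the CPU treats the ES/CS/SS/DS bases as zero, so seg_base()
 * only asks kvm_x86_ops for a real base for FS and GS (or when not in long
 * mode); that is what the VCPU_SREG_FS comparison above short-circuits.
 */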
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long linear, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size;

	if (linear < fc->start || linear >= fc->end) {
		size = min(15UL, PAGE_SIZE - offset_in_page(linear));
		rc = ops->read_std(linear, fc->data, size, ctxt->vcpu);
		if (rc)
			return rc;
		fc->start = linear;
		fc->end = linear + size;
	}
	*dest = fc->data[linear - fc->start];
	return 0;
}
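/*
 * The fetch cache reads at most 15 bytes at a time (the maximum length of a
 * single x86 instruction) and never crosses a page boundary, so one
 * ops->read_std() call per window satisfies the decoder's byte-at-a-time
 * fetches.
 */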
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc = 0;

	eip += ctxt->cs_base;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc)
			return rc;
	}
	return 0;
}
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
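/*
 * The high-byte registers AH, CH, DH and BH live in bits 8-15 of RAX, RCX,
 * RDX and RBX, which is why modrm_reg values 4-7 (when no REX prefix is
 * present) are mapped back to regs[modrm_reg & 3] plus a one byte offset.
 */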
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   void *ptr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
			   ctxt->vcpu);
	if (rc)
		return rc;
	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
			   ctxt->vcpu);
	return rc;
}
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
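/*
 * Example: for jne (opcode 0x75) the condition nibble is 0x5, so the switch
 * selects the ZF group ((5 & 15) >> 1 == 2) and the odd low bit inverts the
 * result; the branch is therefore reported taken exactly when ZF is clear.
 */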
704 static void decode_register_operand(struct operand
*op
,
705 struct decode_cache
*c
,
708 unsigned reg
= c
->modrm_reg
;
709 int highbyte_regs
= c
->rex_prefix
== 0;
712 reg
= (c
->b
& 7) | ((c
->rex_prefix
& 1) << 3);
714 if ((c
->d
& ByteOp
) && !inhibit_bytereg
) {
715 op
->ptr
= decode_register(reg
, c
->regs
, highbyte_regs
);
716 op
->val
= *(u8
*)op
->ptr
;
719 op
->ptr
= decode_register(reg
, c
->regs
, 0);
720 op
->bytes
= c
->op_bytes
;
723 op
->val
= *(u16
*)op
->ptr
;
726 op
->val
= *(u32
*)op
->ptr
;
729 op
->val
= *(u64
*) op
->ptr
;
733 op
->orig_val
= op
->val
;
736 static int decode_modrm(struct x86_emulate_ctxt
*ctxt
,
737 struct x86_emulate_ops
*ops
)
739 struct decode_cache
*c
= &ctxt
->decode
;
741 int index_reg
= 0, base_reg
= 0, scale
;
745 c
->modrm_reg
= (c
->rex_prefix
& 4) << 1; /* REX.R */
746 index_reg
= (c
->rex_prefix
& 2) << 2; /* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
750 c
->modrm
= insn_fetch(u8
, 1, c
->eip
);
751 c
->modrm_mod
|= (c
->modrm
& 0xc0) >> 6;
752 c
->modrm_reg
|= (c
->modrm
& 0x38) >> 3;
753 c
->modrm_rm
|= (c
->modrm
& 0x07);
757 if (c
->modrm_mod
== 3) {
758 c
->modrm_ptr
= decode_register(c
->modrm_rm
,
759 c
->regs
, c
->d
& ByteOp
);
760 c
->modrm_val
= *(unsigned long *)c
->modrm_ptr
;
764 if (c
->ad_bytes
== 2) {
765 unsigned bx
= c
->regs
[VCPU_REGS_RBX
];
766 unsigned bp
= c
->regs
[VCPU_REGS_RBP
];
767 unsigned si
= c
->regs
[VCPU_REGS_RSI
];
768 unsigned di
= c
->regs
[VCPU_REGS_RDI
];
770 /* 16-bit ModR/M decode. */
771 switch (c
->modrm_mod
) {
773 if (c
->modrm_rm
== 6)
774 c
->modrm_ea
+= insn_fetch(u16
, 2, c
->eip
);
777 c
->modrm_ea
+= insn_fetch(s8
, 1, c
->eip
);
780 c
->modrm_ea
+= insn_fetch(u16
, 2, c
->eip
);
783 switch (c
->modrm_rm
) {
785 c
->modrm_ea
+= bx
+ si
;
788 c
->modrm_ea
+= bx
+ di
;
791 c
->modrm_ea
+= bp
+ si
;
794 c
->modrm_ea
+= bp
+ di
;
803 if (c
->modrm_mod
!= 0)
810 if (c
->modrm_rm
== 2 || c
->modrm_rm
== 3 ||
811 (c
->modrm_rm
== 6 && c
->modrm_mod
!= 0))
812 if (!c
->has_seg_override
)
813 set_seg_override(c
, VCPU_SREG_SS
);
814 c
->modrm_ea
= (u16
)c
->modrm_ea
;
816 /* 32/64-bit ModR/M decode. */
817 if ((c
->modrm_rm
& 7) == 4) {
818 sib
= insn_fetch(u8
, 1, c
->eip
);
819 index_reg
|= (sib
>> 3) & 7;
823 if ((base_reg
& 7) == 5 && c
->modrm_mod
== 0)
824 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
826 c
->modrm_ea
+= c
->regs
[base_reg
];
828 c
->modrm_ea
+= c
->regs
[index_reg
] << scale
;
829 } else if ((c
->modrm_rm
& 7) == 5 && c
->modrm_mod
== 0) {
830 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
833 c
->modrm_ea
+= c
->regs
[c
->modrm_rm
];
834 switch (c
->modrm_mod
) {
836 if (c
->modrm_rm
== 5)
837 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
840 c
->modrm_ea
+= insn_fetch(s8
, 1, c
->eip
);
843 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
851 static int decode_abs(struct x86_emulate_ctxt
*ctxt
,
852 struct x86_emulate_ops
*ops
)
854 struct decode_cache
*c
= &ctxt
->decode
;
857 switch (c
->ad_bytes
) {
859 c
->modrm_ea
= insn_fetch(u16
, 2, c
->eip
);
862 c
->modrm_ea
= insn_fetch(u32
, 4, c
->eip
);
865 c
->modrm_ea
= insn_fetch(u64
, 8, c
->eip
);
873 x86_decode_insn(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
875 struct decode_cache
*c
= &ctxt
->decode
;
877 int mode
= ctxt
->mode
;
878 int def_op_bytes
, def_ad_bytes
, group
;
880 /* Shadow copy of register state. Committed on successful emulation. */
882 memset(c
, 0, sizeof(struct decode_cache
));
883 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
884 ctxt
->cs_base
= seg_base(ctxt
, VCPU_SREG_CS
);
885 memcpy(c
->regs
, ctxt
->vcpu
->arch
.regs
, sizeof c
->regs
);
888 case X86EMUL_MODE_REAL
:
889 case X86EMUL_MODE_PROT16
:
890 def_op_bytes
= def_ad_bytes
= 2;
892 case X86EMUL_MODE_PROT32
:
893 def_op_bytes
= def_ad_bytes
= 4;
896 case X86EMUL_MODE_PROT64
:
905 c
->op_bytes
= def_op_bytes
;
906 c
->ad_bytes
= def_ad_bytes
;
908 /* Legacy prefixes. */
910 switch (c
->b
= insn_fetch(u8
, 1, c
->eip
)) {
911 case 0x66: /* operand-size override */
912 /* switch between 2/4 bytes */
913 c
->op_bytes
= def_op_bytes
^ 6;
915 case 0x67: /* address-size override */
916 if (mode
== X86EMUL_MODE_PROT64
)
917 /* switch between 4/8 bytes */
918 c
->ad_bytes
= def_ad_bytes
^ 12;
920 /* switch between 2/4 bytes */
921 c
->ad_bytes
= def_ad_bytes
^ 6;
923 case 0x26: /* ES override */
924 case 0x2e: /* CS override */
925 case 0x36: /* SS override */
926 case 0x3e: /* DS override */
927 set_seg_override(c
, (c
->b
>> 3) & 3);
929 case 0x64: /* FS override */
930 case 0x65: /* GS override */
931 set_seg_override(c
, c
->b
& 7);
933 case 0x40 ... 0x4f: /* REX */
934 if (mode
!= X86EMUL_MODE_PROT64
)
936 c
->rex_prefix
= c
->b
;
938 case 0xf0: /* LOCK */
941 case 0xf2: /* REPNE/REPNZ */
942 c
->rep_prefix
= REPNE_PREFIX
;
944 case 0xf3: /* REP/REPE/REPZ */
945 c
->rep_prefix
= REPE_PREFIX
;
951 /* Any legacy prefix after a REX prefix nullifies its effect. */
960 if (c
->rex_prefix
& 8)
961 c
->op_bytes
= 8; /* REX.W */
963 /* Opcode byte(s). */
964 c
->d
= opcode_table
[c
->b
];
966 /* Two-byte opcode? */
969 c
->b
= insn_fetch(u8
, 1, c
->eip
);
970 c
->d
= twobyte_table
[c
->b
];
	if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");
980 group
= c
->d
& GroupMask
;
981 c
->modrm
= insn_fetch(u8
, 1, c
->eip
);
984 group
= (group
<< 3) + ((c
->modrm
>> 3) & 7);
985 if ((c
->d
& GroupDual
) && (c
->modrm
>> 6) == 3)
986 c
->d
= group2_table
[group
];
988 c
->d
= group_table
[group
];
993 DPRINTF("Cannot emulate %02x\n", c
->b
);
997 if (mode
== X86EMUL_MODE_PROT64
&& (c
->d
& Stack
))
1000 /* ModRM and SIB bytes. */
1002 rc
= decode_modrm(ctxt
, ops
);
1003 else if (c
->d
& MemAbs
)
1004 rc
= decode_abs(ctxt
, ops
);
1008 if (!c
->has_seg_override
)
1009 set_seg_override(c
, VCPU_SREG_DS
);
1011 if (!(!c
->twobyte
&& c
->b
== 0x8d))
1012 c
->modrm_ea
+= seg_override_base(ctxt
, c
);
1014 if (c
->ad_bytes
!= 8)
1015 c
->modrm_ea
= (u32
)c
->modrm_ea
;
1017 * Decode and fetch the source operand: register, memory
1020 switch (c
->d
& SrcMask
) {
1024 decode_register_operand(&c
->src
, c
, 0);
1033 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 :
1035 /* Don't fetch the address for invlpg: it could be unmapped. */
1036 if (c
->twobyte
&& c
->b
== 0x01 && c
->modrm_reg
== 7)
1040 * For instructions with a ModR/M byte, switch to register
1041 * access if Mod = 3.
1043 if ((c
->d
& ModRM
) && c
->modrm_mod
== 3) {
1044 c
->src
.type
= OP_REG
;
1045 c
->src
.val
= c
->modrm_val
;
1046 c
->src
.ptr
= c
->modrm_ptr
;
1049 c
->src
.type
= OP_MEM
;
1053 c
->src
.type
= OP_IMM
;
1054 c
->src
.ptr
= (unsigned long *)c
->eip
;
1055 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1056 if (c
->src
.bytes
== 8)
1058 /* NB. Immediates are sign-extended as necessary. */
1059 switch (c
->src
.bytes
) {
1061 c
->src
.val
= insn_fetch(s8
, 1, c
->eip
);
1064 c
->src
.val
= insn_fetch(s16
, 2, c
->eip
);
1067 c
->src
.val
= insn_fetch(s32
, 4, c
->eip
);
1070 if ((c
->d
& SrcMask
) == SrcImmU
) {
1071 switch (c
->src
.bytes
) {
1076 c
->src
.val
&= 0xffff;
1079 c
->src
.val
&= 0xffffffff;
1086 c
->src
.type
= OP_IMM
;
1087 c
->src
.ptr
= (unsigned long *)c
->eip
;
1089 if ((c
->d
& SrcMask
) == SrcImmByte
)
1090 c
->src
.val
= insn_fetch(s8
, 1, c
->eip
);
1092 c
->src
.val
= insn_fetch(u8
, 1, c
->eip
);
1101 * Decode and fetch the second source operand: register, memory
1104 switch (c
->d
& Src2Mask
) {
1109 c
->src2
.val
= c
->regs
[VCPU_REGS_RCX
] & 0x8;
1112 c
->src2
.type
= OP_IMM
;
1113 c
->src2
.ptr
= (unsigned long *)c
->eip
;
1115 c
->src2
.val
= insn_fetch(u8
, 1, c
->eip
);
1118 c
->src2
.type
= OP_IMM
;
1119 c
->src2
.ptr
= (unsigned long *)c
->eip
;
1121 c
->src2
.val
= insn_fetch(u16
, 2, c
->eip
);
1129 /* Decode and fetch the destination operand: register or memory. */
1130 switch (c
->d
& DstMask
) {
1132 /* Special instructions do their own operand decoding. */
1135 decode_register_operand(&c
->dst
, c
,
1136 c
->twobyte
&& (c
->b
== 0xb6 || c
->b
== 0xb7));
1139 if ((c
->d
& ModRM
) && c
->modrm_mod
== 3) {
1140 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1141 c
->dst
.type
= OP_REG
;
1142 c
->dst
.val
= c
->dst
.orig_val
= c
->modrm_val
;
1143 c
->dst
.ptr
= c
->modrm_ptr
;
1146 c
->dst
.type
= OP_MEM
;
1149 c
->dst
.type
= OP_REG
;
1150 c
->dst
.bytes
= c
->op_bytes
;
1151 c
->dst
.ptr
= &c
->regs
[VCPU_REGS_RAX
];
1152 switch (c
->op_bytes
) {
1154 c
->dst
.val
= *(u8
*)c
->dst
.ptr
;
1157 c
->dst
.val
= *(u16
*)c
->dst
.ptr
;
1160 c
->dst
.val
= *(u32
*)c
->dst
.ptr
;
1163 c
->dst
.orig_val
= c
->dst
.val
;
1167 if (c
->rip_relative
)
1168 c
->modrm_ea
+= c
->eip
;
1171 return (rc
== X86EMUL_UNHANDLEABLE
) ? -1 : 0;
1174 static inline void emulate_push(struct x86_emulate_ctxt
*ctxt
)
1176 struct decode_cache
*c
= &ctxt
->decode
;
1178 c
->dst
.type
= OP_MEM
;
1179 c
->dst
.bytes
= c
->op_bytes
;
1180 c
->dst
.val
= c
->src
.val
;
1181 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
], -c
->op_bytes
);
1182 c
->dst
.ptr
= (void *) register_address(c
, ss_base(ctxt
),
1183 c
->regs
[VCPU_REGS_RSP
]);
1186 static int emulate_pop(struct x86_emulate_ctxt
*ctxt
,
1187 struct x86_emulate_ops
*ops
,
1188 void *dest
, int len
)
1190 struct decode_cache
*c
= &ctxt
->decode
;
1193 rc
= ops
->read_emulated(register_address(c
, ss_base(ctxt
),
1194 c
->regs
[VCPU_REGS_RSP
]),
1195 dest
, len
, ctxt
->vcpu
);
1199 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
], len
);
1203 static void emulate_push_sreg(struct x86_emulate_ctxt
*ctxt
, int seg
)
1205 struct decode_cache
*c
= &ctxt
->decode
;
1206 struct kvm_segment segment
;
1208 kvm_x86_ops
->get_segment(ctxt
->vcpu
, &segment
, seg
);
1210 c
->src
.val
= segment
.selector
;
1214 static int emulate_pop_sreg(struct x86_emulate_ctxt
*ctxt
,
1215 struct x86_emulate_ops
*ops
, int seg
)
1217 struct decode_cache
*c
= &ctxt
->decode
;
1218 unsigned long selector
;
1221 rc
= emulate_pop(ctxt
, ops
, &selector
, c
->op_bytes
);
1225 rc
= kvm_load_segment_descriptor(ctxt
->vcpu
, (u16
)selector
, 1, seg
);
1229 static void emulate_pusha(struct x86_emulate_ctxt
*ctxt
)
1231 struct decode_cache
*c
= &ctxt
->decode
;
1232 unsigned long old_esp
= c
->regs
[VCPU_REGS_RSP
];
1233 int reg
= VCPU_REGS_RAX
;
1235 while (reg
<= VCPU_REGS_RDI
) {
1236 (reg
== VCPU_REGS_RSP
) ?
1237 (c
->src
.val
= old_esp
) : (c
->src
.val
= c
->regs
[reg
]);
1244 static int emulate_popa(struct x86_emulate_ctxt
*ctxt
,
1245 struct x86_emulate_ops
*ops
)
1247 struct decode_cache
*c
= &ctxt
->decode
;
1249 int reg
= VCPU_REGS_RDI
;
1251 while (reg
>= VCPU_REGS_RAX
) {
1252 if (reg
== VCPU_REGS_RSP
) {
1253 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
],
1258 rc
= emulate_pop(ctxt
, ops
, &c
->regs
[reg
], c
->op_bytes
);
1266 static inline int emulate_grp1a(struct x86_emulate_ctxt
*ctxt
,
1267 struct x86_emulate_ops
*ops
)
1269 struct decode_cache
*c
= &ctxt
->decode
;
1272 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->dst
.bytes
);
1278 static inline void emulate_grp2(struct x86_emulate_ctxt
*ctxt
)
1280 struct decode_cache
*c
= &ctxt
->decode
;
1281 switch (c
->modrm_reg
) {
1283 emulate_2op_SrcB("rol", c
->src
, c
->dst
, ctxt
->eflags
);
1286 emulate_2op_SrcB("ror", c
->src
, c
->dst
, ctxt
->eflags
);
1289 emulate_2op_SrcB("rcl", c
->src
, c
->dst
, ctxt
->eflags
);
1292 emulate_2op_SrcB("rcr", c
->src
, c
->dst
, ctxt
->eflags
);
1294 case 4: /* sal/shl */
1295 case 6: /* sal/shl */
1296 emulate_2op_SrcB("sal", c
->src
, c
->dst
, ctxt
->eflags
);
1299 emulate_2op_SrcB("shr", c
->src
, c
->dst
, ctxt
->eflags
);
1302 emulate_2op_SrcB("sar", c
->src
, c
->dst
, ctxt
->eflags
);
1307 static inline int emulate_grp3(struct x86_emulate_ctxt
*ctxt
,
1308 struct x86_emulate_ops
*ops
)
1310 struct decode_cache
*c
= &ctxt
->decode
;
1313 switch (c
->modrm_reg
) {
1314 case 0 ... 1: /* test */
1315 emulate_2op_SrcV("test", c
->src
, c
->dst
, ctxt
->eflags
);
1318 c
->dst
.val
= ~c
->dst
.val
;
1321 emulate_1op("neg", c
->dst
, ctxt
->eflags
);
1324 DPRINTF("Cannot emulate %02x\n", c
->b
);
1325 rc
= X86EMUL_UNHANDLEABLE
;
1331 static inline int emulate_grp45(struct x86_emulate_ctxt
*ctxt
,
1332 struct x86_emulate_ops
*ops
)
1334 struct decode_cache
*c
= &ctxt
->decode
;
1336 switch (c
->modrm_reg
) {
1338 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
1341 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
1343 case 2: /* call near abs */ {
1346 c
->eip
= c
->src
.val
;
1347 c
->src
.val
= old_eip
;
1351 case 4: /* jmp abs */
1352 c
->eip
= c
->src
.val
;
1361 static inline int emulate_grp9(struct x86_emulate_ctxt
*ctxt
,
1362 struct x86_emulate_ops
*ops
,
1363 unsigned long memop
)
1365 struct decode_cache
*c
= &ctxt
->decode
;
1369 rc
= ops
->read_emulated(memop
, &old
, 8, ctxt
->vcpu
);
1373 if (((u32
) (old
>> 0) != (u32
) c
->regs
[VCPU_REGS_RAX
]) ||
1374 ((u32
) (old
>> 32) != (u32
) c
->regs
[VCPU_REGS_RDX
])) {
1376 c
->regs
[VCPU_REGS_RAX
] = (u32
) (old
>> 0);
1377 c
->regs
[VCPU_REGS_RDX
] = (u32
) (old
>> 32);
1378 ctxt
->eflags
&= ~EFLG_ZF
;
1381 new = ((u64
)c
->regs
[VCPU_REGS_RCX
] << 32) |
1382 (u32
) c
->regs
[VCPU_REGS_RBX
];
1384 rc
= ops
->cmpxchg_emulated(memop
, &old
, &new, 8, ctxt
->vcpu
);
1387 ctxt
->eflags
|= EFLG_ZF
;
1392 static int emulate_ret_far(struct x86_emulate_ctxt
*ctxt
,
1393 struct x86_emulate_ops
*ops
)
1395 struct decode_cache
*c
= &ctxt
->decode
;
1399 rc
= emulate_pop(ctxt
, ops
, &c
->eip
, c
->op_bytes
);
1402 if (c
->op_bytes
== 4)
1403 c
->eip
= (u32
)c
->eip
;
1404 rc
= emulate_pop(ctxt
, ops
, &cs
, c
->op_bytes
);
1407 rc
= kvm_load_segment_descriptor(ctxt
->vcpu
, (u16
)cs
, 1, VCPU_SREG_CS
);
1411 static inline int writeback(struct x86_emulate_ctxt
*ctxt
,
1412 struct x86_emulate_ops
*ops
)
1415 struct decode_cache
*c
= &ctxt
->decode
;
1417 switch (c
->dst
.type
) {
		/* The 4-byte case *is* correct:
		 * in 64-bit mode we zero-extend.
		 */
1422 switch (c
->dst
.bytes
) {
1424 *(u8
*)c
->dst
.ptr
= (u8
)c
->dst
.val
;
1427 *(u16
*)c
->dst
.ptr
= (u16
)c
->dst
.val
;
1430 *c
->dst
.ptr
= (u32
)c
->dst
.val
;
1431 break; /* 64b: zero-ext */
1433 *c
->dst
.ptr
= c
->dst
.val
;
1439 rc
= ops
->cmpxchg_emulated(
1440 (unsigned long)c
->dst
.ptr
,
1446 rc
= ops
->write_emulated(
1447 (unsigned long)c
->dst
.ptr
,
1463 static void toggle_interruptibility(struct x86_emulate_ctxt
*ctxt
, u32 mask
)
1465 u32 int_shadow
= kvm_x86_ops
->get_interrupt_shadow(ctxt
->vcpu
, mask
);
	/*
	 * an sti; sti; sequence only disables interrupts for the first
	 * instruction. So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti. We should not
	 * leave the flag on in this case. The same goes for mov ss.
	 */
1473 if (!(int_shadow
& mask
))
1474 ctxt
->interruptibility
= mask
;
1478 setup_syscalls_segments(struct x86_emulate_ctxt
*ctxt
,
1479 struct kvm_segment
*cs
, struct kvm_segment
*ss
)
1481 memset(cs
, 0, sizeof(struct kvm_segment
));
1482 kvm_x86_ops
->get_segment(ctxt
->vcpu
, cs
, VCPU_SREG_CS
);
1483 memset(ss
, 0, sizeof(struct kvm_segment
));
1485 cs
->l
= 0; /* will be adjusted later */
1486 cs
->base
= 0; /* flat segment */
1487 cs
->g
= 1; /* 4kb granularity */
1488 cs
->limit
= 0xffffffff; /* 4GB limit */
1489 cs
->type
= 0x0b; /* Read, Execute, Accessed */
1491 cs
->dpl
= 0; /* will be adjusted later */
1496 ss
->base
= 0; /* flat segment */
1497 ss
->limit
= 0xffffffff; /* 4GB limit */
1498 ss
->g
= 1; /* 4kb granularity */
1500 ss
->type
= 0x03; /* Read/Write, Accessed */
1501 ss
->db
= 1; /* 32bit stack segment */
1507 emulate_syscall(struct x86_emulate_ctxt
*ctxt
)
1509 struct decode_cache
*c
= &ctxt
->decode
;
1510 struct kvm_segment cs
, ss
;
1513 /* syscall is not available in real mode */
1514 if (c
->lock_prefix
|| ctxt
->mode
== X86EMUL_MODE_REAL
1515 || !(ctxt
->vcpu
->arch
.cr0
& X86_CR0_PE
))
1518 setup_syscalls_segments(ctxt
, &cs
, &ss
);
1520 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_STAR
, &msr_data
);
1522 cs
.selector
= (u16
)(msr_data
& 0xfffc);
1523 ss
.selector
= (u16
)(msr_data
+ 8);
1525 if (is_long_mode(ctxt
->vcpu
)) {
1529 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &cs
, VCPU_SREG_CS
);
1530 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &ss
, VCPU_SREG_SS
);
1532 c
->regs
[VCPU_REGS_RCX
] = c
->eip
;
1533 if (is_long_mode(ctxt
->vcpu
)) {
1534 #ifdef CONFIG_X86_64
1535 c
->regs
[VCPU_REGS_R11
] = ctxt
->eflags
& ~EFLG_RF
;
1537 kvm_x86_ops
->get_msr(ctxt
->vcpu
,
1538 ctxt
->mode
== X86EMUL_MODE_PROT64
?
1539 MSR_LSTAR
: MSR_CSTAR
, &msr_data
);
1542 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_SYSCALL_MASK
, &msr_data
);
1543 ctxt
->eflags
&= ~(msr_data
| EFLG_RF
);
1547 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_STAR
, &msr_data
);
1548 c
->eip
= (u32
)msr_data
;
1550 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
1557 emulate_sysenter(struct x86_emulate_ctxt
*ctxt
)
1559 struct decode_cache
*c
= &ctxt
->decode
;
1560 struct kvm_segment cs
, ss
;
1563 /* inject #UD if LOCK prefix is used */
1567 /* inject #GP if in real mode or paging is disabled */
1568 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
1569 !(ctxt
->vcpu
->arch
.cr0
& X86_CR0_PE
)) {
1570 kvm_inject_gp(ctxt
->vcpu
, 0);
	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
1577 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
1580 setup_syscalls_segments(ctxt
, &cs
, &ss
);
1582 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_CS
, &msr_data
);
1583 switch (ctxt
->mode
) {
1584 case X86EMUL_MODE_PROT32
:
1585 if ((msr_data
& 0xfffc) == 0x0) {
1586 kvm_inject_gp(ctxt
->vcpu
, 0);
1590 case X86EMUL_MODE_PROT64
:
1591 if (msr_data
== 0x0) {
1592 kvm_inject_gp(ctxt
->vcpu
, 0);
1598 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
1599 cs
.selector
= (u16
)msr_data
;
1600 cs
.selector
&= ~SELECTOR_RPL_MASK
;
1601 ss
.selector
= cs
.selector
+ 8;
1602 ss
.selector
&= ~SELECTOR_RPL_MASK
;
1603 if (ctxt
->mode
== X86EMUL_MODE_PROT64
1604 || is_long_mode(ctxt
->vcpu
)) {
1609 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &cs
, VCPU_SREG_CS
);
1610 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &ss
, VCPU_SREG_SS
);
1612 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_EIP
, &msr_data
);
1615 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_ESP
, &msr_data
);
1616 c
->regs
[VCPU_REGS_RSP
] = msr_data
;
1622 emulate_sysexit(struct x86_emulate_ctxt
*ctxt
)
1624 struct decode_cache
*c
= &ctxt
->decode
;
1625 struct kvm_segment cs
, ss
;
1629 /* inject #UD if LOCK prefix is used */
1633 /* inject #GP if in real mode or paging is disabled */
1634 if (ctxt
->mode
== X86EMUL_MODE_REAL
1635 || !(ctxt
->vcpu
->arch
.cr0
& X86_CR0_PE
)) {
1636 kvm_inject_gp(ctxt
->vcpu
, 0);
1640 /* sysexit must be called from CPL 0 */
1641 if (kvm_x86_ops
->get_cpl(ctxt
->vcpu
) != 0) {
1642 kvm_inject_gp(ctxt
->vcpu
, 0);
1646 setup_syscalls_segments(ctxt
, &cs
, &ss
);
1648 if ((c
->rex_prefix
& 0x8) != 0x0)
1649 usermode
= X86EMUL_MODE_PROT64
;
1651 usermode
= X86EMUL_MODE_PROT32
;
1655 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_CS
, &msr_data
);
1657 case X86EMUL_MODE_PROT32
:
1658 cs
.selector
= (u16
)(msr_data
+ 16);
1659 if ((msr_data
& 0xfffc) == 0x0) {
1660 kvm_inject_gp(ctxt
->vcpu
, 0);
1663 ss
.selector
= (u16
)(msr_data
+ 24);
1665 case X86EMUL_MODE_PROT64
:
1666 cs
.selector
= (u16
)(msr_data
+ 32);
1667 if (msr_data
== 0x0) {
1668 kvm_inject_gp(ctxt
->vcpu
, 0);
1671 ss
.selector
= cs
.selector
+ 8;
1676 cs
.selector
|= SELECTOR_RPL_MASK
;
1677 ss
.selector
|= SELECTOR_RPL_MASK
;
1679 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &cs
, VCPU_SREG_CS
);
1680 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &ss
, VCPU_SREG_SS
);
1682 c
->eip
= ctxt
->vcpu
->arch
.regs
[VCPU_REGS_RDX
];
1683 c
->regs
[VCPU_REGS_RSP
] = ctxt
->vcpu
->arch
.regs
[VCPU_REGS_RCX
];
1689 x86_emulate_insn(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
1691 unsigned long memop
= 0;
1693 unsigned long saved_eip
= 0;
1694 struct decode_cache
*c
= &ctxt
->decode
;
1699 ctxt
->interruptibility
= 0;
1701 /* Shadow copy of register state. Committed on successful emulation.
1702 * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
1706 memcpy(c
->regs
, ctxt
->vcpu
->arch
.regs
, sizeof c
->regs
);
1709 if (((c
->d
& ModRM
) && (c
->modrm_mod
!= 3)) || (c
->d
& MemAbs
))
1710 memop
= c
->modrm_ea
;
1712 if (c
->rep_prefix
&& (c
->d
& String
)) {
1713 /* All REP prefixes have the same first termination condition */
1714 if (c
->regs
[VCPU_REGS_RCX
] == 0) {
1715 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
		/* The second termination condition only applies for REPE
		 * and REPNE. Test if the repeat string operation prefix is
		 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
		 * corresponding termination condition according to:
		 *	- if REPE/REPZ and ZF = 0 then done
		 *	- if REPNE/REPNZ and ZF = 1 then done
		 */
1725 if ((c
->b
== 0xa6) || (c
->b
== 0xa7) ||
1726 (c
->b
== 0xae) || (c
->b
== 0xaf)) {
1727 if ((c
->rep_prefix
== REPE_PREFIX
) &&
1728 ((ctxt
->eflags
& EFLG_ZF
) == 0)) {
1729 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1732 if ((c
->rep_prefix
== REPNE_PREFIX
) &&
1733 ((ctxt
->eflags
& EFLG_ZF
) == EFLG_ZF
)) {
1734 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1738 c
->regs
[VCPU_REGS_RCX
]--;
1739 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
1742 if (c
->src
.type
== OP_MEM
) {
1743 c
->src
.ptr
= (unsigned long *)memop
;
1745 rc
= ops
->read_emulated((unsigned long)c
->src
.ptr
,
1751 c
->src
.orig_val
= c
->src
.val
;
1754 if ((c
->d
& DstMask
) == ImplicitOps
)
1758 if (c
->dst
.type
== OP_MEM
) {
1759 c
->dst
.ptr
= (unsigned long *)memop
;
1760 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1763 unsigned long mask
= ~(c
->dst
.bytes
* 8 - 1);
1765 c
->dst
.ptr
= (void *)c
->dst
.ptr
+
1766 (c
->src
.val
& mask
) / 8;
1768 if (!(c
->d
& Mov
) &&
1769 /* optimisation - avoid slow emulated read */
1770 ((rc
= ops
->read_emulated((unsigned long)c
->dst
.ptr
,
1772 c
->dst
.bytes
, ctxt
->vcpu
)) != 0))
1775 c
->dst
.orig_val
= c
->dst
.val
;
1785 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
1787 case 0x06: /* push es */
1788 emulate_push_sreg(ctxt
, VCPU_SREG_ES
);
1790 case 0x07: /* pop es */
1791 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_ES
);
1797 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
1799 case 0x0e: /* push cs */
1800 emulate_push_sreg(ctxt
, VCPU_SREG_CS
);
1804 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
1806 case 0x16: /* push ss */
1807 emulate_push_sreg(ctxt
, VCPU_SREG_SS
);
1809 case 0x17: /* pop ss */
1810 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_SS
);
1816 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
1818 case 0x1e: /* push ds */
1819 emulate_push_sreg(ctxt
, VCPU_SREG_DS
);
1821 case 0x1f: /* pop ds */
1822 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_DS
);
1828 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
1832 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
1836 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
1840 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
1842 case 0x40 ... 0x47: /* inc r16/r32 */
1843 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
1845 case 0x48 ... 0x4f: /* dec r16/r32 */
1846 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
1848 case 0x50 ... 0x57: /* push reg */
1851 case 0x58 ... 0x5f: /* pop reg */
1853 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
1857 case 0x60: /* pusha */
1858 emulate_pusha(ctxt
);
1860 case 0x61: /* popa */
1861 rc
= emulate_popa(ctxt
, ops
);
1865 case 0x63: /* movsxd */
1866 if (ctxt
->mode
!= X86EMUL_MODE_PROT64
)
1867 goto cannot_emulate
;
1868 c
->dst
.val
= (s32
) c
->src
.val
;
1870 case 0x68: /* push imm */
1871 case 0x6a: /* push imm8 */
1874 case 0x6c: /* insb */
1875 case 0x6d: /* insw/insd */
1876 if (kvm_emulate_pio_string(ctxt
->vcpu
,
1878 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
1880 address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) : 1,
1881 (ctxt
->eflags
& EFLG_DF
),
1882 register_address(c
, es_base(ctxt
),
1883 c
->regs
[VCPU_REGS_RDI
]),
1885 c
->regs
[VCPU_REGS_RDX
]) == 0) {
1890 case 0x6e: /* outsb */
1891 case 0x6f: /* outsw/outsd */
1892 if (kvm_emulate_pio_string(ctxt
->vcpu
,
1894 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
1896 address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) : 1,
1897 (ctxt
->eflags
& EFLG_DF
),
1899 seg_override_base(ctxt
, c
),
1900 c
->regs
[VCPU_REGS_RSI
]),
1902 c
->regs
[VCPU_REGS_RDX
]) == 0) {
1907 case 0x70 ... 0x7f: /* jcc (short) */
1908 if (test_cc(c
->b
, ctxt
->eflags
))
1909 jmp_rel(c
, c
->src
.val
);
1911 case 0x80 ... 0x83: /* Grp1 */
1912 switch (c
->modrm_reg
) {
1932 emulate_2op_SrcV("test", c
->src
, c
->dst
, ctxt
->eflags
);
1934 case 0x86 ... 0x87: /* xchg */
1936 /* Write back the register source. */
1937 switch (c
->dst
.bytes
) {
1939 *(u8
*) c
->src
.ptr
= (u8
) c
->dst
.val
;
1942 *(u16
*) c
->src
.ptr
= (u16
) c
->dst
.val
;
1945 *c
->src
.ptr
= (u32
) c
->dst
.val
;
1946 break; /* 64b reg: zero-extend */
1948 *c
->src
.ptr
= c
->dst
.val
;
1952 * Write back the memory destination with implicit LOCK
1955 c
->dst
.val
= c
->src
.val
;
1958 case 0x88 ... 0x8b: /* mov */
1960 case 0x8c: { /* mov r/m, sreg */
1961 struct kvm_segment segreg
;
1963 if (c
->modrm_reg
<= 5)
1964 kvm_get_segment(ctxt
->vcpu
, &segreg
, c
->modrm_reg
);
1966 printk(KERN_INFO
"0x8c: Invalid segreg in modrm byte 0x%02x\n",
1968 goto cannot_emulate
;
1970 c
->dst
.val
= segreg
.selector
;
1973 case 0x8d: /* lea r16/r32, m */
1974 c
->dst
.val
= c
->modrm_ea
;
1976 case 0x8e: { /* mov seg, r/m16 */
1982 if (c
->modrm_reg
== VCPU_SREG_SS
)
1983 toggle_interruptibility(ctxt
, X86_SHADOW_INT_MOV_SS
);
1985 if (c
->modrm_reg
<= 5) {
1986 type_bits
= (c
->modrm_reg
== 1) ? 9 : 1;
1987 err
= kvm_load_segment_descriptor(ctxt
->vcpu
, sel
,
1988 type_bits
, c
->modrm_reg
);
1990 printk(KERN_INFO
"Invalid segreg in modrm byte 0x%02x\n",
1992 goto cannot_emulate
;
1996 goto cannot_emulate
;
1998 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2001 case 0x8f: /* pop (sole member of Grp1a) */
2002 rc
= emulate_grp1a(ctxt
, ops
);
2006 case 0x90: /* nop / xchg r8,rax */
2007 if (!(c
->rex_prefix
& 1)) { /* nop */
2008 c
->dst
.type
= OP_NONE
;
2011 case 0x91 ... 0x97: /* xchg reg,rax */
2012 c
->src
.type
= c
->dst
.type
= OP_REG
;
2013 c
->src
.bytes
= c
->dst
.bytes
= c
->op_bytes
;
2014 c
->src
.ptr
= (unsigned long *) &c
->regs
[VCPU_REGS_RAX
];
2015 c
->src
.val
= *(c
->src
.ptr
);
2017 case 0x9c: /* pushf */
2018 c
->src
.val
= (unsigned long) ctxt
->eflags
;
2021 case 0x9d: /* popf */
2022 c
->dst
.type
= OP_REG
;
2023 c
->dst
.ptr
= (unsigned long *) &ctxt
->eflags
;
2024 c
->dst
.bytes
= c
->op_bytes
;
2025 goto pop_instruction
;
2026 case 0xa0 ... 0xa1: /* mov */
2027 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
2028 c
->dst
.val
= c
->src
.val
;
2030 case 0xa2 ... 0xa3: /* mov */
2031 c
->dst
.val
= (unsigned long)c
->regs
[VCPU_REGS_RAX
];
2033 case 0xa4 ... 0xa5: /* movs */
2034 c
->dst
.type
= OP_MEM
;
2035 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2036 c
->dst
.ptr
= (unsigned long *)register_address(c
,
2038 c
->regs
[VCPU_REGS_RDI
]);
2039 if ((rc
= ops
->read_emulated(register_address(c
,
2040 seg_override_base(ctxt
, c
),
2041 c
->regs
[VCPU_REGS_RSI
]),
2043 c
->dst
.bytes
, ctxt
->vcpu
)) != 0)
2045 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
2046 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2048 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
2049 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2052 case 0xa6 ... 0xa7: /* cmps */
2053 c
->src
.type
= OP_NONE
; /* Disable writeback. */
2054 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2055 c
->src
.ptr
= (unsigned long *)register_address(c
,
2056 seg_override_base(ctxt
, c
),
2057 c
->regs
[VCPU_REGS_RSI
]);
2058 if ((rc
= ops
->read_emulated((unsigned long)c
->src
.ptr
,
2064 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2065 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2066 c
->dst
.ptr
= (unsigned long *)register_address(c
,
2068 c
->regs
[VCPU_REGS_RDI
]);
2069 if ((rc
= ops
->read_emulated((unsigned long)c
->dst
.ptr
,
2075 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c
->src
.ptr
, c
->dst
.ptr
);
2077 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
2079 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
2080 (ctxt
->eflags
& EFLG_DF
) ? -c
->src
.bytes
2082 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
2083 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2087 case 0xaa ... 0xab: /* stos */
2088 c
->dst
.type
= OP_MEM
;
2089 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2090 c
->dst
.ptr
= (unsigned long *)register_address(c
,
2092 c
->regs
[VCPU_REGS_RDI
]);
2093 c
->dst
.val
= c
->regs
[VCPU_REGS_RAX
];
2094 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
2095 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2098 case 0xac ... 0xad: /* lods */
2099 c
->dst
.type
= OP_REG
;
2100 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2101 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
2102 if ((rc
= ops
->read_emulated(register_address(c
,
2103 seg_override_base(ctxt
, c
),
2104 c
->regs
[VCPU_REGS_RSI
]),
2109 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
2110 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2113 case 0xae ... 0xaf: /* scas */
2114 DPRINTF("Urk! I don't handle SCAS.\n");
2115 goto cannot_emulate
;
2116 case 0xb0 ... 0xbf: /* mov r, imm */
2121 case 0xc3: /* ret */
2122 c
->dst
.type
= OP_REG
;
2123 c
->dst
.ptr
= &c
->eip
;
2124 c
->dst
.bytes
= c
->op_bytes
;
2125 goto pop_instruction
;
2126 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
2128 c
->dst
.val
= c
->src
.val
;
2130 case 0xcb: /* ret far */
2131 rc
= emulate_ret_far(ctxt
, ops
);
2135 case 0xd0 ... 0xd1: /* Grp2 */
2139 case 0xd2 ... 0xd3: /* Grp2 */
2140 c
->src
.val
= c
->regs
[VCPU_REGS_RCX
];
2143 case 0xe4: /* inb */
2148 case 0xe6: /* outb */
2149 case 0xe7: /* out */
2153 case 0xe8: /* call (near) */ {
2154 long int rel
= c
->src
.val
;
2155 c
->src
.val
= (unsigned long) c
->eip
;
2160 case 0xe9: /* jmp rel */
2162 case 0xea: /* jmp far */
2163 if (kvm_load_segment_descriptor(ctxt
->vcpu
, c
->src2
.val
, 9,
2164 VCPU_SREG_CS
) < 0) {
2165 DPRINTF("jmp far: Failed to load CS descriptor\n");
2166 goto cannot_emulate
;
2169 c
->eip
= c
->src
.val
;
2172 jmp
: /* jmp rel short */
2173 jmp_rel(c
, c
->src
.val
);
2174 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2176 case 0xec: /* in al,dx */
2177 case 0xed: /* in (e/r)ax,dx */
2178 port
= c
->regs
[VCPU_REGS_RDX
];
2181 case 0xee: /* out al,dx */
2182 case 0xef: /* out (e/r)ax,dx */
2183 port
= c
->regs
[VCPU_REGS_RDX
];
2185 do_io
: if (kvm_emulate_pio(ctxt
->vcpu
, io_dir_in
,
2186 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
2189 goto cannot_emulate
;
2192 case 0xf4: /* hlt */
2193 ctxt
->vcpu
->arch
.halt_request
= 1;
2195 case 0xf5: /* cmc */
2196 /* complement carry flag from eflags reg */
2197 ctxt
->eflags
^= EFLG_CF
;
2198 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2200 case 0xf6 ... 0xf7: /* Grp3 */
2201 rc
= emulate_grp3(ctxt
, ops
);
2205 case 0xf8: /* clc */
2206 ctxt
->eflags
&= ~EFLG_CF
;
2207 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2209 case 0xfa: /* cli */
2210 ctxt
->eflags
&= ~X86_EFLAGS_IF
;
2211 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2213 case 0xfb: /* sti */
2214 toggle_interruptibility(ctxt
, X86_SHADOW_INT_STI
);
2215 ctxt
->eflags
|= X86_EFLAGS_IF
;
2216 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2218 case 0xfc: /* cld */
2219 ctxt
->eflags
&= ~EFLG_DF
;
2220 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2222 case 0xfd: /* std */
2223 ctxt
->eflags
|= EFLG_DF
;
2224 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2226 case 0xfe ... 0xff: /* Grp4/Grp5 */
2227 rc
= emulate_grp45(ctxt
, ops
);
2234 rc
= writeback(ctxt
, ops
);
2238 /* Commit shadow register state. */
2239 memcpy(ctxt
->vcpu
->arch
.regs
, c
->regs
, sizeof c
->regs
);
2240 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
2243 if (rc
== X86EMUL_UNHANDLEABLE
) {
2251 case 0x01: /* lgdt, lidt, lmsw */
2252 switch (c
->modrm_reg
) {
2254 unsigned long address
;
2256 case 0: /* vmcall */
2257 if (c
->modrm_mod
!= 3 || c
->modrm_rm
!= 1)
2258 goto cannot_emulate
;
2260 rc
= kvm_fix_hypercall(ctxt
->vcpu
);
2264 /* Let the processor re-execute the fixed hypercall */
2265 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
2266 /* Disable writeback. */
2267 c
->dst
.type
= OP_NONE
;
2270 rc
= read_descriptor(ctxt
, ops
, c
->src
.ptr
,
2271 &size
, &address
, c
->op_bytes
);
2274 realmode_lgdt(ctxt
->vcpu
, size
, address
);
2275 /* Disable writeback. */
2276 c
->dst
.type
= OP_NONE
;
2278 case 3: /* lidt/vmmcall */
2279 if (c
->modrm_mod
== 3) {
2280 switch (c
->modrm_rm
) {
2282 rc
= kvm_fix_hypercall(ctxt
->vcpu
);
2287 goto cannot_emulate
;
2290 rc
= read_descriptor(ctxt
, ops
, c
->src
.ptr
,
2295 realmode_lidt(ctxt
->vcpu
, size
, address
);
2297 /* Disable writeback. */
2298 c
->dst
.type
= OP_NONE
;
2302 c
->dst
.val
= realmode_get_cr(ctxt
->vcpu
, 0);
2305 realmode_lmsw(ctxt
->vcpu
, (u16
)c
->src
.val
,
2307 c
->dst
.type
= OP_NONE
;
2310 emulate_invlpg(ctxt
->vcpu
, memop
);
2311 /* Disable writeback. */
2312 c
->dst
.type
= OP_NONE
;
2315 goto cannot_emulate
;
2318 case 0x05: /* syscall */
2319 if (emulate_syscall(ctxt
) == -1)
2320 goto cannot_emulate
;
2325 emulate_clts(ctxt
->vcpu
);
2326 c
->dst
.type
= OP_NONE
;
2328 case 0x08: /* invd */
2329 case 0x09: /* wbinvd */
2330 case 0x0d: /* GrpP (prefetch) */
2331 case 0x18: /* Grp16 (prefetch/nop) */
2332 c
->dst
.type
= OP_NONE
;
2334 case 0x20: /* mov cr, reg */
2335 if (c
->modrm_mod
!= 3)
2336 goto cannot_emulate
;
2337 c
->regs
[c
->modrm_rm
] =
2338 realmode_get_cr(ctxt
->vcpu
, c
->modrm_reg
);
2339 c
->dst
.type
= OP_NONE
; /* no writeback */
2341 case 0x21: /* mov from dr to reg */
2342 if (c
->modrm_mod
!= 3)
2343 goto cannot_emulate
;
2344 rc
= emulator_get_dr(ctxt
, c
->modrm_reg
, &c
->regs
[c
->modrm_rm
]);
2346 goto cannot_emulate
;
2347 c
->dst
.type
= OP_NONE
; /* no writeback */
2349 case 0x22: /* mov reg, cr */
2350 if (c
->modrm_mod
!= 3)
2351 goto cannot_emulate
;
2352 realmode_set_cr(ctxt
->vcpu
,
2353 c
->modrm_reg
, c
->modrm_val
, &ctxt
->eflags
);
2354 c
->dst
.type
= OP_NONE
;
2356 case 0x23: /* mov from reg to dr */
2357 if (c
->modrm_mod
!= 3)
2358 goto cannot_emulate
;
2359 rc
= emulator_set_dr(ctxt
, c
->modrm_reg
,
2360 c
->regs
[c
->modrm_rm
]);
2362 goto cannot_emulate
;
2363 c
->dst
.type
= OP_NONE
; /* no writeback */
2367 msr_data
= (u32
)c
->regs
[VCPU_REGS_RAX
]
2368 | ((u64
)c
->regs
[VCPU_REGS_RDX
] << 32);
2369 rc
= kvm_set_msr(ctxt
->vcpu
, c
->regs
[VCPU_REGS_RCX
], msr_data
);
2371 kvm_inject_gp(ctxt
->vcpu
, 0);
2372 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
2374 rc
= X86EMUL_CONTINUE
;
2375 c
->dst
.type
= OP_NONE
;
2379 rc
= kvm_get_msr(ctxt
->vcpu
, c
->regs
[VCPU_REGS_RCX
], &msr_data
);
2381 kvm_inject_gp(ctxt
->vcpu
, 0);
2382 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
2384 c
->regs
[VCPU_REGS_RAX
] = (u32
)msr_data
;
2385 c
->regs
[VCPU_REGS_RDX
] = msr_data
>> 32;
2387 rc
= X86EMUL_CONTINUE
;
2388 c
->dst
.type
= OP_NONE
;
2390 case 0x34: /* sysenter */
2391 if (emulate_sysenter(ctxt
) == -1)
2392 goto cannot_emulate
;
2396 case 0x35: /* sysexit */
2397 if (emulate_sysexit(ctxt
) == -1)
2398 goto cannot_emulate
;
2402 case 0x40 ... 0x4f: /* cmov */
2403 c
->dst
.val
= c
->dst
.orig_val
= c
->src
.val
;
2404 if (!test_cc(c
->b
, ctxt
->eflags
))
2405 c
->dst
.type
= OP_NONE
; /* no writeback */
2407 case 0x80 ... 0x8f: /* jnz rel, etc*/
2408 if (test_cc(c
->b
, ctxt
->eflags
))
2409 jmp_rel(c
, c
->src
.val
);
2410 c
->dst
.type
= OP_NONE
;
2412 case 0xa0: /* push fs */
2413 emulate_push_sreg(ctxt
, VCPU_SREG_FS
);
2415 case 0xa1: /* pop fs */
2416 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_FS
);
2422 c
->dst
.type
= OP_NONE
;
2423 /* only subword offset */
2424 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2425 emulate_2op_SrcV_nobyte("bt", c
->src
, c
->dst
, ctxt
->eflags
);
2427 case 0xa4: /* shld imm8, r, r/m */
2428 case 0xa5: /* shld cl, r, r/m */
2429 emulate_2op_cl("shld", c
->src2
, c
->src
, c
->dst
, ctxt
->eflags
);
2431 case 0xa8: /* push gs */
2432 emulate_push_sreg(ctxt
, VCPU_SREG_GS
);
2434 case 0xa9: /* pop gs */
2435 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_GS
);
2441 /* only subword offset */
2442 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2443 emulate_2op_SrcV_nobyte("bts", c
->src
, c
->dst
, ctxt
->eflags
);
2445 case 0xac: /* shrd imm8, r, r/m */
2446 case 0xad: /* shrd cl, r, r/m */
2447 emulate_2op_cl("shrd", c
->src2
, c
->src
, c
->dst
, ctxt
->eflags
);
2449 case 0xae: /* clflush */
2451 case 0xb0 ... 0xb1: /* cmpxchg */
2453 * Save real source value, then compare EAX against
2456 c
->src
.orig_val
= c
->src
.val
;
2457 c
->src
.val
= c
->regs
[VCPU_REGS_RAX
];
2458 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
2459 if (ctxt
->eflags
& EFLG_ZF
) {
2460 /* Success: write back to memory. */
2461 c
->dst
.val
= c
->src
.orig_val
;
2463 /* Failure: write the value we saw to EAX. */
2464 c
->dst
.type
= OP_REG
;
2465 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
2470 /* only subword offset */
2471 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2472 emulate_2op_SrcV_nobyte("btr", c
->src
, c
->dst
, ctxt
->eflags
);
2474 case 0xb6 ... 0xb7: /* movzx */
2475 c
->dst
.bytes
= c
->op_bytes
;
2476 c
->dst
.val
= (c
->d
& ByteOp
) ? (u8
) c
->src
.val
2479 case 0xba: /* Grp8 */
2480 switch (c
->modrm_reg
& 3) {
2493 /* only subword offset */
2494 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2495 emulate_2op_SrcV_nobyte("btc", c
->src
, c
->dst
, ctxt
->eflags
);
2497 case 0xbe ... 0xbf: /* movsx */
2498 c
->dst
.bytes
= c
->op_bytes
;
2499 c
->dst
.val
= (c
->d
& ByteOp
) ? (s8
) c
->src
.val
:
2502 case 0xc3: /* movnti */
2503 c
->dst
.bytes
= c
->op_bytes
;
2504 c
->dst
.val
= (c
->op_bytes
== 4) ? (u32
) c
->src
.val
:
2507 case 0xc7: /* Grp9 (cmpxchg8b) */
2508 rc
= emulate_grp9(ctxt
, ops
, memop
);
2511 c
->dst
.type
= OP_NONE
;
2517 DPRINTF("Cannot emulate %02x\n", c
->b
);