/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"
/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)      /* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21)     /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)     /* Emulate if unsupported by the host */
#define NoAccess    (1<<23)     /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)     /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)     /* No Such Instruction */
#define Lock        (1<<26)     /* lock prefix is allowed for the instruction */
#define Priv        (1<<27)     /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
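
/*
 * Editor's note (illustrative, not part of the original source): operand
 * types are packed into the 64-bit opcode flags word five bits (OpBits) at
 * a time.  For example, an "add r/m32, r32" style entry would be declared
 * as DstMem | SrcReg | ModRM, and the decoder recovers each slot with
 * "(ctxt->d & DstMask) >> DstShift" (here OpMem) and
 * "(ctxt->d & SrcMask) >> SrcShift" (here OpReg).
 */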

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
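
/*
 * Editor's note (illustrative): the X macros simply repeat their argument,
 * so X8(entry) expands to eight identical opcode-table entries.  That is
 * how rows of related one-byte opcodes (e.g. the eight PUSH reg encodings
 * 0x50..0x57) can share a single definition in the tables later in this
 * file.
 */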

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_FUNC("em_" #op)

#define FOP_END \
	    ".popsection")

#define FOPNOP() \
	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
	FOP_RET

#define FOP1E(op,  dst) \
	FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
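
/*
 * Editor's sketch (illustrative, not part of the original source):
 * FASTOP2(add) above emits four FASTOP_SIZE-aligned stubs -- em_add+0, +8,
 * +16 and +24 for the byte/word/long/quad widths -- each consisting of just
 * "add %src,%dst; ret".  The fastop() dispatcher declared earlier (and
 * defined later in this file) picks the stub arithmetically, roughly
 * "fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE", then calls it with dst/src
 * preloaded into rax/rdx and the guest's arithmetic flags in rflags.
 */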

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	FOP_RET

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
		     "2:\n" \
		     ".pushsection .fixup, \"ax\"\n" \
		     "3: movl $1, %[_fault]\n" \
		     "   jmp  2b\n" \
		     ".popsection\n" \
		     _ASM_EXTABLE(1b, 3b) \
		     : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
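
/*
 * Editor's note (illustrative): later in this file asm_safe() wraps
 * instructions such as "fwait" and "fxrstor %[fx]".  If the wrapped
 * instruction faults, the exception-table fixup sets _fault and the macro
 * evaluates to X86EMUL_UNHANDLEABLE instead of crashing the host, so the
 * caller can fail the emulation gracefully.
 */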

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
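
/*
 * Editor's example (illustrative): a descriptor with the granularity bit
 * set and limit 0xfffff scales to (0xfffff << 12) | 0xfff == 0xffffffff,
 * i.e. the usual 4 GiB flat segment.
 */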

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
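
/*
 * Editor's worked example (illustrative): for an expand-down data segment
 * with a scaled limit of 0x0fff, the checks above invert the usual range --
 * offsets 0x0000..0x0fff take the "goto bad" path and the valid offsets
 * become 0x1000..0xffff (or up to 0xffffffff when desc.d is set).
 */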

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
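
	/*
	 * Editor's note: cur_size is at most 15 here, so "15UL ^ cur_size"
	 * below is just a branch-free way of writing "15 - cur_size", i.e.
	 * the room left before the architectural 15-byte instruction-length
	 * limit.
	 */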
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if there still are not
	 * enough bytes, we must have hit the 15-byte limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})
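
/*
 * Editor's usage note (illustrative): a typical call site is
 * "sib = insn_fetch(u8, ctxt);" in decode_modrm() below.  It pulls the
 * next byte from the prefetch cache, advances _eip, and on a fetch failure
 * jumps to the caller's "done:" label -- which is why these macros may only
 * be used in functions that declare "rc" and provide that label.
 */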

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
	return rc;
}
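
/*
 * Editor's example (illustrative): the SETcc stubs emitted above are
 * 4-byte aligned and laid out in condition-code order, so condition 0x4
 * ("equal") selects em_setcc + 16, which is the setz stub; test_cc(0x4,
 * flags) therefore returns 1 exactly when ZF is set in the given flags.
 */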

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fninit");
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstcw %0": "+m"(fcw));

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstsw %0": "+m"(fsw));

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
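		/*
		 * Editor's worked example (illustrative): ModRM byte 0x46
		 * with an 8-bit displacement of 0x10 decodes as mod=1, rm=6,
		 * giving an effective address of BP + 0x10, truncated to 16
		 * bits and implicitly SS-relative per the rules just above.
		 */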
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
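
/*
 * Editor's worked example (illustrative): "bt %cx, mem" with CX = 100 on a
 * 16-bit operand rounds 100 down to 96 (mask ~15), advances the effective
 * address by 96 / 8 = 12 bytes, and keeps 100 % 16 = 4 as the bit offset
 * actually tested within that word.
 */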

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
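
/*
 * Editor's note (illustrative): for "rep insw" with a large RCX, the first
 * call above reads up to min(room left in the page, sizeof(rc->data) / 2,
 * RCX) words into rc->data in a single ->pio_in_emulated() round trip, and
 * subsequent iterations are satisfied from that cache until rc->pos catches
 * up with rc->end.
 */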

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof(seg_desc));

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
		goto load;
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
						 ((u64)base3 << 32), ctxt))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can,
	 * but it's wrong).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_exception(ctxt, GP_VECTOR, 0, true);

	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
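
/*
 * Editor's example (illustrative): a POPF at CPL 3 with IOPL 0 in
 * protected mode takes the first case with neither condition true, so IOPL
 * and IF are excluded from change_mask and silently keep their old
 * values -- matching real hardware, which does not fault on this.
 */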

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}
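
/*
 * Editor's note (illustrative): the op_bytes == 4 dance above mirrors what
 * recent CPUs do for a 32-bit "push %fs": the stack pointer moves by 4, but
 * only the low word of the slot is actually written, so we pre-decrement by
 * 2 and then push just 2 bytes, leaving the upper word untouched.
 */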

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}
2031 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2033 const struct x86_emulate_ops *ops = ctxt->ops;
2034 int rc;
2035 struct desc_ptr dt;
2036 gva_t cs_addr;
2037 gva_t eip_addr;
2038 u16 cs, eip;
2040 /* TODO: Add limit checks */
2041 ctxt->src.val = ctxt->eflags;
2042 rc = em_push(ctxt);
2043 if (rc != X86EMUL_CONTINUE)
2044 return rc;
2046 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2048 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2049 rc = em_push(ctxt);
2050 if (rc != X86EMUL_CONTINUE)
2051 return rc;
2053 ctxt->src.val = ctxt->_eip;
2054 rc = em_push(ctxt);
2055 if (rc != X86EMUL_CONTINUE)
2056 return rc;
2058 ops->get_idt(ctxt, &dt);
2060 eip_addr = dt.address + (irq << 2);
2061 cs_addr = dt.address + (irq << 2) + 2;
2063 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2064 if (rc != X86EMUL_CONTINUE)
2065 return rc;
2067 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2068 if (rc != X86EMUL_CONTINUE)
2069 return rc;
2071 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2072 if (rc != X86EMUL_CONTINUE)
2073 return rc;
2075 ctxt->_eip = eip;
2077 return rc;
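/*
 * Worked example (illustrative): the real-mode IVT packs one IP:CS pair
 * per vector, four bytes apart. For irq 8 with dt.address = 0, the new
 * IP is read from linear address 0x20 (8 << 2) and the new CS from
 * 0x22, matching the eip_addr/cs_addr computation above.
 */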
2080 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2082 int rc;
2084 invalidate_registers(ctxt);
2085 rc = __emulate_int_real(ctxt, irq);
2086 if (rc == X86EMUL_CONTINUE)
2087 writeback_registers(ctxt);
2088 return rc;
2091 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2093 switch(ctxt->mode) {
2094 case X86EMUL_MODE_REAL:
2095 return __emulate_int_real(ctxt, irq);
2096 case X86EMUL_MODE_VM86:
2097 case X86EMUL_MODE_PROT16:
2098 case X86EMUL_MODE_PROT32:
2099 case X86EMUL_MODE_PROT64:
2100 default:
2101 /* Protected-mode interrupts are not yet implemented */
2102 return X86EMUL_UNHANDLEABLE;
2106 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2108 int rc = X86EMUL_CONTINUE;
2109 unsigned long temp_eip = 0;
2110 unsigned long temp_eflags = 0;
2111 unsigned long cs = 0;
2112 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2113 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2114 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2115 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2116 X86_EFLAGS_AC | X86_EFLAGS_ID |
2117 X86_EFLAGS_FIXED;
2118 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2119 X86_EFLAGS_VIP;
2121 /* TODO: Add stack limit check */
2123 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2125 if (rc != X86EMUL_CONTINUE)
2126 return rc;
2128 if (temp_eip & ~0xffff)
2129 return emulate_gp(ctxt, 0);
2131 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2133 if (rc != X86EMUL_CONTINUE)
2134 return rc;
2136 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2138 if (rc != X86EMUL_CONTINUE)
2139 return rc;
2141 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2143 if (rc != X86EMUL_CONTINUE)
2144 return rc;
2146 ctxt->_eip = temp_eip;
2148 if (ctxt->op_bytes == 4)
2149 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2150 else if (ctxt->op_bytes == 2) {
2151 ctxt->eflags &= ~0xffff;
2152 ctxt->eflags |= temp_eflags;
2155 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2156 ctxt->eflags |= X86_EFLAGS_FIXED;
2157 ctxt->ops->set_nmi_mask(ctxt, false);
2159 return rc;
2162 static int em_iret(struct x86_emulate_ctxt *ctxt)
2164 switch(ctxt->mode) {
2165 case X86EMUL_MODE_REAL:
2166 return emulate_iret_real(ctxt);
2167 case X86EMUL_MODE_VM86:
2168 case X86EMUL_MODE_PROT16:
2169 case X86EMUL_MODE_PROT32:
2170 case X86EMUL_MODE_PROT64:
2171 default:
2172 /* iret from protected mode is not yet implemented */
2173 return X86EMUL_UNHANDLEABLE;
2177 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2179 int rc;
2180 unsigned short sel;
2181 struct desc_struct new_desc;
2182 u8 cpl = ctxt->ops->cpl(ctxt);
2184 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2186 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2187 X86_TRANSFER_CALL_JMP,
2188 &new_desc);
2189 if (rc != X86EMUL_CONTINUE)
2190 return rc;
2192 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2193 /* Error handling is not implemented. */
2194 if (rc != X86EMUL_CONTINUE)
2195 return X86EMUL_UNHANDLEABLE;
2197 return rc;
2200 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2202 return assign_eip_near(ctxt, ctxt->src.val);
2205 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2207 int rc;
2208 long int old_eip;
2210 old_eip = ctxt->_eip;
2211 rc = assign_eip_near(ctxt, ctxt->src.val);
2212 if (rc != X86EMUL_CONTINUE)
2213 return rc;
2214 ctxt->src.val = old_eip;
2215 rc = em_push(ctxt);
2216 return rc;
2219 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2221 u64 old = ctxt->dst.orig_val64;
2223 if (ctxt->dst.bytes == 16)
2224 return X86EMUL_UNHANDLEABLE;
2226 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2227 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2228 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2229 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2230 ctxt->eflags &= ~X86_EFLAGS_ZF;
2231 } else {
2232 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2233 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2235 ctxt->eflags |= X86_EFLAGS_ZF;
2237 return X86EMUL_CONTINUE;
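/*
 * Illustrative summary of the semantics implemented above: CMPXCHG8B
 * compares EDX:EAX with the 64-bit destination. On a match, ZF is set
 * and ECX:EBX is written back; on a mismatch, ZF is cleared and the
 * destination value is loaded into EDX:EAX. E.g. with EDX:EAX =
 * 0x1:0x2, ECX:EBX = 0x3:0x4 and m64 = 0x0000000100000002, the memory
 * ends up as 0x0000000300000004 with ZF = 1.
 */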
2240 static int em_ret(struct x86_emulate_ctxt *ctxt)
2242 int rc;
2243 unsigned long eip;
2245 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2246 if (rc != X86EMUL_CONTINUE)
2247 return rc;
2249 return assign_eip_near(ctxt, eip);
2252 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2254 int rc;
2255 unsigned long eip, cs;
2256 int cpl = ctxt->ops->cpl(ctxt);
2257 struct desc_struct new_desc;
2259 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2260 if (rc != X86EMUL_CONTINUE)
2261 return rc;
2262 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2263 if (rc != X86EMUL_CONTINUE)
2264 return rc;
2265 /* Outer-privilege level return is not implemented */
2266 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2267 return X86EMUL_UNHANDLEABLE;
2268 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2269 X86_TRANSFER_RET,
2270 &new_desc);
2271 if (rc != X86EMUL_CONTINUE)
2272 return rc;
2273 rc = assign_eip_far(ctxt, eip, &new_desc);
2274 /* Error handling is not implemented. */
2275 if (rc != X86EMUL_CONTINUE)
2276 return X86EMUL_UNHANDLEABLE;
2278 return rc;
2281 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2283 int rc;
2285 rc = em_ret_far(ctxt);
2286 if (rc != X86EMUL_CONTINUE)
2287 return rc;
2288 rsp_increment(ctxt, ctxt->src.val);
2289 return X86EMUL_CONTINUE;
2292 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2294 /* Save real source value, then compare EAX against destination. */
2295 ctxt->dst.orig_val = ctxt->dst.val;
2296 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2297 ctxt->src.orig_val = ctxt->src.val;
2298 ctxt->src.val = ctxt->dst.orig_val;
2299 fastop(ctxt, em_cmp);
2301 if (ctxt->eflags & X86_EFLAGS_ZF) {
2302 /* Success: write back to memory; no update of EAX */
2303 ctxt->src.type = OP_NONE;
2304 ctxt->dst.val = ctxt->src.orig_val;
2305 } else {
2306 /* Failure: write the value we saw to EAX. */
2307 ctxt->src.type = OP_REG;
2308 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2309 ctxt->src.val = ctxt->dst.orig_val;
2310 /* Create write-cycle to dest by writing the same value */
2311 ctxt->dst.val = ctxt->dst.orig_val;
2313 return X86EMUL_CONTINUE;
2316 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2318 int seg = ctxt->src2.val;
2319 unsigned short sel;
2320 int rc;
2322 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2324 rc = load_segment_descriptor(ctxt, sel, seg);
2325 if (rc != X86EMUL_CONTINUE)
2326 return rc;
2328 ctxt->dst.val = ctxt->src.val;
2329 return rc;
2332 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2334 u32 eax, ebx, ecx, edx;
2336 eax = 0x80000001;
2337 ecx = 0;
2338 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2339 return edx & bit(X86_FEATURE_LM);
2342 #define GET_SMSTATE(type, smbase, offset) \
2343 ({ \
2344 type __val; \
2345 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
2346 sizeof(__val)); \
2347 if (r != X86EMUL_CONTINUE) \
2348 return X86EMUL_UNHANDLEABLE; \
2349 __val; \
2350 })
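/*
 * Usage sketch (illustrative, not from the original file): reading the
 * saved CR0 out of a 32-bit SMM state-save area, which keeps it at
 * smbase + 0x7ffc, as done in rsm_load_state_32() below:
 *
 *	u32 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
 *
 * Note the hidden control flow: the statement expression contains a
 * bare "return X86EMUL_UNHANDLEABLE", so the macro may only be used in
 * functions returning int.
 */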
2352 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2354 desc->g = (flags >> 23) & 1;
2355 desc->d = (flags >> 22) & 1;
2356 desc->l = (flags >> 21) & 1;
2357 desc->avl = (flags >> 20) & 1;
2358 desc->p = (flags >> 15) & 1;
2359 desc->dpl = (flags >> 13) & 3;
2360 desc->s = (flags >> 12) & 1;
2361 desc->type = (flags >> 8) & 15;
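/*
 * Worked example (illustrative): flags == 0x00c09b00 decodes as
 * g=1, d=1, l=0, avl=0, p=1, dpl=0, s=1, type=0xb, i.e. a present
 * 32-bit ring-0 code segment with 4K granularity. A hypothetical
 * inverse helper (not used elsewhere in this file) would be:
 */
static inline u32 rsm_pack_desc_flags_sketch(struct desc_struct *desc)
{
	return ((u32)desc->g << 23) | ((u32)desc->d << 22) |
	       ((u32)desc->l << 21) | ((u32)desc->avl << 20) |
	       ((u32)desc->p << 15) | ((u32)desc->dpl << 13) |
	       ((u32)desc->s << 12) | ((u32)desc->type << 8);
}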
2364 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2366 struct desc_struct desc;
2367 int offset;
2368 u16 selector;
2370 selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
2372 if (n < 3)
2373 offset = 0x7f84 + n * 12;
2374 else
2375 offset = 0x7f2c + (n - 3) * 12;
2377 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2378 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2379 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
2380 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2381 return X86EMUL_CONTINUE;
2384 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2386 struct desc_struct desc;
2387 int offset;
2388 u16 selector;
2389 u32 base3;
2391 offset = 0x7e00 + n * 16;
2393 selector = GET_SMSTATE(u16, smbase, offset);
2394 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
2395 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2396 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2397 base3 = GET_SMSTATE(u32, smbase, offset + 12);
2399 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2400 return X86EMUL_CONTINUE;
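/*
 * For reference, the 64-bit SMM state-save area keeps one 16-byte
 * record per segment register at 0x7e00 + n * 16, laid out exactly as
 * read above: selector (u16) at +0, attribute bits at +2 (shifted into
 * the descriptor-flag layout with << 8), limit (u32) at +4, low base
 * dword at +8 and the upper base dword at +12.
 */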
2403 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2404 u64 cr0, u64 cr3, u64 cr4)
2406 int bad;
2407 u64 pcid;
2409 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2410 pcid = 0;
2411 if (cr4 & X86_CR4_PCIDE) {
2412 pcid = cr3 & 0xfff;
2413 cr3 &= ~0xfff;
2416 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2417 if (bad)
2418 return X86EMUL_UNHANDLEABLE;
2421 * First enable PAE; long mode needs it before CR0.PG = 1 is set.
2422 * Then enable protected mode. However, PCID cannot be enabled
2423 * if EFER.LMA=0, so set it separately.
2425 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2426 if (bad)
2427 return X86EMUL_UNHANDLEABLE;
2429 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2430 if (bad)
2431 return X86EMUL_UNHANDLEABLE;
2433 if (cr4 & X86_CR4_PCIDE) {
2434 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2435 if (bad)
2436 return X86EMUL_UNHANDLEABLE;
2437 if (pcid) {
2438 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2439 if (bad)
2440 return X86EMUL_UNHANDLEABLE;
2445 return X86EMUL_CONTINUE;
2448 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2450 struct desc_struct desc;
2451 struct desc_ptr dt;
2452 u16 selector;
2453 u32 val, cr0, cr3, cr4;
2454 int i;
2456 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
2457 cr3 = GET_SMSTATE(u32, smbase, 0x7ff8);
2458 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2459 ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
2461 for (i = 0; i < 8; i++)
2462 *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2464 val = GET_SMSTATE(u32, smbase, 0x7fcc);
2465 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2466 val = GET_SMSTATE(u32, smbase, 0x7fc8);
2467 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2469 selector = GET_SMSTATE(u32, smbase, 0x7fc4);
2470 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
2471 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
2472 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
2473 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2475 selector = GET_SMSTATE(u32, smbase, 0x7fc0);
2476 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
2477 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
2478 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
2479 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2481 dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
2482 dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
2483 ctxt->ops->set_gdt(ctxt, &dt);
2485 dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
2486 dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
2487 ctxt->ops->set_idt(ctxt, &dt);
2489 for (i = 0; i < 6; i++) {
2490 int r = rsm_load_seg_32(ctxt, smbase, i);
2491 if (r != X86EMUL_CONTINUE)
2492 return r;
2495 cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2497 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2499 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2502 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2504 struct desc_struct desc;
2505 struct desc_ptr dt;
2506 u64 val, cr0, cr3, cr4;
2507 u32 base3;
2508 u16 selector;
2509 int i, r;
2511 for (i = 0; i < 16; i++)
2512 *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2514 ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
2515 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2517 val = GET_SMSTATE(u32, smbase, 0x7f68);
2518 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2519 val = GET_SMSTATE(u32, smbase, 0x7f60);
2520 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2522 cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
2523 cr3 = GET_SMSTATE(u64, smbase, 0x7f50);
2524 cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
2525 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2526 val = GET_SMSTATE(u64, smbase, 0x7ed0);
2527 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2529 selector = GET_SMSTATE(u32, smbase, 0x7e90);
2530 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2531 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
2532 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
2533 base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
2534 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2536 dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
2537 dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
2538 ctxt->ops->set_idt(ctxt, &dt);
2540 selector = GET_SMSTATE(u32, smbase, 0x7e70);
2541 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2542 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
2543 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
2544 base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
2545 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2547 dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
2548 dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
2549 ctxt->ops->set_gdt(ctxt, &dt);
2551 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2552 if (r != X86EMUL_CONTINUE)
2553 return r;
2555 for (i = 0; i < 6; i++) {
2556 r = rsm_load_seg_64(ctxt, smbase, i);
2557 if (r != X86EMUL_CONTINUE)
2558 return r;
2561 return X86EMUL_CONTINUE;
2564 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2566 unsigned long cr0, cr4, efer;
2567 u64 smbase;
2568 int ret;
2570 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2571 return emulate_ud(ctxt);
2574 * Get back to real mode, to prepare a safe state in which to load
2575 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2576 * supports long mode.
2578 if (emulator_has_longmode(ctxt)) {
2579 struct desc_struct cs_desc;
2581 /* Zero CR4.PCIDE before CR0.PG. */
2582 cr4 = ctxt->ops->get_cr(ctxt, 4);
2583 if (cr4 & X86_CR4_PCIDE)
2584 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2586 /* A 32-bit code segment is required to clear EFER.LMA. */
2587 memset(&cs_desc, 0, sizeof(cs_desc));
2588 cs_desc.type = 0xb;
2589 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2590 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2593 /* For the 64-bit case, this will clear EFER.LMA. */
2594 cr0 = ctxt->ops->get_cr(ctxt, 0);
2595 if (cr0 & X86_CR0_PE)
2596 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2598 if (emulator_has_longmode(ctxt)) {
2599 /* Clear CR4.PAE before clearing EFER.LME. */
2600 cr4 = ctxt->ops->get_cr(ctxt, 4);
2601 if (cr4 & X86_CR4_PAE)
2602 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2604 /* And finally go back to 32-bit mode. */
2605 efer = 0;
2606 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2609 smbase = ctxt->ops->get_smbase(ctxt);
2612 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2613 * vCPU state (e.g. enter guest mode) before loading state from the SMM
2614 * state-save area.
2616 if (ctxt->ops->pre_leave_smm(ctxt, smbase))
2617 return X86EMUL_UNHANDLEABLE;
2619 if (emulator_has_longmode(ctxt))
2620 ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2621 else
2622 ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2624 if (ret != X86EMUL_CONTINUE) {
2625 /* FIXME: should triple fault */
2626 return X86EMUL_UNHANDLEABLE;
2629 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2630 ctxt->ops->set_nmi_mask(ctxt, false);
2632 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2633 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2634 return X86EMUL_CONTINUE;
2637 static void
2638 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2639 struct desc_struct *cs, struct desc_struct *ss)
2641 cs->l = 0; /* will be adjusted later */
2642 set_desc_base(cs, 0); /* flat segment */
2643 cs->g = 1; /* 4kb granularity */
2644 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2645 cs->type = 0x0b; /* Read, Execute, Accessed */
2646 cs->s = 1;
2647 cs->dpl = 0; /* will be adjusted later */
2648 cs->p = 1;
2649 cs->d = 1;
2650 cs->avl = 0;
2652 set_desc_base(ss, 0); /* flat segment */
2653 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2654 ss->g = 1; /* 4kb granularity */
2655 ss->s = 1;
2656 ss->type = 0x03; /* Read/Write, Accessed */
2657 ss->d = 1; /* 32bit stack segment */
2658 ss->dpl = 0;
2659 ss->p = 1;
2660 ss->l = 0;
2661 ss->avl = 0;
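/*
 * With base 0, limit 0xfffff and g=1, the descriptors built above cover
 * the entire 4GiB address space (0xfffff 4K pages). type 0x0b is an
 * execute/read, accessed code segment; type 0x03 is a read/write,
 * accessed data segment, which is what SYSCALL/SYSENTER expect for
 * their flat CS and SS.
 */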
2664 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2666 u32 eax, ebx, ecx, edx;
2668 eax = ecx = 0;
2669 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2670 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2671 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2672 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2675 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2677 const struct x86_emulate_ops *ops = ctxt->ops;
2678 u32 eax, ebx, ecx, edx;
2681 * syscall should always be enabled in long mode, so only fall back
2682 * to vendor-specific (cpuid) checks when other modes are active...
2684 if (ctxt->mode == X86EMUL_MODE_PROT64)
2685 return true;
2687 eax = 0x00000000;
2688 ecx = 0x00000000;
2689 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2691 * Intel ("GenuineIntel")
2692 * remark: Intel CPUs only support "syscall" in 64-bit
2693 * long mode. A 64-bit guest running a 32-bit
2694 * compat application will therefore #UD! While this
2695 * behaviour could be fixed by emulating the AMD
2696 * response, AMD CPUs can't behave like Intel ones.
2698 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2699 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2700 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2701 return false;
2703 /* AMD ("AuthenticAMD") */
2704 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2705 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2706 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2707 return true;
2709 /* AMD ("AMDisbetter!") */
2710 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2711 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2712 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2713 return true;
2715 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2716 return false;
2719 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2721 const struct x86_emulate_ops *ops = ctxt->ops;
2722 struct desc_struct cs, ss;
2723 u64 msr_data;
2724 u16 cs_sel, ss_sel;
2725 u64 efer = 0;
2727 /* syscall is not available in real mode */
2728 if (ctxt->mode == X86EMUL_MODE_REAL ||
2729 ctxt->mode == X86EMUL_MODE_VM86)
2730 return emulate_ud(ctxt);
2732 if (!(em_syscall_is_enabled(ctxt)))
2733 return emulate_ud(ctxt);
2735 ops->get_msr(ctxt, MSR_EFER, &efer);
2736 setup_syscalls_segments(ctxt, &cs, &ss);
2738 if (!(efer & EFER_SCE))
2739 return emulate_ud(ctxt);
2741 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2742 msr_data >>= 32;
2743 cs_sel = (u16)(msr_data & 0xfffc);
2744 ss_sel = (u16)(msr_data + 8);
2746 if (efer & EFER_LMA) {
2747 cs.d = 0;
2748 cs.l = 1;
2750 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2751 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2753 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2754 if (efer & EFER_LMA) {
2755 #ifdef CONFIG_X86_64
2756 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2758 ops->get_msr(ctxt,
2759 ctxt->mode == X86EMUL_MODE_PROT64 ?
2760 MSR_LSTAR : MSR_CSTAR, &msr_data);
2761 ctxt->_eip = msr_data;
2763 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2764 ctxt->eflags &= ~msr_data;
2765 ctxt->eflags |= X86_EFLAGS_FIXED;
2766 #endif
2767 } else {
2768 /* legacy mode */
2769 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2770 ctxt->_eip = (u32)msr_data;
2772 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2775 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2776 return X86EMUL_CONTINUE;
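/*
 * Worked example (illustrative): SYSCALL derives its selectors from
 * MSR_STAR bits 47:32 as implemented above. If STAR[47:32] == 0x0010,
 * then CS = 0x0010 (RPL bits masked off) and SS = 0x0018. In legacy
 * mode the new EIP comes from STAR[31:0]; in long mode it comes from
 * MSR_LSTAR (or MSR_CSTAR for 32-bit callers), with RFLAGS masked by
 * MSR_SYSCALL_MASK.
 */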
2779 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2781 const struct x86_emulate_ops *ops = ctxt->ops;
2782 struct desc_struct cs, ss;
2783 u64 msr_data;
2784 u16 cs_sel, ss_sel;
2785 u64 efer = 0;
2787 ops->get_msr(ctxt, MSR_EFER, &efer);
2788 /* inject #GP if in real mode */
2789 if (ctxt->mode == X86EMUL_MODE_REAL)
2790 return emulate_gp(ctxt, 0);
2793 * Not recognized on AMD in compat mode (but is recognized in legacy
2794 * mode).
2796 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2797 && !vendor_intel(ctxt))
2798 return emulate_ud(ctxt);
2800 /* sysenter/sysexit have not been tested in 64bit mode. */
2801 if (ctxt->mode == X86EMUL_MODE_PROT64)
2802 return X86EMUL_UNHANDLEABLE;
2804 setup_syscalls_segments(ctxt, &cs, &ss);
2806 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2807 if ((msr_data & 0xfffc) == 0x0)
2808 return emulate_gp(ctxt, 0);
2810 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2811 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2812 ss_sel = cs_sel + 8;
2813 if (efer & EFER_LMA) {
2814 cs.d = 0;
2815 cs.l = 1;
2818 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2819 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2821 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2822 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2824 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2825 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2826 (u32)msr_data;
2828 return X86EMUL_CONTINUE;
2831 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2833 const struct x86_emulate_ops *ops = ctxt->ops;
2834 struct desc_struct cs, ss;
2835 u64 msr_data, rcx, rdx;
2836 int usermode;
2837 u16 cs_sel = 0, ss_sel = 0;
2839 /* inject #GP if in real mode or Virtual 8086 mode */
2840 if (ctxt->mode == X86EMUL_MODE_REAL ||
2841 ctxt->mode == X86EMUL_MODE_VM86)
2842 return emulate_gp(ctxt, 0);
2844 setup_syscalls_segments(ctxt, &cs, &ss);
2846 if ((ctxt->rex_prefix & 0x8) != 0x0)
2847 usermode = X86EMUL_MODE_PROT64;
2848 else
2849 usermode = X86EMUL_MODE_PROT32;
2851 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2852 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2854 cs.dpl = 3;
2855 ss.dpl = 3;
2856 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2857 switch (usermode) {
2858 case X86EMUL_MODE_PROT32:
2859 cs_sel = (u16)(msr_data + 16);
2860 if ((msr_data & 0xfffc) == 0x0)
2861 return emulate_gp(ctxt, 0);
2862 ss_sel = (u16)(msr_data + 24);
2863 rcx = (u32)rcx;
2864 rdx = (u32)rdx;
2865 break;
2866 case X86EMUL_MODE_PROT64:
2867 cs_sel = (u16)(msr_data + 32);
2868 if (msr_data == 0x0)
2869 return emulate_gp(ctxt, 0);
2870 ss_sel = cs_sel + 8;
2871 cs.d = 0;
2872 cs.l = 1;
2873 if (emul_is_noncanonical_address(rcx, ctxt) ||
2874 emul_is_noncanonical_address(rdx, ctxt))
2875 return emulate_gp(ctxt, 0);
2876 break;
2878 cs_sel |= SEGMENT_RPL_MASK;
2879 ss_sel |= SEGMENT_RPL_MASK;
2881 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2882 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2884 ctxt->_eip = rdx;
2885 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2887 return X86EMUL_CONTINUE;
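/*
 * Worked example (illustrative): with IA32_SYSENTER_CS == 0x10, a
 * 32-bit SYSEXIT returns to CS = 0x20 (+16) and SS = 0x28 (+24), while
 * the 64-bit variant (REX.W) returns to CS = 0x30 (+32) and SS = 0x38;
 * both selectors then get RPL forced to 3 via SEGMENT_RPL_MASK, as
 * computed above.
 */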
2890 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2892 int iopl;
2893 if (ctxt->mode == X86EMUL_MODE_REAL)
2894 return false;
2895 if (ctxt->mode == X86EMUL_MODE_VM86)
2896 return true;
2897 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2898 return ctxt->ops->cpl(ctxt) > iopl;
2901 #define VMWARE_PORT_VMPORT (0x5658)
2902 #define VMWARE_PORT_VMRPC (0x5659)
2904 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2905 u16 port, u16 len)
2907 const struct x86_emulate_ops *ops = ctxt->ops;
2908 struct desc_struct tr_seg;
2909 u32 base3;
2910 int r;
2911 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2912 unsigned mask = (1 << len) - 1;
2913 unsigned long base;
2916 * VMware allows access to these ports even if denied
2917 * by the TSS I/O permission bitmap. Mimic that behavior.
2919 if (enable_vmware_backdoor &&
2920 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2921 return true;
2923 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2924 if (!tr_seg.p)
2925 return false;
2926 if (desc_limit_scaled(&tr_seg) < 103)
2927 return false;
2928 base = get_desc_base(&tr_seg);
2929 #ifdef CONFIG_X86_64
2930 base |= ((u64)base3) << 32;
2931 #endif
2932 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2933 if (r != X86EMUL_CONTINUE)
2934 return false;
2935 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2936 return false;
2937 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2938 if (r != X86EMUL_CONTINUE)
2939 return false;
2940 if ((perm >> bit_idx) & mask)
2941 return false;
2942 return true;
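/*
 * Worked example (illustrative): for port 0x3f9 with len == 2, the
 * check above reads the 16-bit word at base + io_bitmap_ptr + 0x7f and
 * requires bits 1 and 2 (bit_idx = 1, mask = 0b11) to be clear. Two
 * bytes are always read so that a permission range straddling a byte
 * boundary is still tested in one go.
 */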
2945 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2946 u16 port, u16 len)
2948 if (ctxt->perm_ok)
2949 return true;
2951 if (emulator_bad_iopl(ctxt))
2952 if (!emulator_io_port_access_allowed(ctxt, port, len))
2953 return false;
2955 ctxt->perm_ok = true;
2957 return true;
2960 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2963 * Intel CPUs mask the counter and pointers in a rather strange
2964 * manner when ECX is zero, due to REP-string optimizations.
2966 #ifdef CONFIG_X86_64
2967 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2968 return;
2970 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2972 switch (ctxt->b) {
2973 case 0xa4: /* movsb */
2974 case 0xa5: /* movsd/w */
2975 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2976 /* fall through */
2977 case 0xaa: /* stosb */
2978 case 0xab: /* stosd/w */
2979 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2981 #endif
2984 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2985 struct tss_segment_16 *tss)
2987 tss->ip = ctxt->_eip;
2988 tss->flag = ctxt->eflags;
2989 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2990 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2991 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2992 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2993 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2994 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2995 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2996 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2998 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2999 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3000 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3001 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3002 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3005 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3006 struct tss_segment_16 *tss)
3008 int ret;
3009 u8 cpl;
3011 ctxt->_eip = tss->ip;
3012 ctxt->eflags = tss->flag | 2;
3013 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3014 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3015 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3016 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3017 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3018 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3019 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3020 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3023 * The SDM says that segment selectors are loaded before segment
3024 * descriptors.
3026 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3027 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3028 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3029 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3030 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3032 cpl = tss->cs & 3;
3035 * Now load the segment descriptors. If a fault happens at this
3036 * stage, it is handled in the context of the new task.
3038 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3039 X86_TRANSFER_TASK_SWITCH, NULL);
3040 if (ret != X86EMUL_CONTINUE)
3041 return ret;
3042 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3043 X86_TRANSFER_TASK_SWITCH, NULL);
3044 if (ret != X86EMUL_CONTINUE)
3045 return ret;
3046 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3047 X86_TRANSFER_TASK_SWITCH, NULL);
3048 if (ret != X86EMUL_CONTINUE)
3049 return ret;
3050 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3051 X86_TRANSFER_TASK_SWITCH, NULL);
3052 if (ret != X86EMUL_CONTINUE)
3053 return ret;
3054 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3055 X86_TRANSFER_TASK_SWITCH, NULL);
3056 if (ret != X86EMUL_CONTINUE)
3057 return ret;
3059 return X86EMUL_CONTINUE;
3062 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3063 u16 tss_selector, u16 old_tss_sel,
3064 ulong old_tss_base, struct desc_struct *new_desc)
3066 struct tss_segment_16 tss_seg;
3067 int ret;
3068 u32 new_tss_base = get_desc_base(new_desc);
3070 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
3071 if (ret != X86EMUL_CONTINUE)
3072 return ret;
3074 save_state_to_tss16(ctxt, &tss_seg);
3076 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
3077 if (ret != X86EMUL_CONTINUE)
3078 return ret;
3080 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
3081 if (ret != X86EMUL_CONTINUE)
3082 return ret;
3084 if (old_tss_sel != 0xffff) {
3085 tss_seg.prev_task_link = old_tss_sel;
3087 ret = linear_write_system(ctxt, new_tss_base,
3088 &tss_seg.prev_task_link,
3089 sizeof tss_seg.prev_task_link);
3090 if (ret != X86EMUL_CONTINUE)
3091 return ret;
3094 return load_state_from_tss16(ctxt, &tss_seg);
3097 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3098 struct tss_segment_32 *tss)
3100 /* CR3 and the LDT selector are intentionally not saved */
3101 tss->eip = ctxt->_eip;
3102 tss->eflags = ctxt->eflags;
3103 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3104 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3105 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3106 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3107 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3108 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3109 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3110 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3112 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3113 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3114 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3115 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3116 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3117 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3120 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3121 struct tss_segment_32 *tss)
3123 int ret;
3124 u8 cpl;
3126 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3127 return emulate_gp(ctxt, 0);
3128 ctxt->_eip = tss->eip;
3129 ctxt->eflags = tss->eflags | 2;
3131 /* General purpose registers */
3132 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3133 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3134 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3135 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3136 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3137 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3138 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3139 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3142 * The SDM says that segment selectors are loaded before segment
3143 * descriptors. This is important because CPL checks will
3144 * use CS.RPL.
3146 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3147 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3148 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3149 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3150 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3151 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3152 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3155 * If we're switching between Protected Mode and VM86, we need to make
3156 * sure to update the mode before loading the segment descriptors so
3157 * that the selectors are interpreted correctly.
3159 if (ctxt->eflags & X86_EFLAGS_VM) {
3160 ctxt->mode = X86EMUL_MODE_VM86;
3161 cpl = 3;
3162 } else {
3163 ctxt->mode = X86EMUL_MODE_PROT32;
3164 cpl = tss->cs & 3;
3168 * Now load the segment descriptors. If a fault happens at this
3169 * stage, it is handled in the context of the new task.
3171 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3172 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3173 if (ret != X86EMUL_CONTINUE)
3174 return ret;
3175 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3176 X86_TRANSFER_TASK_SWITCH, NULL);
3177 if (ret != X86EMUL_CONTINUE)
3178 return ret;
3179 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3180 X86_TRANSFER_TASK_SWITCH, NULL);
3181 if (ret != X86EMUL_CONTINUE)
3182 return ret;
3183 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3184 X86_TRANSFER_TASK_SWITCH, NULL);
3185 if (ret != X86EMUL_CONTINUE)
3186 return ret;
3187 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3188 X86_TRANSFER_TASK_SWITCH, NULL);
3189 if (ret != X86EMUL_CONTINUE)
3190 return ret;
3191 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3192 X86_TRANSFER_TASK_SWITCH, NULL);
3193 if (ret != X86EMUL_CONTINUE)
3194 return ret;
3195 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3196 X86_TRANSFER_TASK_SWITCH, NULL);
3198 return ret;
3201 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3202 u16 tss_selector, u16 old_tss_sel,
3203 ulong old_tss_base, struct desc_struct *new_desc)
3205 struct tss_segment_32 tss_seg;
3206 int ret;
3207 u32 new_tss_base = get_desc_base(new_desc);
3208 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3209 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3211 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
3212 if (ret != X86EMUL_CONTINUE)
3213 return ret;
3215 save_state_to_tss32(ctxt, &tss_seg);
3217 /* Only GP registers and segment selectors are saved */
3218 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3219 ldt_sel_offset - eip_offset);
3220 if (ret != X86EMUL_CONTINUE)
3221 return ret;
3223 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
3224 if (ret != X86EMUL_CONTINUE)
3225 return ret;
3227 if (old_tss_sel != 0xffff) {
3228 tss_seg.prev_task_link = old_tss_sel;
3230 ret = linear_write_system(ctxt, new_tss_base,
3231 &tss_seg.prev_task_link,
3232 sizeof tss_seg.prev_task_link);
3233 if (ret != X86EMUL_CONTINUE)
3234 return ret;
3237 return load_state_from_tss32(ctxt, &tss_seg);
3240 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3241 u16 tss_selector, int idt_index, int reason,
3242 bool has_error_code, u32 error_code)
3244 const struct x86_emulate_ops *ops = ctxt->ops;
3245 struct desc_struct curr_tss_desc, next_tss_desc;
3246 int ret;
3247 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3248 ulong old_tss_base =
3249 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3250 u32 desc_limit;
3251 ulong desc_addr, dr7;
3253 /* FIXME: old_tss_base == ~0 ? */
3255 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3256 if (ret != X86EMUL_CONTINUE)
3257 return ret;
3258 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3259 if (ret != X86EMUL_CONTINUE)
3260 return ret;
3262 /* FIXME: check that next_tss_desc is tss */
3265 * Check privileges. The three cases are task switch caused by...
3267 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3268 * 2. Exception/IRQ/iret: No check is performed
3269 * 3. jmp/call to TSS/task-gate: No check is performed since the
3270 * hardware checks it before exiting.
3272 if (reason == TASK_SWITCH_GATE) {
3273 if (idt_index != -1) {
3274 /* Software interrupts */
3275 struct desc_struct task_gate_desc;
3276 int dpl;
3278 ret = read_interrupt_descriptor(ctxt, idt_index,
3279 &task_gate_desc);
3280 if (ret != X86EMUL_CONTINUE)
3281 return ret;
3283 dpl = task_gate_desc.dpl;
3284 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3285 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3289 desc_limit = desc_limit_scaled(&next_tss_desc);
3290 if (!next_tss_desc.p ||
3291 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3292 desc_limit < 0x2b)) {
3293 return emulate_ts(ctxt, tss_selector & 0xfffc);
3296 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3297 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3298 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3301 if (reason == TASK_SWITCH_IRET)
3302 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3304 /* Set the back link to the previous task only if the NT bit is set
3305 in eflags; note that old_tss_sel is not used after this point. */
3306 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3307 old_tss_sel = 0xffff;
3309 if (next_tss_desc.type & 8)
3310 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3311 old_tss_base, &next_tss_desc);
3312 else
3313 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3314 old_tss_base, &next_tss_desc);
3315 if (ret != X86EMUL_CONTINUE)
3316 return ret;
3318 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3319 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3321 if (reason != TASK_SWITCH_IRET) {
3322 next_tss_desc.type |= (1 << 1); /* set busy flag */
3323 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3326 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3327 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3329 if (has_error_code) {
3330 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3331 ctxt->lock_prefix = 0;
3332 ctxt->src.val = (unsigned long) error_code;
3333 ret = em_push(ctxt);
3336 ops->get_dr(ctxt, 7, &dr7);
3337 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3339 return ret;
3342 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3343 u16 tss_selector, int idt_index, int reason,
3344 bool has_error_code, u32 error_code)
3346 int rc;
3348 invalidate_registers(ctxt);
3349 ctxt->_eip = ctxt->eip;
3350 ctxt->dst.type = OP_NONE;
3352 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3353 has_error_code, error_code);
3355 if (rc == X86EMUL_CONTINUE) {
3356 ctxt->eip = ctxt->_eip;
3357 writeback_registers(ctxt);
3360 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3363 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3364 struct operand *op)
3366 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3368 register_address_increment(ctxt, reg, df * op->bytes);
3369 op->addr.mem.ea = register_address(ctxt, reg);
3372 static int em_das(struct x86_emulate_ctxt *ctxt)
3374 u8 al, old_al;
3375 bool af, cf, old_cf;
3377 cf = ctxt->eflags & X86_EFLAGS_CF;
3378 al = ctxt->dst.val;
3380 old_al = al;
3381 old_cf = cf;
3382 cf = false;
3383 af = ctxt->eflags & X86_EFLAGS_AF;
3384 if ((al & 0x0f) > 9 || af) {
3385 al -= 6;
3386 cf = old_cf | (al >= 250);
3387 af = true;
3388 } else {
3389 af = false;
3391 if (old_al > 0x99 || old_cf) {
3392 al -= 0x60;
3393 cf = true;
3396 ctxt->dst.val = al;
3397 /* Set PF, ZF, SF */
3398 ctxt->src.type = OP_IMM;
3399 ctxt->src.val = 0;
3400 ctxt->src.bytes = 1;
3401 fastop(ctxt, em_or);
3402 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3403 if (cf)
3404 ctxt->eflags |= X86_EFLAGS_CF;
3405 if (af)
3406 ctxt->eflags |= X86_EFLAGS_AF;
3407 return X86EMUL_CONTINUE;
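/*
 * Worked example (illustrative): DAS on AL = 0x9a with CF = AF = 0
 * first sees a low nibble above 9, so AL -= 6 (0x94) and AF is set;
 * since the original AL (0x9a) exceeds 0x99, AL -= 0x60 as well,
 * giving AL = 0x34 with CF = 1, matching the two adjustment steps
 * above.
 */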
3410 static int em_aam(struct x86_emulate_ctxt *ctxt)
3412 u8 al, ah;
3414 if (ctxt->src.val == 0)
3415 return emulate_de(ctxt);
3417 al = ctxt->dst.val & 0xff;
3418 ah = al / ctxt->src.val;
3419 al %= ctxt->src.val;
3421 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3423 /* Set PF, ZF, SF */
3424 ctxt->src.type = OP_IMM;
3425 ctxt->src.val = 0;
3426 ctxt->src.bytes = 1;
3427 fastop(ctxt, em_or);
3429 return X86EMUL_CONTINUE;
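/*
 * Worked example (illustrative): AAM with the default base of 10 on
 * AL = 53 (0x35) computes AH = 53 / 10 = 5 and AL = 53 % 10 = 3,
 * leaving AX = 0x0503; a base of zero raises #DE, as checked above.
 */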
3432 static int em_aad(struct x86_emulate_ctxt *ctxt)
3434 u8 al = ctxt->dst.val & 0xff;
3435 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3437 al = (al + (ah * ctxt->src.val)) & 0xff;
3439 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3441 /* Set PF, ZF, SF */
3442 ctxt->src.type = OP_IMM;
3443 ctxt->src.val = 0;
3444 ctxt->src.bytes = 1;
3445 fastop(ctxt, em_or);
3447 return X86EMUL_CONTINUE;
3450 static int em_call(struct x86_emulate_ctxt *ctxt)
3452 int rc;
3453 long rel = ctxt->src.val;
3455 ctxt->src.val = (unsigned long)ctxt->_eip;
3456 rc = jmp_rel(ctxt, rel);
3457 if (rc != X86EMUL_CONTINUE)
3458 return rc;
3459 return em_push(ctxt);
3462 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3464 u16 sel, old_cs;
3465 ulong old_eip;
3466 int rc;
3467 struct desc_struct old_desc, new_desc;
3468 const struct x86_emulate_ops *ops = ctxt->ops;
3469 int cpl = ctxt->ops->cpl(ctxt);
3470 enum x86emul_mode prev_mode = ctxt->mode;
3472 old_eip = ctxt->_eip;
3473 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3475 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3476 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3477 X86_TRANSFER_CALL_JMP, &new_desc);
3478 if (rc != X86EMUL_CONTINUE)
3479 return rc;
3481 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3482 if (rc != X86EMUL_CONTINUE)
3483 goto fail;
3485 ctxt->src.val = old_cs;
3486 rc = em_push(ctxt);
3487 if (rc != X86EMUL_CONTINUE)
3488 goto fail;
3490 ctxt->src.val = old_eip;
3491 rc = em_push(ctxt);
3492 /* If we failed, we tainted the memory, but at the very least we
3493 should restore cs. */
3494 if (rc != X86EMUL_CONTINUE) {
3495 pr_warn_once("faulting far call emulation tainted memory\n");
3496 goto fail;
3498 return rc;
3499 fail:
3500 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3501 ctxt->mode = prev_mode;
3502 return rc;
3506 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3508 int rc;
3509 unsigned long eip;
3511 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3512 if (rc != X86EMUL_CONTINUE)
3513 return rc;
3514 rc = assign_eip_near(ctxt, eip);
3515 if (rc != X86EMUL_CONTINUE)
3516 return rc;
3517 rsp_increment(ctxt, ctxt->src.val);
3518 return X86EMUL_CONTINUE;
3521 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3523 /* Write back the register source. */
3524 ctxt->src.val = ctxt->dst.val;
3525 write_register_operand(&ctxt->src);
3527 /* Write back the memory destination with implicit LOCK prefix. */
3528 ctxt->dst.val = ctxt->src.orig_val;
3529 ctxt->lock_prefix = 1;
3530 return X86EMUL_CONTINUE;
3533 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3535 ctxt->dst.val = ctxt->src2.val;
3536 return fastop(ctxt, em_imul);
3539 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3541 ctxt->dst.type = OP_REG;
3542 ctxt->dst.bytes = ctxt->src.bytes;
3543 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3544 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3546 return X86EMUL_CONTINUE;
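/*
 * The masking expression above broadcasts the sign bit of the source:
 * if the top bit is set, (1 - 1) == 0 and ~0 is all ones; otherwise
 * (0 - 1) underflows to all ones and the complement is 0. So CWD with
 * AX = 0x8000 yields DX = 0xffff, while CDQ with EAX = 0x12345678
 * yields EDX = 0. A hypothetical spelled-out equivalent (not used in
 * this file):
 */
static inline unsigned long sign_fill_sketch(unsigned long val,
					     unsigned int bytes)
{
	/* All ones if the source's sign bit is set, else zero. */
	return (val >> (bytes * 8 - 1)) & 1 ? ~0UL : 0UL;
}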
3549 static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3551 u64 tsc_aux = 0;
3553 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3554 return emulate_gp(ctxt, 0);
3555 ctxt->dst.val = tsc_aux;
3556 return X86EMUL_CONTINUE;
3559 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3561 u64 tsc = 0;
3563 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3564 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3565 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3566 return X86EMUL_CONTINUE;
3569 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3571 u64 pmc;
3573 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3574 return emulate_gp(ctxt, 0);
3575 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3576 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3577 return X86EMUL_CONTINUE;
3580 static int em_mov(struct x86_emulate_ctxt *ctxt)
3582 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3583 return X86EMUL_CONTINUE;
3586 #define FFL(x) bit(X86_FEATURE_##x)
3588 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3590 u32 ebx, ecx, edx, eax = 1;
3591 u16 tmp;
3594 * Check MOVBE is set in the guest-visible CPUID leaf.
3596 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3597 if (!(ecx & FFL(MOVBE)))
3598 return emulate_ud(ctxt);
3600 switch (ctxt->op_bytes) {
3601 case 2:
3603 * From MOVBE definition: "...When the operand size is 16 bits,
3604 * the upper word of the destination register remains unchanged
3605 * ..."
3607 * Casting either ->valptr or ->val to u16 would break strict-aliasing
3608 * rules, so we have to do the operation almost by hand.
3610 tmp = (u16)ctxt->src.val;
3611 ctxt->dst.val &= ~0xffffUL;
3612 ctxt->dst.val |= (unsigned long)swab16(tmp);
3613 break;
3614 case 4:
3615 ctxt->dst.val = swab32((u32)ctxt->src.val);
3616 break;
3617 case 8:
3618 ctxt->dst.val = swab64(ctxt->src.val);
3619 break;
3620 default:
3621 BUG();
3623 return X86EMUL_CONTINUE;
3626 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3628 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3629 return emulate_gp(ctxt, 0);
3631 /* Disable writeback. */
3632 ctxt->dst.type = OP_NONE;
3633 return X86EMUL_CONTINUE;
3636 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3638 unsigned long val;
3640 if (ctxt->mode == X86EMUL_MODE_PROT64)
3641 val = ctxt->src.val & ~0ULL;
3642 else
3643 val = ctxt->src.val & ~0U;
3645 /* #UD condition is already handled. */
3646 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3647 return emulate_gp(ctxt, 0);
3649 /* Disable writeback. */
3650 ctxt->dst.type = OP_NONE;
3651 return X86EMUL_CONTINUE;
3654 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3656 u64 msr_data;
3658 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3659 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3660 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3661 return emulate_gp(ctxt, 0);
3663 return X86EMUL_CONTINUE;
3666 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3668 u64 msr_data;
3670 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3671 return emulate_gp(ctxt, 0);
3673 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3674 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3675 return X86EMUL_CONTINUE;
3678 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3680 if (segment > VCPU_SREG_GS &&
3681 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3682 ctxt->ops->cpl(ctxt) > 0)
3683 return emulate_gp(ctxt, 0);
3685 ctxt->dst.val = get_segment_selector(ctxt, segment);
3686 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3687 ctxt->dst.bytes = 2;
3688 return X86EMUL_CONTINUE;
3691 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3693 if (ctxt->modrm_reg > VCPU_SREG_GS)
3694 return emulate_ud(ctxt);
3696 return em_store_sreg(ctxt, ctxt->modrm_reg);
3699 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3701 u16 sel = ctxt->src.val;
3703 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3704 return emulate_ud(ctxt);
3706 if (ctxt->modrm_reg == VCPU_SREG_SS)
3707 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3709 /* Disable writeback. */
3710 ctxt->dst.type = OP_NONE;
3711 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3714 static int em_sldt(struct x86_emulate_ctxt *ctxt)
3716 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3719 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3721 u16 sel = ctxt->src.val;
3723 /* Disable writeback. */
3724 ctxt->dst.type = OP_NONE;
3725 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3728 static int em_str(struct x86_emulate_ctxt *ctxt)
3730 return em_store_sreg(ctxt, VCPU_SREG_TR);
3733 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3735 u16 sel = ctxt->src.val;
3737 /* Disable writeback. */
3738 ctxt->dst.type = OP_NONE;
3739 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3742 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3744 int rc;
3745 ulong linear;
3747 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3748 if (rc == X86EMUL_CONTINUE)
3749 ctxt->ops->invlpg(ctxt, linear);
3750 /* Disable writeback. */
3751 ctxt->dst.type = OP_NONE;
3752 return X86EMUL_CONTINUE;
3755 static int em_clts(struct x86_emulate_ctxt *ctxt)
3757 ulong cr0;
3759 cr0 = ctxt->ops->get_cr(ctxt, 0);
3760 cr0 &= ~X86_CR0_TS;
3761 ctxt->ops->set_cr(ctxt, 0, cr0);
3762 return X86EMUL_CONTINUE;
3765 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3767 int rc = ctxt->ops->fix_hypercall(ctxt);
3769 if (rc != X86EMUL_CONTINUE)
3770 return rc;
3772 /* Let the processor re-execute the fixed hypercall */
3773 ctxt->_eip = ctxt->eip;
3774 /* Disable writeback. */
3775 ctxt->dst.type = OP_NONE;
3776 return X86EMUL_CONTINUE;
3779 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3780 void (*get)(struct x86_emulate_ctxt *ctxt,
3781 struct desc_ptr *ptr))
3783 struct desc_ptr desc_ptr;
3785 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3786 ctxt->ops->cpl(ctxt) > 0)
3787 return emulate_gp(ctxt, 0);
3789 if (ctxt->mode == X86EMUL_MODE_PROT64)
3790 ctxt->op_bytes = 8;
3791 get(ctxt, &desc_ptr);
3792 if (ctxt->op_bytes == 2) {
3793 ctxt->op_bytes = 4;
3794 desc_ptr.address &= 0x00ffffff;
3796 /* Disable writeback. */
3797 ctxt->dst.type = OP_NONE;
3798 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3799 &desc_ptr, 2 + ctxt->op_bytes);
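/*
 * The store above writes the architectural 2-byte limit followed by
 * the base: 10 bytes in 64-bit mode, 6 bytes otherwise. For the
 * 16-bit operand size, op_bytes is bumped to 4 but the base is masked
 * to 24 bits first, so a 6-byte image with a zero top base byte is
 * written.
 */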
3802 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3804 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3807 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3809 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3812 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3814 struct desc_ptr desc_ptr;
3815 int rc;
3817 if (ctxt->mode == X86EMUL_MODE_PROT64)
3818 ctxt->op_bytes = 8;
3819 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3820 &desc_ptr.size, &desc_ptr.address,
3821 ctxt->op_bytes);
3822 if (rc != X86EMUL_CONTINUE)
3823 return rc;
3824 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3825 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3826 return emulate_gp(ctxt, 0);
3827 if (lgdt)
3828 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3829 else
3830 ctxt->ops->set_idt(ctxt, &desc_ptr);
3831 /* Disable writeback. */
3832 ctxt->dst.type = OP_NONE;
3833 return X86EMUL_CONTINUE;
3836 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3838 return em_lgdt_lidt(ctxt, true);
3841 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3843 return em_lgdt_lidt(ctxt, false);
3846 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3848 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3849 ctxt->ops->cpl(ctxt) > 0)
3850 return emulate_gp(ctxt, 0);
3852 if (ctxt->dst.type == OP_MEM)
3853 ctxt->dst.bytes = 2;
3854 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3855 return X86EMUL_CONTINUE;
3858 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3860 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3861 | (ctxt->src.val & 0x0f));
3862 ctxt->dst.type = OP_NONE;
3863 return X86EMUL_CONTINUE;
3866 static int em_loop(struct x86_emulate_ctxt *ctxt)
3868 int rc = X86EMUL_CONTINUE;
3870 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3871 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3872 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3873 rc = jmp_rel(ctxt, ctxt->src.val);
3875 return rc;
3878 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3880 int rc = X86EMUL_CONTINUE;
3882 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3883 rc = jmp_rel(ctxt, ctxt->src.val);
3885 return rc;
3888 static int em_in(struct x86_emulate_ctxt *ctxt)
3890 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3891 &ctxt->dst.val))
3892 return X86EMUL_IO_NEEDED;
3894 return X86EMUL_CONTINUE;
3897 static int em_out(struct x86_emulate_ctxt *ctxt)
3899 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3900 &ctxt->src.val, 1);
3901 /* Disable writeback. */
3902 ctxt->dst.type = OP_NONE;
3903 return X86EMUL_CONTINUE;
3906 static int em_cli(struct x86_emulate_ctxt *ctxt)
3908 if (emulator_bad_iopl(ctxt))
3909 return emulate_gp(ctxt, 0);
3911 ctxt->eflags &= ~X86_EFLAGS_IF;
3912 return X86EMUL_CONTINUE;
3915 static int em_sti(struct x86_emulate_ctxt *ctxt)
3917 if (emulator_bad_iopl(ctxt))
3918 return emulate_gp(ctxt, 0);
3920 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3921 ctxt->eflags |= X86_EFLAGS_IF;
3922 return X86EMUL_CONTINUE;
3925 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3927 u32 eax, ebx, ecx, edx;
3928 u64 msr = 0;
3930 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3931 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3932 ctxt->ops->cpl(ctxt)) {
3933 return emulate_gp(ctxt, 0);
3936 eax = reg_read(ctxt, VCPU_REGS_RAX);
3937 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3938 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3939 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3940 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3941 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3942 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3943 return X86EMUL_CONTINUE;
3946 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3948 u32 flags;
3950 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3951 X86_EFLAGS_SF;
3952 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3954 ctxt->eflags &= ~0xffUL;
3955 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3956 return X86EMUL_CONTINUE;
3959 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3961 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3962 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3963 return X86EMUL_CONTINUE;
3966 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3968 switch (ctxt->op_bytes) {
3969 #ifdef CONFIG_X86_64
3970 case 8:
3971 asm("bswap %0" : "+r"(ctxt->dst.val));
3972 break;
3973 #endif
3974 default:
3975 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3976 break;
3978 return X86EMUL_CONTINUE;
3981 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3983 /* emulating clflush regardless of cpuid */
3984 return X86EMUL_CONTINUE;
3987 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3989 ctxt->dst.val = (s32) ctxt->src.val;
3990 return X86EMUL_CONTINUE;
3993 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3995 u32 eax = 1, ebx, ecx = 0, edx;
3997 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3998 if (!(edx & FFL(FXSR)))
3999 return emulate_ud(ctxt);
4001 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4002 return emulate_nm(ctxt);
4005 * Rather than work around the lack of fxsave64/fxrstor64 on old
4006 * compilers, simply don't emulate a case that should never be hit.
4008 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4009 return X86EMUL_UNHANDLEABLE;
4011 return X86EMUL_CONTINUE;
4015 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
4016 * and restore MXCSR.
4018 static size_t __fxstate_size(int nregs)
4020 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4023 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4025 bool cr4_osfxsr;
4026 if (ctxt->mode == X86EMUL_MODE_PROT64)
4027 return __fxstate_size(16);
4029 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4030 return __fxstate_size(cr4_osfxsr ? 8 : 0);
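/*
 * Size arithmetic (illustrative): xmm_space starts 160 bytes into
 * struct fxregs_state (32 header bytes plus 8 x 16 bytes of x87/MMX
 * state), so __fxstate_size(0) == 160, __fxstate_size(8) == 288 and
 * __fxstate_size(16) == 416, out of the full 512-byte FXSAVE image.
 */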
4034 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
4035 * 1) 16 bit mode
4036 * 2) 32 bit mode
4037 * - like (1), but FIP and FDP are only 16 bit. At least Intel CPUs
4038 * preserve whole 32-bit values, though, so (1) and (2) are the same
4039 * with respect to save and restore
4040 * 3) 64-bit mode with REX.W prefix
4041 * - like (2), but XMM 8-15 are being saved and restored
4042 * 4) 64-bit mode without REX.W prefix
4043 * - like (3), but FIP and FDP are 64 bit
4045 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4046 * desired result. (4) is not emulated.
4048 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4049 * and FPU DS) should match.
4051 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4053 struct fxregs_state fx_state;
4054 int rc;
4056 rc = check_fxsr(ctxt);
4057 if (rc != X86EMUL_CONTINUE)
4058 return rc;
4060 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4062 if (rc != X86EMUL_CONTINUE)
4063 return rc;
4065 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4066 fxstate_size(ctxt));
4070 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4071 * in the host registers (via FXSAVE) instead, so they won't be modified.
4072 * (preemption has to stay disabled until FXRSTOR).
4074 * Use noinline to keep the stack for other functions called by callers small.
4076 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4077 const size_t used_size)
4079 struct fxregs_state fx_tmp;
4080 int rc;
4082 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4083 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4084 __fxstate_size(16) - used_size);
4086 return rc;
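/*
 * E.g. a 32-bit guest with CR4.OSFXSR clear supplies only the first
 * 160 bytes; the tail of fx_state, XMM 0-15 included, is then taken
 * from the host's own FXSAVE image so FXRSTOR leaves them untouched.
 */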
4089 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4091 struct fxregs_state fx_state;
4092 int rc;
4093 size_t size;
4095 rc = check_fxsr(ctxt);
4096 if (rc != X86EMUL_CONTINUE)
4097 return rc;
4099 size = fxstate_size(ctxt);
4100 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4101 if (rc != X86EMUL_CONTINUE)
4102 return rc;
4104 if (size < __fxstate_size(16)) {
4105 rc = fxregs_fixup(&fx_state, size);
4106 if (rc != X86EMUL_CONTINUE)
4107 goto out;
4110 if (fx_state.mxcsr >> 16) {
4111 rc = emulate_gp(ctxt, 0);
4112 goto out;
4115 if (rc == X86EMUL_CONTINUE)
4116 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4118 out:
4119 return rc;
4122 static bool valid_cr(int nr)
4124 switch (nr) {
4125 case 0:
4126 case 2 ... 4:
4127 case 8:
4128 return true;
4129 default:
4130 return false;
4134 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4136 if (!valid_cr(ctxt->modrm_reg))
4137 return emulate_ud(ctxt);
4139 return X86EMUL_CONTINUE;
4142 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4144 u64 new_val = ctxt->src.val64;
4145 int cr = ctxt->modrm_reg;
4146 u64 efer = 0;
4148 static u64 cr_reserved_bits[] = {
4149 0xffffffff00000000ULL,
4150 0, 0, 0, /* CR3 checked later */
4151 CR4_RESERVED_BITS,
4152 0, 0, 0,
4153 CR8_RESERVED_BITS,
4156 if (!valid_cr(cr))
4157 return emulate_ud(ctxt);
4159 if (new_val & cr_reserved_bits[cr])
4160 return emulate_gp(ctxt, 0);
4162 switch (cr) {
4163 case 0: {
4164 u64 cr4;
4165 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4166 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4167 return emulate_gp(ctxt, 0);
4169 cr4 = ctxt->ops->get_cr(ctxt, 4);
4170 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4172 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4173 !(cr4 & X86_CR4_PAE))
4174 return emulate_gp(ctxt, 0);
4176 break;
4178 case 3: {
4179 u64 rsvd = 0;
4181 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4182 if (efer & EFER_LMA) {
4183 u64 maxphyaddr;
4184 u32 eax, ebx, ecx, edx;
4186 eax = 0x80000008;
4187 ecx = 0;
4188 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4189 &edx, false))
4190 maxphyaddr = eax & 0xff;
4191 else
4192 maxphyaddr = 36;
4193 rsvd = rsvd_bits(maxphyaddr, 63);
4194 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4195 rsvd &= ~X86_CR3_PCID_NOFLUSH;
4198 if (new_val & rsvd)
4199 return emulate_gp(ctxt, 0);
4201 break;
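/*
 * Example: on a CPU reporting CPUID.80000008H:EAX[7:0] = 48, the
 * reserved mask covers CR3 bits 48-63; with CR4.PCIDE set, bit 63
 * (the PCID no-flush hint) is carved back out of that mask.
 */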
4203 case 4: {
4204 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4206 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4207 return emulate_gp(ctxt, 0);
4209 break;
4213 return X86EMUL_CONTINUE;
4216 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4218 unsigned long dr7;
4220 ctxt->ops->get_dr(ctxt, 7, &dr7);
4222 /* Check if DR7.Global_Enable is set */
4223 return dr7 & (1 << 13);
4226 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4228 int dr = ctxt->modrm_reg;
4229 u64 cr4;
4231 if (dr > 7)
4232 return emulate_ud(ctxt);
4234 cr4 = ctxt->ops->get_cr(ctxt, 4);
4235 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4236 return emulate_ud(ctxt);
4238 if (check_dr7_gd(ctxt)) {
4239 ulong dr6;
4241 ctxt->ops->get_dr(ctxt, 6, &dr6);
4242 dr6 &= ~15;
4243 dr6 |= DR6_BD | DR6_RTM;
4244 ctxt->ops->set_dr(ctxt, 6, dr6);
4245 return emulate_db(ctxt);
4248 return X86EMUL_CONTINUE;
4251 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4253 u64 new_val = ctxt->src.val64;
4254 int dr = ctxt->modrm_reg;
4256 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4257 return emulate_gp(ctxt, 0);
4259 return check_dr_read(ctxt);
4262 static int check_svme(struct x86_emulate_ctxt *ctxt)
4264 u64 efer = 0;
4266 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4268 if (!(efer & EFER_SVME))
4269 return emulate_ud(ctxt);
4271 return X86EMUL_CONTINUE;
4274 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4276 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4278 /* Valid physical address? */
4279 if (rax & 0xffff000000000000ULL)
4280 return emulate_gp(ctxt, 0);
4282 return check_svme(ctxt);
4285 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4287 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4289 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4290 return emulate_ud(ctxt);
4292 return X86EMUL_CONTINUE;
4295 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4297 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4298 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4301 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
4302 * in Ring3 when CR4.PCE=0.
4304 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4305 return X86EMUL_CONTINUE;
4307 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4308 ctxt->ops->check_pmc(ctxt, rcx))
4309 return emulate_gp(ctxt, 0);
4311 return X86EMUL_CONTINUE;
4314 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4316 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4317 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4318 return emulate_gp(ctxt, 0);
4320 return X86EMUL_CONTINUE;
4323 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4325 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4326 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4327 return emulate_gp(ctxt, 0);
4329 return X86EMUL_CONTINUE;
4332 #define D(_y) { .flags = (_y) }
4333 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4334 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4335 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4336 #define N D(NotImpl)
4337 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4338 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4339 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4340 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4341 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4342 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4343 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4344 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4345 #define II(_f, _e, _i) \
4346 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4347 #define IIP(_f, _e, _i, _p) \
4348 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4349 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4350 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4352 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4353 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4354 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4355 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4356 #define I2bvIP(_f, _e, _i, _p) \
4357 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4359 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4360 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4361 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
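/*
 * For instance, F6ALU(Lock, em_add) in the opcode table below expands
 * to the six classic ALU encodings 0x00-0x05: ADD r/m8,r8 and
 * ADD r/m32,r32 (lockable), ADD r8,r/m8 and ADD r32,r/m32 (Lock
 * stripped, since a register destination cannot be locked), and the
 * two ADD AL/eAX,imm accumulator forms.
 */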
4363 static const struct opcode group7_rm0[] = {
4364 N,
4365 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4366 N, N, N, N, N, N,
4369 static const struct opcode group7_rm1[] = {
4370 DI(SrcNone | Priv, monitor),
4371 DI(SrcNone | Priv, mwait),
4372 N, N, N, N, N, N,
4375 static const struct opcode group7_rm3[] = {
4376 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4377 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4378 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4379 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4380 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4381 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4382 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4383 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4386 static const struct opcode group7_rm7[] = {
4387 N,
4388 DIP(SrcNone, rdtscp, check_rdtsc),
4389 N, N, N, N, N, N,
4392 static const struct opcode group1[] = {
4393 F(Lock, em_add),
4394 F(Lock | PageTable, em_or),
4395 F(Lock, em_adc),
4396 F(Lock, em_sbb),
4397 F(Lock | PageTable, em_and),
4398 F(Lock, em_sub),
4399 F(Lock, em_xor),
4400 F(NoWrite, em_cmp),
4403 static const struct opcode group1A[] = {
4404 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4407 static const struct opcode group2[] = {
4408 F(DstMem | ModRM, em_rol),
4409 F(DstMem | ModRM, em_ror),
4410 F(DstMem | ModRM, em_rcl),
4411 F(DstMem | ModRM, em_rcr),
4412 F(DstMem | ModRM, em_shl),
4413 F(DstMem | ModRM, em_shr),
4414 F(DstMem | ModRM, em_shl),
4415 F(DstMem | ModRM, em_sar),
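/* em_shl appears twice above because /6 (SAL) is an alias of /4 (SHL). */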
4418 static const struct opcode group3[] = {
4419 F(DstMem | SrcImm | NoWrite, em_test),
4420 F(DstMem | SrcImm | NoWrite, em_test),
4421 F(DstMem | SrcNone | Lock, em_not),
4422 F(DstMem | SrcNone | Lock, em_neg),
4423 F(DstXacc | Src2Mem, em_mul_ex),
4424 F(DstXacc | Src2Mem, em_imul_ex),
4425 F(DstXacc | Src2Mem, em_div_ex),
4426 F(DstXacc | Src2Mem, em_idiv_ex),
4429 static const struct opcode group4[] = {
4430 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4431 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4432 N, N, N, N, N, N,
4435 static const struct opcode group5[] = {
4436 F(DstMem | SrcNone | Lock, em_inc),
4437 F(DstMem | SrcNone | Lock, em_dec),
4438 I(SrcMem | NearBranch, em_call_near_abs),
4439 I(SrcMemFAddr | ImplicitOps, em_call_far),
4440 I(SrcMem | NearBranch, em_jmp_abs),
4441 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4442 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4445 static const struct opcode group6[] = {
4446 II(Prot | DstMem, em_sldt, sldt),
4447 II(Prot | DstMem, em_str, str),
4448 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4449 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4450 N, N, N, N,
4453 static const struct group_dual group7 = { {
4454 II(Mov | DstMem, em_sgdt, sgdt),
4455 II(Mov | DstMem, em_sidt, sidt),
4456 II(SrcMem | Priv, em_lgdt, lgdt),
4457 II(SrcMem | Priv, em_lidt, lidt),
4458 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4459 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4460 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4461 }, {
4462 EXT(0, group7_rm0),
4463 EXT(0, group7_rm1),
4464 N, EXT(0, group7_rm3),
4465 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4466 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4467 EXT(0, group7_rm7),
4468 } };
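/*
 * Decode example for group7 (0f 01): ModRM 0xc8 has mod = 3, so the
 * mod3 half is used; reg = 1 selects EXT(0, group7_rm1) and rm = 0
 * picks MONITOR (0f 01 c8), while rm = 1 picks MWAIT (0f 01 c9).
 * RDTSCP is 0f 01 f9: reg = 7, rm = 1 in group7_rm7.
 */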
4470 static const struct opcode group8[] = {
4471 N, N, N, N,
4472 F(DstMem | SrcImmByte | NoWrite, em_bt),
4473 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4474 F(DstMem | SrcImmByte | Lock, em_btr),
4475 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4479 * The "memory" destination is actually always a register, since we come
4480 * from the register case of group9.
4482 static const struct gprefix pfx_0f_c7_7 = {
4483 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
4487 static const struct group_dual group9 = { {
4488 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4489 }, {
4490 N, N, N, N, N, N, N,
4491 GP(0, &pfx_0f_c7_7),
4492 } };
4494 static const struct opcode group11[] = {
4495 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4496 X7(D(Undefined)),
4499 static const struct gprefix pfx_0f_ae_7 = {
4500 I(SrcMem | ByteOp, em_clflush), N, N, N,
4503 static const struct group_dual group15 = { {
4504 I(ModRM | Aligned16, em_fxsave),
4505 I(ModRM | Aligned16, em_fxrstor),
4506 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4507 }, {
4508 N, N, N, N, N, N, N, N,
4509 } };
4511 static const struct gprefix pfx_0f_6f_0f_7f = {
4512 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
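/*
 * The four gprefix slots are (none, 66, f2, f3). For 0f 6f that maps
 * to MOVQ mm,mm/m64 with no prefix, MOVDQA xmm,xmm/m128 with 66
 * (alignment-checked), and MOVDQU with f3 (unaligned); f2 is invalid.
 */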
4515 static const struct instr_dual instr_dual_0f_2b = {
4516 I(0, em_mov), N
4519 static const struct gprefix pfx_0f_2b = {
4520 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4523 static const struct gprefix pfx_0f_10_0f_11 = {
4524 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4527 static const struct gprefix pfx_0f_28_0f_29 = {
4528 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4531 static const struct gprefix pfx_0f_e7 = {
4532 N, I(Sse, em_mov), N, N,
4535 static const struct escape escape_d9 = { {
4536 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4537 }, {
4538 /* 0xC0 - 0xC7 */
4539 N, N, N, N, N, N, N, N,
4540 /* 0xC8 - 0xCF */
4541 N, N, N, N, N, N, N, N,
4542 /* 0xD0 - 0xD7 */
4543 N, N, N, N, N, N, N, N,
4544 /* 0xD8 - 0xDF */
4545 N, N, N, N, N, N, N, N,
4546 /* 0xE0 - 0xE7 */
4547 N, N, N, N, N, N, N, N,
4548 /* 0xE8 - 0xEF */
4549 N, N, N, N, N, N, N, N,
4550 /* 0xF0 - 0xF7 */
4551 N, N, N, N, N, N, N, N,
4552 /* 0xF8 - 0xFF */
4553 N, N, N, N, N, N, N, N,
4554 } };
4556 static const struct escape escape_db = { {
4557 N, N, N, N, N, N, N, N,
4558 }, {
4559 /* 0xC0 - 0xC7 */
4560 N, N, N, N, N, N, N, N,
4561 /* 0xC8 - 0xCF */
4562 N, N, N, N, N, N, N, N,
4563 /* 0xD0 - 0xD7 */
4564 N, N, N, N, N, N, N, N,
4565 /* 0xD8 - 0xDF */
4566 N, N, N, N, N, N, N, N,
4567 /* 0xE0 - 0xE7 */
4568 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4569 /* 0xE8 - 0xEF */
4570 N, N, N, N, N, N, N, N,
4571 /* 0xF0 - 0xF7 */
4572 N, N, N, N, N, N, N, N,
4573 /* 0xF8 - 0xFF */
4574 N, N, N, N, N, N, N, N,
4575 } };
4577 static const struct escape escape_dd = { {
4578 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4579 }, {
4580 /* 0xC0 - 0xC7 */
4581 N, N, N, N, N, N, N, N,
4582 /* 0xC8 - 0xCF */
4583 N, N, N, N, N, N, N, N,
4584 /* 0xD0 - 0xD7 */
4585 N, N, N, N, N, N, N, N,
4586 /* 0xD8 - 0xDF */
4587 N, N, N, N, N, N, N, N,
4588 /* 0xE0 - 0xE7 */
4589 N, N, N, N, N, N, N, N,
4590 /* 0xE8 - 0xEF */
4591 N, N, N, N, N, N, N, N,
4592 /* 0xF0 - 0xF7 */
4593 N, N, N, N, N, N, N, N,
4594 /* 0xF8 - 0xFF */
4595 N, N, N, N, N, N, N, N,
4596 } };
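/*
 * The escape tables mirror x87 encoding: ModRM below 0xc0 selects
 * op[reg] (e.g. FNSTCW m16 is d9 /7, FNSTSW m16 is dd /7), while
 * ModRM >= 0xc0 indexes high[] directly (e.g. FNINIT is db e3).
 */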
4598 static const struct instr_dual instr_dual_0f_c3 = {
4599 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4602 static const struct mode_dual mode_dual_63 = {
4603 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
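/*
 * Opcode 0x63 is ARPL outside 64-bit mode (not emulated, hence N) and
 * MOVSXD r64, r/m32 in 64-bit mode: "movslq %eax, %rax" sign-extends
 * 0x80000000 to 0xffffffff80000000.
 */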
4606 static const struct opcode opcode_table[256] = {
4607 /* 0x00 - 0x07 */
4608 F6ALU(Lock, em_add),
4609 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4610 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4611 /* 0x08 - 0x0F */
4612 F6ALU(Lock | PageTable, em_or),
4613 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4614 N, /* 0x0F is the two-byte opcode escape */
4615 /* 0x10 - 0x17 */
4616 F6ALU(Lock, em_adc),
4617 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4618 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4619 /* 0x18 - 0x1F */
4620 F6ALU(Lock, em_sbb),
4621 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4622 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4623 /* 0x20 - 0x27 */
4624 F6ALU(Lock | PageTable, em_and), N, N,
4625 /* 0x28 - 0x2F */
4626 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4627 /* 0x30 - 0x37 */
4628 F6ALU(Lock, em_xor), N, N,
4629 /* 0x38 - 0x3F */
4630 F6ALU(NoWrite, em_cmp), N, N,
4631 /* 0x40 - 0x4F */
4632 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4633 /* 0x50 - 0x57 */
4634 X8(I(SrcReg | Stack, em_push)),
4635 /* 0x58 - 0x5F */
4636 X8(I(DstReg | Stack, em_pop)),
4637 /* 0x60 - 0x67 */
4638 I(ImplicitOps | Stack | No64, em_pusha),
4639 I(ImplicitOps | Stack | No64, em_popa),
4640 N, MD(ModRM, &mode_dual_63),
4641 N, N, N, N,
4642 /* 0x68 - 0x6F */
4643 I(SrcImm | Mov | Stack, em_push),
4644 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4645 I(SrcImmByte | Mov | Stack, em_push),
4646 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4647 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4648 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4649 /* 0x70 - 0x7F */
4650 X16(D(SrcImmByte | NearBranch)),
4651 /* 0x80 - 0x87 */
4652 G(ByteOp | DstMem | SrcImm, group1),
4653 G(DstMem | SrcImm, group1),
4654 G(ByteOp | DstMem | SrcImm | No64, group1),
4655 G(DstMem | SrcImmByte, group1),
4656 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4657 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4658 /* 0x88 - 0x8F */
4659 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4660 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4661 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4662 D(ModRM | SrcMem | NoAccess | DstReg),
4663 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4664 G(0, group1A),
4665 /* 0x90 - 0x97 */
4666 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4667 /* 0x98 - 0x9F */
4668 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4669 I(SrcImmFAddr | No64, em_call_far), N,
4670 II(ImplicitOps | Stack, em_pushf, pushf),
4671 II(ImplicitOps | Stack, em_popf, popf),
4672 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4673 /* 0xA0 - 0xA7 */
4674 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4675 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4676 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4677 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4678 /* 0xA8 - 0xAF */
4679 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4680 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4681 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4682 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4683 /* 0xB0 - 0xB7 */
4684 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4685 /* 0xB8 - 0xBF */
4686 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4687 /* 0xC0 - 0xC7 */
4688 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4689 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4690 I(ImplicitOps | NearBranch, em_ret),
4691 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4692 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4693 G(ByteOp, group11), G(0, group11),
4694 /* 0xC8 - 0xCF */
4695 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4696 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4697 I(ImplicitOps, em_ret_far),
4698 D(ImplicitOps), DI(SrcImmByte, intn),
4699 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4700 /* 0xD0 - 0xD7 */
4701 G(Src2One | ByteOp, group2), G(Src2One, group2),
4702 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4703 I(DstAcc | SrcImmUByte | No64, em_aam),
4704 I(DstAcc | SrcImmUByte | No64, em_aad),
4705 F(DstAcc | ByteOp | No64, em_salc),
4706 I(DstAcc | SrcXLat | ByteOp, em_mov),
4707 /* 0xD8 - 0xDF */
4708 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4709 /* 0xE0 - 0xE7 */
4710 X3(I(SrcImmByte | NearBranch, em_loop)),
4711 I(SrcImmByte | NearBranch, em_jcxz),
4712 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4713 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4714 /* 0xE8 - 0xEF */
4715 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4716 I(SrcImmFAddr | No64, em_jmp_far),
4717 D(SrcImmByte | ImplicitOps | NearBranch),
4718 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4719 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4720 /* 0xF0 - 0xF7 */
4721 N, DI(ImplicitOps, icebp), N, N,
4722 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4723 G(ByteOp, group3), G(0, group3),
4724 /* 0xF8 - 0xFF */
4725 D(ImplicitOps), D(ImplicitOps),
4726 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4727 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4730 static const struct opcode twobyte_table[256] = {
4731 /* 0x00 - 0x0F */
4732 G(0, group6), GD(0, &group7), N, N,
4733 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4734 II(ImplicitOps | Priv, em_clts, clts), N,
4735 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4736 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4737 /* 0x10 - 0x1F */
4738 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4739 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4740 N, N, N, N, N, N,
4741 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4742 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4743 /* 0x20 - 0x2F */
4744 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4745 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4746 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4747 check_cr_write),
4748 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4749 check_dr_write),
4750 N, N, N, N,
4751 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4752 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4753 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4754 N, N, N, N,
4755 /* 0x30 - 0x3F */
4756 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4757 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4758 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4759 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4760 I(ImplicitOps | EmulateOnUD, em_sysenter),
4761 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4762 N, N,
4763 N, N, N, N, N, N, N, N,
4764 /* 0x40 - 0x4F */
4765 X16(D(DstReg | SrcMem | ModRM)),
4766 /* 0x50 - 0x5F */
4767 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4768 /* 0x60 - 0x6F */
4769 N, N, N, N,
4770 N, N, N, N,
4771 N, N, N, N,
4772 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4773 /* 0x70 - 0x7F */
4774 N, N, N, N,
4775 N, N, N, N,
4776 N, N, N, N,
4777 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4778 /* 0x80 - 0x8F */
4779 X16(D(SrcImm | NearBranch)),
4780 /* 0x90 - 0x9F */
4781 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4782 /* 0xA0 - 0xA7 */
4783 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4784 II(ImplicitOps, em_cpuid, cpuid),
4785 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4786 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4787 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4788 /* 0xA8 - 0xAF */
4789 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4790 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4791 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4792 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4793 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4794 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4795 /* 0xB0 - 0xB7 */
4796 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4797 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4798 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4799 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4800 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4801 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4802 /* 0xB8 - 0xBF */
4803 N, N,
4804 G(BitOp, group8),
4805 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4806 I(DstReg | SrcMem | ModRM, em_bsf_c),
4807 I(DstReg | SrcMem | ModRM, em_bsr_c),
4808 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4809 /* 0xC0 - 0xC7 */
4810 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4811 N, ID(0, &instr_dual_0f_c3),
4812 N, N, N, GD(0, &group9),
4813 /* 0xC8 - 0xCF */
4814 X8(I(DstReg, em_bswap)),
4815 /* 0xD0 - 0xDF */
4816 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4817 /* 0xE0 - 0xEF */
4818 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4819 N, N, N, N, N, N, N, N,
4820 /* 0xF0 - 0xFF */
4821 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4824 static const struct instr_dual instr_dual_0f_38_f0 = {
4825 I(DstReg | SrcMem | Mov, em_movbe), N
4828 static const struct instr_dual instr_dual_0f_38_f1 = {
4829 I(DstMem | SrcReg | Mov, em_movbe), N
4832 static const struct gprefix three_byte_0f_38_f0 = {
4833 ID(0, &instr_dual_0f_38_f0), N, N, N
4836 static const struct gprefix three_byte_0f_38_f1 = {
4837 ID(0, &instr_dual_0f_38_f1), N, N, N
4841 * Insns below are indexed by the third opcode byte and then selected by
4842 * the mandatory (SIMD) prefix.
4844 static const struct opcode opcode_map_0f_38[256] = {
4845 /* 0x00 - 0x7f */
4846 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4847 /* 0x80 - 0xef */
4848 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4849 /* 0xf0 - 0xf1 */
4850 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4851 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4852 /* 0xf2 - 0xff */
4853 N, N, X4(N), X8(N)
4856 #undef D
4857 #undef N
4858 #undef G
4859 #undef GD
4860 #undef I
4861 #undef GP
4862 #undef EXT
4863 #undef MD
4864 #undef ID
4866 #undef D2bv
4867 #undef D2bvIP
4868 #undef I2bv
4869 #undef I2bvIP
4870 #undef F6ALU
4872 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4874 unsigned size;
4876 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4877 if (size == 8)
4878 size = 4;
4879 return size;
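/*
 * Immediates never exceed four bytes: with an operand-size prefix
 * imm_size() is 2, in 32-bit operation it is 4, and for a REX.W
 * instruction the 4-byte immediate is later sign-extended to 64 bits.
 * OpImm64 is the explicit exception that fetches all 8 bytes.
 */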
4882 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4883 unsigned size, bool sign_extension)
4885 int rc = X86EMUL_CONTINUE;
4887 op->type = OP_IMM;
4888 op->bytes = size;
4889 op->addr.mem.ea = ctxt->_eip;
4890 /* NB. Immediates are sign-extended as necessary. */
4891 switch (op->bytes) {
4892 case 1:
4893 op->val = insn_fetch(s8, ctxt);
4894 break;
4895 case 2:
4896 op->val = insn_fetch(s16, ctxt);
4897 break;
4898 case 4:
4899 op->val = insn_fetch(s32, ctxt);
4900 break;
4901 case 8:
4902 op->val = insn_fetch(s64, ctxt);
4903 break;
4905 if (!sign_extension) {
4906 switch (op->bytes) {
4907 case 1:
4908 op->val &= 0xff;
4909 break;
4910 case 2:
4911 op->val &= 0xffff;
4912 break;
4913 case 4:
4914 op->val &= 0xffffffff;
4915 break;
4918 done:
4919 return rc;
4922 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4923 unsigned d)
4925 int rc = X86EMUL_CONTINUE;
4927 switch (d) {
4928 case OpReg:
4929 decode_register_operand(ctxt, op);
4930 break;
4931 case OpImmUByte:
4932 rc = decode_imm(ctxt, op, 1, false);
4933 break;
4934 case OpMem:
4935 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4936 mem_common:
4937 *op = ctxt->memop;
4938 ctxt->memopp = op;
4939 if (ctxt->d & BitOp)
4940 fetch_bit_operand(ctxt);
4941 op->orig_val = op->val;
4942 break;
4943 case OpMem64:
4944 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4945 goto mem_common;
4946 case OpAcc:
4947 op->type = OP_REG;
4948 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4949 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4950 fetch_register_operand(op);
4951 op->orig_val = op->val;
4952 break;
4953 case OpAccLo:
4954 op->type = OP_REG;
4955 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4956 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4957 fetch_register_operand(op);
4958 op->orig_val = op->val;
4959 break;
4960 case OpAccHi:
4961 if (ctxt->d & ByteOp) {
4962 op->type = OP_NONE;
4963 break;
4965 op->type = OP_REG;
4966 op->bytes = ctxt->op_bytes;
4967 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4968 fetch_register_operand(op);
4969 op->orig_val = op->val;
4970 break;
4971 case OpDI:
4972 op->type = OP_MEM;
4973 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4974 op->addr.mem.ea =
4975 register_address(ctxt, VCPU_REGS_RDI);
4976 op->addr.mem.seg = VCPU_SREG_ES;
4977 op->val = 0;
4978 op->count = 1;
4979 break;
4980 case OpDX:
4981 op->type = OP_REG;
4982 op->bytes = 2;
4983 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4984 fetch_register_operand(op);
4985 break;
4986 case OpCL:
4987 op->type = OP_IMM;
4988 op->bytes = 1;
4989 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4990 break;
4991 case OpImmByte:
4992 rc = decode_imm(ctxt, op, 1, true);
4993 break;
4994 case OpOne:
4995 op->type = OP_IMM;
4996 op->bytes = 1;
4997 op->val = 1;
4998 break;
4999 case OpImm:
5000 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5001 break;
5002 case OpImm64:
5003 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5004 break;
5005 case OpMem8:
5006 ctxt->memop.bytes = 1;
5007 if (ctxt->memop.type == OP_REG) {
5008 ctxt->memop.addr.reg = decode_register(ctxt,
5009 ctxt->modrm_rm, true);
5010 fetch_register_operand(&ctxt->memop);
5012 goto mem_common;
5013 case OpMem16:
5014 ctxt->memop.bytes = 2;
5015 goto mem_common;
5016 case OpMem32:
5017 ctxt->memop.bytes = 4;
5018 goto mem_common;
5019 case OpImmU16:
5020 rc = decode_imm(ctxt, op, 2, false);
5021 break;
5022 case OpImmU:
5023 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5024 break;
5025 case OpSI:
5026 op->type = OP_MEM;
5027 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5028 op->addr.mem.ea =
5029 register_address(ctxt, VCPU_REGS_RSI);
5030 op->addr.mem.seg = ctxt->seg_override;
5031 op->val = 0;
5032 op->count = 1;
5033 break;
5034 case OpXLat:
5035 op->type = OP_MEM;
5036 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5037 op->addr.mem.ea =
5038 address_mask(ctxt,
5039 reg_read(ctxt, VCPU_REGS_RBX) +
5040 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5041 op->addr.mem.seg = ctxt->seg_override;
5042 op->val = 0;
5043 break;
5044 case OpImmFAddr:
5045 op->type = OP_IMM;
5046 op->addr.mem.ea = ctxt->_eip;
5047 op->bytes = ctxt->op_bytes + 2;
5048 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5049 break;
5050 case OpMemFAddr:
5051 ctxt->memop.bytes = ctxt->op_bytes + 2;
5052 goto mem_common;
5053 case OpES:
5054 op->type = OP_IMM;
5055 op->val = VCPU_SREG_ES;
5056 break;
5057 case OpCS:
5058 op->type = OP_IMM;
5059 op->val = VCPU_SREG_CS;
5060 break;
5061 case OpSS:
5062 op->type = OP_IMM;
5063 op->val = VCPU_SREG_SS;
5064 break;
5065 case OpDS:
5066 op->type = OP_IMM;
5067 op->val = VCPU_SREG_DS;
5068 break;
5069 case OpFS:
5070 op->type = OP_IMM;
5071 op->val = VCPU_SREG_FS;
5072 break;
5073 case OpGS:
5074 op->type = OP_IMM;
5075 op->val = VCPU_SREG_GS;
5076 break;
5077 case OpImplicit:
5078 /* Special instructions do their own operand decoding. */
5079 default:
5080 op->type = OP_NONE; /* Disable writeback. */
5081 break;
5084 done:
5085 return rc;
5088 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5090 int rc = X86EMUL_CONTINUE;
5091 int mode = ctxt->mode;
5092 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5093 bool op_prefix = false;
5094 bool has_seg_override = false;
5095 struct opcode opcode;
5096 u16 dummy;
5097 struct desc_struct desc;
5099 ctxt->memop.type = OP_NONE;
5100 ctxt->memopp = NULL;
5101 ctxt->_eip = ctxt->eip;
5102 ctxt->fetch.ptr = ctxt->fetch.data;
5103 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5104 ctxt->opcode_len = 1;
5105 if (insn_len > 0)
5106 memcpy(ctxt->fetch.data, insn, insn_len);
5107 else {
5108 rc = __do_insn_fetch_bytes(ctxt, 1);
5109 if (rc != X86EMUL_CONTINUE)
5110 return rc;
5113 switch (mode) {
5114 case X86EMUL_MODE_REAL:
5115 case X86EMUL_MODE_VM86:
5116 def_op_bytes = def_ad_bytes = 2;
5117 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5118 if (desc.d)
5119 def_op_bytes = def_ad_bytes = 4;
5120 break;
5121 case X86EMUL_MODE_PROT16:
5122 def_op_bytes = def_ad_bytes = 2;
5123 break;
5124 case X86EMUL_MODE_PROT32:
5125 def_op_bytes = def_ad_bytes = 4;
5126 break;
5127 #ifdef CONFIG_X86_64
5128 case X86EMUL_MODE_PROT64:
5129 def_op_bytes = 4;
5130 def_ad_bytes = 8;
5131 break;
5132 #endif
5133 default:
5134 return EMULATION_FAILED;
5137 ctxt->op_bytes = def_op_bytes;
5138 ctxt->ad_bytes = def_ad_bytes;
5140 /* Legacy prefixes. */
5141 for (;;) {
5142 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5143 case 0x66: /* operand-size override */
5144 op_prefix = true;
5145 /* switch between 2/4 bytes */
5146 ctxt->op_bytes = def_op_bytes ^ 6;
5147 break;
5148 case 0x67: /* address-size override */
5149 if (mode == X86EMUL_MODE_PROT64)
5150 /* switch between 4/8 bytes */
5151 ctxt->ad_bytes = def_ad_bytes ^ 12;
5152 else
5153 /* switch between 2/4 bytes */
5154 ctxt->ad_bytes = def_ad_bytes ^ 6;
5155 break;
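/*
 * The XOR toggles above flip between the two legal widths: 2 ^ 6 = 4
 * and 4 ^ 6 = 2 for the 16/32-bit defaults, and 8 ^ 12 = 4 for an
 * address-size override in 64-bit mode.
 */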
5156 case 0x26: /* ES override */
5157 case 0x2e: /* CS override */
5158 case 0x36: /* SS override */
5159 case 0x3e: /* DS override */
5160 has_seg_override = true;
5161 ctxt->seg_override = (ctxt->b >> 3) & 3;
5162 break;
5163 case 0x64: /* FS override */
5164 case 0x65: /* GS override */
5165 has_seg_override = true;
5166 ctxt->seg_override = ctxt->b & 7;
5167 break;
5168 case 0x40 ... 0x4f: /* REX */
5169 if (mode != X86EMUL_MODE_PROT64)
5170 goto done_prefixes;
5171 ctxt->rex_prefix = ctxt->b;
5172 continue;
5173 case 0xf0: /* LOCK */
5174 ctxt->lock_prefix = 1;
5175 break;
5176 case 0xf2: /* REPNE/REPNZ */
5177 case 0xf3: /* REP/REPE/REPZ */
5178 ctxt->rep_prefix = ctxt->b;
5179 break;
5180 default:
5181 goto done_prefixes;
5184 /* Any legacy prefix after a REX prefix nullifies its effect. */
5186 ctxt->rex_prefix = 0;
5189 done_prefixes:
5191 /* REX prefix. */
5192 if (ctxt->rex_prefix & 8)
5193 ctxt->op_bytes = 8; /* REX.W */
5195 /* Opcode byte(s). */
5196 opcode = opcode_table[ctxt->b];
5197 /* Two-byte opcode? */
5198 if (ctxt->b == 0x0f) {
5199 ctxt->opcode_len = 2;
5200 ctxt->b = insn_fetch(u8, ctxt);
5201 opcode = twobyte_table[ctxt->b];
5203 /* 0F_38 opcode map */
5204 if (ctxt->b == 0x38) {
5205 ctxt->opcode_len = 3;
5206 ctxt->b = insn_fetch(u8, ctxt);
5207 opcode = opcode_map_0f_38[ctxt->b];
5210 ctxt->d = opcode.flags;
5212 if (ctxt->d & ModRM)
5213 ctxt->modrm = insn_fetch(u8, ctxt);
5215 /* vex-prefix instructions are not implemented */
5216 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5217 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5218 ctxt->d = NotImpl;
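/*
 * In 32-bit code c4/c5 are LES/LDS unless ModRM.mod == 3; only the
 * register forms were reclaimed as VEX prefixes, which is what the
 * (modrm & 0xc0) == 0xc0 test distinguishes. In 64-bit mode the two
 * bytes are always VEX.
 */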
5221 while (ctxt->d & GroupMask) {
5222 switch (ctxt->d & GroupMask) {
5223 case Group:
5224 goffset = (ctxt->modrm >> 3) & 7;
5225 opcode = opcode.u.group[goffset];
5226 break;
5227 case GroupDual:
5228 goffset = (ctxt->modrm >> 3) & 7;
5229 if ((ctxt->modrm >> 6) == 3)
5230 opcode = opcode.u.gdual->mod3[goffset];
5231 else
5232 opcode = opcode.u.gdual->mod012[goffset];
5233 break;
5234 case RMExt:
5235 goffset = ctxt->modrm & 7;
5236 opcode = opcode.u.group[goffset];
5237 break;
5238 case Prefix:
5239 if (ctxt->rep_prefix && op_prefix)
5240 return EMULATION_FAILED;
5241 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5242 switch (simd_prefix) {
5243 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5244 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5245 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5246 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5248 break;
5249 case Escape:
5250 if (ctxt->modrm > 0xbf)
5251 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5252 else
5253 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5254 break;
5255 case InstrDual:
5256 if ((ctxt->modrm >> 6) == 3)
5257 opcode = opcode.u.idual->mod3;
5258 else
5259 opcode = opcode.u.idual->mod012;
5260 break;
5261 case ModeDual:
5262 if (ctxt->mode == X86EMUL_MODE_PROT64)
5263 opcode = opcode.u.mdual->mode64;
5264 else
5265 opcode = opcode.u.mdual->mode32;
5266 break;
5267 default:
5268 return EMULATION_FAILED;
5271 ctxt->d &= ~(u64)GroupMask;
5272 ctxt->d |= opcode.flags;
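/*
 * Full trace for "0f 01 f9" (rdtscp): twobyte_table[0x01] is
 * GD(0, &group7); mod = 3 picks the mod3 half, reg = 7 gives
 * EXT(0, group7_rm7), and the RMExt pass uses rm = 1 to land on
 * DIP(SrcNone, rdtscp, check_rdtsc). Each pass ORs in the new flags.
 */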
5275 /* Unrecognised? */
5276 if (ctxt->d == 0)
5277 return EMULATION_FAILED;
5279 ctxt->execute = opcode.u.execute;
5281 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5282 return EMULATION_FAILED;
5284 if (unlikely(ctxt->d &
5285 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5286 No16))) {
5288 * These are copied unconditionally here, and checked unconditionally
5289 * in x86_emulate_insn.
5291 ctxt->check_perm = opcode.check_perm;
5292 ctxt->intercept = opcode.intercept;
5294 if (ctxt->d & NotImpl)
5295 return EMULATION_FAILED;
5297 if (mode == X86EMUL_MODE_PROT64) {
5298 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5299 ctxt->op_bytes = 8;
5300 else if (ctxt->d & NearBranch)
5301 ctxt->op_bytes = 8;
5304 if (ctxt->d & Op3264) {
5305 if (mode == X86EMUL_MODE_PROT64)
5306 ctxt->op_bytes = 8;
5307 else
5308 ctxt->op_bytes = 4;
5311 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5312 ctxt->op_bytes = 4;
5314 if (ctxt->d & Sse)
5315 ctxt->op_bytes = 16;
5316 else if (ctxt->d & Mmx)
5317 ctxt->op_bytes = 8;
5320 /* ModRM and SIB bytes. */
5321 if (ctxt->d & ModRM) {
5322 rc = decode_modrm(ctxt, &ctxt->memop);
5323 if (!has_seg_override) {
5324 has_seg_override = true;
5325 ctxt->seg_override = ctxt->modrm_seg;
5327 } else if (ctxt->d & MemAbs)
5328 rc = decode_abs(ctxt, &ctxt->memop);
5329 if (rc != X86EMUL_CONTINUE)
5330 goto done;
5332 if (!has_seg_override)
5333 ctxt->seg_override = VCPU_SREG_DS;
5335 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5338 * Decode and fetch the source operand: register, memory
5339 * or immediate.
5341 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5342 if (rc != X86EMUL_CONTINUE)
5343 goto done;
5346 * Decode and fetch the second source operand: register, memory
5347 * or immediate.
5349 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5350 if (rc != X86EMUL_CONTINUE)
5351 goto done;
5353 /* Decode and fetch the destination operand: register or memory. */
5354 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5356 if (ctxt->rip_relative && likely(ctxt->memopp))
5357 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5358 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5360 done:
5361 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5364 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5366 return ctxt->d & PageTable;
5369 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5371 /* The second termination condition only applies to REPE
5372 * and REPNE. Test whether the repeat string operation prefix is
5373 * REPE/REPZ or REPNE/REPNZ and, if so, check the
5374 * corresponding termination condition:
5375 * - if REPE/REPZ and ZF = 0 then done
5376 * - if REPNE/REPNZ and ZF = 1 then done
5378 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5379 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5380 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5381 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5382 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5383 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5384 return true;
5386 return false;
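/*
 * Example: "repe cmpsb" (f3 a6) keeps iterating while RCX != 0 and
 * ZF = 1; the first mismatching byte clears ZF and the check above
 * reports completion. Plain rep movs never terminates early.
 */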
5389 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5391 int rc;
5393 rc = asm_safe("fwait");
5395 if (unlikely(rc != X86EMUL_CONTINUE))
5396 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5398 return X86EMUL_CONTINUE;
5401 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5402 struct operand *op)
5404 if (op->type == OP_MM)
5405 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5408 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5410 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5412 if (!(ctxt->d & ByteOp))
5413 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5415 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5416 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5417 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5418 : "c"(ctxt->src2.val));
5420 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5421 if (!fop) /* exception is returned in fop variable */
5422 return emulate_de(ctxt);
5423 return X86EMUL_CONTINUE;
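/*
 * A fastop is a table of tiny asm stubs, one per operand size, spaced
 * FASTOP_SIZE bytes apart. For a 4-byte ADD, fop above is advanced by
 * __ffs(4) * FASTOP_SIZE to land on what is essentially
 * "addl %edx, %eax; ret": dst rides in RAX, src in RDX, src2 in RCX,
 * and the flags image travels in RDI through the push/popf pair
 * around the indirect call.
 */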
5426 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5428 memset(&ctxt->rip_relative, 0,
5429 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5431 ctxt->io_read.pos = 0;
5432 ctxt->io_read.end = 0;
5433 ctxt->mem_read.end = 0;
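/*
 * The memset above relies on the declaration order in struct
 * x86_emulate_ctxt: every decode-cache field from rip_relative up to
 * (but not including) modrm is zeroed in one sweep.
 */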
5436 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5438 const struct x86_emulate_ops *ops = ctxt->ops;
5439 int rc = X86EMUL_CONTINUE;
5440 int saved_dst_type = ctxt->dst.type;
5441 unsigned emul_flags;
5443 ctxt->mem_read.pos = 0;
5445 /* LOCK prefix is allowed only with some instructions */
5446 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5447 rc = emulate_ud(ctxt);
5448 goto done;
5451 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5452 rc = emulate_ud(ctxt);
5453 goto done;
5456 emul_flags = ctxt->ops->get_hflags(ctxt);
5457 if (unlikely(ctxt->d &
5458 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5459 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5460 (ctxt->d & Undefined)) {
5461 rc = emulate_ud(ctxt);
5462 goto done;
5465 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5466 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5467 rc = emulate_ud(ctxt);
5468 goto done;
5471 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5472 rc = emulate_nm(ctxt);
5473 goto done;
5476 if (ctxt->d & Mmx) {
5477 rc = flush_pending_x87_faults(ctxt);
5478 if (rc != X86EMUL_CONTINUE)
5479 goto done;
5481 * Now that we know the fpu is exception safe, we can fetch
5482 * operands from it.
5484 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5485 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5486 if (!(ctxt->d & Mov))
5487 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5490 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5491 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5492 X86_ICPT_PRE_EXCEPT);
5493 if (rc != X86EMUL_CONTINUE)
5494 goto done;
5497 /* Instruction can only be executed in protected mode */
5498 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5499 rc = emulate_ud(ctxt);
5500 goto done;
5503 /* Privileged instruction can be executed only in CPL=0 */
5504 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5505 if (ctxt->d & PrivUD)
5506 rc = emulate_ud(ctxt);
5507 else
5508 rc = emulate_gp(ctxt, 0);
5509 goto done;
5512 /* Do instruction specific permission checks */
5513 if (ctxt->d & CheckPerm) {
5514 rc = ctxt->check_perm(ctxt);
5515 if (rc != X86EMUL_CONTINUE)
5516 goto done;
5519 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5520 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5521 X86_ICPT_POST_EXCEPT);
5522 if (rc != X86EMUL_CONTINUE)
5523 goto done;
5526 if (ctxt->rep_prefix && (ctxt->d & String)) {
5527 /* All REP prefixes have the same first termination condition */
5528 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5529 string_registers_quirk(ctxt);
5530 ctxt->eip = ctxt->_eip;
5531 ctxt->eflags &= ~X86_EFLAGS_RF;
5532 goto done;
5537 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5538 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5539 ctxt->src.valptr, ctxt->src.bytes);
5540 if (rc != X86EMUL_CONTINUE)
5541 goto done;
5542 ctxt->src.orig_val64 = ctxt->src.val64;
5545 if (ctxt->src2.type == OP_MEM) {
5546 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5547 &ctxt->src2.val, ctxt->src2.bytes);
5548 if (rc != X86EMUL_CONTINUE)
5549 goto done;
5552 if ((ctxt->d & DstMask) == ImplicitOps)
5553 goto special_insn;
5556 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5557 /* optimisation - avoid slow emulated read if Mov */
5558 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5559 &ctxt->dst.val, ctxt->dst.bytes);
5560 if (rc != X86EMUL_CONTINUE) {
5561 if (!(ctxt->d & NoWrite) &&
5562 rc == X86EMUL_PROPAGATE_FAULT &&
5563 ctxt->exception.vector == PF_VECTOR)
5564 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5565 goto done;
5568 /* Copy full 64-bit value for CMPXCHG8B. */
5569 ctxt->dst.orig_val64 = ctxt->dst.val64;
5571 special_insn:
5573 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5574 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5575 X86_ICPT_POST_MEMACCESS);
5576 if (rc != X86EMUL_CONTINUE)
5577 goto done;
5580 if (ctxt->rep_prefix && (ctxt->d & String))
5581 ctxt->eflags |= X86_EFLAGS_RF;
5582 else
5583 ctxt->eflags &= ~X86_EFLAGS_RF;
5585 if (ctxt->execute) {
5586 if (ctxt->d & Fastop) {
5587 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5588 rc = fastop(ctxt, fop);
5589 if (rc != X86EMUL_CONTINUE)
5590 goto done;
5591 goto writeback;
5593 rc = ctxt->execute(ctxt);
5594 if (rc != X86EMUL_CONTINUE)
5595 goto done;
5596 goto writeback;
5599 if (ctxt->opcode_len == 2)
5600 goto twobyte_insn;
5601 else if (ctxt->opcode_len == 3)
5602 goto threebyte_insn;
5604 switch (ctxt->b) {
5605 case 0x70 ... 0x7f: /* jcc (short) */
5606 if (test_cc(ctxt->b, ctxt->eflags))
5607 rc = jmp_rel(ctxt, ctxt->src.val);
5608 break;
5609 case 0x8d: /* lea r16/r32, m */
5610 ctxt->dst.val = ctxt->src.addr.mem.ea;
5611 break;
5612 case 0x90 ... 0x97: /* nop / xchg reg, rax */
5613 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5614 ctxt->dst.type = OP_NONE;
5615 else
5616 rc = em_xchg(ctxt);
5617 break;
5618 case 0x98: /* cbw/cwde/cdqe */
5619 switch (ctxt->op_bytes) {
5620 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5621 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5622 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5624 break;
5625 case 0xcc: /* int3 */
5626 rc = emulate_int(ctxt, 3);
5627 break;
5628 case 0xcd: /* int n */
5629 rc = emulate_int(ctxt, ctxt->src.val);
5630 break;
5631 case 0xce: /* into */
5632 if (ctxt->eflags & X86_EFLAGS_OF)
5633 rc = emulate_int(ctxt, 4);
5634 break;
5635 case 0xe9: /* jmp rel */
5636 case 0xeb: /* jmp rel short */
5637 rc = jmp_rel(ctxt, ctxt->src.val);
5638 ctxt->dst.type = OP_NONE; /* Disable writeback. */
5639 break;
5640 case 0xf4: /* hlt */
5641 ctxt->ops->halt(ctxt);
5642 break;
5643 case 0xf5: /* cmc */
5644 /* complement carry flag from eflags reg */
5645 ctxt->eflags ^= X86_EFLAGS_CF;
5646 break;
5647 case 0xf8: /* clc */
5648 ctxt->eflags &= ~X86_EFLAGS_CF;
5649 break;
5650 case 0xf9: /* stc */
5651 ctxt->eflags |= X86_EFLAGS_CF;
5652 break;
5653 case 0xfc: /* cld */
5654 ctxt->eflags &= ~X86_EFLAGS_DF;
5655 break;
5656 case 0xfd: /* std */
5657 ctxt->eflags |= X86_EFLAGS_DF;
5658 break;
5659 default:
5660 goto cannot_emulate;
5663 if (rc != X86EMUL_CONTINUE)
5664 goto done;
5666 writeback:
5667 if (ctxt->d & SrcWrite) {
5668 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5669 rc = writeback(ctxt, &ctxt->src);
5670 if (rc != X86EMUL_CONTINUE)
5671 goto done;
5673 if (!(ctxt->d & NoWrite)) {
5674 rc = writeback(ctxt, &ctxt->dst);
5675 if (rc != X86EMUL_CONTINUE)
5676 goto done;
5680 * restore dst type in case the decoding is reused
5681 * (happens for string instructions)
5683 ctxt->dst.type = saved_dst_type;
5685 if ((ctxt->d & SrcMask) == SrcSI)
5686 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5688 if ((ctxt->d & DstMask) == DstDI)
5689 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5691 if (ctxt->rep_prefix && (ctxt->d & String)) {
5692 unsigned int count;
5693 struct read_cache *r = &ctxt->io_read;
5694 if ((ctxt->d & SrcMask) == SrcSI)
5695 count = ctxt->src.count;
5696 else
5697 count = ctxt->dst.count;
5698 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5700 if (!string_insn_completed(ctxt)) {
5702 * Re-enter the guest when the pio read-ahead buffer is empty
5703 * or, if it is not used, after every 1024 iterations.
5705 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5706 (r->end == 0 || r->end != r->pos)) {
5708 * Reset the read cache. This usually happens before
5709 * decode, but since the instruction is restarted
5710 * we have to do it here.
5712 ctxt->mem_read.end = 0;
5713 writeback_registers(ctxt);
5714 return EMULATION_RESTART;
5716 goto done; /* skip rip writeback */
5718 ctxt->eflags &= ~X86_EFLAGS_RF;
5721 ctxt->eip = ctxt->_eip;
5723 done:
5724 if (rc == X86EMUL_PROPAGATE_FAULT) {
5725 WARN_ON(ctxt->exception.vector > 0x1f);
5726 ctxt->have_exception = true;
5728 if (rc == X86EMUL_INTERCEPTED)
5729 return EMULATION_INTERCEPTED;
5731 if (rc == X86EMUL_CONTINUE)
5732 writeback_registers(ctxt);
5734 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5736 twobyte_insn:
5737 switch (ctxt->b) {
5738 case 0x09: /* wbinvd */
5739 (ctxt->ops->wbinvd)(ctxt);
5740 break;
5741 case 0x08: /* invd */
5742 case 0x0d: /* GrpP (prefetch) */
5743 case 0x18: /* Grp16 (prefetch/nop) */
5744 case 0x1f: /* nop */
5745 break;
5746 case 0x20: /* mov cr, reg */
5747 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5748 break;
5749 case 0x21: /* mov from dr to reg */
5750 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5751 break;
5752 case 0x40 ... 0x4f: /* cmov */
5753 if (test_cc(ctxt->b, ctxt->eflags))
5754 ctxt->dst.val = ctxt->src.val;
5755 else if (ctxt->op_bytes != 4)
5756 ctxt->dst.type = OP_NONE; /* no writeback */
5757 break;
5758 case 0x80 ... 0x8f: /* jnz rel, etc*/
5759 if (test_cc(ctxt->b, ctxt->eflags))
5760 rc = jmp_rel(ctxt, ctxt->src.val);
5761 break;
5762 case 0x90 ... 0x9f: /* setcc r/m8 */
5763 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5764 break;
5765 case 0xb6 ... 0xb7: /* movzx */
5766 ctxt->dst.bytes = ctxt->op_bytes;
5767 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5768 : (u16) ctxt->src.val;
5769 break;
5770 case 0xbe ... 0xbf: /* movsx */
5771 ctxt->dst.bytes = ctxt->op_bytes;
5772 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5773 (s16) ctxt->src.val;
5774 break;
5775 default:
5776 goto cannot_emulate;
5779 threebyte_insn:
5781 if (rc != X86EMUL_CONTINUE)
5782 goto done;
5784 goto writeback;
5786 cannot_emulate:
5787 return EMULATION_FAILED;
5790 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5792 invalidate_registers(ctxt);
5795 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5797 writeback_registers(ctxt);
5800 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5802 if (ctxt->rep_prefix && (ctxt->d & String))
5803 return false;
5805 if (ctxt->d & TwoMemOp)
5806 return false;
5808 return true;