/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AL/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
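
/*
 * Editor's note (illustrative, not in the original): each operand slot in
 * the 56-bit flags word is an OpBits-wide field, so an opcode-table entry
 * such as
 *
 *	DstMem | SrcReg | ModRM
 *
 * packs OpMem into bits 1-5 and OpReg into bits 6-10 and sets the generic
 * ModRM-decode bit; the decoder recovers the operand types later with
 * (ctxt->d >> DstShift) & OpMask and (ctxt->d >> SrcShift) & OpMask.
 */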

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
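
/*
 * Editor's note (worked example, not in the original): because each stub is
 * FASTOP_SIZE (8) bytes and the stubs are emitted in byte/word/long/quad
 * order, the dispatcher (not shown in this excerpt) can locate the right one
 * arithmetically, e.g. for a 4-byte operand:
 *
 *	fop = em_add + __ffs(4) * FASTOP_SIZE;   (= em_add + 16, the "addl" stub)
 *
 * Byte-sized operations use the first stub, at em_add itself.
 */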

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
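
/*
 * Editor's note (illustrative, not in the original): FASTOP2(add) above
 * expands to one exported em_add symbol containing four 8-byte-aligned
 * stubs, roughly:
 *
 *	em_add:      addb %dl,  %al   ; ret
 *	em_add+0x08: addw %dx,  %ax   ; ret
 *	em_add+0x10: addl %edx, %eax  ; ret
 *	em_add+0x18: addq %rdx, %rax  ; ret   (64-bit kernels only)
 */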

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
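
/*
 * Editor's note (not in the original): a SETcc stub is exactly four bytes
 * (three for "setcc %al" plus one for "ret"), and ".align 4" keeps every
 * slot that size, so test_cc() below can index the table as
 * em_setcc + 4 * condition.
 */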
411 asm(".global kvm_fastop_exception \n"
412 "kvm_fastop_exception: xor %esi, %esi; ret");
414 FOP_START(setcc)
415 FOP_SETCC(seto)
416 FOP_SETCC(setno)
417 FOP_SETCC(setc)
418 FOP_SETCC(setnc)
419 FOP_SETCC(setz)
420 FOP_SETCC(setnz)
421 FOP_SETCC(setbe)
422 FOP_SETCC(setnbe)
423 FOP_SETCC(sets)
424 FOP_SETCC(setns)
425 FOP_SETCC(setp)
426 FOP_SETCC(setnp)
427 FOP_SETCC(setl)
428 FOP_SETCC(setnl)
429 FOP_SETCC(setle)
430 FOP_SETCC(setnle)
431 FOP_END;
433 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
434 FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg_rmw(ctxt, reg), mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
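
/*
 * Editor's note (worked example, not in the original): with the granularity
 * bit set the 20-bit limit counts 4 KiB pages, so a raw limit of 0xfffff
 * scales to (0xfffff << 12) | 0xfff = 0xffffffff, i.e. a 4 GiB segment;
 * with g clear the same raw value means just 1 MiB - 1.
 */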

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
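
/*
 * Editor's note (worked example, not in the original): for an expand-down
 * data segment the limit marks the *lower* bound, so with a scaled limit of
 * 0x0fff and d=1 the valid effective addresses are 0x1000 through
 * 0xffffffff; anything at or below the limit takes the "goto bad" path
 * above.
 */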

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
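
	/*
	 * Editor's note (not in the original): since 0 <= cur_size <= 15
	 * here and 15 is 0b1111, "15UL ^ cur_size" equals "15UL - cur_size",
	 * e.g. cur_size = 3 leaves 15 ^ 3 = 12 bytes to fetch.
	 */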

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;
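
	/*
	 * Editor's note (assumption, not in the original): the operand is a
	 * 2-byte limit followed by the table base; a 16-bit LGDT/LIDT still
	 * loads a 24-bit base, hence op_bytes is widened from 2 to 3 below.
	 */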
	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
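	/*
	 * Editor's note (not in the original): X86_EFLAGS_IF is forced on so
	 * that the host "popf" below cannot leave interrupts disabled while
	 * the stub runs.
	 */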
968 asm("push %[flags]; popf; call *%[fastop]"
969 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
970 return rc;

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;
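
			/*
			 * Editor's note (worked example, not in the
			 * original): "8b 44 8e 04" decodes as mod=01,
			 * rm=100 (SIB follows), SIB 0x8e = scale 4,
			 * index rcx, base rsi, disp8 4 - i.e.
			 * mov 0x4(%rsi,%rcx,4),%eax.
			 */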
			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
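
/*
 * Editor's note (worked example, not in the original): for a 32-bit
 * "bt %eax, (mem)" with %eax = 100, the code above masks sv to 96, moves
 * the effective address forward by 96 >> 3 = 12 bytes, and reduces the bit
 * offset to 100 & 31 = 4, so the access stays within one 4-byte operand.
 */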

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
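
/*
 * Editor's note (not in the original): the read-ahead above batches a whole
 * "rep ins" into one ->pio_in_emulated() call, bounded by the RCX count,
 * the io_read cache size, and the bytes left in the destination page, so a
 * single exit to the I/O backend can satisfy many string iterations.
 */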

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
				   &ctxt->exception);
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;
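
	/*
	 * Editor's note (legend, not in the original): for user descriptors
	 * (s=1) the type bits tested below are: bit 3 = code segment,
	 * bit 2 = expand-down (data) / conforming (code), bit 1 = writable
	 * (data) / readable (code), bit 0 = accessed.
	 */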
	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or the selector's
		 * RPL != CPL, or the descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					    ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
2025 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2027 int rc = X86EMUL_CONTINUE;
2028 unsigned long temp_eip = 0;
2029 unsigned long temp_eflags = 0;
2030 unsigned long cs = 0;
2031 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2032 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2033 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2034 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2035 X86_EFLAGS_AC | X86_EFLAGS_ID |
2036 X86_EFLAGS_FIXED;
2037 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2038 X86_EFLAGS_VIP;
2040 /* TODO: Add stack limit check */
2042 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2044 if (rc != X86EMUL_CONTINUE)
2045 return rc;
2047 if (temp_eip & ~0xffff)
2048 return emulate_gp(ctxt, 0);
2050 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2052 if (rc != X86EMUL_CONTINUE)
2053 return rc;
2055 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2057 if (rc != X86EMUL_CONTINUE)
2058 return rc;
2060 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2062 if (rc != X86EMUL_CONTINUE)
2063 return rc;
2065 ctxt->_eip = temp_eip;
2067 if (ctxt->op_bytes == 4)
2068 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2069 else if (ctxt->op_bytes == 2) {
2070 ctxt->eflags &= ~0xffff;
2071 ctxt->eflags |= temp_eflags;
2074 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2075 ctxt->eflags |= X86_EFLAGS_FIXED;
2076 ctxt->ops->set_nmi_mask(ctxt, false);
2078 return rc;
2081 static int em_iret(struct x86_emulate_ctxt *ctxt)
2083 switch(ctxt->mode) {
2084 case X86EMUL_MODE_REAL:
2085 return emulate_iret_real(ctxt);
2086 case X86EMUL_MODE_VM86:
2087 case X86EMUL_MODE_PROT16:
2088 case X86EMUL_MODE_PROT32:
2089 case X86EMUL_MODE_PROT64:
2090 default:
2091 /* iret from protected mode is not yet implemented */
2092 return X86EMUL_UNHANDLEABLE;
2096 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2098 int rc;
2099 unsigned short sel, old_sel;
2100 struct desc_struct old_desc, new_desc;
2101 const struct x86_emulate_ops *ops = ctxt->ops;
2102 u8 cpl = ctxt->ops->cpl(ctxt);
2104 /* Assignment of RIP may only fail in 64-bit mode */
2105 if (ctxt->mode == X86EMUL_MODE_PROT64)
2106 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2107 VCPU_SREG_CS);
2109 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2111 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2112 X86_TRANSFER_CALL_JMP,
2113 &new_desc);
2114 if (rc != X86EMUL_CONTINUE)
2115 return rc;
2117 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2118 if (rc != X86EMUL_CONTINUE) {
2119 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2120 /* assigning eip failed; restore the old cs */
2121 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2122 return rc;
2124 return rc;
2127 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2129 return assign_eip_near(ctxt, ctxt->src.val);
2132 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2134 int rc;
2135 long int old_eip;
2137 old_eip = ctxt->_eip;
2138 rc = assign_eip_near(ctxt, ctxt->src.val);
2139 if (rc != X86EMUL_CONTINUE)
2140 return rc;
2141 ctxt->src.val = old_eip;
2142 rc = em_push(ctxt);
2143 return rc;
2146 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2148 u64 old = ctxt->dst.orig_val64;
2150 if (ctxt->dst.bytes == 16)
2151 return X86EMUL_UNHANDLEABLE;
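/*
 * CMPXCHG8B: if EDX:EAX equals the memory operand, store ECX:EBX and
 * set ZF; otherwise load the memory operand into EDX:EAX and clear ZF.
 */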
2153 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2154 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2155 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2156 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2157 ctxt->eflags &= ~X86_EFLAGS_ZF;
2158 } else {
2159 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2160 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2162 ctxt->eflags |= X86_EFLAGS_ZF;
2164 return X86EMUL_CONTINUE;
2167 static int em_ret(struct x86_emulate_ctxt *ctxt)
2169 int rc;
2170 unsigned long eip;
2172 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2173 if (rc != X86EMUL_CONTINUE)
2174 return rc;
2176 return assign_eip_near(ctxt, eip);
2179 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2181 int rc;
2182 unsigned long eip, cs;
2183 u16 old_cs;
2184 int cpl = ctxt->ops->cpl(ctxt);
2185 struct desc_struct old_desc, new_desc;
2186 const struct x86_emulate_ops *ops = ctxt->ops;
2188 if (ctxt->mode == X86EMUL_MODE_PROT64)
2189 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2190 VCPU_SREG_CS);
2192 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2193 if (rc != X86EMUL_CONTINUE)
2194 return rc;
2195 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2196 if (rc != X86EMUL_CONTINUE)
2197 return rc;
2198 /* Outer-privilege level return is not implemented */
2199 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2200 return X86EMUL_UNHANDLEABLE;
2201 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2202 X86_TRANSFER_RET,
2203 &new_desc);
2204 if (rc != X86EMUL_CONTINUE)
2205 return rc;
2206 rc = assign_eip_far(ctxt, eip, &new_desc);
2207 if (rc != X86EMUL_CONTINUE) {
2208 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2209 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2211 return rc;
2214 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2216 int rc;
2218 rc = em_ret_far(ctxt);
2219 if (rc != X86EMUL_CONTINUE)
2220 return rc;
2221 rsp_increment(ctxt, ctxt->src.val);
2222 return X86EMUL_CONTINUE;
2225 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2227 /* Save real source value, then compare EAX against destination. */
2228 ctxt->dst.orig_val = ctxt->dst.val;
2229 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2230 ctxt->src.orig_val = ctxt->src.val;
2231 ctxt->src.val = ctxt->dst.orig_val;
2232 fastop(ctxt, em_cmp);
2234 if (ctxt->eflags & X86_EFLAGS_ZF) {
2235 /* Success: write back to memory; no update of EAX */
2236 ctxt->src.type = OP_NONE;
2237 ctxt->dst.val = ctxt->src.orig_val;
2238 } else {
2239 /* Failure: write the value we saw to EAX. */
2240 ctxt->src.type = OP_REG;
2241 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2242 ctxt->src.val = ctxt->dst.orig_val;
2243 /* Create write-cycle to dest by writing the same value */
2244 ctxt->dst.val = ctxt->dst.orig_val;
2246 return X86EMUL_CONTINUE;
2249 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2251 int seg = ctxt->src2.val;
2252 unsigned short sel;
2253 int rc;
2255 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2257 rc = load_segment_descriptor(ctxt, sel, seg);
2258 if (rc != X86EMUL_CONTINUE)
2259 return rc;
2261 ctxt->dst.val = ctxt->src.val;
2262 return rc;
2265 static void
2266 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2267 struct desc_struct *cs, struct desc_struct *ss)
2269 cs->l = 0; /* will be adjusted later */
2270 set_desc_base(cs, 0); /* flat segment */
2271 cs->g = 1; /* 4kb granularity */
2272 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2273 cs->type = 0x0b; /* Read, Execute, Accessed */
2274 cs->s = 1;
2275 cs->dpl = 0; /* will be adjusted later */
2276 cs->p = 1;
2277 cs->d = 1;
2278 cs->avl = 0;
2280 set_desc_base(ss, 0); /* flat segment */
2281 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2282 ss->g = 1; /* 4kb granularity */
2283 ss->s = 1;
2284 ss->type = 0x03; /* Read/Write, Accessed */
2285 ss->d = 1; /* 32bit stack segment */
2286 ss->dpl = 0;
2287 ss->p = 1;
2288 ss->l = 0;
2289 ss->avl = 0;
2292 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2294 u32 eax, ebx, ecx, edx;
2296 eax = ecx = 0;
2297 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2298 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2299 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2300 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2303 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2305 const struct x86_emulate_ops *ops = ctxt->ops;
2306 u32 eax, ebx, ecx, edx;
2309 * syscall should always be enabled in long mode, so only fall back to
2310 * vendor-specific (cpuid) checks when other modes are active...
2312 if (ctxt->mode == X86EMUL_MODE_PROT64)
2313 return true;
2315 eax = 0x00000000;
2316 ecx = 0x00000000;
2317 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2319 * Intel ("GenuineIntel")
2320 * remark: Intel CPUs only support "syscall" in 64-bit long
2321 * mode. A 64-bit guest running a 32-bit compat app will
2322 * therefore #UD. While this behaviour could be fixed up (by
2323 * emulating) to match the AMD response, AMD CPUs cannot be
2324 * made to behave like Intel ones.
2326 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2327 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2328 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2329 return false;
2331 /* AMD ("AuthenticAMD") */
2332 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2333 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2334 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2335 return true;
2337 /* AMD ("AMDisbetter!") */
2338 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2339 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2340 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2341 return true;
2343 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2344 return false;
2347 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2349 const struct x86_emulate_ops *ops = ctxt->ops;
2350 struct desc_struct cs, ss;
2351 u64 msr_data;
2352 u16 cs_sel, ss_sel;
2353 u64 efer = 0;
2355 /* syscall is not available in real mode */
2356 if (ctxt->mode == X86EMUL_MODE_REAL ||
2357 ctxt->mode == X86EMUL_MODE_VM86)
2358 return emulate_ud(ctxt);
2360 if (!(em_syscall_is_enabled(ctxt)))
2361 return emulate_ud(ctxt);
2363 ops->get_msr(ctxt, MSR_EFER, &efer);
2364 setup_syscalls_segments(ctxt, &cs, &ss);
2366 if (!(efer & EFER_SCE))
2367 return emulate_ud(ctxt);
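/* SYSCALL: MSR_STAR[47:32] supplies the kernel CS; SS is CS + 8. */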
2369 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2370 msr_data >>= 32;
2371 cs_sel = (u16)(msr_data & 0xfffc);
2372 ss_sel = (u16)(msr_data + 8);
2374 if (efer & EFER_LMA) {
2375 cs.d = 0;
2376 cs.l = 1;
2378 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2379 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2381 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2382 if (efer & EFER_LMA) {
2383 #ifdef CONFIG_X86_64
2384 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2386 ops->get_msr(ctxt,
2387 ctxt->mode == X86EMUL_MODE_PROT64 ?
2388 MSR_LSTAR : MSR_CSTAR, &msr_data);
2389 ctxt->_eip = msr_data;
2391 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2392 ctxt->eflags &= ~msr_data;
2393 ctxt->eflags |= X86_EFLAGS_FIXED;
2394 #endif
2395 } else {
2396 /* legacy mode */
2397 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2398 ctxt->_eip = (u32)msr_data;
2400 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2403 return X86EMUL_CONTINUE;
2406 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2408 const struct x86_emulate_ops *ops = ctxt->ops;
2409 struct desc_struct cs, ss;
2410 u64 msr_data;
2411 u16 cs_sel, ss_sel;
2412 u64 efer = 0;
2414 ops->get_msr(ctxt, MSR_EFER, &efer);
2415 /* inject #GP if in real mode */
2416 if (ctxt->mode == X86EMUL_MODE_REAL)
2417 return emulate_gp(ctxt, 0);
2420 * Not recognized on AMD in compat mode (but is recognized in legacy
2421 * mode).
2423 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2424 && !vendor_intel(ctxt))
2425 return emulate_ud(ctxt);
2427 /* sysenter/sysexit have not been tested in 64bit mode. */
2428 if (ctxt->mode == X86EMUL_MODE_PROT64)
2429 return X86EMUL_UNHANDLEABLE;
2431 setup_syscalls_segments(ctxt, &cs, &ss);
2433 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2434 if ((msr_data & 0xfffc) == 0x0)
2435 return emulate_gp(ctxt, 0);
2437 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2438 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2439 ss_sel = cs_sel + 8;
2440 if (efer & EFER_LMA) {
2441 cs.d = 0;
2442 cs.l = 1;
2445 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2446 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2448 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2449 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2451 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2452 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2453 (u32)msr_data;
2455 return X86EMUL_CONTINUE;
2458 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2460 const struct x86_emulate_ops *ops = ctxt->ops;
2461 struct desc_struct cs, ss;
2462 u64 msr_data, rcx, rdx;
2463 int usermode;
2464 u16 cs_sel = 0, ss_sel = 0;
2466 /* inject #GP if in real mode or Virtual 8086 mode */
2467 if (ctxt->mode == X86EMUL_MODE_REAL ||
2468 ctxt->mode == X86EMUL_MODE_VM86)
2469 return emulate_gp(ctxt, 0);
2471 setup_syscalls_segments(ctxt, &cs, &ss);
2473 if ((ctxt->rex_prefix & 0x8) != 0x0)
2474 usermode = X86EMUL_MODE_PROT64;
2475 else
2476 usermode = X86EMUL_MODE_PROT32;
2478 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2479 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2481 cs.dpl = 3;
2482 ss.dpl = 3;
2483 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
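/*
 * SYSEXIT derives the user selectors from MSR_IA32_SYSENTER_CS:
 * a 32-bit return uses base + 16 (CS) and base + 24 (SS); a 64-bit
 * return uses base + 32 (CS) and CS + 8 (SS). RPL 3 is OR'ed in below.
 */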
2484 switch (usermode) {
2485 case X86EMUL_MODE_PROT32:
2486 cs_sel = (u16)(msr_data + 16);
2487 if ((msr_data & 0xfffc) == 0x0)
2488 return emulate_gp(ctxt, 0);
2489 ss_sel = (u16)(msr_data + 24);
2490 rcx = (u32)rcx;
2491 rdx = (u32)rdx;
2492 break;
2493 case X86EMUL_MODE_PROT64:
2494 cs_sel = (u16)(msr_data + 32);
2495 if (msr_data == 0x0)
2496 return emulate_gp(ctxt, 0);
2497 ss_sel = cs_sel + 8;
2498 cs.d = 0;
2499 cs.l = 1;
2500 if (is_noncanonical_address(rcx) ||
2501 is_noncanonical_address(rdx))
2502 return emulate_gp(ctxt, 0);
2503 break;
2505 cs_sel |= SEGMENT_RPL_MASK;
2506 ss_sel |= SEGMENT_RPL_MASK;
2508 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2509 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2511 ctxt->_eip = rdx;
2512 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2514 return X86EMUL_CONTINUE;
2517 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2519 int iopl;
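/* I/O is IOPL-sensitive when CPL > IOPL; real mode never checks, VM86 always does. */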
2520 if (ctxt->mode == X86EMUL_MODE_REAL)
2521 return false;
2522 if (ctxt->mode == X86EMUL_MODE_VM86)
2523 return true;
2524 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2525 return ctxt->ops->cpl(ctxt) > iopl;
2528 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2529 u16 port, u16 len)
2531 const struct x86_emulate_ops *ops = ctxt->ops;
2532 struct desc_struct tr_seg;
2533 u32 base3;
2534 int r;
2535 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2536 unsigned mask = (1 << len) - 1;
2537 unsigned long base;
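/*
 * The 32-bit TSS holds the I/O bitmap base at offset 102, so a TR
 * limit below 103 cannot contain a valid I/O map pointer.
 */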
2539 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2540 if (!tr_seg.p)
2541 return false;
2542 if (desc_limit_scaled(&tr_seg) < 103)
2543 return false;
2544 base = get_desc_base(&tr_seg);
2545 #ifdef CONFIG_X86_64
2546 base |= ((u64)base3) << 32;
2547 #endif
2548 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2549 if (r != X86EMUL_CONTINUE)
2550 return false;
2551 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2552 return false;
2553 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2554 if (r != X86EMUL_CONTINUE)
2555 return false;
2556 if ((perm >> bit_idx) & mask)
2557 return false;
2558 return true;
2561 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2562 u16 port, u16 len)
2564 if (ctxt->perm_ok)
2565 return true;
2567 if (emulator_bad_iopl(ctxt))
2568 if (!emulator_io_port_access_allowed(ctxt, port, len))
2569 return false;
2571 ctxt->perm_ok = true;
2573 return true;
2576 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2577 struct tss_segment_16 *tss)
2579 tss->ip = ctxt->_eip;
2580 tss->flag = ctxt->eflags;
2581 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2582 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2583 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2584 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2585 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2586 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2587 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2588 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2590 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2591 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2592 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2593 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2594 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2597 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2598 struct tss_segment_16 *tss)
2600 int ret;
2601 u8 cpl;
2603 ctxt->_eip = tss->ip;
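/* EFLAGS bit 1 is architecturally fixed to 1. */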
2604 ctxt->eflags = tss->flag | 2;
2605 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2606 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2607 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2608 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2609 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2610 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2611 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2612 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2615 * SDM says that segment selectors are loaded before segment
2616 * descriptors
2618 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2619 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2620 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2621 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2622 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2624 cpl = tss->cs & 3;
2627 * Now load segment descriptors. If a fault happens at this stage,
2628 * it is handled in the context of the new task.
2630 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2631 X86_TRANSFER_TASK_SWITCH, NULL);
2632 if (ret != X86EMUL_CONTINUE)
2633 return ret;
2634 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2635 X86_TRANSFER_TASK_SWITCH, NULL);
2636 if (ret != X86EMUL_CONTINUE)
2637 return ret;
2638 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2639 X86_TRANSFER_TASK_SWITCH, NULL);
2640 if (ret != X86EMUL_CONTINUE)
2641 return ret;
2642 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2643 X86_TRANSFER_TASK_SWITCH, NULL);
2644 if (ret != X86EMUL_CONTINUE)
2645 return ret;
2646 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2647 X86_TRANSFER_TASK_SWITCH, NULL);
2648 if (ret != X86EMUL_CONTINUE)
2649 return ret;
2651 return X86EMUL_CONTINUE;
2654 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2655 u16 tss_selector, u16 old_tss_sel,
2656 ulong old_tss_base, struct desc_struct *new_desc)
2658 const struct x86_emulate_ops *ops = ctxt->ops;
2659 struct tss_segment_16 tss_seg;
2660 int ret;
2661 u32 new_tss_base = get_desc_base(new_desc);
2663 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2664 &ctxt->exception);
2665 if (ret != X86EMUL_CONTINUE)
2666 return ret;
2668 save_state_to_tss16(ctxt, &tss_seg);
2670 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2671 &ctxt->exception);
2672 if (ret != X86EMUL_CONTINUE)
2673 return ret;
2675 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2676 &ctxt->exception);
2677 if (ret != X86EMUL_CONTINUE)
2678 return ret;
2680 if (old_tss_sel != 0xffff) {
2681 tss_seg.prev_task_link = old_tss_sel;
2683 ret = ops->write_std(ctxt, new_tss_base,
2684 &tss_seg.prev_task_link,
2685 sizeof tss_seg.prev_task_link,
2686 &ctxt->exception);
2687 if (ret != X86EMUL_CONTINUE)
2688 return ret;
2691 return load_state_from_tss16(ctxt, &tss_seg);
2694 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2695 struct tss_segment_32 *tss)
2697 /* CR3 and the ldt selector are intentionally not saved */
2698 tss->eip = ctxt->_eip;
2699 tss->eflags = ctxt->eflags;
2700 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2701 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2702 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2703 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2704 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2705 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2706 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2707 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2709 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2710 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2711 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2712 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2713 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2714 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2717 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2718 struct tss_segment_32 *tss)
2720 int ret;
2721 u8 cpl;
2723 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2724 return emulate_gp(ctxt, 0);
2725 ctxt->_eip = tss->eip;
2726 ctxt->eflags = tss->eflags | 2;
2728 /* General purpose registers */
2729 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2730 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2731 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2732 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2733 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2734 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2735 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2736 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2739 * SDM says that segment selectors are loaded before segment
2740 * descriptors. This is important because CPL checks will
2741 * use CS.RPL.
2743 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2744 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2745 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2746 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2747 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2748 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2749 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2752 * If we're switching between Protected Mode and VM86, we need to make
2753 * sure to update the mode before loading the segment descriptors so
2754 * that the selectors are interpreted correctly.
2756 if (ctxt->eflags & X86_EFLAGS_VM) {
2757 ctxt->mode = X86EMUL_MODE_VM86;
2758 cpl = 3;
2759 } else {
2760 ctxt->mode = X86EMUL_MODE_PROT32;
2761 cpl = tss->cs & 3;
2765 * Now load segment descriptors. If a fault happens at this stage,
2766 * it is handled in the context of the new task.
2768 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2769 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
2770 if (ret != X86EMUL_CONTINUE)
2771 return ret;
2772 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2773 X86_TRANSFER_TASK_SWITCH, NULL);
2774 if (ret != X86EMUL_CONTINUE)
2775 return ret;
2776 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2777 X86_TRANSFER_TASK_SWITCH, NULL);
2778 if (ret != X86EMUL_CONTINUE)
2779 return ret;
2780 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2781 X86_TRANSFER_TASK_SWITCH, NULL);
2782 if (ret != X86EMUL_CONTINUE)
2783 return ret;
2784 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2785 X86_TRANSFER_TASK_SWITCH, NULL);
2786 if (ret != X86EMUL_CONTINUE)
2787 return ret;
2788 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2789 X86_TRANSFER_TASK_SWITCH, NULL);
2790 if (ret != X86EMUL_CONTINUE)
2791 return ret;
2792 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2793 X86_TRANSFER_TASK_SWITCH, NULL);
2795 return ret;
2798 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2799 u16 tss_selector, u16 old_tss_sel,
2800 ulong old_tss_base, struct desc_struct *new_desc)
2802 const struct x86_emulate_ops *ops = ctxt->ops;
2803 struct tss_segment_32 tss_seg;
2804 int ret;
2805 u32 new_tss_base = get_desc_base(new_desc);
2806 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2807 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2809 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2810 &ctxt->exception);
2811 if (ret != X86EMUL_CONTINUE)
2812 return ret;
2814 save_state_to_tss32(ctxt, &tss_seg);
2816 /* Only GP registers and segment selectors are saved */
2817 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2818 ldt_sel_offset - eip_offset, &ctxt->exception);
2819 if (ret != X86EMUL_CONTINUE)
2820 return ret;
2822 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2823 &ctxt->exception);
2824 if (ret != X86EMUL_CONTINUE)
2825 return ret;
2827 if (old_tss_sel != 0xffff) {
2828 tss_seg.prev_task_link = old_tss_sel;
2830 ret = ops->write_std(ctxt, new_tss_base,
2831 &tss_seg.prev_task_link,
2832 sizeof tss_seg.prev_task_link,
2833 &ctxt->exception);
2834 if (ret != X86EMUL_CONTINUE)
2835 return ret;
2838 return load_state_from_tss32(ctxt, &tss_seg);
2841 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2842 u16 tss_selector, int idt_index, int reason,
2843 bool has_error_code, u32 error_code)
2845 const struct x86_emulate_ops *ops = ctxt->ops;
2846 struct desc_struct curr_tss_desc, next_tss_desc;
2847 int ret;
2848 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2849 ulong old_tss_base =
2850 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2851 u32 desc_limit;
2852 ulong desc_addr;
2854 /* FIXME: old_tss_base == ~0 ? */
2856 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2857 if (ret != X86EMUL_CONTINUE)
2858 return ret;
2859 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2860 if (ret != X86EMUL_CONTINUE)
2861 return ret;
2863 /* FIXME: check that next_tss_desc is tss */
2866 * Check privileges. The three cases are task switch caused by...
2868 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2869 * 2. Exception/IRQ/iret: No check is performed
2870 * 3. jmp/call to TSS/task-gate: No check is performed since the
2871 * hardware checks it before exiting.
2873 if (reason == TASK_SWITCH_GATE) {
2874 if (idt_index != -1) {
2875 /* Software interrupts */
2876 struct desc_struct task_gate_desc;
2877 int dpl;
2879 ret = read_interrupt_descriptor(ctxt, idt_index,
2880 &task_gate_desc);
2881 if (ret != X86EMUL_CONTINUE)
2882 return ret;
2884 dpl = task_gate_desc.dpl;
2885 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2886 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2890 desc_limit = desc_limit_scaled(&next_tss_desc);
2891 if (!next_tss_desc.p ||
2892 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2893 desc_limit < 0x2b)) {
2894 return emulate_ts(ctxt, tss_selector & 0xfffc);
2897 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2898 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2899 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2902 if (reason == TASK_SWITCH_IRET)
2903 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2905 /* Set the back link to the previous task only if the NT bit is set
2906 in eflags; note that old_tss_sel is not used after this point. */
2907 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2908 old_tss_sel = 0xffff;
2910 if (next_tss_desc.type & 8)
2911 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2912 old_tss_base, &next_tss_desc);
2913 else
2914 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2915 old_tss_base, &next_tss_desc);
2916 if (ret != X86EMUL_CONTINUE)
2917 return ret;
2919 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2920 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2922 if (reason != TASK_SWITCH_IRET) {
2923 next_tss_desc.type |= (1 << 1); /* set busy flag */
2924 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2927 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2928 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2930 if (has_error_code) {
2931 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2932 ctxt->lock_prefix = 0;
2933 ctxt->src.val = (unsigned long) error_code;
2934 ret = em_push(ctxt);
2937 return ret;
2940 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2941 u16 tss_selector, int idt_index, int reason,
2942 bool has_error_code, u32 error_code)
2944 int rc;
2946 invalidate_registers(ctxt);
2947 ctxt->_eip = ctxt->eip;
2948 ctxt->dst.type = OP_NONE;
2950 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2951 has_error_code, error_code);
2953 if (rc == X86EMUL_CONTINUE) {
2954 ctxt->eip = ctxt->_eip;
2955 writeback_registers(ctxt);
2958 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2961 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2962 struct operand *op)
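/* EFLAGS.DF set means string ops auto-decrement; clear means auto-increment. */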
2964 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
2966 register_address_increment(ctxt, reg, df * op->bytes);
2967 op->addr.mem.ea = register_address(ctxt, reg);
2970 static int em_das(struct x86_emulate_ctxt *ctxt)
2972 u8 al, old_al;
2973 bool af, cf, old_cf;
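/*
 * DAS: subtract 6 from AL when the low nibble is greater than 9 or AF
 * is set, then subtract 0x60 when the original AL was above 0x99 or CF
 * was set.
 */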
2975 cf = ctxt->eflags & X86_EFLAGS_CF;
2976 al = ctxt->dst.val;
2978 old_al = al;
2979 old_cf = cf;
2980 cf = false;
2981 af = ctxt->eflags & X86_EFLAGS_AF;
2982 if ((al & 0x0f) > 9 || af) {
2983 al -= 6;
2984 cf = old_cf | (al >= 250);
2985 af = true;
2986 } else {
2987 af = false;
2989 if (old_al > 0x99 || old_cf) {
2990 al -= 0x60;
2991 cf = true;
2994 ctxt->dst.val = al;
2995 /* Set PF, ZF, SF */
2996 ctxt->src.type = OP_IMM;
2997 ctxt->src.val = 0;
2998 ctxt->src.bytes = 1;
2999 fastop(ctxt, em_or);
3000 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3001 if (cf)
3002 ctxt->eflags |= X86_EFLAGS_CF;
3003 if (af)
3004 ctxt->eflags |= X86_EFLAGS_AF;
3005 return X86EMUL_CONTINUE;
3008 static int em_aam(struct x86_emulate_ctxt *ctxt)
3010 u8 al, ah;
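/* AAM: AH = AL / imm8, AL = AL % imm8; an imm8 of 0 raises #DE. */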
3012 if (ctxt->src.val == 0)
3013 return emulate_de(ctxt);
3015 al = ctxt->dst.val & 0xff;
3016 ah = al / ctxt->src.val;
3017 al %= ctxt->src.val;
3019 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3021 /* Set PF, ZF, SF */
3022 ctxt->src.type = OP_IMM;
3023 ctxt->src.val = 0;
3024 ctxt->src.bytes = 1;
3025 fastop(ctxt, em_or);
3027 return X86EMUL_CONTINUE;
3030 static int em_aad(struct x86_emulate_ctxt *ctxt)
3032 u8 al = ctxt->dst.val & 0xff;
3033 u8 ah = (ctxt->dst.val >> 8) & 0xff;
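/* AAD: AL = (AL + AH * imm8) & 0xff, and AH is cleared. */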
3035 al = (al + (ah * ctxt->src.val)) & 0xff;
3037 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3039 /* Set PF, ZF, SF */
3040 ctxt->src.type = OP_IMM;
3041 ctxt->src.val = 0;
3042 ctxt->src.bytes = 1;
3043 fastop(ctxt, em_or);
3045 return X86EMUL_CONTINUE;
3048 static int em_call(struct x86_emulate_ctxt *ctxt)
3050 int rc;
3051 long rel = ctxt->src.val;
3053 ctxt->src.val = (unsigned long)ctxt->_eip;
3054 rc = jmp_rel(ctxt, rel);
3055 if (rc != X86EMUL_CONTINUE)
3056 return rc;
3057 return em_push(ctxt);
3060 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3062 u16 sel, old_cs;
3063 ulong old_eip;
3064 int rc;
3065 struct desc_struct old_desc, new_desc;
3066 const struct x86_emulate_ops *ops = ctxt->ops;
3067 int cpl = ctxt->ops->cpl(ctxt);
3068 enum x86emul_mode prev_mode = ctxt->mode;
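/* Far call: load the new CS first, then push the old CS:IP return frame. */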
3070 old_eip = ctxt->_eip;
3071 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3073 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3074 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3075 X86_TRANSFER_CALL_JMP, &new_desc);
3076 if (rc != X86EMUL_CONTINUE)
3077 return rc;
3079 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3080 if (rc != X86EMUL_CONTINUE)
3081 goto fail;
3083 ctxt->src.val = old_cs;
3084 rc = em_push(ctxt);
3085 if (rc != X86EMUL_CONTINUE)
3086 goto fail;
3088 ctxt->src.val = old_eip;
3089 rc = em_push(ctxt);
3090 /* If we failed, we tainted the memory, but at the very least we
3091 should restore cs. */
3092 if (rc != X86EMUL_CONTINUE) {
3093 pr_warn_once("faulting far call emulation tainted memory\n");
3094 goto fail;
3096 return rc;
3097 fail:
3098 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3099 ctxt->mode = prev_mode;
3100 return rc;
3104 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3106 int rc;
3107 unsigned long eip;
3109 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3110 if (rc != X86EMUL_CONTINUE)
3111 return rc;
3112 rc = assign_eip_near(ctxt, eip);
3113 if (rc != X86EMUL_CONTINUE)
3114 return rc;
3115 rsp_increment(ctxt, ctxt->src.val);
3116 return X86EMUL_CONTINUE;
3119 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3121 /* Write back the register source. */
3122 ctxt->src.val = ctxt->dst.val;
3123 write_register_operand(&ctxt->src);
3125 /* Write back the memory destination with implicit LOCK prefix. */
3126 ctxt->dst.val = ctxt->src.orig_val;
3127 ctxt->lock_prefix = 1;
3128 return X86EMUL_CONTINUE;
3131 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3133 ctxt->dst.val = ctxt->src2.val;
3134 return fastop(ctxt, em_imul);
3137 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3139 ctxt->dst.type = OP_REG;
3140 ctxt->dst.bytes = ctxt->src.bytes;
3141 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
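/* Broadcast the sign bit of the source into every bit of rDX. */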
3142 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3144 return X86EMUL_CONTINUE;
3147 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3149 u64 tsc = 0;
3151 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3152 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3153 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3154 return X86EMUL_CONTINUE;
3157 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3159 u64 pmc;
3161 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3162 return emulate_gp(ctxt, 0);
3163 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3164 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3165 return X86EMUL_CONTINUE;
3168 static int em_mov(struct x86_emulate_ctxt *ctxt)
3170 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3171 return X86EMUL_CONTINUE;
3174 #define FFL(x) bit(X86_FEATURE_##x)
3176 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3178 u32 ebx, ecx, edx, eax = 1;
3179 u16 tmp;
3182 * Check that MOVBE is set in the guest-visible CPUID leaf.
3184 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3185 if (!(ecx & FFL(MOVBE)))
3186 return emulate_ud(ctxt);
3188 switch (ctxt->op_bytes) {
3189 case 2:
3191 * From MOVBE definition: "...When the operand size is 16 bits,
3192 * the upper word of the destination register remains unchanged
3193 * ..."
3195 * Casting either ->valptr or ->val to u16 breaks strict-aliasing
3196 * rules, so we have to do the operation almost by hand.
3198 tmp = (u16)ctxt->src.val;
3199 ctxt->dst.val &= ~0xffffUL;
3200 ctxt->dst.val |= (unsigned long)swab16(tmp);
3201 break;
3202 case 4:
3203 ctxt->dst.val = swab32((u32)ctxt->src.val);
3204 break;
3205 case 8:
3206 ctxt->dst.val = swab64(ctxt->src.val);
3207 break;
3208 default:
3209 BUG();
3211 return X86EMUL_CONTINUE;
3214 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3216 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3217 return emulate_gp(ctxt, 0);
3219 /* Disable writeback. */
3220 ctxt->dst.type = OP_NONE;
3221 return X86EMUL_CONTINUE;
3224 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3226 unsigned long val;
3228 if (ctxt->mode == X86EMUL_MODE_PROT64)
3229 val = ctxt->src.val & ~0ULL;
3230 else
3231 val = ctxt->src.val & ~0U;
3233 /* #UD condition is already handled. */
3234 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3235 return emulate_gp(ctxt, 0);
3237 /* Disable writeback. */
3238 ctxt->dst.type = OP_NONE;
3239 return X86EMUL_CONTINUE;
3242 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3244 u64 msr_data;
3246 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3247 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3248 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3249 return emulate_gp(ctxt, 0);
3251 return X86EMUL_CONTINUE;
3254 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3256 u64 msr_data;
3258 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3259 return emulate_gp(ctxt, 0);
3261 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3262 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3263 return X86EMUL_CONTINUE;
3266 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3268 if (ctxt->modrm_reg > VCPU_SREG_GS)
3269 return emulate_ud(ctxt);
3271 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3272 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3273 ctxt->dst.bytes = 2;
3274 return X86EMUL_CONTINUE;
3277 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3279 u16 sel = ctxt->src.val;
3281 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3282 return emulate_ud(ctxt);
3284 if (ctxt->modrm_reg == VCPU_SREG_SS)
3285 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3287 /* Disable writeback. */
3288 ctxt->dst.type = OP_NONE;
3289 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3292 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3294 u16 sel = ctxt->src.val;
3296 /* Disable writeback. */
3297 ctxt->dst.type = OP_NONE;
3298 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3301 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3303 u16 sel = ctxt->src.val;
3305 /* Disable writeback. */
3306 ctxt->dst.type = OP_NONE;
3307 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3310 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3312 int rc;
3313 ulong linear;
3315 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3316 if (rc == X86EMUL_CONTINUE)
3317 ctxt->ops->invlpg(ctxt, linear);
3318 /* Disable writeback. */
3319 ctxt->dst.type = OP_NONE;
3320 return X86EMUL_CONTINUE;
3323 static int em_clts(struct x86_emulate_ctxt *ctxt)
3325 ulong cr0;
3327 cr0 = ctxt->ops->get_cr(ctxt, 0);
3328 cr0 &= ~X86_CR0_TS;
3329 ctxt->ops->set_cr(ctxt, 0, cr0);
3330 return X86EMUL_CONTINUE;
3333 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3335 int rc = ctxt->ops->fix_hypercall(ctxt);
3337 if (rc != X86EMUL_CONTINUE)
3338 return rc;
3340 /* Let the processor re-execute the fixed hypercall */
3341 ctxt->_eip = ctxt->eip;
3342 /* Disable writeback. */
3343 ctxt->dst.type = OP_NONE;
3344 return X86EMUL_CONTINUE;
3347 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3348 void (*get)(struct x86_emulate_ctxt *ctxt,
3349 struct desc_ptr *ptr))
3351 struct desc_ptr desc_ptr;
3353 if (ctxt->mode == X86EMUL_MODE_PROT64)
3354 ctxt->op_bytes = 8;
3355 get(ctxt, &desc_ptr);
3356 if (ctxt->op_bytes == 2) {
3357 ctxt->op_bytes = 4;
3358 desc_ptr.address &= 0x00ffffff;
3360 /* Disable writeback. */
3361 ctxt->dst.type = OP_NONE;
3362 return segmented_write(ctxt, ctxt->dst.addr.mem,
3363 &desc_ptr, 2 + ctxt->op_bytes);
3366 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3368 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3371 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3373 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3376 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3378 struct desc_ptr desc_ptr;
3379 int rc;
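/* In 64-bit mode the descriptor-table base is 8 bytes and must be canonical. */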
3381 if (ctxt->mode == X86EMUL_MODE_PROT64)
3382 ctxt->op_bytes = 8;
3383 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3384 &desc_ptr.size, &desc_ptr.address,
3385 ctxt->op_bytes);
3386 if (rc != X86EMUL_CONTINUE)
3387 return rc;
3388 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3389 is_noncanonical_address(desc_ptr.address))
3390 return emulate_gp(ctxt, 0);
3391 if (lgdt)
3392 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3393 else
3394 ctxt->ops->set_idt(ctxt, &desc_ptr);
3395 /* Disable writeback. */
3396 ctxt->dst.type = OP_NONE;
3397 return X86EMUL_CONTINUE;
3400 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3402 return em_lgdt_lidt(ctxt, true);
3405 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3407 return em_lgdt_lidt(ctxt, false);
3410 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3412 if (ctxt->dst.type == OP_MEM)
3413 ctxt->dst.bytes = 2;
3414 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3415 return X86EMUL_CONTINUE;
3418 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3420 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3421 | (ctxt->src.val & 0x0f));
3422 ctxt->dst.type = OP_NONE;
3423 return X86EMUL_CONTINUE;
3426 static int em_loop(struct x86_emulate_ctxt *ctxt)
3428 int rc = X86EMUL_CONTINUE;
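/*
 * LOOP family (0xe0-0xe2): decrement rCX and branch while rCX != 0;
 * LOOPNE/LOOPE (0xe0/0xe1) additionally test ZF via test_cc().
 */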
3430 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3431 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3432 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3433 rc = jmp_rel(ctxt, ctxt->src.val);
3435 return rc;
3438 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3440 int rc = X86EMUL_CONTINUE;
3442 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3443 rc = jmp_rel(ctxt, ctxt->src.val);
3445 return rc;
3448 static int em_in(struct x86_emulate_ctxt *ctxt)
3450 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3451 &ctxt->dst.val))
3452 return X86EMUL_IO_NEEDED;
3454 return X86EMUL_CONTINUE;
3457 static int em_out(struct x86_emulate_ctxt *ctxt)
3459 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3460 &ctxt->src.val, 1);
3461 /* Disable writeback. */
3462 ctxt->dst.type = OP_NONE;
3463 return X86EMUL_CONTINUE;
3466 static int em_cli(struct x86_emulate_ctxt *ctxt)
3468 if (emulator_bad_iopl(ctxt))
3469 return emulate_gp(ctxt, 0);
3471 ctxt->eflags &= ~X86_EFLAGS_IF;
3472 return X86EMUL_CONTINUE;
3475 static int em_sti(struct x86_emulate_ctxt *ctxt)
3477 if (emulator_bad_iopl(ctxt))
3478 return emulate_gp(ctxt, 0);
3480 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3481 ctxt->eflags |= X86_EFLAGS_IF;
3482 return X86EMUL_CONTINUE;
3485 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3487 u32 eax, ebx, ecx, edx;
3489 eax = reg_read(ctxt, VCPU_REGS_RAX);
3490 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3491 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3492 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3493 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3494 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3495 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3496 return X86EMUL_CONTINUE;
3499 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3501 u32 flags;
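/* SAHF: load CF, PF, AF, ZF and SF from AH; bit 1 stays fixed to 1. */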
3503 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3504 X86_EFLAGS_SF;
3505 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3507 ctxt->eflags &= ~0xffUL;
3508 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3509 return X86EMUL_CONTINUE;
3512 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3514 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3515 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3516 return X86EMUL_CONTINUE;
3519 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3521 switch (ctxt->op_bytes) {
3522 #ifdef CONFIG_X86_64
3523 case 8:
3524 asm("bswap %0" : "+r"(ctxt->dst.val));
3525 break;
3526 #endif
3527 default:
3528 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3529 break;
3531 return X86EMUL_CONTINUE;
3534 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3536 /* emulating clflush regardless of cpuid */
3537 return X86EMUL_CONTINUE;
3540 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3542 ctxt->dst.val = (s32) ctxt->src.val;
3543 return X86EMUL_CONTINUE;
3546 static bool valid_cr(int nr)
3548 switch (nr) {
3549 case 0:
3550 case 2 ... 4:
3551 case 8:
3552 return true;
3553 default:
3554 return false;
3558 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3560 if (!valid_cr(ctxt->modrm_reg))
3561 return emulate_ud(ctxt);
3563 return X86EMUL_CONTINUE;
3566 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3568 u64 new_val = ctxt->src.val64;
3569 int cr = ctxt->modrm_reg;
3570 u64 efer = 0;
3572 static u64 cr_reserved_bits[] = {
3573 0xffffffff00000000ULL,
3574 0, 0, 0, /* CR3 checked later */
3575 CR4_RESERVED_BITS,
3576 0, 0, 0,
3577 CR8_RESERVED_BITS,
3580 if (!valid_cr(cr))
3581 return emulate_ud(ctxt);
3583 if (new_val & cr_reserved_bits[cr])
3584 return emulate_gp(ctxt, 0);
3586 switch (cr) {
3587 case 0: {
3588 u64 cr4;
3589 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3590 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3591 return emulate_gp(ctxt, 0);
3593 cr4 = ctxt->ops->get_cr(ctxt, 4);
3594 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3596 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3597 !(cr4 & X86_CR4_PAE))
3598 return emulate_gp(ctxt, 0);
3600 break;
3602 case 3: {
3603 u64 rsvd = 0;
3605 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3606 if (efer & EFER_LMA)
3607 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
3609 if (new_val & rsvd)
3610 return emulate_gp(ctxt, 0);
3612 break;
3614 case 4: {
3615 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3617 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3618 return emulate_gp(ctxt, 0);
3620 break;
3624 return X86EMUL_CONTINUE;
3627 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3629 unsigned long dr7;
3631 ctxt->ops->get_dr(ctxt, 7, &dr7);
3633 /* Check if DR7.GD (general detect enable) is set */
3634 return dr7 & (1 << 13);
3637 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3639 int dr = ctxt->modrm_reg;
3640 u64 cr4;
3642 if (dr > 7)
3643 return emulate_ud(ctxt);
3645 cr4 = ctxt->ops->get_cr(ctxt, 4);
3646 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3647 return emulate_ud(ctxt);
3649 if (check_dr7_gd(ctxt)) {
3650 ulong dr6;
3652 ctxt->ops->get_dr(ctxt, 6, &dr6);
3653 dr6 &= ~15;
3654 dr6 |= DR6_BD | DR6_RTM;
3655 ctxt->ops->set_dr(ctxt, 6, dr6);
3656 return emulate_db(ctxt);
3659 return X86EMUL_CONTINUE;
3662 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3664 u64 new_val = ctxt->src.val64;
3665 int dr = ctxt->modrm_reg;
3667 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3668 return emulate_gp(ctxt, 0);
3670 return check_dr_read(ctxt);
3673 static int check_svme(struct x86_emulate_ctxt *ctxt)
3675 u64 efer;
3677 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3679 if (!(efer & EFER_SVME))
3680 return emulate_ud(ctxt);
3682 return X86EMUL_CONTINUE;
3685 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3687 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3689 /* Valid physical address? */
3690 if (rax & 0xffff000000000000ULL)
3691 return emulate_gp(ctxt, 0);
3693 return check_svme(ctxt);
3696 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3698 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3700 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3701 return emulate_ud(ctxt);
3703 return X86EMUL_CONTINUE;
3706 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3708 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3709 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3711 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3712 ctxt->ops->check_pmc(ctxt, rcx))
3713 return emulate_gp(ctxt, 0);
3715 return X86EMUL_CONTINUE;
3718 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3720 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3721 if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3722 return emulate_gp(ctxt, 0);
3724 return X86EMUL_CONTINUE;
3727 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3729 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3730 if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3731 return emulate_gp(ctxt, 0);
3733 return X86EMUL_CONTINUE;
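/*
 * Shorthand used to build the opcode tables below: D() is decode-only,
 * I()/F() attach an execute/fastop callback, G()/GD() select a group via
 * ModRM, E() is an x87 escape table, GP() selects by mandatory prefix;
 * the *I* variants add an intercept and the *P* variants a permission
 * check.
 */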
3736 #define D(_y) { .flags = (_y) }
3737 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3738 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3739 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3740 #define N D(NotImpl)
3741 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3742 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3743 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3744 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
3745 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
3746 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3747 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3748 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3749 #define II(_f, _e, _i) \
3750 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3751 #define IIP(_f, _e, _i, _p) \
3752 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3753 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3754 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3756 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3757 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3758 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3759 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3760 #define I2bvIP(_f, _e, _i, _p) \
3761 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3763 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3764 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3765 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3767 static const struct opcode group7_rm0[] = {
3769 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
3770 N, N, N, N, N, N,
3773 static const struct opcode group7_rm1[] = {
3774 DI(SrcNone | Priv, monitor),
3775 DI(SrcNone | Priv, mwait),
3776 N, N, N, N, N, N,
3779 static const struct opcode group7_rm3[] = {
3780 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3781 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
3782 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3783 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3784 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3785 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3786 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3787 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3790 static const struct opcode group7_rm7[] = {
3792 DIP(SrcNone, rdtscp, check_rdtsc),
3793 N, N, N, N, N, N,
3796 static const struct opcode group1[] = {
3797 F(Lock, em_add),
3798 F(Lock | PageTable, em_or),
3799 F(Lock, em_adc),
3800 F(Lock, em_sbb),
3801 F(Lock | PageTable, em_and),
3802 F(Lock, em_sub),
3803 F(Lock, em_xor),
3804 F(NoWrite, em_cmp),
3807 static const struct opcode group1A[] = {
3808 I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
3811 static const struct opcode group2[] = {
3812 F(DstMem | ModRM, em_rol),
3813 F(DstMem | ModRM, em_ror),
3814 F(DstMem | ModRM, em_rcl),
3815 F(DstMem | ModRM, em_rcr),
3816 F(DstMem | ModRM, em_shl),
3817 F(DstMem | ModRM, em_shr),
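/* /6 (SAL) is an alias of SHL, hence em_shl appears twice. */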
3818 F(DstMem | ModRM, em_shl),
3819 F(DstMem | ModRM, em_sar),
3822 static const struct opcode group3[] = {
3823 F(DstMem | SrcImm | NoWrite, em_test),
3824 F(DstMem | SrcImm | NoWrite, em_test),
3825 F(DstMem | SrcNone | Lock, em_not),
3826 F(DstMem | SrcNone | Lock, em_neg),
3827 F(DstXacc | Src2Mem, em_mul_ex),
3828 F(DstXacc | Src2Mem, em_imul_ex),
3829 F(DstXacc | Src2Mem, em_div_ex),
3830 F(DstXacc | Src2Mem, em_idiv_ex),
3833 static const struct opcode group4[] = {
3834 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3835 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3836 N, N, N, N, N, N,
3839 static const struct opcode group5[] = {
3840 F(DstMem | SrcNone | Lock, em_inc),
3841 F(DstMem | SrcNone | Lock, em_dec),
3842 I(SrcMem | NearBranch, em_call_near_abs),
3843 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3844 I(SrcMem | NearBranch, em_jmp_abs),
3845 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3846 I(SrcMem | Stack, em_push), D(Undefined),
3849 static const struct opcode group6[] = {
3850 DI(Prot | DstMem, sldt),
3851 DI(Prot | DstMem, str),
3852 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3853 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3854 N, N, N, N,
3857 static const struct group_dual group7 = { {
3858 II(Mov | DstMem, em_sgdt, sgdt),
3859 II(Mov | DstMem, em_sidt, sidt),
3860 II(SrcMem | Priv, em_lgdt, lgdt),
3861 II(SrcMem | Priv, em_lidt, lidt),
3862 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3863 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3864 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3865 }, {
3866 EXT(0, group7_rm0),
3867 EXT(0, group7_rm1),
3868 N, EXT(0, group7_rm3),
3869 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3870 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3871 EXT(0, group7_rm7),
3872 } };
3874 static const struct opcode group8[] = {
3875 N, N, N, N,
3876 F(DstMem | SrcImmByte | NoWrite, em_bt),
3877 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3878 F(DstMem | SrcImmByte | Lock, em_btr),
3879 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3882 static const struct group_dual group9 = { {
3883 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3884 }, {
3885 N, N, N, N, N, N, N, N,
3886 } };
3888 static const struct opcode group11[] = {
3889 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3890 X7(D(Undefined)),
3893 static const struct gprefix pfx_0f_ae_7 = {
3894 I(SrcMem | ByteOp, em_clflush), N, N, N,
3897 static const struct group_dual group15 = { {
3898 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3899 }, {
3900 N, N, N, N, N, N, N, N,
3901 } };
3903 static const struct gprefix pfx_0f_6f_0f_7f = {
3904 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3907 static const struct instr_dual instr_dual_0f_2b = {
3908 I(0, em_mov), N
3911 static const struct gprefix pfx_0f_2b = {
3912 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
3915 static const struct gprefix pfx_0f_28_0f_29 = {
3916 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3919 static const struct gprefix pfx_0f_e7 = {
3920 N, I(Sse, em_mov), N, N,
3923 static const struct escape escape_d9 = { {
3924 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
3925 }, {
3926 /* 0xC0 - 0xC7 */
3927 N, N, N, N, N, N, N, N,
3928 /* 0xC8 - 0xCF */
3929 N, N, N, N, N, N, N, N,
3930 /* 0xD0 - 0xD7 */
3931 N, N, N, N, N, N, N, N,
3932 /* 0xD8 - 0xDF */
3933 N, N, N, N, N, N, N, N,
3934 /* 0xE0 - 0xE7 */
3935 N, N, N, N, N, N, N, N,
3936 /* 0xE8 - 0xEF */
3937 N, N, N, N, N, N, N, N,
3938 /* 0xF0 - 0xF7 */
3939 N, N, N, N, N, N, N, N,
3940 /* 0xF8 - 0xFF */
3941 N, N, N, N, N, N, N, N,
3942 } };
3944 static const struct escape escape_db = { {
3945 N, N, N, N, N, N, N, N,
3946 }, {
3947 /* 0xC0 - 0xC7 */
3948 N, N, N, N, N, N, N, N,
3949 /* 0xC8 - 0xCF */
3950 N, N, N, N, N, N, N, N,
3951 /* 0xD0 - 0xD7 */
3952 N, N, N, N, N, N, N, N,
3953 /* 0xD8 - 0xDF */
3954 N, N, N, N, N, N, N, N,
3955 /* 0xE0 - 0xE7 */
3956 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3957 /* 0xE8 - 0xEF */
3958 N, N, N, N, N, N, N, N,
3959 /* 0xF0 - 0xF7 */
3960 N, N, N, N, N, N, N, N,
3961 /* 0xF8 - 0xFF */
3962 N, N, N, N, N, N, N, N,
3963 } };
3965 static const struct escape escape_dd = { {
3966 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
3967 }, {
3968 /* 0xC0 - 0xC7 */
3969 N, N, N, N, N, N, N, N,
3970 /* 0xC8 - 0xCF */
3971 N, N, N, N, N, N, N, N,
3972 /* 0xD0 - 0xD7 */
3973 N, N, N, N, N, N, N, N,
3974 /* 0xD8 - 0xDF */
3975 N, N, N, N, N, N, N, N,
3976 /* 0xE0 - 0xE7 */
3977 N, N, N, N, N, N, N, N,
3978 /* 0xE8 - 0xEF */
3979 N, N, N, N, N, N, N, N,
3980 /* 0xF0 - 0xF7 */
3981 N, N, N, N, N, N, N, N,
3982 /* 0xF8 - 0xFF */
3983 N, N, N, N, N, N, N, N,
3984 } };
3986 static const struct instr_dual instr_dual_0f_c3 = {
3987 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
3990 static const struct mode_dual mode_dual_63 = {
3991 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
3994 static const struct opcode opcode_table[256] = {
3995 /* 0x00 - 0x07 */
3996 F6ALU(Lock, em_add),
3997 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3998 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3999 /* 0x08 - 0x0F */
4000 F6ALU(Lock | PageTable, em_or),
4001 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4003 /* 0x10 - 0x17 */
4004 F6ALU(Lock, em_adc),
4005 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4006 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4007 /* 0x18 - 0x1F */
4008 F6ALU(Lock, em_sbb),
4009 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4010 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4011 /* 0x20 - 0x27 */
4012 F6ALU(Lock | PageTable, em_and), N, N,
4013 /* 0x28 - 0x2F */
4014 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4015 /* 0x30 - 0x37 */
4016 F6ALU(Lock, em_xor), N, N,
4017 /* 0x38 - 0x3F */
4018 F6ALU(NoWrite, em_cmp), N, N,
4019 /* 0x40 - 0x4F */
4020 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4021 /* 0x50 - 0x57 */
4022 X8(I(SrcReg | Stack, em_push)),
4023 /* 0x58 - 0x5F */
4024 X8(I(DstReg | Stack, em_pop)),
4025 /* 0x60 - 0x67 */
4026 I(ImplicitOps | Stack | No64, em_pusha),
4027 I(ImplicitOps | Stack | No64, em_popa),
4028 N, MD(ModRM, &mode_dual_63),
4029 N, N, N, N,
4030 /* 0x68 - 0x6F */
4031 I(SrcImm | Mov | Stack, em_push),
4032 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4033 I(SrcImmByte | Mov | Stack, em_push),
4034 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4035 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4036 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4037 /* 0x70 - 0x7F */
4038 X16(D(SrcImmByte | NearBranch)),
4039 /* 0x80 - 0x87 */
4040 G(ByteOp | DstMem | SrcImm, group1),
4041 G(DstMem | SrcImm, group1),
4042 G(ByteOp | DstMem | SrcImm | No64, group1),
4043 G(DstMem | SrcImmByte, group1),
4044 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4045 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4046 /* 0x88 - 0x8F */
4047 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4048 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4049 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4050 D(ModRM | SrcMem | NoAccess | DstReg),
4051 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4052 G(0, group1A),
4053 /* 0x90 - 0x97 */
4054 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4055 /* 0x98 - 0x9F */
4056 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4057 I(SrcImmFAddr | No64, em_call_far), N,
4058 II(ImplicitOps | Stack, em_pushf, pushf),
4059 II(ImplicitOps | Stack, em_popf, popf),
4060 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4061 /* 0xA0 - 0xA7 */
4062 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4063 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4064 I2bv(SrcSI | DstDI | Mov | String, em_mov),
4065 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4066 /* 0xA8 - 0xAF */
4067 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4068 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4069 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4070 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4071 /* 0xB0 - 0xB7 */
4072 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4073 /* 0xB8 - 0xBF */
4074 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4075 /* 0xC0 - 0xC7 */
4076 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4077 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4078 I(ImplicitOps | NearBranch, em_ret),
4079 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4080 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4081 G(ByteOp, group11), G(0, group11),
4082 /* 0xC8 - 0xCF */
4083 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4084 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4085 I(ImplicitOps, em_ret_far),
4086 D(ImplicitOps), DI(SrcImmByte, intn),
4087 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4088 /* 0xD0 - 0xD7 */
4089 G(Src2One | ByteOp, group2), G(Src2One, group2),
4090 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4091 I(DstAcc | SrcImmUByte | No64, em_aam),
4092 I(DstAcc | SrcImmUByte | No64, em_aad),
4093 F(DstAcc | ByteOp | No64, em_salc),
4094 I(DstAcc | SrcXLat | ByteOp, em_mov),
4095 /* 0xD8 - 0xDF */
4096 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4097 /* 0xE0 - 0xE7 */
4098 X3(I(SrcImmByte | NearBranch, em_loop)),
4099 I(SrcImmByte | NearBranch, em_jcxz),
4100 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4101 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4102 /* 0xE8 - 0xEF */
4103 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4104 I(SrcImmFAddr | No64, em_jmp_far),
4105 D(SrcImmByte | ImplicitOps | NearBranch),
4106 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4107 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4108 /* 0xF0 - 0xF7 */
4109 N, DI(ImplicitOps, icebp), N, N,
4110 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4111 G(ByteOp, group3), G(0, group3),
4112 /* 0xF8 - 0xFF */
4113 D(ImplicitOps), D(ImplicitOps),
4114 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4115 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4116 };
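/*
 * Example of a one-byte lookup: "mov $0x12345678,%eax" encodes as
 * b8 78 56 34 12, so ctxt->b = 0xb8 selects the X8(I(DstReg |
 * SrcImm64 | Mov, em_mov)) entry above; the decoder then fetches a
 * register destination plus an immediate source and dispatches to
 * em_mov() without ever seeing a ModRM byte.
 */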
4118 static const struct opcode twobyte_table[256] = {
4119 /* 0x00 - 0x0F */
4120 G(0, group6), GD(0, &group7), N, N,
4121 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4122 II(ImplicitOps | Priv, em_clts, clts), N,
4123 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4124 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4125 /* 0x10 - 0x1F */
4126 N, N, N, N, N, N, N, N,
4127 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4128 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4129 /* 0x20 - 0x2F */
4130 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4131 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4132 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4133 check_cr_write),
4134 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4135 check_dr_write),
4136 N, N, N, N,
4137 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4138 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4139 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4140 N, N, N, N,
4141 /* 0x30 - 0x3F */
4142 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4143 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4144 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4145 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4146 I(ImplicitOps | EmulateOnUD, em_sysenter),
4147 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4148 N, N,
4149 N, N, N, N, N, N, N, N,
4150 /* 0x40 - 0x4F */
4151 X16(D(DstReg | SrcMem | ModRM)),
4152 /* 0x50 - 0x5F */
4153 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4154 /* 0x60 - 0x6F */
4155 N, N, N, N,
4156 N, N, N, N,
4157 N, N, N, N,
4158 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4159 /* 0x70 - 0x7F */
4160 N, N, N, N,
4161 N, N, N, N,
4162 N, N, N, N,
4163 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4164 /* 0x80 - 0x8F */
4165 X16(D(SrcImm | NearBranch)),
4166 /* 0x90 - 0x9F */
4167 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4168 /* 0xA0 - 0xA7 */
4169 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4170 II(ImplicitOps, em_cpuid, cpuid),
4171 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4172 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4173 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4174 /* 0xA8 - 0xAF */
4175 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4176 DI(ImplicitOps, rsm),
4177 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4178 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4179 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4180 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4181 /* 0xB0 - 0xB7 */
4182 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4183 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4184 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4185 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4186 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4187 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4188 /* 0xB8 - 0xBF */
4189 N, N,
4190 G(BitOp, group8),
4191 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4192 I(DstReg | SrcMem | ModRM, em_bsf_c),
4193 I(DstReg | SrcMem | ModRM, em_bsr_c),
4194 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4195 /* 0xC0 - 0xC7 */
4196 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4197 N, ID(0, &instr_dual_0f_c3),
4198 N, N, N, GD(0, &group9),
4199 /* 0xC8 - 0xCF */
4200 X8(I(DstReg, em_bswap)),
4201 /* 0xD0 - 0xDF */
4202 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4203 /* 0xE0 - 0xEF */
4204 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4205 N, N, N, N, N, N, N, N,
4206 /* 0xF0 - 0xFF */
4207 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4208 };
4210 static const struct instr_dual instr_dual_0f_38_f0 = {
4211 I(DstReg | SrcMem | Mov, em_movbe), N
4212 };
4214 static const struct instr_dual instr_dual_0f_38_f1 = {
4215 I(DstMem | SrcReg | Mov, em_movbe), N
4216 };
4218 static const struct gprefix three_byte_0f_38_f0 = {
4219 ID(0, &instr_dual_0f_38_f0), N, N, N
4220 };
4222 static const struct gprefix three_byte_0f_38_f1 = {
4223 ID(0, &instr_dual_0f_38_f1), N, N, N
4224 };
4226 /*
4227 * Insns below are selected by the prefix which is indexed by the
4228 * third opcode byte.
4229 */
4230 static const struct opcode opcode_map_0f_38[256] = {
4231 /* 0x00 - 0x7f */
4232 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4233 /* 0x80 - 0xef */
4234 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4235 /* 0xf0 - 0xf1 */
4236 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4237 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4238 /* 0xf2 - 0xff */
4239 N, N, X4(N), X8(N)
4240 };
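/*
 * Example: "movbe (%rdx),%eax" is 0f 38 f0 02. The third opcode
 * byte 0xf0 selects the GP() entry above, the mandatory-prefix
 * table resolves to the no-prefix form, and the instr_dual entry
 * keeps only the memory-source variant (the register form, where
 * mod == 3, stays undefined).
 */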
4242 #undef D
4243 #undef N
4244 #undef G
4245 #undef GD
4246 #undef I
4247 #undef GP
4248 #undef EXT
4249 #undef MD
4250 #undef ID
4252 #undef D2bv
4253 #undef D2bvIP
4254 #undef I2bv
4255 #undef I2bvIP
4256 #undef I6ALU
4258 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4259 {
4260 unsigned size;
4262 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4263 if (size == 8)
4264 size = 4;
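/*
 * e.g. "add $0x12345678,%rax" (REX.W) still carries only a 4-byte
 * immediate that the CPU sign-extends to 64 bits, which is why an
 * 8-byte operand size is clamped to a 4-byte immediate fetch.
 */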
4265 return size;
4266 }
4268 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4269 unsigned size, bool sign_extension)
4270 {
4271 int rc = X86EMUL_CONTINUE;
4273 op->type = OP_IMM;
4274 op->bytes = size;
4275 op->addr.mem.ea = ctxt->_eip;
4276 /* NB. Immediates are sign-extended as necessary. */
4277 switch (op->bytes) {
4278 case 1:
4279 op->val = insn_fetch(s8, ctxt);
4280 break;
4281 case 2:
4282 op->val = insn_fetch(s16, ctxt);
4283 break;
4284 case 4:
4285 op->val = insn_fetch(s32, ctxt);
4286 break;
4287 case 8:
4288 op->val = insn_fetch(s64, ctxt);
4289 break;
4290 }
4291 if (!sign_extension) {
4292 switch (op->bytes) {
4293 case 1:
4294 op->val &= 0xff;
4295 break;
4296 case 2:
4297 op->val &= 0xffff;
4298 break;
4299 case 4:
4300 op->val &= 0xffffffff;
4301 break;
4302 }
4303 }
4304 done:
4305 return rc;
4306 }
4308 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4309 unsigned d)
4310 {
4311 int rc = X86EMUL_CONTINUE;
4313 switch (d) {
4314 case OpReg:
4315 decode_register_operand(ctxt, op);
4316 break;
4317 case OpImmUByte:
4318 rc = decode_imm(ctxt, op, 1, false);
4319 break;
4320 case OpMem:
4321 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4322 mem_common:
4323 *op = ctxt->memop;
4324 ctxt->memopp = op;
4325 if (ctxt->d & BitOp)
4326 fetch_bit_operand(ctxt);
4327 op->orig_val = op->val;
4328 break;
4329 case OpMem64:
4330 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4331 goto mem_common;
4332 case OpAcc:
4333 op->type = OP_REG;
4334 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4335 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4336 fetch_register_operand(op);
4337 op->orig_val = op->val;
4338 break;
4339 case OpAccLo:
4340 op->type = OP_REG;
4341 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4342 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4343 fetch_register_operand(op);
4344 op->orig_val = op->val;
4345 break;
4346 case OpAccHi:
4347 if (ctxt->d & ByteOp) {
4348 op->type = OP_NONE;
4349 break;
4350 }
4351 op->type = OP_REG;
4352 op->bytes = ctxt->op_bytes;
4353 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4354 fetch_register_operand(op);
4355 op->orig_val = op->val;
4356 break;
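/*
 * OpAccLo/OpAccHi describe the DX:AX style register pair, e.g.
 * "mul %ecx" leaves its 64-bit product in EDX:EAX, so the result
 * decodes as AccLo plus AccHi. For byte ops AccHi is dropped above
 * because the whole result already fits in AX.
 */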
4357 case OpDI:
4358 op->type = OP_MEM;
4359 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4360 op->addr.mem.ea =
4361 register_address(ctxt, VCPU_REGS_RDI);
4362 op->addr.mem.seg = VCPU_SREG_ES;
4363 op->val = 0;
4364 op->count = 1;
4365 break;
4366 case OpDX:
4367 op->type = OP_REG;
4368 op->bytes = 2;
4369 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4370 fetch_register_operand(op);
4371 break;
4372 case OpCL:
4373 op->type = OP_IMM;
4374 op->bytes = 1;
4375 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4376 break;
4377 case OpImmByte:
4378 rc = decode_imm(ctxt, op, 1, true);
4379 break;
4380 case OpOne:
4381 op->type = OP_IMM;
4382 op->bytes = 1;
4383 op->val = 1;
4384 break;
4385 case OpImm:
4386 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4387 break;
4388 case OpImm64:
4389 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4390 break;
4391 case OpMem8:
4392 ctxt->memop.bytes = 1;
4393 if (ctxt->memop.type == OP_REG) {
4394 ctxt->memop.addr.reg = decode_register(ctxt,
4395 ctxt->modrm_rm, true);
4396 fetch_register_operand(&ctxt->memop);
4397 }
4398 goto mem_common;
4399 case OpMem16:
4400 ctxt->memop.bytes = 2;
4401 goto mem_common;
4402 case OpMem32:
4403 ctxt->memop.bytes = 4;
4404 goto mem_common;
4405 case OpImmU16:
4406 rc = decode_imm(ctxt, op, 2, false);
4407 break;
4408 case OpImmU:
4409 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4410 break;
4411 case OpSI:
4412 op->type = OP_MEM;
4413 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4414 op->addr.mem.ea =
4415 register_address(ctxt, VCPU_REGS_RSI);
4416 op->addr.mem.seg = ctxt->seg_override;
4417 op->val = 0;
4418 op->count = 1;
4419 break;
4420 case OpXLat:
4421 op->type = OP_MEM;
4422 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4423 op->addr.mem.ea =
4424 address_mask(ctxt,
4425 reg_read(ctxt, VCPU_REGS_RBX) +
4426 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4427 op->addr.mem.seg = ctxt->seg_override;
4428 op->val = 0;
4429 break;
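/*
 * e.g. for XLAT in 16-bit mode with BX = 0x1000 and AL = 0x42 the
 * effective address computed above is ds:0x1042; address_mask()
 * keeps the sum within the current address size.
 */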
4430 case OpImmFAddr:
4431 op->type = OP_IMM;
4432 op->addr.mem.ea = ctxt->_eip;
4433 op->bytes = ctxt->op_bytes + 2;
4434 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4435 break;
4436 case OpMemFAddr:
4437 ctxt->memop.bytes = ctxt->op_bytes + 2;
4438 goto mem_common;
4439 case OpES:
4440 op->type = OP_IMM;
4441 op->val = VCPU_SREG_ES;
4442 break;
4443 case OpCS:
4444 op->type = OP_IMM;
4445 op->val = VCPU_SREG_CS;
4446 break;
4447 case OpSS:
4448 op->type = OP_IMM;
4449 op->val = VCPU_SREG_SS;
4450 break;
4451 case OpDS:
4452 op->type = OP_IMM;
4453 op->val = VCPU_SREG_DS;
4454 break;
4455 case OpFS:
4456 op->type = OP_IMM;
4457 op->val = VCPU_SREG_FS;
4458 break;
4459 case OpGS:
4460 op->type = OP_IMM;
4461 op->val = VCPU_SREG_GS;
4462 break;
4463 case OpImplicit:
4464 /* Special instructions do their own operand decoding. */
4465 default:
4466 op->type = OP_NONE; /* Disable writeback. */
4467 break;
4468 }
4470 done:
4471 return rc;
4472 }
4474 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4475 {
4476 int rc = X86EMUL_CONTINUE;
4477 int mode = ctxt->mode;
4478 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4479 bool op_prefix = false;
4480 bool has_seg_override = false;
4481 struct opcode opcode;
4483 ctxt->memop.type = OP_NONE;
4484 ctxt->memopp = NULL;
4485 ctxt->_eip = ctxt->eip;
4486 ctxt->fetch.ptr = ctxt->fetch.data;
4487 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4488 ctxt->opcode_len = 1;
4489 if (insn_len > 0)
4490 memcpy(ctxt->fetch.data, insn, insn_len);
4491 else {
4492 rc = __do_insn_fetch_bytes(ctxt, 1);
4493 if (rc != X86EMUL_CONTINUE)
4494 return rc;
4495 }
4497 switch (mode) {
4498 case X86EMUL_MODE_REAL:
4499 case X86EMUL_MODE_VM86:
4500 case X86EMUL_MODE_PROT16:
4501 def_op_bytes = def_ad_bytes = 2;
4502 break;
4503 case X86EMUL_MODE_PROT32:
4504 def_op_bytes = def_ad_bytes = 4;
4505 break;
4506 #ifdef CONFIG_X86_64
4507 case X86EMUL_MODE_PROT64:
4508 def_op_bytes = 4;
4509 def_ad_bytes = 8;
4510 break;
4511 #endif
4512 default:
4513 return EMULATION_FAILED;
4514 }
4516 ctxt->op_bytes = def_op_bytes;
4517 ctxt->ad_bytes = def_ad_bytes;
4519 /* Legacy prefixes. */
4520 for (;;) {
4521 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4522 case 0x66: /* operand-size override */
4523 op_prefix = true;
4524 /* switch between 2/4 bytes */
4525 ctxt->op_bytes = def_op_bytes ^ 6;
4526 break;
4527 case 0x67: /* address-size override */
4528 if (mode == X86EMUL_MODE_PROT64)
4529 /* switch between 4/8 bytes */
4530 ctxt->ad_bytes = def_ad_bytes ^ 12;
4531 else
4532 /* switch between 2/4 bytes */
4533 ctxt->ad_bytes = def_ad_bytes ^ 6;
4534 break;
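/*
 * The XOR trick above toggles between the two legal sizes:
 * 2 ^ 6 = 4 and 4 ^ 6 = 2 for the operand size, 4 ^ 12 = 8 and
 * 8 ^ 12 = 4 for the 64-bit address size, so one override byte
 * flips whichever default the current mode established.
 */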
4535 case 0x26: /* ES override */
4536 case 0x2e: /* CS override */
4537 case 0x36: /* SS override */
4538 case 0x3e: /* DS override */
4539 has_seg_override = true;
4540 ctxt->seg_override = (ctxt->b >> 3) & 3;
4541 break;
4542 case 0x64: /* FS override */
4543 case 0x65: /* GS override */
4544 has_seg_override = true;
4545 ctxt->seg_override = ctxt->b & 7;
4546 break;
4547 case 0x40 ... 0x4f: /* REX */
4548 if (mode != X86EMUL_MODE_PROT64)
4549 goto done_prefixes;
4550 ctxt->rex_prefix = ctxt->b;
4551 continue;
4552 case 0xf0: /* LOCK */
4553 ctxt->lock_prefix = 1;
4554 break;
4555 case 0xf2: /* REPNE/REPNZ */
4556 case 0xf3: /* REP/REPE/REPZ */
4557 ctxt->rep_prefix = ctxt->b;
4558 break;
4559 default:
4560 goto done_prefixes;
4561 }
4563 /* Any legacy prefix after a REX prefix nullifies its effect. */
4565 ctxt->rex_prefix = 0;
4566 }
4568 done_prefixes:
4570 /* REX prefix. */
4571 if (ctxt->rex_prefix & 8)
4572 ctxt->op_bytes = 8; /* REX.W */
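/*
 * e.g. "mov %rcx,%rax" is 48 89 c8: the 0x48 REX.W prefix was
 * recorded in the loop above and widens op_bytes to 8 here before
 * the opcode byte 0x89 is looked up.
 */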
4574 /* Opcode byte(s). */
4575 opcode = opcode_table[ctxt->b];
4576 /* Two-byte opcode? */
4577 if (ctxt->b == 0x0f) {
4578 ctxt->opcode_len = 2;
4579 ctxt->b = insn_fetch(u8, ctxt);
4580 opcode = twobyte_table[ctxt->b];
4582 /* 0F_38 opcode map */
4583 if (ctxt->b == 0x38) {
4584 ctxt->opcode_len = 3;
4585 ctxt->b = insn_fetch(u8, ctxt);
4586 opcode = opcode_map_0f_38[ctxt->b];
4587 }
4588 }
4589 ctxt->d = opcode.flags;
4591 if (ctxt->d & ModRM)
4592 ctxt->modrm = insn_fetch(u8, ctxt);
4594 /* vex-prefix instructions are not implemented */
4595 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4596 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4597 ctxt->d = NotImpl;
4598 }
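/*
 * e.g. "vmovaps %xmm1,%xmm0" (c5 f8 28 c1) starts with 0xc5, which
 * outside 64-bit mode could also be LDS; the mod bits of the
 * following byte distinguish the two, and either way the AVX form
 * is only marked NotImpl above rather than decoded.
 */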
4600 while (ctxt->d & GroupMask) {
4601 switch (ctxt->d & GroupMask) {
4602 case Group:
4603 goffset = (ctxt->modrm >> 3) & 7;
4604 opcode = opcode.u.group[goffset];
4605 break;
4606 case GroupDual:
4607 goffset = (ctxt->modrm >> 3) & 7;
4608 if ((ctxt->modrm >> 6) == 3)
4609 opcode = opcode.u.gdual->mod3[goffset];
4610 else
4611 opcode = opcode.u.gdual->mod012[goffset];
4612 break;
4613 case RMExt:
4614 goffset = ctxt->modrm & 7;
4615 opcode = opcode.u.group[goffset];
4616 break;
4617 case Prefix:
4618 if (ctxt->rep_prefix && op_prefix)
4619 return EMULATION_FAILED;
4620 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4621 switch (simd_prefix) {
4622 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4623 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4624 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4625 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4626 }
4627 break;
4628 case Escape:
4629 if (ctxt->modrm > 0xbf)
4630 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4631 else
4632 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4633 break;
4634 case InstrDual:
4635 if ((ctxt->modrm >> 6) == 3)
4636 opcode = opcode.u.idual->mod3;
4637 else
4638 opcode = opcode.u.idual->mod012;
4639 break;
4640 case ModeDual:
4641 if (ctxt->mode == X86EMUL_MODE_PROT64)
4642 opcode = opcode.u.mdual->mode64;
4643 else
4644 opcode = opcode.u.mdual->mode32;
4645 break;
4646 default:
4647 return EMULATION_FAILED;
4648 }
4650 ctxt->d &= ~(u64)GroupMask;
4651 ctxt->d |= opcode.flags;
4652 }
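/*
 * Group resolution example: "push (%rax)" is ff 30. opcode_table[0xff]
 * is G(0, group5), and goffset = (0x30 >> 3) & 7 = 6 re-resolves the
 * entry to the /6 (push) slot of group5 before execution.
 */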
4654 /* Unrecognised? */
4655 if (ctxt->d == 0)
4656 return EMULATION_FAILED;
4658 ctxt->execute = opcode.u.execute;
4660 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4661 return EMULATION_FAILED;
4663 if (unlikely(ctxt->d &
4664 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4665 No16))) {
4666 /*
4667 * These are copied unconditionally here, and checked unconditionally
4668 * in x86_emulate_insn.
4669 */
4670 ctxt->check_perm = opcode.check_perm;
4671 ctxt->intercept = opcode.intercept;
4673 if (ctxt->d & NotImpl)
4674 return EMULATION_FAILED;
4676 if (mode == X86EMUL_MODE_PROT64) {
4677 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4678 ctxt->op_bytes = 8;
4679 else if (ctxt->d & NearBranch)
4680 ctxt->op_bytes = 8;
4681 }
4683 if (ctxt->d & Op3264) {
4684 if (mode == X86EMUL_MODE_PROT64)
4685 ctxt->op_bytes = 8;
4686 else
4687 ctxt->op_bytes = 4;
4688 }
4690 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4691 ctxt->op_bytes = 4;
4693 if (ctxt->d & Sse)
4694 ctxt->op_bytes = 16;
4695 else if (ctxt->d & Mmx)
4696 ctxt->op_bytes = 8;
4697 }
4699 /* ModRM and SIB bytes. */
4700 if (ctxt->d & ModRM) {
4701 rc = decode_modrm(ctxt, &ctxt->memop);
4702 if (!has_seg_override) {
4703 has_seg_override = true;
4704 ctxt->seg_override = ctxt->modrm_seg;
4705 }
4706 } else if (ctxt->d & MemAbs)
4707 rc = decode_abs(ctxt, &ctxt->memop);
4708 if (rc != X86EMUL_CONTINUE)
4709 goto done;
4711 if (!has_seg_override)
4712 ctxt->seg_override = VCPU_SREG_DS;
4714 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4716 /*
4717 * Decode and fetch the source operand: register, memory
4718 * or immediate.
4719 */
4720 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4721 if (rc != X86EMUL_CONTINUE)
4722 goto done;
4724 /*
4725 * Decode and fetch the second source operand: register, memory
4726 * or immediate.
4727 */
4728 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4729 if (rc != X86EMUL_CONTINUE)
4730 goto done;
4732 /* Decode and fetch the destination operand: register or memory. */
4733 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4735 if (ctxt->rip_relative)
4736 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4737 ctxt->memopp->addr.mem.ea + ctxt->_eip);
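/*
 * RIP-relative fixup example: "mov 0x10(%rip),%eax" encodes only a
 * displacement; the target is relative to the *next* instruction,
 * so ctxt->_eip (already advanced past the decoded bytes) is added
 * here rather than the original ctxt->eip.
 */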
4739 done:
4740 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4741 }
4743 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4744 {
4745 return ctxt->d & PageTable;
4746 }
4748 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4749 {
4750 /* The second termination condition applies only to REPE
4751 * and REPNE. If the repeat string operation prefix is
4752 * REPE/REPZ or REPNE/REPNZ, test the corresponding
4753 * termination condition:
4754 * - if REPE/REPZ and ZF = 0 then done
4755 * - if REPNE/REPNZ and ZF = 1 then done
4756 */
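/*
 * e.g. "repe cmpsb" (f3 a6) keeps iterating while RCX != 0 and ZF
 * stays set; the first mismatching byte clears ZF, and the test
 * below then reports the string operation as completed.
 */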
4757 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4758 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4759 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4760 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
4761 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4762 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
4763 return true;
4765 return false;
4766 }
4768 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4769 {
4770 bool fault = false;
4772 ctxt->ops->get_fpu(ctxt);
4773 asm volatile("1: fwait \n\t"
4774 "2: \n\t"
4775 ".pushsection .fixup,\"ax\" \n\t"
4776 "3: \n\t"
4777 "movb $1, %[fault] \n\t"
4778 "jmp 2b \n\t"
4779 ".popsection \n\t"
4780 _ASM_EXTABLE(1b, 3b)
4781 : [fault]"+qm"(fault));
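/*
 * fwait forces any pending x87 exception to be delivered now, on the
 * host; the exception-table fixup at label 3 merely sets "fault" so
 * the condition can be reflected into the guest as #MF below instead
 * of faulting in host context.
 */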
4782 ctxt->ops->put_fpu(ctxt);
4784 if (unlikely(fault))
4785 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4787 return X86EMUL_CONTINUE;
4788 }
4790 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4791 struct operand *op)
4792 {
4793 if (op->type == OP_MM)
4794 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4795 }
4797 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4798 {
4799 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4800 if (!(ctxt->d & ByteOp))
4801 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
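/*
 * The fastop stubs are laid out per operand size at FASTOP_SIZE
 * strides (byte, word, long, quad), so e.g. a 32-bit destination
 * gives __ffs(4) = 2 and lands on the third stub, which runs the
 * real ALU instruction and hands back the resulting flags.
 */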
4802 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4803 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4804 [fastop]"+S"(fop)
4805 : "c"(ctxt->src2.val));
4806 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4807 if (!fop) /* exception is returned in fop variable */
4808 return emulate_de(ctxt);
4809 return X86EMUL_CONTINUE;
4810 }
4812 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4813 {
4814 memset(&ctxt->rip_relative, 0,
4815 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4817 ctxt->io_read.pos = 0;
4818 ctxt->io_read.end = 0;
4819 ctxt->mem_read.end = 0;
4820 }
4822 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4823 {
4824 const struct x86_emulate_ops *ops = ctxt->ops;
4825 int rc = X86EMUL_CONTINUE;
4826 int saved_dst_type = ctxt->dst.type;
4828 ctxt->mem_read.pos = 0;
4830 /* LOCK prefix is allowed only with some instructions */
4831 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4832 rc = emulate_ud(ctxt);
4833 goto done;
4834 }
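/*
 * e.g. "lock add %eax,(%rbx)" is legal, but "lock add %ebx,%eax"
 * has a register destination, so the dst.type != OP_MEM test above
 * correctly raises #UD for it.
 */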
4836 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4837 rc = emulate_ud(ctxt);
4838 goto done;
4839 }
4841 if (unlikely(ctxt->d &
4842 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4843 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4844 (ctxt->d & Undefined)) {
4845 rc = emulate_ud(ctxt);
4846 goto done;
4847 }
4849 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4850 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4851 rc = emulate_ud(ctxt);
4852 goto done;
4853 }
4855 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4856 rc = emulate_nm(ctxt);
4857 goto done;
4858 }
4860 if (ctxt->d & Mmx) {
4861 rc = flush_pending_x87_faults(ctxt);
4862 if (rc != X86EMUL_CONTINUE)
4863 goto done;
4864 /*
4865 * Now that we know the FPU is exception-safe, we can fetch
4866 * operands from it.
4867 */
4868 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4869 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4870 if (!(ctxt->d & Mov))
4871 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4872 }
4874 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4875 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4876 X86_ICPT_PRE_EXCEPT);
4877 if (rc != X86EMUL_CONTINUE)
4878 goto done;
4879 }
4881 /* Instruction can only be executed in protected mode */
4882 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4883 rc = emulate_ud(ctxt);
4884 goto done;
4885 }
4887 /* Privileged instruction can be executed only in CPL=0 */
4888 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
4889 if (ctxt->d & PrivUD)
4890 rc = emulate_ud(ctxt);
4891 else
4892 rc = emulate_gp(ctxt, 0);
4893 goto done;
4894 }
4896 /* Do instruction specific permission checks */
4897 if (ctxt->d & CheckPerm) {
4898 rc = ctxt->check_perm(ctxt);
4899 if (rc != X86EMUL_CONTINUE)
4900 goto done;
4901 }
4903 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4904 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4905 X86_ICPT_POST_EXCEPT);
4906 if (rc != X86EMUL_CONTINUE)
4907 goto done;
4908 }
4910 if (ctxt->rep_prefix && (ctxt->d & String)) {
4911 /* All REP prefixes have the same first termination condition */
4912 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4913 ctxt->eip = ctxt->_eip;
4914 ctxt->eflags &= ~X86_EFLAGS_RF;
4915 goto done;
4916 }
4917 }
4918 }
4920 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4921 rc = segmented_read(ctxt, ctxt->src.addr.mem,
4922 ctxt->src.valptr, ctxt->src.bytes);
4923 if (rc != X86EMUL_CONTINUE)
4924 goto done;
4925 ctxt->src.orig_val64 = ctxt->src.val64;
4926 }
4928 if (ctxt->src2.type == OP_MEM) {
4929 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4930 &ctxt->src2.val, ctxt->src2.bytes);
4931 if (rc != X86EMUL_CONTINUE)
4932 goto done;
4933 }
4935 if ((ctxt->d & DstMask) == ImplicitOps)
4936 goto special_insn;
4939 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
4940 /* optimisation - avoid slow emulated read if Mov */
4941 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4942 &ctxt->dst.val, ctxt->dst.bytes);
4943 if (rc != X86EMUL_CONTINUE) {
4944 if (!(ctxt->d & NoWrite) &&
4945 rc == X86EMUL_PROPAGATE_FAULT &&
4946 ctxt->exception.vector == PF_VECTOR)
4947 ctxt->exception.error_code |= PFERR_WRITE_MASK;
4948 goto done;
4949 }
4950 }
4951 /* Copy full 64-bit value for CMPXCHG8B. */
4952 ctxt->dst.orig_val64 = ctxt->dst.val64;
4954 special_insn:
4956 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4957 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4958 X86_ICPT_POST_MEMACCESS);
4959 if (rc != X86EMUL_CONTINUE)
4960 goto done;
4963 if (ctxt->rep_prefix && (ctxt->d & String))
4964 ctxt->eflags |= X86_EFLAGS_RF;
4965 else
4966 ctxt->eflags &= ~X86_EFLAGS_RF;
4968 if (ctxt->execute) {
4969 if (ctxt->d & Fastop) {
4970 void (*fop)(struct fastop *) = (void *)ctxt->execute;
4971 rc = fastop(ctxt, fop);
4972 if (rc != X86EMUL_CONTINUE)
4973 goto done;
4974 goto writeback;
4976 rc = ctxt->execute(ctxt);
4977 if (rc != X86EMUL_CONTINUE)
4978 goto done;
4979 goto writeback;
4982 if (ctxt->opcode_len == 2)
4983 goto twobyte_insn;
4984 else if (ctxt->opcode_len == 3)
4985 goto threebyte_insn;
4987 switch (ctxt->b) {
4988 case 0x70 ... 0x7f: /* jcc (short) */
4989 if (test_cc(ctxt->b, ctxt->eflags))
4990 rc = jmp_rel(ctxt, ctxt->src.val);
4991 break;
4992 case 0x8d: /* lea r16/r32, m */
4993 ctxt->dst.val = ctxt->src.addr.mem.ea;
4994 break;
4995 case 0x90 ... 0x97: /* nop / xchg reg, rax */
4996 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
4997 ctxt->dst.type = OP_NONE;
4998 else
4999 rc = em_xchg(ctxt);
5000 break;
5001 case 0x98: /* cbw/cwde/cdqe */
5002 switch (ctxt->op_bytes) {
5003 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5004 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5005 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5006 }
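/*
 * e.g. cwde (0x98 with 32-bit operand size) is the "case 4" line
 * above: the low 16 bits are reinterpreted as s16 and sign-extended
 * into the 32-bit destination, i.e. AX widens into EAX.
 */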
5007 break;
5008 case 0xcc: /* int3 */
5009 rc = emulate_int(ctxt, 3);
5010 break;
5011 case 0xcd: /* int n */
5012 rc = emulate_int(ctxt, ctxt->src.val);
5013 break;
5014 case 0xce: /* into */
5015 if (ctxt->eflags & X86_EFLAGS_OF)
5016 rc = emulate_int(ctxt, 4);
5017 break;
5018 case 0xe9: /* jmp rel */
5019 case 0xeb: /* jmp rel short */
5020 rc = jmp_rel(ctxt, ctxt->src.val);
5021 ctxt->dst.type = OP_NONE; /* Disable writeback. */
5022 break;
5023 case 0xf4: /* hlt */
5024 ctxt->ops->halt(ctxt);
5025 break;
5026 case 0xf5: /* cmc */
5027 /* complement carry flag from eflags reg */
5028 ctxt->eflags ^= X86_EFLAGS_CF;
5029 break;
5030 case 0xf8: /* clc */
5031 ctxt->eflags &= ~X86_EFLAGS_CF;
5032 break;
5033 case 0xf9: /* stc */
5034 ctxt->eflags |= X86_EFLAGS_CF;
5035 break;
5036 case 0xfc: /* cld */
5037 ctxt->eflags &= ~X86_EFLAGS_DF;
5038 break;
5039 case 0xfd: /* std */
5040 ctxt->eflags |= X86_EFLAGS_DF;
5041 break;
5042 default:
5043 goto cannot_emulate;
5044 }
5046 if (rc != X86EMUL_CONTINUE)
5047 goto done;
5049 writeback:
5050 if (ctxt->d & SrcWrite) {
5051 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5052 rc = writeback(ctxt, &ctxt->src);
5053 if (rc != X86EMUL_CONTINUE)
5054 goto done;
5055 }
5056 if (!(ctxt->d & NoWrite)) {
5057 rc = writeback(ctxt, &ctxt->dst);
5058 if (rc != X86EMUL_CONTINUE)
5059 goto done;
5060 }
5062 /*
5063 * restore dst type in case the decoding will be reused
5064 * (happens for string instructions)
5065 */
5066 ctxt->dst.type = saved_dst_type;
5068 if ((ctxt->d & SrcMask) == SrcSI)
5069 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5071 if ((ctxt->d & DstMask) == DstDI)
5072 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5074 if (ctxt->rep_prefix && (ctxt->d & String)) {
5075 unsigned int count;
5076 struct read_cache *r = &ctxt->io_read;
5077 if ((ctxt->d & SrcMask) == SrcSI)
5078 count = ctxt->src.count;
5079 else
5080 count = ctxt->dst.count;
5081 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
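/*
 * e.g. after one emulated iteration of "rep movsd", count is 1 and
 * RCX is decremented by one here; RSI/RDI were already advanced by
 * string_addr_inc() above according to EFLAGS.DF.
 */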
5083 if (!string_insn_completed(ctxt)) {
5084 /*
5085 * Re-enter the guest when the pio read-ahead buffer is empty
5086 * or, if it is not used, after every 1024 iterations.
5087 */
5088 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5089 (r->end == 0 || r->end != r->pos)) {
5090 /*
5091 * Reset the read cache. This usually happens before
5092 * decode, but since the instruction is restarted
5093 * we have to do it here.
5094 */
5095 ctxt->mem_read.end = 0;
5096 writeback_registers(ctxt);
5097 return EMULATION_RESTART;
5098 }
5099 goto done; /* skip rip writeback */
5100 }
5101 ctxt->eflags &= ~X86_EFLAGS_RF;
5102 }
5104 ctxt->eip = ctxt->_eip;
5106 done:
5107 if (rc == X86EMUL_PROPAGATE_FAULT) {
5108 WARN_ON(ctxt->exception.vector > 0x1f);
5109 ctxt->have_exception = true;
5110 }
5111 if (rc == X86EMUL_INTERCEPTED)
5112 return EMULATION_INTERCEPTED;
5114 if (rc == X86EMUL_CONTINUE)
5115 writeback_registers(ctxt);
5117 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5119 twobyte_insn:
5120 switch (ctxt->b) {
5121 case 0x09: /* wbinvd */
5122 (ctxt->ops->wbinvd)(ctxt);
5123 break;
5124 case 0x08: /* invd */
5125 case 0x0d: /* GrpP (prefetch) */
5126 case 0x18: /* Grp16 (prefetch/nop) */
5127 case 0x1f: /* nop */
5128 break;
5129 case 0x20: /* mov cr, reg */
5130 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5131 break;
5132 case 0x21: /* mov from dr to reg */
5133 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5134 break;
5135 case 0x40 ... 0x4f: /* cmov */
5136 if (test_cc(ctxt->b, ctxt->eflags))
5137 ctxt->dst.val = ctxt->src.val;
5138 else if (ctxt->op_bytes != 4)
5139 ctxt->dst.type = OP_NONE; /* no writeback */
5140 break;
5141 case 0x80 ... 0x8f: /* jnz rel, etc. */
5142 if (test_cc(ctxt->b, ctxt->eflags))
5143 rc = jmp_rel(ctxt, ctxt->src.val);
5144 break;
5145 case 0x90 ... 0x9f: /* setcc r/m8 */
5146 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5147 break;
5148 case 0xb6 ... 0xb7: /* movzx */
5149 ctxt->dst.bytes = ctxt->op_bytes;
5150 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5151 : (u16) ctxt->src.val;
5152 break;
5153 case 0xbe ... 0xbf: /* movsx */
5154 ctxt->dst.bytes = ctxt->op_bytes;
5155 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5156 (s16) ctxt->src.val;
5157 break;
5158 default:
5159 goto cannot_emulate;
5160 }
5162 threebyte_insn:
5164 if (rc != X86EMUL_CONTINUE)
5165 goto done;
5167 goto writeback;
5169 cannot_emulate:
5170 return EMULATION_FAILED;
5171 }
5173 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5174 {
5175 invalidate_registers(ctxt);
5176 }
5178 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5179 {
5180 writeback_registers(ctxt);
5181 }