/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#define OpNone 0ull
#define OpImplicit 1ull /* No generic decode */
#define OpReg 2ull /* Register */
#define OpMem 3ull /* Memory */
#define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
#define OpDI 5ull /* ES:DI/EDI/RDI */
#define OpMem64 6ull /* Memory, 64-bit */
#define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
#define OpDX 8ull /* DX register */
#define OpCL 9ull /* CL register (for shifts) */
#define OpImmByte 10ull /* 8-bit sign extended immediate */
#define OpOne 11ull /* Implied 1 */
#define OpImm 12ull /* Sign extended up to 32-bit immediate */
#define OpMem16 13ull /* Memory operand (16-bit). */
#define OpMem32 14ull /* Memory operand (32-bit). */
#define OpImmU 15ull /* Immediate operand, zero extended */
#define OpSI 16ull /* SI/ESI/RSI */
#define OpImmFAddr 17ull /* Immediate far address */
#define OpMemFAddr 18ull /* Far address in memory */
#define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
#define OpES 20ull /* ES */
#define OpCS 21ull /* CS */
#define OpSS 22ull /* SS */
#define OpDS 23ull /* DS */
#define OpFS 24ull /* FS */
#define OpGS 25ull /* GS */
#define OpMem8 26ull /* 8-bit zero extended memory operand */
#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define DstShift 1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg (OpReg << DstShift)
#define DstMem (OpMem << DstShift)
#define DstAcc (OpAcc << DstShift)
#define DstDI (OpDI << DstShift)
#define DstMem64 (OpMem64 << DstShift)
#define DstMem16 (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX (OpDX << DstShift)
#define DstAccLo (OpAccLo << DstShift)
#define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcShift 6
#define SrcNone (OpNone << SrcShift)
#define SrcReg (OpReg << SrcShift)
#define SrcMem (OpMem << SrcShift)
#define SrcMem16 (OpMem16 << SrcShift)
#define SrcMem32 (OpMem32 << SrcShift)
#define SrcImm (OpImm << SrcShift)
#define SrcImmByte (OpImmByte << SrcShift)
#define SrcOne (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU (OpImmU << SrcShift)
#define SrcSI (OpSI << SrcShift)
#define SrcXLat (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc (OpAcc << SrcShift)
#define SrcImmU16 (OpImmU16 << SrcShift)
#define SrcImm64 (OpImm64 << SrcShift)
#define SrcDX (OpDX << SrcShift)
#define SrcMem8 (OpMem8 << SrcShift)
#define SrcAccHi (OpAccHi << SrcShift)
#define SrcMask (OpMask << SrcShift)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
#define Stack (1<<14) /* Stack instruction (push/pop) */
#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
#define ModeDual (7<<15) /* Different instruction for 32/64 bit */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
/* Destination is only written; never read. */
#define Mov (1<<20)
/* Misc flags */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28)
#define PageTable (1 << 29) /* instruction used to write page table */
#define NotImpl (1 << 30) /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift (31)
#define Src2None (OpNone << Src2Shift)
#define Src2Mem (OpMem << Src2Shift)
#define Src2CL (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One (OpOne << Src2Shift)
#define Src2Imm (OpImm << Src2Shift)
#define Src2ES (OpES << Src2Shift)
#define Src2CS (OpCS << Src2Shift)
#define Src2SS (OpSS << Src2Shift)
#define Src2DS (OpDS << Src2Shift)
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
#define Intercept ((u64)1 << 48) /* Has valid intercept field */
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */

#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
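/*
 * Illustrative note (not part of the original table definitions): each
 * decode-table entry packs its operand kinds into the 64-bit flags word
 * built from the macros above, so a decoder can recover them with a shift
 * and a mask, roughly:
 *
 *	unsigned dst  = (d >> DstShift)  & OpMask;
 *	unsigned src  = (d >> SrcShift)  & OpMask;
 *	unsigned src2 = (d >> Src2Shift) & OpMask;
 *
 * where "d" names an entry's flags only for this sketch; e.g. an entry
 * declared DstReg | SrcMem yields OpReg and OpMem respectively.
 */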
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
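/*
 * Sketch (illustration only, not the emulator's dispatch code): because the
 * 8/16/32/64-bit variants of an em_##op group are emitted back to back and
 * each padded to FASTOP_SIZE bytes, a caller can pick the variant for a
 * given operand size by offsetting the group entry point, e.g. for a
 * non-byte operation:
 *
 *	void (*fop)(struct fastop *) = (void *)em_add;
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * ("em_add" is only an example group name; __ffs(2/4/8) selects the
 * word/long/quad slot.)
 */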
struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)
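/*
 * For illustration only: when an emulated arithmetic operation produces a
 * new rflags value, only the bits covered by EFLAGS_MASK are folded back
 * into the guest's saved flags, along the lines of
 *
 *	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (new_flags & EFLAGS_MASK);
 *
 * where "new_flags" is just a placeholder for the flags computed by the
 * emulated operation.
 */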
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_RET "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_FUNC("em_" #op)

#define FOP_END \
	".popsection")

#define FOPNOP() \
	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
	FOP_RET

#define FOP1E(op, dst) \
	FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	FOP_RET

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}
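/*
 * Worked example (illustrative): with a 16-bit address size
 * (ctxt->ad_bytes == 2), ad_mask() is 0xffff, so
 * address_mask(ctxt, 0x12345678) yields 0x5678; when ad_bytes equals
 * sizeof(unsigned long), the register value is returned unmasked.
 */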
static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
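/*
 * Example (illustrative): a descriptor with limit 0x3 and the granularity
 * bit set (desc->g == 1) scales to (0x3 << 12) | 0xfff == 0x3fff, i.e. the
 * limit expressed in bytes rather than in 4K pages.
 */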
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;

	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})
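/*
 * Typical use of the fetch helpers, shown here only as an illustration
 * (both lines appear in the ModR/M decoding below): inside a decode
 * routine that declares "int rc" and provides a "done" label,
 *
 *	sib = insn_fetch(u8, ctxt);
 *	modrm_ea += insn_fetch(s32, ctxt);
 *
 * pull bytes from the instruction stream and advance ctxt->_eip and
 * ctxt->fetch.ptr as a side effect.
 */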
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);
static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
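/*
 * Example (illustrative): condition code 0x4 is "equal/zero", so
 * test_cc(0x4, flags) returns non-zero exactly when X86_EFLAGS_ZF is set
 * in the supplied flags; the matching setcc stub is reached at
 * em_setcc + 4 * 0x4 because each stub is padded to 4 bytes.
 */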
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}
1001 static void read_sse_reg(struct x86_emulate_ctxt
*ctxt
, sse128_t
*data
, int reg
)
1003 ctxt
->ops
->get_fpu(ctxt
);
1005 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data
)); break;
1006 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data
)); break;
1007 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data
)); break;
1008 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data
)); break;
1009 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data
)); break;
1010 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data
)); break;
1011 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data
)); break;
1012 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data
)); break;
1013 #ifdef CONFIG_X86_64
1014 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data
)); break;
1015 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data
)); break;
1016 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data
)); break;
1017 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data
)); break;
1018 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data
)); break;
1019 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data
)); break;
1020 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data
)); break;
1021 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data
)); break;
1025 ctxt
->ops
->put_fpu(ctxt
);
1028 static void write_sse_reg(struct x86_emulate_ctxt
*ctxt
, sse128_t
*data
,
1031 ctxt
->ops
->get_fpu(ctxt
);
1033 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data
)); break;
1034 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data
)); break;
1035 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data
)); break;
1036 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data
)); break;
1037 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data
)); break;
1038 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data
)); break;
1039 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data
)); break;
1040 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data
)); break;
1041 #ifdef CONFIG_X86_64
1042 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data
)); break;
1043 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data
)); break;
1044 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data
)); break;
1045 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data
)); break;
1046 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data
)); break;
1047 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data
)); break;
1048 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data
)); break;
1049 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data
)); break;
1053 ctxt
->ops
->put_fpu(ctxt
);
1056 static void read_mmx_reg(struct x86_emulate_ctxt
*ctxt
, u64
*data
, int reg
)
1058 ctxt
->ops
->get_fpu(ctxt
);
1060 case 0: asm("movq %%mm0, %0" : "=m"(*data
)); break;
1061 case 1: asm("movq %%mm1, %0" : "=m"(*data
)); break;
1062 case 2: asm("movq %%mm2, %0" : "=m"(*data
)); break;
1063 case 3: asm("movq %%mm3, %0" : "=m"(*data
)); break;
1064 case 4: asm("movq %%mm4, %0" : "=m"(*data
)); break;
1065 case 5: asm("movq %%mm5, %0" : "=m"(*data
)); break;
1066 case 6: asm("movq %%mm6, %0" : "=m"(*data
)); break;
1067 case 7: asm("movq %%mm7, %0" : "=m"(*data
)); break;
1070 ctxt
->ops
->put_fpu(ctxt
);
1073 static void write_mmx_reg(struct x86_emulate_ctxt
*ctxt
, u64
*data
, int reg
)
1075 ctxt
->ops
->get_fpu(ctxt
);
1077 case 0: asm("movq %0, %%mm0" : : "m"(*data
)); break;
1078 case 1: asm("movq %0, %%mm1" : : "m"(*data
)); break;
1079 case 2: asm("movq %0, %%mm2" : : "m"(*data
)); break;
1080 case 3: asm("movq %0, %%mm3" : : "m"(*data
)); break;
1081 case 4: asm("movq %0, %%mm4" : : "m"(*data
)); break;
1082 case 5: asm("movq %0, %%mm5" : : "m"(*data
)); break;
1083 case 6: asm("movq %0, %%mm6" : : "m"(*data
)); break;
1084 case 7: asm("movq %0, %%mm7" : : "m"(*data
)); break;
1087 ctxt
->ops
->put_fpu(ctxt
);
1090 static int em_fninit(struct x86_emulate_ctxt
*ctxt
)
1092 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
1093 return emulate_nm(ctxt
);
1095 ctxt
->ops
->get_fpu(ctxt
);
1096 asm volatile("fninit");
1097 ctxt
->ops
->put_fpu(ctxt
);
1098 return X86EMUL_CONTINUE
;
1101 static int em_fnstcw(struct x86_emulate_ctxt
*ctxt
)
1105 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
1106 return emulate_nm(ctxt
);
1108 ctxt
->ops
->get_fpu(ctxt
);
1109 asm volatile("fnstcw %0": "+m"(fcw
));
1110 ctxt
->ops
->put_fpu(ctxt
);
1112 ctxt
->dst
.val
= fcw
;
1114 return X86EMUL_CONTINUE
;
1117 static int em_fnstsw(struct x86_emulate_ctxt
*ctxt
)
1121 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
1122 return emulate_nm(ctxt
);
1124 ctxt
->ops
->get_fpu(ctxt
);
1125 asm volatile("fnstsw %0": "+m"(fsw
));
1126 ctxt
->ops
->put_fpu(ctxt
);
1128 ctxt
->dst
.val
= fsw
;
1130 return X86EMUL_CONTINUE
;
1133 static void decode_register_operand(struct x86_emulate_ctxt
*ctxt
,
1136 unsigned reg
= ctxt
->modrm_reg
;
1138 if (!(ctxt
->d
& ModRM
))
1139 reg
= (ctxt
->b
& 7) | ((ctxt
->rex_prefix
& 1) << 3);
1141 if (ctxt
->d
& Sse
) {
1145 read_sse_reg(ctxt
, &op
->vec_val
, reg
);
1148 if (ctxt
->d
& Mmx
) {
1157 op
->bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
1158 op
->addr
.reg
= decode_register(ctxt
, reg
, ctxt
->d
& ByteOp
);
1160 fetch_register_operand(op
);
1161 op
->orig_val
= op
->val
;
1164 static void adjust_modrm_seg(struct x86_emulate_ctxt
*ctxt
, int base_reg
)
1166 if (base_reg
== VCPU_REGS_RSP
|| base_reg
== VCPU_REGS_RBP
)
1167 ctxt
->modrm_seg
= VCPU_SREG_SS
;
1170 static int decode_modrm(struct x86_emulate_ctxt
*ctxt
,
1174 int index_reg
, base_reg
, scale
;
1175 int rc
= X86EMUL_CONTINUE
;
1178 ctxt
->modrm_reg
= ((ctxt
->rex_prefix
<< 1) & 8); /* REX.R */
1179 index_reg
= (ctxt
->rex_prefix
<< 2) & 8; /* REX.X */
1180 base_reg
= (ctxt
->rex_prefix
<< 3) & 8; /* REX.B */
1182 ctxt
->modrm_mod
= (ctxt
->modrm
& 0xc0) >> 6;
1183 ctxt
->modrm_reg
|= (ctxt
->modrm
& 0x38) >> 3;
1184 ctxt
->modrm_rm
= base_reg
| (ctxt
->modrm
& 0x07);
1185 ctxt
->modrm_seg
= VCPU_SREG_DS
;
1187 if (ctxt
->modrm_mod
== 3 || (ctxt
->d
& NoMod
)) {
1189 op
->bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
1190 op
->addr
.reg
= decode_register(ctxt
, ctxt
->modrm_rm
,
1192 if (ctxt
->d
& Sse
) {
1195 op
->addr
.xmm
= ctxt
->modrm_rm
;
1196 read_sse_reg(ctxt
, &op
->vec_val
, ctxt
->modrm_rm
);
1199 if (ctxt
->d
& Mmx
) {
1202 op
->addr
.mm
= ctxt
->modrm_rm
& 7;
1205 fetch_register_operand(op
);
1211 if (ctxt
->ad_bytes
== 2) {
1212 unsigned bx
= reg_read(ctxt
, VCPU_REGS_RBX
);
1213 unsigned bp
= reg_read(ctxt
, VCPU_REGS_RBP
);
1214 unsigned si
= reg_read(ctxt
, VCPU_REGS_RSI
);
1215 unsigned di
= reg_read(ctxt
, VCPU_REGS_RDI
);
1217 /* 16-bit ModR/M decode. */
1218 switch (ctxt
->modrm_mod
) {
1220 if (ctxt
->modrm_rm
== 6)
1221 modrm_ea
+= insn_fetch(u16
, ctxt
);
1224 modrm_ea
+= insn_fetch(s8
, ctxt
);
1227 modrm_ea
+= insn_fetch(u16
, ctxt
);
1230 switch (ctxt
->modrm_rm
) {
1232 modrm_ea
+= bx
+ si
;
1235 modrm_ea
+= bx
+ di
;
1238 modrm_ea
+= bp
+ si
;
1241 modrm_ea
+= bp
+ di
;
1250 if (ctxt
->modrm_mod
!= 0)
1257 if (ctxt
->modrm_rm
== 2 || ctxt
->modrm_rm
== 3 ||
1258 (ctxt
->modrm_rm
== 6 && ctxt
->modrm_mod
!= 0))
1259 ctxt
->modrm_seg
= VCPU_SREG_SS
;
1260 modrm_ea
= (u16
)modrm_ea
;
1262 /* 32/64-bit ModR/M decode. */
1263 if ((ctxt
->modrm_rm
& 7) == 4) {
1264 sib
= insn_fetch(u8
, ctxt
);
1265 index_reg
|= (sib
>> 3) & 7;
1266 base_reg
|= sib
& 7;
1269 if ((base_reg
& 7) == 5 && ctxt
->modrm_mod
== 0)
1270 modrm_ea
+= insn_fetch(s32
, ctxt
);
1272 modrm_ea
+= reg_read(ctxt
, base_reg
);
1273 adjust_modrm_seg(ctxt
, base_reg
);
1274 /* Increment ESP on POP [ESP] */
1275 if ((ctxt
->d
& IncSP
) &&
1276 base_reg
== VCPU_REGS_RSP
)
1277 modrm_ea
+= ctxt
->op_bytes
;
1280 modrm_ea
+= reg_read(ctxt
, index_reg
) << scale
;
1281 } else if ((ctxt
->modrm_rm
& 7) == 5 && ctxt
->modrm_mod
== 0) {
1282 modrm_ea
+= insn_fetch(s32
, ctxt
);
1283 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
1284 ctxt
->rip_relative
= 1;
1286 base_reg
= ctxt
->modrm_rm
;
1287 modrm_ea
+= reg_read(ctxt
, base_reg
);
1288 adjust_modrm_seg(ctxt
, base_reg
);
1290 switch (ctxt
->modrm_mod
) {
1292 modrm_ea
+= insn_fetch(s8
, ctxt
);
1295 modrm_ea
+= insn_fetch(s32
, ctxt
);
1299 op
->addr
.mem
.ea
= modrm_ea
;
1300 if (ctxt
->ad_bytes
!= 8)
1301 ctxt
->memop
.addr
.mem
.ea
= (u32
)ctxt
->memop
.addr
.mem
.ea
;
1307 static int decode_abs(struct x86_emulate_ctxt
*ctxt
,
1310 int rc
= X86EMUL_CONTINUE
;
1313 switch (ctxt
->ad_bytes
) {
1315 op
->addr
.mem
.ea
= insn_fetch(u16
, ctxt
);
1318 op
->addr
.mem
.ea
= insn_fetch(u32
, ctxt
);
1321 op
->addr
.mem
.ea
= insn_fetch(u64
, ctxt
);
1328 static void fetch_bit_operand(struct x86_emulate_ctxt
*ctxt
)
1332 if (ctxt
->dst
.type
== OP_MEM
&& ctxt
->src
.type
== OP_REG
) {
1333 mask
= ~((long)ctxt
->dst
.bytes
* 8 - 1);
1335 if (ctxt
->src
.bytes
== 2)
1336 sv
= (s16
)ctxt
->src
.val
& (s16
)mask
;
1337 else if (ctxt
->src
.bytes
== 4)
1338 sv
= (s32
)ctxt
->src
.val
& (s32
)mask
;
1340 sv
= (s64
)ctxt
->src
.val
& (s64
)mask
;
1342 ctxt
->dst
.addr
.mem
.ea
= address_mask(ctxt
,
1343 ctxt
->dst
.addr
.mem
.ea
+ (sv
>> 3));
1346 /* only subword offset */
1347 ctxt
->src
.val
&= (ctxt
->dst
.bytes
<< 3) - 1;
1350 static int read_emulated(struct x86_emulate_ctxt
*ctxt
,
1351 unsigned long addr
, void *dest
, unsigned size
)
1354 struct read_cache
*mc
= &ctxt
->mem_read
;
1356 if (mc
->pos
< mc
->end
)
1359 WARN_ON((mc
->end
+ size
) >= sizeof(mc
->data
));
1361 rc
= ctxt
->ops
->read_emulated(ctxt
, addr
, mc
->data
+ mc
->end
, size
,
1363 if (rc
!= X86EMUL_CONTINUE
)
1369 memcpy(dest
, mc
->data
+ mc
->pos
, size
);
1371 return X86EMUL_CONTINUE
;
1374 static int segmented_read(struct x86_emulate_ctxt
*ctxt
,
1375 struct segmented_address addr
,
1382 rc
= linearize(ctxt
, addr
, size
, false, &linear
);
1383 if (rc
!= X86EMUL_CONTINUE
)
1385 return read_emulated(ctxt
, linear
, data
, size
);
1388 static int segmented_write(struct x86_emulate_ctxt
*ctxt
,
1389 struct segmented_address addr
,
1396 rc
= linearize(ctxt
, addr
, size
, true, &linear
);
1397 if (rc
!= X86EMUL_CONTINUE
)
1399 return ctxt
->ops
->write_emulated(ctxt
, linear
, data
, size
,
1403 static int segmented_cmpxchg(struct x86_emulate_ctxt
*ctxt
,
1404 struct segmented_address addr
,
1405 const void *orig_data
, const void *data
,
1411 rc
= linearize(ctxt
, addr
, size
, true, &linear
);
1412 if (rc
!= X86EMUL_CONTINUE
)
1414 return ctxt
->ops
->cmpxchg_emulated(ctxt
, linear
, orig_data
, data
,
1415 size
, &ctxt
->exception
);
1418 static int pio_in_emulated(struct x86_emulate_ctxt
*ctxt
,
1419 unsigned int size
, unsigned short port
,
1422 struct read_cache
*rc
= &ctxt
->io_read
;
1424 if (rc
->pos
== rc
->end
) { /* refill pio read ahead */
1425 unsigned int in_page
, n
;
1426 unsigned int count
= ctxt
->rep_prefix
?
1427 address_mask(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
)) : 1;
1428 in_page
= (ctxt
->eflags
& X86_EFLAGS_DF
) ?
1429 offset_in_page(reg_read(ctxt
, VCPU_REGS_RDI
)) :
1430 PAGE_SIZE
- offset_in_page(reg_read(ctxt
, VCPU_REGS_RDI
));
1431 n
= min3(in_page
, (unsigned int)sizeof(rc
->data
) / size
, count
);
1434 rc
->pos
= rc
->end
= 0;
1435 if (!ctxt
->ops
->pio_in_emulated(ctxt
, size
, port
, rc
->data
, n
))
1440 if (ctxt
->rep_prefix
&& (ctxt
->d
& String
) &&
1441 !(ctxt
->eflags
& X86_EFLAGS_DF
)) {
1442 ctxt
->dst
.data
= rc
->data
+ rc
->pos
;
1443 ctxt
->dst
.type
= OP_MEM_STR
;
1444 ctxt
->dst
.count
= (rc
->end
- rc
->pos
) / size
;
1447 memcpy(dest
, rc
->data
+ rc
->pos
, size
);
1453 static int read_interrupt_descriptor(struct x86_emulate_ctxt
*ctxt
,
1454 u16 index
, struct desc_struct
*desc
)
1459 ctxt
->ops
->get_idt(ctxt
, &dt
);
1461 if (dt
.size
< index
* 8 + 7)
1462 return emulate_gp(ctxt
, index
<< 3 | 0x2);
1464 addr
= dt
.address
+ index
* 8;
1465 return ctxt
->ops
->read_std(ctxt
, addr
, desc
, sizeof *desc
,
1469 static void get_descriptor_table_ptr(struct x86_emulate_ctxt
*ctxt
,
1470 u16 selector
, struct desc_ptr
*dt
)
1472 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
1475 if (selector
& 1 << 2) {
1476 struct desc_struct desc
;
1479 memset (dt
, 0, sizeof *dt
);
1480 if (!ops
->get_segment(ctxt
, &sel
, &desc
, &base3
,
1484 dt
->size
= desc_limit_scaled(&desc
); /* what if limit > 65535? */
1485 dt
->address
= get_desc_base(&desc
) | ((u64
)base3
<< 32);
1487 ops
->get_gdt(ctxt
, dt
);
1490 static int get_descriptor_ptr(struct x86_emulate_ctxt
*ctxt
,
1491 u16 selector
, ulong
*desc_addr_p
)
1494 u16 index
= selector
>> 3;
1497 get_descriptor_table_ptr(ctxt
, selector
, &dt
);
1499 if (dt
.size
< index
* 8 + 7)
1500 return emulate_gp(ctxt
, selector
& 0xfffc);
1502 addr
= dt
.address
+ index
* 8;
1504 #ifdef CONFIG_X86_64
1505 if (addr
>> 32 != 0) {
1508 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
1509 if (!(efer
& EFER_LMA
))
1514 *desc_addr_p
= addr
;
1515 return X86EMUL_CONTINUE
;
1518 /* allowed just for 8 bytes segments */
1519 static int read_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1520 u16 selector
, struct desc_struct
*desc
,
1525 rc
= get_descriptor_ptr(ctxt
, selector
, desc_addr_p
);
1526 if (rc
!= X86EMUL_CONTINUE
)
1529 return ctxt
->ops
->read_std(ctxt
, *desc_addr_p
, desc
, sizeof(*desc
),
1533 /* allowed just for 8 bytes segments */
1534 static int write_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1535 u16 selector
, struct desc_struct
*desc
)
1540 rc
= get_descriptor_ptr(ctxt
, selector
, &addr
);
1541 if (rc
!= X86EMUL_CONTINUE
)
1544 return ctxt
->ops
->write_std(ctxt
, addr
, desc
, sizeof *desc
,
1548 /* Does not support long mode */
1549 static int __load_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1550 u16 selector
, int seg
, u8 cpl
,
1551 enum x86_transfer_type transfer
,
1552 struct desc_struct
*desc
)
1554 struct desc_struct seg_desc
, old_desc
;
1556 unsigned err_vec
= GP_VECTOR
;
1558 bool null_selector
= !(selector
& ~0x3); /* 0000-0003 are null */
1564 memset(&seg_desc
, 0, sizeof seg_desc
);
1566 if (ctxt
->mode
== X86EMUL_MODE_REAL
) {
1567 /* set real mode segment descriptor (keep limit etc. for
1569 ctxt
->ops
->get_segment(ctxt
, &dummy
, &seg_desc
, NULL
, seg
);
1570 set_desc_base(&seg_desc
, selector
<< 4);
1572 } else if (seg
<= VCPU_SREG_GS
&& ctxt
->mode
== X86EMUL_MODE_VM86
) {
1573 /* VM86 needs a clean new segment descriptor */
1574 set_desc_base(&seg_desc
, selector
<< 4);
1575 set_desc_limit(&seg_desc
, 0xffff);
1585 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1586 if ((seg
== VCPU_SREG_CS
1587 || (seg
== VCPU_SREG_SS
1588 && (ctxt
->mode
!= X86EMUL_MODE_PROT64
|| rpl
!= cpl
))
1589 || seg
== VCPU_SREG_TR
)
1593 /* TR should be in GDT only */
1594 if (seg
== VCPU_SREG_TR
&& (selector
& (1 << 2)))
1597 if (null_selector
) /* for NULL selector skip all following checks */
1600 ret
= read_segment_descriptor(ctxt
, selector
, &seg_desc
, &desc_addr
);
1601 if (ret
!= X86EMUL_CONTINUE
)
1604 err_code
= selector
& 0xfffc;
1605 err_vec
= (transfer
== X86_TRANSFER_TASK_SWITCH
) ? TS_VECTOR
:
1608 /* can't load system descriptor into segment selector */
1609 if (seg
<= VCPU_SREG_GS
&& !seg_desc
.s
) {
1610 if (transfer
== X86_TRANSFER_CALL_JMP
)
1611 return X86EMUL_UNHANDLEABLE
;
1616 err_vec
= (seg
== VCPU_SREG_SS
) ? SS_VECTOR
: NP_VECTOR
;
1625 * segment is not a writable data segment or segment
1626 * selector's RPL != CPL or segment selector's RPL != CPL
1628 if (rpl
!= cpl
|| (seg_desc
.type
& 0xa) != 0x2 || dpl
!= cpl
)
1632 if (!(seg_desc
.type
& 8))
1635 if (seg_desc
.type
& 4) {
1641 if (rpl
> cpl
|| dpl
!= cpl
)
1644 /* in long-mode d/b must be clear if l is set */
1645 if (seg_desc
.d
&& seg_desc
.l
) {
1648 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
1649 if (efer
& EFER_LMA
)
1653 /* CS(RPL) <- CPL */
1654 selector
= (selector
& 0xfffc) | cpl
;
1657 if (seg_desc
.s
|| (seg_desc
.type
!= 1 && seg_desc
.type
!= 9))
1659 old_desc
= seg_desc
;
1660 seg_desc
.type
|= 2; /* busy */
1661 ret
= ctxt
->ops
->cmpxchg_emulated(ctxt
, desc_addr
, &old_desc
, &seg_desc
,
1662 sizeof(seg_desc
), &ctxt
->exception
);
1663 if (ret
!= X86EMUL_CONTINUE
)
1666 case VCPU_SREG_LDTR
:
1667 if (seg_desc
.s
|| seg_desc
.type
!= 2)
1670 default: /* DS, ES, FS, or GS */
1672 * segment is not a data or readable code segment or
1673 * ((segment is a data or nonconforming code segment)
1674 * and (both RPL and CPL > DPL))
1676 if ((seg_desc
.type
& 0xa) == 0x8 ||
1677 (((seg_desc
.type
& 0xc) != 0xc) &&
1678 (rpl
> dpl
&& cpl
> dpl
)))
1684 /* mark segment as accessed */
1685 if (!(seg_desc
.type
& 1)) {
1687 ret
= write_segment_descriptor(ctxt
, selector
,
1689 if (ret
!= X86EMUL_CONTINUE
)
1692 } else if (ctxt
->mode
== X86EMUL_MODE_PROT64
) {
1693 ret
= ctxt
->ops
->read_std(ctxt
, desc_addr
+8, &base3
,
1694 sizeof(base3
), &ctxt
->exception
);
1695 if (ret
!= X86EMUL_CONTINUE
)
1697 if (is_noncanonical_address(get_desc_base(&seg_desc
) |
1698 ((u64
)base3
<< 32)))
1699 return emulate_gp(ctxt
, 0);
1702 ctxt
->ops
->set_segment(ctxt
, selector
, &seg_desc
, base3
, seg
);
1705 return X86EMUL_CONTINUE
;
1707 return emulate_exception(ctxt
, err_vec
, err_code
, true);
1710 static int load_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1711 u16 selector
, int seg
)
1713 u8 cpl
= ctxt
->ops
->cpl(ctxt
);
1714 return __load_segment_descriptor(ctxt
, selector
, seg
, cpl
,
1715 X86_TRANSFER_NONE
, NULL
);
1718 static void write_register_operand(struct operand
*op
)
1720 return assign_register(op
->addr
.reg
, op
->val
, op
->bytes
);
1723 static int writeback(struct x86_emulate_ctxt
*ctxt
, struct operand
*op
)
1727 write_register_operand(op
);
1730 if (ctxt
->lock_prefix
)
1731 return segmented_cmpxchg(ctxt
,
1737 return segmented_write(ctxt
,
1743 return segmented_write(ctxt
,
1746 op
->bytes
* op
->count
);
1749 write_sse_reg(ctxt
, &op
->vec_val
, op
->addr
.xmm
);
1752 write_mmx_reg(ctxt
, &op
->mm_val
, op
->addr
.mm
);
1760 return X86EMUL_CONTINUE
;
1763 static int push(struct x86_emulate_ctxt
*ctxt
, void *data
, int bytes
)
1765 struct segmented_address addr
;
1767 rsp_increment(ctxt
, -bytes
);
1768 addr
.ea
= reg_read(ctxt
, VCPU_REGS_RSP
) & stack_mask(ctxt
);
1769 addr
.seg
= VCPU_SREG_SS
;
1771 return segmented_write(ctxt
, addr
, data
, bytes
);
1774 static int em_push(struct x86_emulate_ctxt
*ctxt
)
1776 /* Disable writeback. */
1777 ctxt
->dst
.type
= OP_NONE
;
1778 return push(ctxt
, &ctxt
->src
.val
, ctxt
->op_bytes
);
1781 static int emulate_pop(struct x86_emulate_ctxt
*ctxt
,
1782 void *dest
, int len
)
1785 struct segmented_address addr
;
1787 addr
.ea
= reg_read(ctxt
, VCPU_REGS_RSP
) & stack_mask(ctxt
);
1788 addr
.seg
= VCPU_SREG_SS
;
1789 rc
= segmented_read(ctxt
, addr
, dest
, len
);
1790 if (rc
!= X86EMUL_CONTINUE
)
1793 rsp_increment(ctxt
, len
);
1797 static int em_pop(struct x86_emulate_ctxt
*ctxt
)
1799 return emulate_pop(ctxt
, &ctxt
->dst
.val
, ctxt
->op_bytes
);
1802 static int emulate_popf(struct x86_emulate_ctxt
*ctxt
,
1803 void *dest
, int len
)
1806 unsigned long val
, change_mask
;
1807 int iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> X86_EFLAGS_IOPL_BIT
;
1808 int cpl
= ctxt
->ops
->cpl(ctxt
);
1810 rc
= emulate_pop(ctxt
, &val
, len
);
1811 if (rc
!= X86EMUL_CONTINUE
)
1814 change_mask
= X86_EFLAGS_CF
| X86_EFLAGS_PF
| X86_EFLAGS_AF
|
1815 X86_EFLAGS_ZF
| X86_EFLAGS_SF
| X86_EFLAGS_OF
|
1816 X86_EFLAGS_TF
| X86_EFLAGS_DF
| X86_EFLAGS_NT
|
1817 X86_EFLAGS_AC
| X86_EFLAGS_ID
;
1819 switch(ctxt
->mode
) {
1820 case X86EMUL_MODE_PROT64
:
1821 case X86EMUL_MODE_PROT32
:
1822 case X86EMUL_MODE_PROT16
:
1824 change_mask
|= X86_EFLAGS_IOPL
;
1826 change_mask
|= X86_EFLAGS_IF
;
1828 case X86EMUL_MODE_VM86
:
1830 return emulate_gp(ctxt
, 0);
1831 change_mask
|= X86_EFLAGS_IF
;
1833 default: /* real mode */
1834 change_mask
|= (X86_EFLAGS_IOPL
| X86_EFLAGS_IF
);
1838 *(unsigned long *)dest
=
1839 (ctxt
->eflags
& ~change_mask
) | (val
& change_mask
);
1844 static int em_popf(struct x86_emulate_ctxt
*ctxt
)
1846 ctxt
->dst
.type
= OP_REG
;
1847 ctxt
->dst
.addr
.reg
= &ctxt
->eflags
;
1848 ctxt
->dst
.bytes
= ctxt
->op_bytes
;
1849 return emulate_popf(ctxt
, &ctxt
->dst
.val
, ctxt
->op_bytes
);
1852 static int em_enter(struct x86_emulate_ctxt
*ctxt
)
1855 unsigned frame_size
= ctxt
->src
.val
;
1856 unsigned nesting_level
= ctxt
->src2
.val
& 31;
1860 return X86EMUL_UNHANDLEABLE
;
1862 rbp
= reg_read(ctxt
, VCPU_REGS_RBP
);
1863 rc
= push(ctxt
, &rbp
, stack_size(ctxt
));
1864 if (rc
!= X86EMUL_CONTINUE
)
1866 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RBP
), reg_read(ctxt
, VCPU_REGS_RSP
),
1868 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RSP
),
1869 reg_read(ctxt
, VCPU_REGS_RSP
) - frame_size
,
1871 return X86EMUL_CONTINUE
;
1874 static int em_leave(struct x86_emulate_ctxt
*ctxt
)
1876 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RSP
), reg_read(ctxt
, VCPU_REGS_RBP
),
1878 return emulate_pop(ctxt
, reg_rmw(ctxt
, VCPU_REGS_RBP
), ctxt
->op_bytes
);
1881 static int em_push_sreg(struct x86_emulate_ctxt
*ctxt
)
1883 int seg
= ctxt
->src2
.val
;
1885 ctxt
->src
.val
= get_segment_selector(ctxt
, seg
);
1886 if (ctxt
->op_bytes
== 4) {
1887 rsp_increment(ctxt
, -2);
1891 return em_push(ctxt
);
1894 static int em_pop_sreg(struct x86_emulate_ctxt
*ctxt
)
1896 int seg
= ctxt
->src2
.val
;
1897 unsigned long selector
;
1900 rc
= emulate_pop(ctxt
, &selector
, 2);
1901 if (rc
!= X86EMUL_CONTINUE
)
1904 if (ctxt
->modrm_reg
== VCPU_SREG_SS
)
1905 ctxt
->interruptibility
= KVM_X86_SHADOW_INT_MOV_SS
;
1906 if (ctxt
->op_bytes
> 2)
1907 rsp_increment(ctxt
, ctxt
->op_bytes
- 2);
1909 rc
= load_segment_descriptor(ctxt
, (u16
)selector
, seg
);
1913 static int em_pusha(struct x86_emulate_ctxt
*ctxt
)
1915 unsigned long old_esp
= reg_read(ctxt
, VCPU_REGS_RSP
);
1916 int rc
= X86EMUL_CONTINUE
;
1917 int reg
= VCPU_REGS_RAX
;
1919 while (reg
<= VCPU_REGS_RDI
) {
1920 (reg
== VCPU_REGS_RSP
) ?
1921 (ctxt
->src
.val
= old_esp
) : (ctxt
->src
.val
= reg_read(ctxt
, reg
));
1924 if (rc
!= X86EMUL_CONTINUE
)
1933 static int em_pushf(struct x86_emulate_ctxt
*ctxt
)
1935 ctxt
->src
.val
= (unsigned long)ctxt
->eflags
& ~X86_EFLAGS_VM
;
1936 return em_push(ctxt
);
1939 static int em_popa(struct x86_emulate_ctxt
*ctxt
)
1941 int rc
= X86EMUL_CONTINUE
;
1942 int reg
= VCPU_REGS_RDI
;
1945 while (reg
>= VCPU_REGS_RAX
) {
1946 if (reg
== VCPU_REGS_RSP
) {
1947 rsp_increment(ctxt
, ctxt
->op_bytes
);
1951 rc
= emulate_pop(ctxt
, &val
, ctxt
->op_bytes
);
1952 if (rc
!= X86EMUL_CONTINUE
)
1954 assign_register(reg_rmw(ctxt
, reg
), val
, ctxt
->op_bytes
);
1960 static int __emulate_int_real(struct x86_emulate_ctxt
*ctxt
, int irq
)
1962 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
1969 /* TODO: Add limit checks */
1970 ctxt
->src
.val
= ctxt
->eflags
;
1972 if (rc
!= X86EMUL_CONTINUE
)
1975 ctxt
->eflags
&= ~(X86_EFLAGS_IF
| X86_EFLAGS_TF
| X86_EFLAGS_AC
);
1977 ctxt
->src
.val
= get_segment_selector(ctxt
, VCPU_SREG_CS
);
1979 if (rc
!= X86EMUL_CONTINUE
)
1982 ctxt
->src
.val
= ctxt
->_eip
;
1984 if (rc
!= X86EMUL_CONTINUE
)
1987 ops
->get_idt(ctxt
, &dt
);
1989 eip_addr
= dt
.address
+ (irq
<< 2);
1990 cs_addr
= dt
.address
+ (irq
<< 2) + 2;
1992 rc
= ops
->read_std(ctxt
, cs_addr
, &cs
, 2, &ctxt
->exception
);
1993 if (rc
!= X86EMUL_CONTINUE
)
1996 rc
= ops
->read_std(ctxt
, eip_addr
, &eip
, 2, &ctxt
->exception
);
1997 if (rc
!= X86EMUL_CONTINUE
)
2000 rc
= load_segment_descriptor(ctxt
, cs
, VCPU_SREG_CS
);
2001 if (rc
!= X86EMUL_CONTINUE
)
2009 int emulate_int_real(struct x86_emulate_ctxt
*ctxt
, int irq
)
2013 invalidate_registers(ctxt
);
2014 rc
= __emulate_int_real(ctxt
, irq
);
2015 if (rc
== X86EMUL_CONTINUE
)
2016 writeback_registers(ctxt
);
2020 static int emulate_int(struct x86_emulate_ctxt
*ctxt
, int irq
)
2022 switch(ctxt
->mode
) {
2023 case X86EMUL_MODE_REAL
:
2024 return __emulate_int_real(ctxt
, irq
);
2025 case X86EMUL_MODE_VM86
:
2026 case X86EMUL_MODE_PROT16
:
2027 case X86EMUL_MODE_PROT32
:
2028 case X86EMUL_MODE_PROT64
:
2030 /* Protected mode interrupts unimplemented yet */
2031 return X86EMUL_UNHANDLEABLE
;
2035 static int emulate_iret_real(struct x86_emulate_ctxt
*ctxt
)
2037 int rc
= X86EMUL_CONTINUE
;
2038 unsigned long temp_eip
= 0;
2039 unsigned long temp_eflags
= 0;
2040 unsigned long cs
= 0;
2041 unsigned long mask
= X86_EFLAGS_CF
| X86_EFLAGS_PF
| X86_EFLAGS_AF
|
2042 X86_EFLAGS_ZF
| X86_EFLAGS_SF
| X86_EFLAGS_TF
|
2043 X86_EFLAGS_IF
| X86_EFLAGS_DF
| X86_EFLAGS_OF
|
2044 X86_EFLAGS_IOPL
| X86_EFLAGS_NT
| X86_EFLAGS_RF
|
2045 X86_EFLAGS_AC
| X86_EFLAGS_ID
|
2047 unsigned long vm86_mask
= X86_EFLAGS_VM
| X86_EFLAGS_VIF
|
2050 /* TODO: Add stack limit check */
2052 rc
= emulate_pop(ctxt
, &temp_eip
, ctxt
->op_bytes
);
2054 if (rc
!= X86EMUL_CONTINUE
)
2057 if (temp_eip
& ~0xffff)
2058 return emulate_gp(ctxt
, 0);
2060 rc
= emulate_pop(ctxt
, &cs
, ctxt
->op_bytes
);
2062 if (rc
!= X86EMUL_CONTINUE
)
2065 rc
= emulate_pop(ctxt
, &temp_eflags
, ctxt
->op_bytes
);
2067 if (rc
!= X86EMUL_CONTINUE
)
2070 rc
= load_segment_descriptor(ctxt
, (u16
)cs
, VCPU_SREG_CS
);
2072 if (rc
!= X86EMUL_CONTINUE
)
2075 ctxt
->_eip
= temp_eip
;
2077 if (ctxt
->op_bytes
== 4)
2078 ctxt
->eflags
= ((temp_eflags
& mask
) | (ctxt
->eflags
& vm86_mask
));
2079 else if (ctxt
->op_bytes
== 2) {
2080 ctxt
->eflags
&= ~0xffff;
2081 ctxt
->eflags
|= temp_eflags
;
2084 ctxt
->eflags
&= ~EFLG_RESERVED_ZEROS_MASK
; /* Clear reserved zeros */
2085 ctxt
->eflags
|= X86_EFLAGS_FIXED
;
2086 ctxt
->ops
->set_nmi_mask(ctxt
, false);
2091 static int em_iret(struct x86_emulate_ctxt
*ctxt
)
2093 switch(ctxt
->mode
) {
2094 case X86EMUL_MODE_REAL
:
2095 return emulate_iret_real(ctxt
);
2096 case X86EMUL_MODE_VM86
:
2097 case X86EMUL_MODE_PROT16
:
2098 case X86EMUL_MODE_PROT32
:
2099 case X86EMUL_MODE_PROT64
:
2101 /* iret from protected mode unimplemented yet */
2102 return X86EMUL_UNHANDLEABLE
;
2106 static int em_jmp_far(struct x86_emulate_ctxt
*ctxt
)
2109 unsigned short sel
, old_sel
;
2110 struct desc_struct old_desc
, new_desc
;
2111 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2112 u8 cpl
= ctxt
->ops
->cpl(ctxt
);
2114 /* Assignment of RIP may only fail in 64-bit mode */
2115 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
2116 ops
->get_segment(ctxt
, &old_sel
, &old_desc
, NULL
,
2119 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
2121 rc
= __load_segment_descriptor(ctxt
, sel
, VCPU_SREG_CS
, cpl
,
2122 X86_TRANSFER_CALL_JMP
,
2124 if (rc
!= X86EMUL_CONTINUE
)
2127 rc
= assign_eip_far(ctxt
, ctxt
->src
.val
, &new_desc
);
2128 if (rc
!= X86EMUL_CONTINUE
) {
2129 WARN_ON(ctxt
->mode
!= X86EMUL_MODE_PROT64
);
2130 /* assigning eip failed; restore the old cs */
2131 ops
->set_segment(ctxt
, old_sel
, &old_desc
, 0, VCPU_SREG_CS
);
2137 static int em_jmp_abs(struct x86_emulate_ctxt
*ctxt
)
2139 return assign_eip_near(ctxt
, ctxt
->src
.val
);
2142 static int em_call_near_abs(struct x86_emulate_ctxt
*ctxt
)
2147 old_eip
= ctxt
->_eip
;
2148 rc
= assign_eip_near(ctxt
, ctxt
->src
.val
);
2149 if (rc
!= X86EMUL_CONTINUE
)
2151 ctxt
->src
.val
= old_eip
;
2156 static int em_cmpxchg8b(struct x86_emulate_ctxt
*ctxt
)
2158 u64 old
= ctxt
->dst
.orig_val64
;
2160 if (ctxt
->dst
.bytes
== 16)
2161 return X86EMUL_UNHANDLEABLE
;
2163 if (((u32
) (old
>> 0) != (u32
) reg_read(ctxt
, VCPU_REGS_RAX
)) ||
2164 ((u32
) (old
>> 32) != (u32
) reg_read(ctxt
, VCPU_REGS_RDX
))) {
2165 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
) (old
>> 0);
2166 *reg_write(ctxt
, VCPU_REGS_RDX
) = (u32
) (old
>> 32);
2167 ctxt
->eflags
&= ~X86_EFLAGS_ZF
;
2169 ctxt
->dst
.val64
= ((u64
)reg_read(ctxt
, VCPU_REGS_RCX
) << 32) |
2170 (u32
) reg_read(ctxt
, VCPU_REGS_RBX
);
2172 ctxt
->eflags
|= X86_EFLAGS_ZF
;
2174 return X86EMUL_CONTINUE
;
2177 static int em_ret(struct x86_emulate_ctxt
*ctxt
)
2182 rc
= emulate_pop(ctxt
, &eip
, ctxt
->op_bytes
);
2183 if (rc
!= X86EMUL_CONTINUE
)
2186 return assign_eip_near(ctxt
, eip
);
2189 static int em_ret_far(struct x86_emulate_ctxt
*ctxt
)
2192 unsigned long eip
, cs
;
2194 int cpl
= ctxt
->ops
->cpl(ctxt
);
2195 struct desc_struct old_desc
, new_desc
;
2196 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2198 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
2199 ops
->get_segment(ctxt
, &old_cs
, &old_desc
, NULL
,
2202 rc
= emulate_pop(ctxt
, &eip
, ctxt
->op_bytes
);
2203 if (rc
!= X86EMUL_CONTINUE
)
2205 rc
= emulate_pop(ctxt
, &cs
, ctxt
->op_bytes
);
2206 if (rc
!= X86EMUL_CONTINUE
)
2208 /* Outer-privilege level return is not implemented */
2209 if (ctxt
->mode
>= X86EMUL_MODE_PROT16
&& (cs
& 3) > cpl
)
2210 return X86EMUL_UNHANDLEABLE
;
2211 rc
= __load_segment_descriptor(ctxt
, (u16
)cs
, VCPU_SREG_CS
, cpl
,
2214 if (rc
!= X86EMUL_CONTINUE
)
2216 rc
= assign_eip_far(ctxt
, eip
, &new_desc
);
2217 if (rc
!= X86EMUL_CONTINUE
) {
2218 WARN_ON(ctxt
->mode
!= X86EMUL_MODE_PROT64
);
2219 ops
->set_segment(ctxt
, old_cs
, &old_desc
, 0, VCPU_SREG_CS
);
2224 static int em_ret_far_imm(struct x86_emulate_ctxt
*ctxt
)
2228 rc
= em_ret_far(ctxt
);
2229 if (rc
!= X86EMUL_CONTINUE
)
2231 rsp_increment(ctxt
, ctxt
->src
.val
);
2232 return X86EMUL_CONTINUE
;
2235 static int em_cmpxchg(struct x86_emulate_ctxt
*ctxt
)
2237 /* Save real source value, then compare EAX against destination. */
2238 ctxt
->dst
.orig_val
= ctxt
->dst
.val
;
2239 ctxt
->dst
.val
= reg_read(ctxt
, VCPU_REGS_RAX
);
2240 ctxt
->src
.orig_val
= ctxt
->src
.val
;
2241 ctxt
->src
.val
= ctxt
->dst
.orig_val
;
2242 fastop(ctxt
, em_cmp
);
2244 if (ctxt
->eflags
& X86_EFLAGS_ZF
) {
2245 /* Success: write back to memory; no update of EAX */
2246 ctxt
->src
.type
= OP_NONE
;
2247 ctxt
->dst
.val
= ctxt
->src
.orig_val
;
2249 /* Failure: write the value we saw to EAX. */
2250 ctxt
->src
.type
= OP_REG
;
2251 ctxt
->src
.addr
.reg
= reg_rmw(ctxt
, VCPU_REGS_RAX
);
2252 ctxt
->src
.val
= ctxt
->dst
.orig_val
;
2253 /* Create write-cycle to dest by writing the same value */
2254 ctxt
->dst
.val
= ctxt
->dst
.orig_val
;
2256 return X86EMUL_CONTINUE
;
2259 static int em_lseg(struct x86_emulate_ctxt
*ctxt
)
2261 int seg
= ctxt
->src2
.val
;
2265 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
2267 rc
= load_segment_descriptor(ctxt
, sel
, seg
);
2268 if (rc
!= X86EMUL_CONTINUE
)
2271 ctxt
->dst
.val
= ctxt
->src
.val
;
2275 static int emulator_has_longmode(struct x86_emulate_ctxt
*ctxt
)
2277 u32 eax
, ebx
, ecx
, edx
;
2281 ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
);
2282 return edx
& bit(X86_FEATURE_LM
);
2285 #define GET_SMSTATE(type, smbase, offset) \
2288 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
2290 if (r != X86EMUL_CONTINUE) \
2291 return X86EMUL_UNHANDLEABLE; \
2295 static void rsm_set_desc_flags(struct desc_struct
*desc
, u32 flags
)
2297 desc
->g
= (flags
>> 23) & 1;
2298 desc
->d
= (flags
>> 22) & 1;
2299 desc
->l
= (flags
>> 21) & 1;
2300 desc
->avl
= (flags
>> 20) & 1;
2301 desc
->p
= (flags
>> 15) & 1;
2302 desc
->dpl
= (flags
>> 13) & 3;
2303 desc
->s
= (flags
>> 12) & 1;
2304 desc
->type
= (flags
>> 8) & 15;
2307 static int rsm_load_seg_32(struct x86_emulate_ctxt
*ctxt
, u64 smbase
, int n
)
2309 struct desc_struct desc
;
2313 selector
= GET_SMSTATE(u32
, smbase
, 0x7fa8 + n
* 4);
2316 offset
= 0x7f84 + n
* 12;
2318 offset
= 0x7f2c + (n
- 3) * 12;
2320 set_desc_base(&desc
, GET_SMSTATE(u32
, smbase
, offset
+ 8));
2321 set_desc_limit(&desc
, GET_SMSTATE(u32
, smbase
, offset
+ 4));
2322 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smbase
, offset
));
2323 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, 0, n
);
2324 return X86EMUL_CONTINUE
;
2327 static int rsm_load_seg_64(struct x86_emulate_ctxt
*ctxt
, u64 smbase
, int n
)
2329 struct desc_struct desc
;
2334 offset
= 0x7e00 + n
* 16;
2336 selector
= GET_SMSTATE(u16
, smbase
, offset
);
2337 rsm_set_desc_flags(&desc
, GET_SMSTATE(u16
, smbase
, offset
+ 2) << 8);
2338 set_desc_limit(&desc
, GET_SMSTATE(u32
, smbase
, offset
+ 4));
2339 set_desc_base(&desc
, GET_SMSTATE(u32
, smbase
, offset
+ 8));
2340 base3
= GET_SMSTATE(u32
, smbase
, offset
+ 12);
2342 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, base3
, n
);
2343 return X86EMUL_CONTINUE
;
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				    u64 cr0, u64 cr4)
{
	int bad;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
	}

	return X86EMUL_CONTINUE;
}
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr4;
	int i;

	cr0 =			   GET_SMSTATE(u32, smbase, 0x7ffc);
	ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
	ctxt->eflags =		   GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip =		   GET_SMSTATE(u32, smbase, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smbase, 0x7fcc);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7fc8);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	selector =		   GET_SMSTATE(u32, smbase, 0x7fc4);
	set_desc_base(&desc,	   GET_SMSTATE(u32, smbase, 0x7f64));
	set_desc_limit(&desc,	   GET_SMSTATE(u32, smbase, 0x7f60));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector =		   GET_SMSTATE(u32, smbase, 0x7fc0);
	set_desc_base(&desc,	   GET_SMSTATE(u32, smbase, 0x7f80));
	set_desc_limit(&desc,	   GET_SMSTATE(u32, smbase, 0x7f7c));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address =		   GET_SMSTATE(u32, smbase, 0x7f74);
	dt.size =		   GET_SMSTATE(u32, smbase, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address =		   GET_SMSTATE(u32, smbase, 0x7f58);
	dt.size =		   GET_SMSTATE(u32, smbase, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr4);
}
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u64 val, cr0, cr4;
	u32 base3;
	u16 selector;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);

	ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;

	val = GET_SMSTATE(u32, smbase, 0x7f68);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7f60);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	cr0 =			    GET_SMSTATE(u64, smbase, 0x7f58);
	ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
	cr4 =			    GET_SMSTATE(u64, smbase, 0x7f48);
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
	val =			    GET_SMSTATE(u64, smbase, 0x7ed0);
	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);

	selector =		    GET_SMSTATE(u32, smbase, 0x7e90);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
	set_desc_limit(&desc,	    GET_SMSTATE(u32, smbase, 0x7e94));
	set_desc_base(&desc,	    GET_SMSTATE(u32, smbase, 0x7e98));
	base3 =			    GET_SMSTATE(u32, smbase, 0x7e9c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

	dt.size =		    GET_SMSTATE(u32, smbase, 0x7e84);
	dt.address =		    GET_SMSTATE(u64, smbase, 0x7e88);
	ctxt->ops->set_idt(ctxt, &dt);

	selector =		    GET_SMSTATE(u32, smbase, 0x7e70);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
	set_desc_limit(&desc,	    GET_SMSTATE(u32, smbase, 0x7e74));
	set_desc_base(&desc,	    GET_SMSTATE(u32, smbase, 0x7e78));
	base3 =			    GET_SMSTATE(u32, smbase, 0x7e7c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

	dt.size =		    GET_SMSTATE(u32, smbase, 0x7e64);
	dt.address =		    GET_SMSTATE(u64, smbase, 0x7e68);
	ctxt->ops->set_gdt(ctxt, &dt);

	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	for (i = 0; i < 6; i++) {
		r = rsm_load_seg_64(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	return X86EMUL_CONTINUE;
}
static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
	unsigned long cr0, cr4, efer;
	u64 smbase;
	int ret;

	if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
		return emulate_ud(ctxt);

	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG.  */
		if (cr4 & X86_CR4_PCIDE) {
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
			cr4 &= ~X86_CR4_PCIDE;
		}

		/* A 32-bit code segment is required to clear EFER.LMA.  */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will clear EFER.LMA.  */
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	/* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
	if (cr4 & X86_CR4_PAE)
		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

	/* And finally go back to 32-bit mode.  */
	efer = 0;
	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);

	smbase = ctxt->ops->get_smbase(ctxt);
	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
	else
		ret = rsm_load_state_32(ctxt, smbase + 0x8000);

	if (ret != X86EMUL_CONTINUE) {
		/* FIXME: should triple fault */
		return X86EMUL_UNHANDLEABLE;
	}

	if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
	ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
	return X86EMUL_CONTINUE;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}
static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64bit
	 * longmode. Also a 64bit guest with a
	 * 32bit compat-app running will #UD !! While this
	 * behaviour can be fixed (by emulating) into AMD
	 * response - CPUs of AMD can't behave like Intel.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

	return X86EMUL_CONTINUE;
}
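/*
 * SYSCALL background: the kernel CS selector comes from STAR[47:32] (SS is
 * that selector + 8), the 64-bit entry point from LSTAR (CSTAR for compat
 * mode) and the RFLAGS clear-mask from the SYSCALL_MASK/SFMASK MSR; legacy
 * (non-LMA) SYSCALL instead takes EIP from STAR[31:0].
 */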
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;

	return X86EMUL_CONTINUE;
}
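/*
 * SYSENTER takes CS from IA32_SYSENTER_CS (SS is CS + 8), the entry RIP
 * from IA32_SYSENTER_EIP and the entry RSP from IA32_SYSENTER_ESP; a null
 * selector in IA32_SYSENTER_CS is invalid, which is what the #GP check
 * above enforces.
 */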
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}
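/*
 * SYSEXIT returns to CPL 3 with CS derived from IA32_SYSENTER_CS (+16 for
 * a 32-bit return, +32 for a 64-bit return, SS = CS + 8), the return RIP
 * taken from RDX and the return RSP from RCX, which is what the selector
 * arithmetic above implements.
 */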
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	return ctxt->ops->cpl(ctxt) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
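/*
 * The I/O permission check mirrors what the CPU does in protected mode: a
 * 32-bit TSS holds the I/O bitmap base in a 16-bit field at offset 102
 * (hence the limit check against 103), one bit per port, and any set bit
 * covering the accessed ports denies the access.
 */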
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}
static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * Intel CPUs mask the counter and pointers in quite strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		/* fall through */
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}
2887 static void save_state_to_tss16(struct x86_emulate_ctxt
*ctxt
,
2888 struct tss_segment_16
*tss
)
2890 tss
->ip
= ctxt
->_eip
;
2891 tss
->flag
= ctxt
->eflags
;
2892 tss
->ax
= reg_read(ctxt
, VCPU_REGS_RAX
);
2893 tss
->cx
= reg_read(ctxt
, VCPU_REGS_RCX
);
2894 tss
->dx
= reg_read(ctxt
, VCPU_REGS_RDX
);
2895 tss
->bx
= reg_read(ctxt
, VCPU_REGS_RBX
);
2896 tss
->sp
= reg_read(ctxt
, VCPU_REGS_RSP
);
2897 tss
->bp
= reg_read(ctxt
, VCPU_REGS_RBP
);
2898 tss
->si
= reg_read(ctxt
, VCPU_REGS_RSI
);
2899 tss
->di
= reg_read(ctxt
, VCPU_REGS_RDI
);
2901 tss
->es
= get_segment_selector(ctxt
, VCPU_SREG_ES
);
2902 tss
->cs
= get_segment_selector(ctxt
, VCPU_SREG_CS
);
2903 tss
->ss
= get_segment_selector(ctxt
, VCPU_SREG_SS
);
2904 tss
->ds
= get_segment_selector(ctxt
, VCPU_SREG_DS
);
2905 tss
->ldt
= get_segment_selector(ctxt
, VCPU_SREG_LDTR
);
2908 static int load_state_from_tss16(struct x86_emulate_ctxt
*ctxt
,
2909 struct tss_segment_16
*tss
)
2914 ctxt
->_eip
= tss
->ip
;
2915 ctxt
->eflags
= tss
->flag
| 2;
2916 *reg_write(ctxt
, VCPU_REGS_RAX
) = tss
->ax
;
2917 *reg_write(ctxt
, VCPU_REGS_RCX
) = tss
->cx
;
2918 *reg_write(ctxt
, VCPU_REGS_RDX
) = tss
->dx
;
2919 *reg_write(ctxt
, VCPU_REGS_RBX
) = tss
->bx
;
2920 *reg_write(ctxt
, VCPU_REGS_RSP
) = tss
->sp
;
2921 *reg_write(ctxt
, VCPU_REGS_RBP
) = tss
->bp
;
2922 *reg_write(ctxt
, VCPU_REGS_RSI
) = tss
->si
;
2923 *reg_write(ctxt
, VCPU_REGS_RDI
) = tss
->di
;
2926 * SDM says that segment selectors are loaded before segment
2929 set_segment_selector(ctxt
, tss
->ldt
, VCPU_SREG_LDTR
);
2930 set_segment_selector(ctxt
, tss
->es
, VCPU_SREG_ES
);
2931 set_segment_selector(ctxt
, tss
->cs
, VCPU_SREG_CS
);
2932 set_segment_selector(ctxt
, tss
->ss
, VCPU_SREG_SS
);
2933 set_segment_selector(ctxt
, tss
->ds
, VCPU_SREG_DS
);
2938 * Now load segment descriptors. If fault happens at this stage
2939 * it is handled in a context of new task
2941 ret
= __load_segment_descriptor(ctxt
, tss
->ldt
, VCPU_SREG_LDTR
, cpl
,
2942 X86_TRANSFER_TASK_SWITCH
, NULL
);
2943 if (ret
!= X86EMUL_CONTINUE
)
2945 ret
= __load_segment_descriptor(ctxt
, tss
->es
, VCPU_SREG_ES
, cpl
,
2946 X86_TRANSFER_TASK_SWITCH
, NULL
);
2947 if (ret
!= X86EMUL_CONTINUE
)
2949 ret
= __load_segment_descriptor(ctxt
, tss
->cs
, VCPU_SREG_CS
, cpl
,
2950 X86_TRANSFER_TASK_SWITCH
, NULL
);
2951 if (ret
!= X86EMUL_CONTINUE
)
2953 ret
= __load_segment_descriptor(ctxt
, tss
->ss
, VCPU_SREG_SS
, cpl
,
2954 X86_TRANSFER_TASK_SWITCH
, NULL
);
2955 if (ret
!= X86EMUL_CONTINUE
)
2957 ret
= __load_segment_descriptor(ctxt
, tss
->ds
, VCPU_SREG_DS
, cpl
,
2958 X86_TRANSFER_TASK_SWITCH
, NULL
);
2959 if (ret
!= X86EMUL_CONTINUE
)
2962 return X86EMUL_CONTINUE
;
2965 static int task_switch_16(struct x86_emulate_ctxt
*ctxt
,
2966 u16 tss_selector
, u16 old_tss_sel
,
2967 ulong old_tss_base
, struct desc_struct
*new_desc
)
2969 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2970 struct tss_segment_16 tss_seg
;
2972 u32 new_tss_base
= get_desc_base(new_desc
);
2974 ret
= ops
->read_std(ctxt
, old_tss_base
, &tss_seg
, sizeof tss_seg
,
2976 if (ret
!= X86EMUL_CONTINUE
)
2979 save_state_to_tss16(ctxt
, &tss_seg
);
2981 ret
= ops
->write_std(ctxt
, old_tss_base
, &tss_seg
, sizeof tss_seg
,
2983 if (ret
!= X86EMUL_CONTINUE
)
2986 ret
= ops
->read_std(ctxt
, new_tss_base
, &tss_seg
, sizeof tss_seg
,
2988 if (ret
!= X86EMUL_CONTINUE
)
2991 if (old_tss_sel
!= 0xffff) {
2992 tss_seg
.prev_task_link
= old_tss_sel
;
2994 ret
= ops
->write_std(ctxt
, new_tss_base
,
2995 &tss_seg
.prev_task_link
,
2996 sizeof tss_seg
.prev_task_link
,
2998 if (ret
!= X86EMUL_CONTINUE
)
3002 return load_state_from_tss16(ctxt
, &tss_seg
);
3005 static void save_state_to_tss32(struct x86_emulate_ctxt
*ctxt
,
3006 struct tss_segment_32
*tss
)
3008 /* CR3 and ldt selector are not saved intentionally */
3009 tss
->eip
= ctxt
->_eip
;
3010 tss
->eflags
= ctxt
->eflags
;
3011 tss
->eax
= reg_read(ctxt
, VCPU_REGS_RAX
);
3012 tss
->ecx
= reg_read(ctxt
, VCPU_REGS_RCX
);
3013 tss
->edx
= reg_read(ctxt
, VCPU_REGS_RDX
);
3014 tss
->ebx
= reg_read(ctxt
, VCPU_REGS_RBX
);
3015 tss
->esp
= reg_read(ctxt
, VCPU_REGS_RSP
);
3016 tss
->ebp
= reg_read(ctxt
, VCPU_REGS_RBP
);
3017 tss
->esi
= reg_read(ctxt
, VCPU_REGS_RSI
);
3018 tss
->edi
= reg_read(ctxt
, VCPU_REGS_RDI
);
3020 tss
->es
= get_segment_selector(ctxt
, VCPU_SREG_ES
);
3021 tss
->cs
= get_segment_selector(ctxt
, VCPU_SREG_CS
);
3022 tss
->ss
= get_segment_selector(ctxt
, VCPU_SREG_SS
);
3023 tss
->ds
= get_segment_selector(ctxt
, VCPU_SREG_DS
);
3024 tss
->fs
= get_segment_selector(ctxt
, VCPU_SREG_FS
);
3025 tss
->gs
= get_segment_selector(ctxt
, VCPU_SREG_GS
);
3028 static int load_state_from_tss32(struct x86_emulate_ctxt
*ctxt
,
3029 struct tss_segment_32
*tss
)
3034 if (ctxt
->ops
->set_cr(ctxt
, 3, tss
->cr3
))
3035 return emulate_gp(ctxt
, 0);
3036 ctxt
->_eip
= tss
->eip
;
3037 ctxt
->eflags
= tss
->eflags
| 2;
3039 /* General purpose registers */
3040 *reg_write(ctxt
, VCPU_REGS_RAX
) = tss
->eax
;
3041 *reg_write(ctxt
, VCPU_REGS_RCX
) = tss
->ecx
;
3042 *reg_write(ctxt
, VCPU_REGS_RDX
) = tss
->edx
;
3043 *reg_write(ctxt
, VCPU_REGS_RBX
) = tss
->ebx
;
3044 *reg_write(ctxt
, VCPU_REGS_RSP
) = tss
->esp
;
3045 *reg_write(ctxt
, VCPU_REGS_RBP
) = tss
->ebp
;
3046 *reg_write(ctxt
, VCPU_REGS_RSI
) = tss
->esi
;
3047 *reg_write(ctxt
, VCPU_REGS_RDI
) = tss
->edi
;
3050 * SDM says that segment selectors are loaded before segment
3051 * descriptors. This is important because CPL checks will
3054 set_segment_selector(ctxt
, tss
->ldt_selector
, VCPU_SREG_LDTR
);
3055 set_segment_selector(ctxt
, tss
->es
, VCPU_SREG_ES
);
3056 set_segment_selector(ctxt
, tss
->cs
, VCPU_SREG_CS
);
3057 set_segment_selector(ctxt
, tss
->ss
, VCPU_SREG_SS
);
3058 set_segment_selector(ctxt
, tss
->ds
, VCPU_SREG_DS
);
3059 set_segment_selector(ctxt
, tss
->fs
, VCPU_SREG_FS
);
3060 set_segment_selector(ctxt
, tss
->gs
, VCPU_SREG_GS
);
3063 * If we're switching between Protected Mode and VM86, we need to make
3064 * sure to update the mode before loading the segment descriptors so
3065 * that the selectors are interpreted correctly.
3067 if (ctxt
->eflags
& X86_EFLAGS_VM
) {
3068 ctxt
->mode
= X86EMUL_MODE_VM86
;
3071 ctxt
->mode
= X86EMUL_MODE_PROT32
;
3076 * Now load segment descriptors. If fault happenes at this stage
3077 * it is handled in a context of new task
3079 ret
= __load_segment_descriptor(ctxt
, tss
->ldt_selector
, VCPU_SREG_LDTR
,
3080 cpl
, X86_TRANSFER_TASK_SWITCH
, NULL
);
3081 if (ret
!= X86EMUL_CONTINUE
)
3083 ret
= __load_segment_descriptor(ctxt
, tss
->es
, VCPU_SREG_ES
, cpl
,
3084 X86_TRANSFER_TASK_SWITCH
, NULL
);
3085 if (ret
!= X86EMUL_CONTINUE
)
3087 ret
= __load_segment_descriptor(ctxt
, tss
->cs
, VCPU_SREG_CS
, cpl
,
3088 X86_TRANSFER_TASK_SWITCH
, NULL
);
3089 if (ret
!= X86EMUL_CONTINUE
)
3091 ret
= __load_segment_descriptor(ctxt
, tss
->ss
, VCPU_SREG_SS
, cpl
,
3092 X86_TRANSFER_TASK_SWITCH
, NULL
);
3093 if (ret
!= X86EMUL_CONTINUE
)
3095 ret
= __load_segment_descriptor(ctxt
, tss
->ds
, VCPU_SREG_DS
, cpl
,
3096 X86_TRANSFER_TASK_SWITCH
, NULL
);
3097 if (ret
!= X86EMUL_CONTINUE
)
3099 ret
= __load_segment_descriptor(ctxt
, tss
->fs
, VCPU_SREG_FS
, cpl
,
3100 X86_TRANSFER_TASK_SWITCH
, NULL
);
3101 if (ret
!= X86EMUL_CONTINUE
)
3103 ret
= __load_segment_descriptor(ctxt
, tss
->gs
, VCPU_SREG_GS
, cpl
,
3104 X86_TRANSFER_TASK_SWITCH
, NULL
);
3109 static int task_switch_32(struct x86_emulate_ctxt
*ctxt
,
3110 u16 tss_selector
, u16 old_tss_sel
,
3111 ulong old_tss_base
, struct desc_struct
*new_desc
)
3113 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
3114 struct tss_segment_32 tss_seg
;
3116 u32 new_tss_base
= get_desc_base(new_desc
);
3117 u32 eip_offset
= offsetof(struct tss_segment_32
, eip
);
3118 u32 ldt_sel_offset
= offsetof(struct tss_segment_32
, ldt_selector
);
3120 ret
= ops
->read_std(ctxt
, old_tss_base
, &tss_seg
, sizeof tss_seg
,
3122 if (ret
!= X86EMUL_CONTINUE
)
3125 save_state_to_tss32(ctxt
, &tss_seg
);
3127 /* Only GP registers and segment selectors are saved */
3128 ret
= ops
->write_std(ctxt
, old_tss_base
+ eip_offset
, &tss_seg
.eip
,
3129 ldt_sel_offset
- eip_offset
, &ctxt
->exception
);
3130 if (ret
!= X86EMUL_CONTINUE
)
3133 ret
= ops
->read_std(ctxt
, new_tss_base
, &tss_seg
, sizeof tss_seg
,
3135 if (ret
!= X86EMUL_CONTINUE
)
3138 if (old_tss_sel
!= 0xffff) {
3139 tss_seg
.prev_task_link
= old_tss_sel
;
3141 ret
= ops
->write_std(ctxt
, new_tss_base
,
3142 &tss_seg
.prev_task_link
,
3143 sizeof tss_seg
.prev_task_link
,
3145 if (ret
!= X86EMUL_CONTINUE
)
3149 return load_state_from_tss32(ctxt
, &tss_seg
);
3152 static int emulator_do_task_switch(struct x86_emulate_ctxt
*ctxt
,
3153 u16 tss_selector
, int idt_index
, int reason
,
3154 bool has_error_code
, u32 error_code
)
3156 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
3157 struct desc_struct curr_tss_desc
, next_tss_desc
;
3159 u16 old_tss_sel
= get_segment_selector(ctxt
, VCPU_SREG_TR
);
3160 ulong old_tss_base
=
3161 ops
->get_cached_segment_base(ctxt
, VCPU_SREG_TR
);
3163 ulong desc_addr
, dr7
;
3165 /* FIXME: old_tss_base == ~0 ? */
3167 ret
= read_segment_descriptor(ctxt
, tss_selector
, &next_tss_desc
, &desc_addr
);
3168 if (ret
!= X86EMUL_CONTINUE
)
3170 ret
= read_segment_descriptor(ctxt
, old_tss_sel
, &curr_tss_desc
, &desc_addr
);
3171 if (ret
!= X86EMUL_CONTINUE
)
3174 /* FIXME: check that next_tss_desc is tss */
3177 * Check privileges. The three cases are task switch caused by...
3179 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3180 * 2. Exception/IRQ/iret: No check is performed
3181 * 3. jmp/call to TSS/task-gate: No check is performed since the
3182 * hardware checks it before exiting.
3184 if (reason
== TASK_SWITCH_GATE
) {
3185 if (idt_index
!= -1) {
3186 /* Software interrupts */
3187 struct desc_struct task_gate_desc
;
3190 ret
= read_interrupt_descriptor(ctxt
, idt_index
,
3192 if (ret
!= X86EMUL_CONTINUE
)
3195 dpl
= task_gate_desc
.dpl
;
3196 if ((tss_selector
& 3) > dpl
|| ops
->cpl(ctxt
) > dpl
)
3197 return emulate_gp(ctxt
, (idt_index
<< 3) | 0x2);
3201 desc_limit
= desc_limit_scaled(&next_tss_desc
);
3202 if (!next_tss_desc
.p
||
3203 ((desc_limit
< 0x67 && (next_tss_desc
.type
& 8)) ||
3204 desc_limit
< 0x2b)) {
3205 return emulate_ts(ctxt
, tss_selector
& 0xfffc);
3208 if (reason
== TASK_SWITCH_IRET
|| reason
== TASK_SWITCH_JMP
) {
3209 curr_tss_desc
.type
&= ~(1 << 1); /* clear busy flag */
3210 write_segment_descriptor(ctxt
, old_tss_sel
, &curr_tss_desc
);
3213 if (reason
== TASK_SWITCH_IRET
)
3214 ctxt
->eflags
= ctxt
->eflags
& ~X86_EFLAGS_NT
;
3216 /* set back link to prev task only if NT bit is set in eflags
3217 note that old_tss_sel is not used after this point */
3218 if (reason
!= TASK_SWITCH_CALL
&& reason
!= TASK_SWITCH_GATE
)
3219 old_tss_sel
= 0xffff;
3221 if (next_tss_desc
.type
& 8)
3222 ret
= task_switch_32(ctxt
, tss_selector
, old_tss_sel
,
3223 old_tss_base
, &next_tss_desc
);
3225 ret
= task_switch_16(ctxt
, tss_selector
, old_tss_sel
,
3226 old_tss_base
, &next_tss_desc
);
3227 if (ret
!= X86EMUL_CONTINUE
)
3230 if (reason
== TASK_SWITCH_CALL
|| reason
== TASK_SWITCH_GATE
)
3231 ctxt
->eflags
= ctxt
->eflags
| X86_EFLAGS_NT
;
3233 if (reason
!= TASK_SWITCH_IRET
) {
3234 next_tss_desc
.type
|= (1 << 1); /* set busy flag */
3235 write_segment_descriptor(ctxt
, tss_selector
, &next_tss_desc
);
3238 ops
->set_cr(ctxt
, 0, ops
->get_cr(ctxt
, 0) | X86_CR0_TS
);
3239 ops
->set_segment(ctxt
, tss_selector
, &next_tss_desc
, 0, VCPU_SREG_TR
);
3241 if (has_error_code
) {
3242 ctxt
->op_bytes
= ctxt
->ad_bytes
= (next_tss_desc
.type
& 8) ? 4 : 2;
3243 ctxt
->lock_prefix
= 0;
3244 ctxt
->src
.val
= (unsigned long) error_code
;
3245 ret
= em_push(ctxt
);
3248 ops
->get_dr(ctxt
, 7, &dr7
);
3249 ops
->set_dr(ctxt
, 7, dr7
& ~(DR_LOCAL_ENABLE_MASK
| DR_LOCAL_SLOWDOWN
));
3254 int emulator_task_switch(struct x86_emulate_ctxt
*ctxt
,
3255 u16 tss_selector
, int idt_index
, int reason
,
3256 bool has_error_code
, u32 error_code
)
3260 invalidate_registers(ctxt
);
3261 ctxt
->_eip
= ctxt
->eip
;
3262 ctxt
->dst
.type
= OP_NONE
;
3264 rc
= emulator_do_task_switch(ctxt
, tss_selector
, idt_index
, reason
,
3265 has_error_code
, error_code
);
3267 if (rc
== X86EMUL_CONTINUE
) {
3268 ctxt
->eip
= ctxt
->_eip
;
3269 writeback_registers(ctxt
);
3272 return (rc
== X86EMUL_UNHANDLEABLE
) ? EMULATION_FAILED
: EMULATION_OK
;
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
			    struct operand *op)
{
	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}

static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}
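/*
 * DAS adjusts AL after a BCD subtraction: subtract 6 if the low nibble is
 * above 9 or AF is set, then subtract 0x60 if the original AL was above
 * 0x99 or CF was set, updating AF/CF accordingly.
 */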
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}
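/*
 * AAM divides AL by the immediate base (normally 10), leaving the quotient
 * in AH and the remainder in AL; a zero immediate raises #DE, matching the
 * emulate_de() check above.
 */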
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}
3362 static int em_call(struct x86_emulate_ctxt
*ctxt
)
3365 long rel
= ctxt
->src
.val
;
3367 ctxt
->src
.val
= (unsigned long)ctxt
->_eip
;
3368 rc
= jmp_rel(ctxt
, rel
);
3369 if (rc
!= X86EMUL_CONTINUE
)
3371 return em_push(ctxt
);
3374 static int em_call_far(struct x86_emulate_ctxt
*ctxt
)
3379 struct desc_struct old_desc
, new_desc
;
3380 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
3381 int cpl
= ctxt
->ops
->cpl(ctxt
);
3382 enum x86emul_mode prev_mode
= ctxt
->mode
;
3384 old_eip
= ctxt
->_eip
;
3385 ops
->get_segment(ctxt
, &old_cs
, &old_desc
, NULL
, VCPU_SREG_CS
);
3387 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
3388 rc
= __load_segment_descriptor(ctxt
, sel
, VCPU_SREG_CS
, cpl
,
3389 X86_TRANSFER_CALL_JMP
, &new_desc
);
3390 if (rc
!= X86EMUL_CONTINUE
)
3393 rc
= assign_eip_far(ctxt
, ctxt
->src
.val
, &new_desc
);
3394 if (rc
!= X86EMUL_CONTINUE
)
3397 ctxt
->src
.val
= old_cs
;
3399 if (rc
!= X86EMUL_CONTINUE
)
3402 ctxt
->src
.val
= old_eip
;
3404 /* If we failed, we tainted the memory, but the very least we should
3406 if (rc
!= X86EMUL_CONTINUE
) {
3407 pr_warn_once("faulting far call emulation tainted memory\n");
3412 ops
->set_segment(ctxt
, old_cs
, &old_desc
, 0, VCPU_SREG_CS
);
3413 ctxt
->mode
= prev_mode
;
3418 static int em_ret_near_imm(struct x86_emulate_ctxt
*ctxt
)
3423 rc
= emulate_pop(ctxt
, &eip
, ctxt
->op_bytes
);
3424 if (rc
!= X86EMUL_CONTINUE
)
3426 rc
= assign_eip_near(ctxt
, eip
);
3427 if (rc
!= X86EMUL_CONTINUE
)
3429 rsp_increment(ctxt
, ctxt
->src
.val
);
3430 return X86EMUL_CONTINUE
;
3433 static int em_xchg(struct x86_emulate_ctxt
*ctxt
)
3435 /* Write back the register source. */
3436 ctxt
->src
.val
= ctxt
->dst
.val
;
3437 write_register_operand(&ctxt
->src
);
3439 /* Write back the memory destination with implicit LOCK prefix. */
3440 ctxt
->dst
.val
= ctxt
->src
.orig_val
;
3441 ctxt
->lock_prefix
= 1;
3442 return X86EMUL_CONTINUE
;
3445 static int em_imul_3op(struct x86_emulate_ctxt
*ctxt
)
3447 ctxt
->dst
.val
= ctxt
->src2
.val
;
3448 return fastop(ctxt
, em_imul
);
3451 static int em_cwd(struct x86_emulate_ctxt
*ctxt
)
3453 ctxt
->dst
.type
= OP_REG
;
3454 ctxt
->dst
.bytes
= ctxt
->src
.bytes
;
3455 ctxt
->dst
.addr
.reg
= reg_rmw(ctxt
, VCPU_REGS_RDX
);
3456 ctxt
->dst
.val
= ~((ctxt
->src
.val
>> (ctxt
->src
.bytes
* 8 - 1)) - 1);
3458 return X86EMUL_CONTINUE
;
3461 static int em_rdtsc(struct x86_emulate_ctxt
*ctxt
)
3465 ctxt
->ops
->get_msr(ctxt
, MSR_IA32_TSC
, &tsc
);
3466 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
)tsc
;
3467 *reg_write(ctxt
, VCPU_REGS_RDX
) = tsc
>> 32;
3468 return X86EMUL_CONTINUE
;
3471 static int em_rdpmc(struct x86_emulate_ctxt
*ctxt
)
3475 if (ctxt
->ops
->read_pmc(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
), &pmc
))
3476 return emulate_gp(ctxt
, 0);
3477 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
)pmc
;
3478 *reg_write(ctxt
, VCPU_REGS_RDX
) = pmc
>> 32;
3479 return X86EMUL_CONTINUE
;
3482 static int em_mov(struct x86_emulate_ctxt
*ctxt
)
3484 memcpy(ctxt
->dst
.valptr
, ctxt
->src
.valptr
, sizeof(ctxt
->src
.valptr
));
3485 return X86EMUL_CONTINUE
;
3488 #define FFL(x) bit(X86_FEATURE_##x)
3490 static int em_movbe(struct x86_emulate_ctxt
*ctxt
)
3492 u32 ebx
, ecx
, edx
, eax
= 1;
3496 * Check MOVBE is set in the guest-visible CPUID leaf.
3498 ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
);
3499 if (!(ecx
& FFL(MOVBE
)))
3500 return emulate_ud(ctxt
);
3502 switch (ctxt
->op_bytes
) {
3505 * From MOVBE definition: "...When the operand size is 16 bits,
3506 * the upper word of the destination register remains unchanged
3509 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3510 * rules so we have to do the operation almost per hand.
3512 tmp
= (u16
)ctxt
->src
.val
;
3513 ctxt
->dst
.val
&= ~0xffffUL
;
3514 ctxt
->dst
.val
|= (unsigned long)swab16(tmp
);
3517 ctxt
->dst
.val
= swab32((u32
)ctxt
->src
.val
);
3520 ctxt
->dst
.val
= swab64(ctxt
->src
.val
);
3525 return X86EMUL_CONTINUE
;
3528 static int em_cr_write(struct x86_emulate_ctxt
*ctxt
)
3530 if (ctxt
->ops
->set_cr(ctxt
, ctxt
->modrm_reg
, ctxt
->src
.val
))
3531 return emulate_gp(ctxt
, 0);
3533 /* Disable writeback. */
3534 ctxt
->dst
.type
= OP_NONE
;
3535 return X86EMUL_CONTINUE
;
3538 static int em_dr_write(struct x86_emulate_ctxt
*ctxt
)
3542 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
3543 val
= ctxt
->src
.val
& ~0ULL;
3545 val
= ctxt
->src
.val
& ~0U;
3547 /* #UD condition is already handled. */
3548 if (ctxt
->ops
->set_dr(ctxt
, ctxt
->modrm_reg
, val
) < 0)
3549 return emulate_gp(ctxt
, 0);
3551 /* Disable writeback. */
3552 ctxt
->dst
.type
= OP_NONE
;
3553 return X86EMUL_CONTINUE
;
3556 static int em_wrmsr(struct x86_emulate_ctxt
*ctxt
)
3560 msr_data
= (u32
)reg_read(ctxt
, VCPU_REGS_RAX
)
3561 | ((u64
)reg_read(ctxt
, VCPU_REGS_RDX
) << 32);
3562 if (ctxt
->ops
->set_msr(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
), msr_data
))
3563 return emulate_gp(ctxt
, 0);
3565 return X86EMUL_CONTINUE
;
3568 static int em_rdmsr(struct x86_emulate_ctxt
*ctxt
)
3572 if (ctxt
->ops
->get_msr(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
), &msr_data
))
3573 return emulate_gp(ctxt
, 0);
3575 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
)msr_data
;
3576 *reg_write(ctxt
, VCPU_REGS_RDX
) = msr_data
>> 32;
3577 return X86EMUL_CONTINUE
;
3580 static int em_mov_rm_sreg(struct x86_emulate_ctxt
*ctxt
)
3582 if (ctxt
->modrm_reg
> VCPU_SREG_GS
)
3583 return emulate_ud(ctxt
);
3585 ctxt
->dst
.val
= get_segment_selector(ctxt
, ctxt
->modrm_reg
);
3586 if (ctxt
->dst
.bytes
== 4 && ctxt
->dst
.type
== OP_MEM
)
3587 ctxt
->dst
.bytes
= 2;
3588 return X86EMUL_CONTINUE
;
3591 static int em_mov_sreg_rm(struct x86_emulate_ctxt
*ctxt
)
3593 u16 sel
= ctxt
->src
.val
;
3595 if (ctxt
->modrm_reg
== VCPU_SREG_CS
|| ctxt
->modrm_reg
> VCPU_SREG_GS
)
3596 return emulate_ud(ctxt
);
3598 if (ctxt
->modrm_reg
== VCPU_SREG_SS
)
3599 ctxt
->interruptibility
= KVM_X86_SHADOW_INT_MOV_SS
;
3601 /* Disable writeback. */
3602 ctxt
->dst
.type
= OP_NONE
;
3603 return load_segment_descriptor(ctxt
, sel
, ctxt
->modrm_reg
);
3606 static int em_lldt(struct x86_emulate_ctxt
*ctxt
)
3608 u16 sel
= ctxt
->src
.val
;
3610 /* Disable writeback. */
3611 ctxt
->dst
.type
= OP_NONE
;
3612 return load_segment_descriptor(ctxt
, sel
, VCPU_SREG_LDTR
);
3615 static int em_ltr(struct x86_emulate_ctxt
*ctxt
)
3617 u16 sel
= ctxt
->src
.val
;
3619 /* Disable writeback. */
3620 ctxt
->dst
.type
= OP_NONE
;
3621 return load_segment_descriptor(ctxt
, sel
, VCPU_SREG_TR
);
3624 static int em_invlpg(struct x86_emulate_ctxt
*ctxt
)
3629 rc
= linearize(ctxt
, ctxt
->src
.addr
.mem
, 1, false, &linear
);
3630 if (rc
== X86EMUL_CONTINUE
)
3631 ctxt
->ops
->invlpg(ctxt
, linear
);
3632 /* Disable writeback. */
3633 ctxt
->dst
.type
= OP_NONE
;
3634 return X86EMUL_CONTINUE
;
3637 static int em_clts(struct x86_emulate_ctxt
*ctxt
)
3641 cr0
= ctxt
->ops
->get_cr(ctxt
, 0);
3643 ctxt
->ops
->set_cr(ctxt
, 0, cr0
);
3644 return X86EMUL_CONTINUE
;
3647 static int em_hypercall(struct x86_emulate_ctxt
*ctxt
)
3649 int rc
= ctxt
->ops
->fix_hypercall(ctxt
);
3651 if (rc
!= X86EMUL_CONTINUE
)
3654 /* Let the processor re-execute the fixed hypercall */
3655 ctxt
->_eip
= ctxt
->eip
;
3656 /* Disable writeback. */
3657 ctxt
->dst
.type
= OP_NONE
;
3658 return X86EMUL_CONTINUE
;
3661 static int emulate_store_desc_ptr(struct x86_emulate_ctxt
*ctxt
,
3662 void (*get
)(struct x86_emulate_ctxt
*ctxt
,
3663 struct desc_ptr
*ptr
))
3665 struct desc_ptr desc_ptr
;
3667 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
3669 get(ctxt
, &desc_ptr
);
3670 if (ctxt
->op_bytes
== 2) {
3672 desc_ptr
.address
&= 0x00ffffff;
3674 /* Disable writeback. */
3675 ctxt
->dst
.type
= OP_NONE
;
3676 return segmented_write(ctxt
, ctxt
->dst
.addr
.mem
,
3677 &desc_ptr
, 2 + ctxt
->op_bytes
);
3680 static int em_sgdt(struct x86_emulate_ctxt
*ctxt
)
3682 return emulate_store_desc_ptr(ctxt
, ctxt
->ops
->get_gdt
);
3685 static int em_sidt(struct x86_emulate_ctxt
*ctxt
)
3687 return emulate_store_desc_ptr(ctxt
, ctxt
->ops
->get_idt
);
3690 static int em_lgdt_lidt(struct x86_emulate_ctxt
*ctxt
, bool lgdt
)
3692 struct desc_ptr desc_ptr
;
3695 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
3697 rc
= read_descriptor(ctxt
, ctxt
->src
.addr
.mem
,
3698 &desc_ptr
.size
, &desc_ptr
.address
,
3700 if (rc
!= X86EMUL_CONTINUE
)
3702 if (ctxt
->mode
== X86EMUL_MODE_PROT64
&&
3703 is_noncanonical_address(desc_ptr
.address
))
3704 return emulate_gp(ctxt
, 0);
3706 ctxt
->ops
->set_gdt(ctxt
, &desc_ptr
);
3708 ctxt
->ops
->set_idt(ctxt
, &desc_ptr
);
3709 /* Disable writeback. */
3710 ctxt
->dst
.type
= OP_NONE
;
3711 return X86EMUL_CONTINUE
;
3714 static int em_lgdt(struct x86_emulate_ctxt
*ctxt
)
3716 return em_lgdt_lidt(ctxt
, true);
3719 static int em_lidt(struct x86_emulate_ctxt
*ctxt
)
3721 return em_lgdt_lidt(ctxt
, false);
3724 static int em_smsw(struct x86_emulate_ctxt
*ctxt
)
3726 if (ctxt
->dst
.type
== OP_MEM
)
3727 ctxt
->dst
.bytes
= 2;
3728 ctxt
->dst
.val
= ctxt
->ops
->get_cr(ctxt
, 0);
3729 return X86EMUL_CONTINUE
;
3732 static int em_lmsw(struct x86_emulate_ctxt
*ctxt
)
3734 ctxt
->ops
->set_cr(ctxt
, 0, (ctxt
->ops
->get_cr(ctxt
, 0) & ~0x0eul
)
3735 | (ctxt
->src
.val
& 0x0f));
3736 ctxt
->dst
.type
= OP_NONE
;
3737 return X86EMUL_CONTINUE
;
3740 static int em_loop(struct x86_emulate_ctxt
*ctxt
)
3742 int rc
= X86EMUL_CONTINUE
;
3744 register_address_increment(ctxt
, VCPU_REGS_RCX
, -1);
3745 if ((address_mask(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
)) != 0) &&
3746 (ctxt
->b
== 0xe2 || test_cc(ctxt
->b
^ 0x5, ctxt
->eflags
)))
3747 rc
= jmp_rel(ctxt
, ctxt
->src
.val
);
3752 static int em_jcxz(struct x86_emulate_ctxt
*ctxt
)
3754 int rc
= X86EMUL_CONTINUE
;
3756 if (address_mask(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
)) == 0)
3757 rc
= jmp_rel(ctxt
, ctxt
->src
.val
);
3762 static int em_in(struct x86_emulate_ctxt
*ctxt
)
3764 if (!pio_in_emulated(ctxt
, ctxt
->dst
.bytes
, ctxt
->src
.val
,
3766 return X86EMUL_IO_NEEDED
;
3768 return X86EMUL_CONTINUE
;
3771 static int em_out(struct x86_emulate_ctxt
*ctxt
)
3773 ctxt
->ops
->pio_out_emulated(ctxt
, ctxt
->src
.bytes
, ctxt
->dst
.val
,
3775 /* Disable writeback. */
3776 ctxt
->dst
.type
= OP_NONE
;
3777 return X86EMUL_CONTINUE
;
3780 static int em_cli(struct x86_emulate_ctxt
*ctxt
)
3782 if (emulator_bad_iopl(ctxt
))
3783 return emulate_gp(ctxt
, 0);
3785 ctxt
->eflags
&= ~X86_EFLAGS_IF
;
3786 return X86EMUL_CONTINUE
;
3789 static int em_sti(struct x86_emulate_ctxt
*ctxt
)
3791 if (emulator_bad_iopl(ctxt
))
3792 return emulate_gp(ctxt
, 0);
3794 ctxt
->interruptibility
= KVM_X86_SHADOW_INT_STI
;
3795 ctxt
->eflags
|= X86_EFLAGS_IF
;
3796 return X86EMUL_CONTINUE
;
3799 static int em_cpuid(struct x86_emulate_ctxt
*ctxt
)
3801 u32 eax
, ebx
, ecx
, edx
;
3803 eax
= reg_read(ctxt
, VCPU_REGS_RAX
);
3804 ecx
= reg_read(ctxt
, VCPU_REGS_RCX
);
3805 ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
);
3806 *reg_write(ctxt
, VCPU_REGS_RAX
) = eax
;
3807 *reg_write(ctxt
, VCPU_REGS_RBX
) = ebx
;
3808 *reg_write(ctxt
, VCPU_REGS_RCX
) = ecx
;
3809 *reg_write(ctxt
, VCPU_REGS_RDX
) = edx
;
3810 return X86EMUL_CONTINUE
;
3813 static int em_sahf(struct x86_emulate_ctxt
*ctxt
)
3817 flags
= X86_EFLAGS_CF
| X86_EFLAGS_PF
| X86_EFLAGS_AF
| X86_EFLAGS_ZF
|
3819 flags
&= *reg_rmw(ctxt
, VCPU_REGS_RAX
) >> 8;
3821 ctxt
->eflags
&= ~0xffUL
;
3822 ctxt
->eflags
|= flags
| X86_EFLAGS_FIXED
;
3823 return X86EMUL_CONTINUE
;
3826 static int em_lahf(struct x86_emulate_ctxt
*ctxt
)
3828 *reg_rmw(ctxt
, VCPU_REGS_RAX
) &= ~0xff00UL
;
3829 *reg_rmw(ctxt
, VCPU_REGS_RAX
) |= (ctxt
->eflags
& 0xff) << 8;
3830 return X86EMUL_CONTINUE
;
3833 static int em_bswap(struct x86_emulate_ctxt
*ctxt
)
3835 switch (ctxt
->op_bytes
) {
3836 #ifdef CONFIG_X86_64
3838 asm("bswap %0" : "+r"(ctxt
->dst
.val
));
3842 asm("bswap %0" : "+r"(*(u32
*)&ctxt
->dst
.val
));
3845 return X86EMUL_CONTINUE
;
3848 static int em_clflush(struct x86_emulate_ctxt
*ctxt
)
3850 /* emulating clflush regardless of cpuid */
3851 return X86EMUL_CONTINUE
;
3854 static int em_movsxd(struct x86_emulate_ctxt
*ctxt
)
3856 ctxt
->dst
.val
= (s32
) ctxt
->src
.val
;
3857 return X86EMUL_CONTINUE
;
3860 static bool valid_cr(int nr
)
3872 static int check_cr_read(struct x86_emulate_ctxt
*ctxt
)
3874 if (!valid_cr(ctxt
->modrm_reg
))
3875 return emulate_ud(ctxt
);
3877 return X86EMUL_CONTINUE
;
3880 static int check_cr_write(struct x86_emulate_ctxt
*ctxt
)
3882 u64 new_val
= ctxt
->src
.val64
;
3883 int cr
= ctxt
->modrm_reg
;
3886 static u64 cr_reserved_bits
[] = {
3887 0xffffffff00000000ULL
,
3888 0, 0, 0, /* CR3 checked later */
3895 return emulate_ud(ctxt
);
3897 if (new_val
& cr_reserved_bits
[cr
])
3898 return emulate_gp(ctxt
, 0);
3903 if (((new_val
& X86_CR0_PG
) && !(new_val
& X86_CR0_PE
)) ||
3904 ((new_val
& X86_CR0_NW
) && !(new_val
& X86_CR0_CD
)))
3905 return emulate_gp(ctxt
, 0);
3907 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
3908 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
3910 if ((new_val
& X86_CR0_PG
) && (efer
& EFER_LME
) &&
3911 !(cr4
& X86_CR4_PAE
))
3912 return emulate_gp(ctxt
, 0);
3919 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
3920 if (efer
& EFER_LMA
)
3921 rsvd
= CR3_L_MODE_RESERVED_BITS
& ~CR3_PCID_INVD
;
3924 return emulate_gp(ctxt
, 0);
3929 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
3931 if ((efer
& EFER_LMA
) && !(new_val
& X86_CR4_PAE
))
3932 return emulate_gp(ctxt
, 0);
3938 return X86EMUL_CONTINUE
;
3941 static int check_dr7_gd(struct x86_emulate_ctxt
*ctxt
)
3945 ctxt
->ops
->get_dr(ctxt
, 7, &dr7
);
3947 /* Check if DR7.Global_Enable is set */
3948 return dr7
& (1 << 13);
3951 static int check_dr_read(struct x86_emulate_ctxt
*ctxt
)
3953 int dr
= ctxt
->modrm_reg
;
3957 return emulate_ud(ctxt
);
3959 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
3960 if ((cr4
& X86_CR4_DE
) && (dr
== 4 || dr
== 5))
3961 return emulate_ud(ctxt
);
3963 if (check_dr7_gd(ctxt
)) {
3966 ctxt
->ops
->get_dr(ctxt
, 6, &dr6
);
3968 dr6
|= DR6_BD
| DR6_RTM
;
3969 ctxt
->ops
->set_dr(ctxt
, 6, dr6
);
3970 return emulate_db(ctxt
);
3973 return X86EMUL_CONTINUE
;
3976 static int check_dr_write(struct x86_emulate_ctxt
*ctxt
)
3978 u64 new_val
= ctxt
->src
.val64
;
3979 int dr
= ctxt
->modrm_reg
;
3981 if ((dr
== 6 || dr
== 7) && (new_val
& 0xffffffff00000000ULL
))
3982 return emulate_gp(ctxt
, 0);
3984 return check_dr_read(ctxt
);
3987 static int check_svme(struct x86_emulate_ctxt
*ctxt
)
3991 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
3993 if (!(efer
& EFER_SVME
))
3994 return emulate_ud(ctxt
);
3996 return X86EMUL_CONTINUE
;
3999 static int check_svme_pa(struct x86_emulate_ctxt
*ctxt
)
4001 u64 rax
= reg_read(ctxt
, VCPU_REGS_RAX
);
4003 /* Valid physical address? */
4004 if (rax
& 0xffff000000000000ULL
)
4005 return emulate_gp(ctxt
, 0);
4007 return check_svme(ctxt
);
4010 static int check_rdtsc(struct x86_emulate_ctxt
*ctxt
)
4012 u64 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
4014 if (cr4
& X86_CR4_TSD
&& ctxt
->ops
->cpl(ctxt
))
4015 return emulate_ud(ctxt
);
4017 return X86EMUL_CONTINUE
;
4020 static int check_rdpmc(struct x86_emulate_ctxt
*ctxt
)
4022 u64 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
4023 u64 rcx
= reg_read(ctxt
, VCPU_REGS_RCX
);
4025 if ((!(cr4
& X86_CR4_PCE
) && ctxt
->ops
->cpl(ctxt
)) ||
4026 ctxt
->ops
->check_pmc(ctxt
, rcx
))
4027 return emulate_gp(ctxt
, 0);
4029 return X86EMUL_CONTINUE
;
4032 static int check_perm_in(struct x86_emulate_ctxt
*ctxt
)
4034 ctxt
->dst
.bytes
= min(ctxt
->dst
.bytes
, 4u);
4035 if (!emulator_io_permited(ctxt
, ctxt
->src
.val
, ctxt
->dst
.bytes
))
4036 return emulate_gp(ctxt
, 0);
4038 return X86EMUL_CONTINUE
;
4041 static int check_perm_out(struct x86_emulate_ctxt
*ctxt
)
4043 ctxt
->src
.bytes
= min(ctxt
->src
.bytes
, 4u);
4044 if (!emulator_io_permited(ctxt
, ctxt
->dst
.val
, ctxt
->src
.bytes
))
4045 return emulate_gp(ctxt
, 0);
4047 return X86EMUL_CONTINUE
;
4050 #define D(_y) { .flags = (_y) }
4051 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4052 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4053 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4054 #define N D(NotImpl)
4055 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4056 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4057 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4058 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4059 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4060 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4061 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4062 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4063 #define II(_f, _e, _i) \
4064 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4065 #define IIP(_f, _e, _i, _p) \
4066 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4067 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4068 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4070 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4071 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4072 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4073 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4074 #define I2bvIP(_f, _e, _i, _p) \
4075 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4077 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4078 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4079 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4081 static const struct opcode group7_rm0
[] = {
4083 I(SrcNone
| Priv
| EmulateOnUD
, em_hypercall
),
4087 static const struct opcode group7_rm1
[] = {
4088 DI(SrcNone
| Priv
, monitor
),
4089 DI(SrcNone
| Priv
, mwait
),
4093 static const struct opcode group7_rm3
[] = {
4094 DIP(SrcNone
| Prot
| Priv
, vmrun
, check_svme_pa
),
4095 II(SrcNone
| Prot
| EmulateOnUD
, em_hypercall
, vmmcall
),
4096 DIP(SrcNone
| Prot
| Priv
, vmload
, check_svme_pa
),
4097 DIP(SrcNone
| Prot
| Priv
, vmsave
, check_svme_pa
),
4098 DIP(SrcNone
| Prot
| Priv
, stgi
, check_svme
),
4099 DIP(SrcNone
| Prot
| Priv
, clgi
, check_svme
),
4100 DIP(SrcNone
| Prot
| Priv
, skinit
, check_svme
),
4101 DIP(SrcNone
| Prot
| Priv
, invlpga
, check_svme
),
4104 static const struct opcode group7_rm7
[] = {
4106 DIP(SrcNone
, rdtscp
, check_rdtsc
),
4110 static const struct opcode group1
[] = {
4112 F(Lock
| PageTable
, em_or
),
4115 F(Lock
| PageTable
, em_and
),
4121 static const struct opcode group1A
[] = {
4122 I(DstMem
| SrcNone
| Mov
| Stack
| IncSP
, em_pop
), N
, N
, N
, N
, N
, N
, N
,
4125 static const struct opcode group2
[] = {
4126 F(DstMem
| ModRM
, em_rol
),
4127 F(DstMem
| ModRM
, em_ror
),
4128 F(DstMem
| ModRM
, em_rcl
),
4129 F(DstMem
| ModRM
, em_rcr
),
4130 F(DstMem
| ModRM
, em_shl
),
4131 F(DstMem
| ModRM
, em_shr
),
4132 F(DstMem
| ModRM
, em_shl
),
4133 F(DstMem
| ModRM
, em_sar
),
4136 static const struct opcode group3
[] = {
4137 F(DstMem
| SrcImm
| NoWrite
, em_test
),
4138 F(DstMem
| SrcImm
| NoWrite
, em_test
),
4139 F(DstMem
| SrcNone
| Lock
, em_not
),
4140 F(DstMem
| SrcNone
| Lock
, em_neg
),
4141 F(DstXacc
| Src2Mem
, em_mul_ex
),
4142 F(DstXacc
| Src2Mem
, em_imul_ex
),
4143 F(DstXacc
| Src2Mem
, em_div_ex
),
4144 F(DstXacc
| Src2Mem
, em_idiv_ex
),
4147 static const struct opcode group4
[] = {
4148 F(ByteOp
| DstMem
| SrcNone
| Lock
, em_inc
),
4149 F(ByteOp
| DstMem
| SrcNone
| Lock
, em_dec
),
4153 static const struct opcode group5
[] = {
4154 F(DstMem
| SrcNone
| Lock
, em_inc
),
4155 F(DstMem
| SrcNone
| Lock
, em_dec
),
4156 I(SrcMem
| NearBranch
, em_call_near_abs
),
4157 I(SrcMemFAddr
| ImplicitOps
, em_call_far
),
4158 I(SrcMem
| NearBranch
, em_jmp_abs
),
4159 I(SrcMemFAddr
| ImplicitOps
, em_jmp_far
),
4160 I(SrcMem
| Stack
, em_push
), D(Undefined
),
4163 static const struct opcode group6
[] = {
4164 DI(Prot
| DstMem
, sldt
),
4165 DI(Prot
| DstMem
, str
),
4166 II(Prot
| Priv
| SrcMem16
, em_lldt
, lldt
),
4167 II(Prot
| Priv
| SrcMem16
, em_ltr
, ltr
),
4171 static const struct group_dual group7
= { {
4172 II(Mov
| DstMem
, em_sgdt
, sgdt
),
4173 II(Mov
| DstMem
, em_sidt
, sidt
),
4174 II(SrcMem
| Priv
, em_lgdt
, lgdt
),
4175 II(SrcMem
| Priv
, em_lidt
, lidt
),
4176 II(SrcNone
| DstMem
| Mov
, em_smsw
, smsw
), N
,
4177 II(SrcMem16
| Mov
| Priv
, em_lmsw
, lmsw
),
4178 II(SrcMem
| ByteOp
| Priv
| NoAccess
, em_invlpg
, invlpg
),
4182 N
, EXT(0, group7_rm3
),
4183 II(SrcNone
| DstMem
| Mov
, em_smsw
, smsw
), N
,
4184 II(SrcMem16
| Mov
| Priv
, em_lmsw
, lmsw
),
4188 static const struct opcode group8
[] = {
4190 F(DstMem
| SrcImmByte
| NoWrite
, em_bt
),
4191 F(DstMem
| SrcImmByte
| Lock
| PageTable
, em_bts
),
4192 F(DstMem
| SrcImmByte
| Lock
, em_btr
),
4193 F(DstMem
| SrcImmByte
| Lock
| PageTable
, em_btc
),
4196 static const struct group_dual group9
= { {
4197 N
, I(DstMem64
| Lock
| PageTable
, em_cmpxchg8b
), N
, N
, N
, N
, N
, N
,
4199 N
, N
, N
, N
, N
, N
, N
, N
,
4202 static const struct opcode group11
[] = {
4203 I(DstMem
| SrcImm
| Mov
| PageTable
, em_mov
),
4207 static const struct gprefix pfx_0f_ae_7
= {
4208 I(SrcMem
| ByteOp
, em_clflush
), N
, N
, N
,
4211 static const struct group_dual group15
= { {
4212 N
, N
, N
, N
, N
, N
, N
, GP(0, &pfx_0f_ae_7
),
4214 N
, N
, N
, N
, N
, N
, N
, N
,
4217 static const struct gprefix pfx_0f_6f_0f_7f
= {
4218 I(Mmx
, em_mov
), I(Sse
| Aligned
, em_mov
), N
, I(Sse
| Unaligned
, em_mov
),
4221 static const struct instr_dual instr_dual_0f_2b
= {
4225 static const struct gprefix pfx_0f_2b
= {
4226 ID(0, &instr_dual_0f_2b
), ID(0, &instr_dual_0f_2b
), N
, N
,
4229 static const struct gprefix pfx_0f_28_0f_29
= {
4230 I(Aligned
, em_mov
), I(Aligned
, em_mov
), N
, N
,
4233 static const struct gprefix pfx_0f_e7
= {
4234 N
, I(Sse
, em_mov
), N
, N
,
4237 static const struct escape escape_d9
= { {
4238 N
, N
, N
, N
, N
, N
, N
, I(DstMem16
| Mov
, em_fnstcw
),
4241 N
, N
, N
, N
, N
, N
, N
, N
,
4243 N
, N
, N
, N
, N
, N
, N
, N
,
4245 N
, N
, N
, N
, N
, N
, N
, N
,
4247 N
, N
, N
, N
, N
, N
, N
, N
,
4249 N
, N
, N
, N
, N
, N
, N
, N
,
4251 N
, N
, N
, N
, N
, N
, N
, N
,
4253 N
, N
, N
, N
, N
, N
, N
, N
,
4255 N
, N
, N
, N
, N
, N
, N
, N
,
4258 static const struct escape escape_db
= { {
4259 N
, N
, N
, N
, N
, N
, N
, N
,
4262 N
, N
, N
, N
, N
, N
, N
, N
,
4264 N
, N
, N
, N
, N
, N
, N
, N
,
4266 N
, N
, N
, N
, N
, N
, N
, N
,
4268 N
, N
, N
, N
, N
, N
, N
, N
,
4270 N
, N
, N
, I(ImplicitOps
, em_fninit
), N
, N
, N
, N
,
4272 N
, N
, N
, N
, N
, N
, N
, N
,
4274 N
, N
, N
, N
, N
, N
, N
, N
,
4276 N
, N
, N
, N
, N
, N
, N
, N
,
4279 static const struct escape escape_dd
= { {
4280 N
, N
, N
, N
, N
, N
, N
, I(DstMem16
| Mov
, em_fnstsw
),
4283 N
, N
, N
, N
, N
, N
, N
, N
,
4285 N
, N
, N
, N
, N
, N
, N
, N
,
4287 N
, N
, N
, N
, N
, N
, N
, N
,
4289 N
, N
, N
, N
, N
, N
, N
, N
,
4291 N
, N
, N
, N
, N
, N
, N
, N
,
4293 N
, N
, N
, N
, N
, N
, N
, N
,
4295 N
, N
, N
, N
, N
, N
, N
, N
,
4297 N
, N
, N
, N
, N
, N
, N
, N
,
4300 static const struct instr_dual instr_dual_0f_c3
= {
4301 I(DstMem
| SrcReg
| ModRM
| No16
| Mov
, em_mov
), N
4304 static const struct mode_dual mode_dual_63
= {
4305 N
, I(DstReg
| SrcMem32
| ModRM
| Mov
, em_movsxd
)
4308 static const struct opcode opcode_table
[256] = {
4310 F6ALU(Lock
, em_add
),
4311 I(ImplicitOps
| Stack
| No64
| Src2ES
, em_push_sreg
),
4312 I(ImplicitOps
| Stack
| No64
| Src2ES
, em_pop_sreg
),
4314 F6ALU(Lock
| PageTable
, em_or
),
4315 I(ImplicitOps
| Stack
| No64
| Src2CS
, em_push_sreg
),
4318 F6ALU(Lock
, em_adc
),
4319 I(ImplicitOps
| Stack
| No64
| Src2SS
, em_push_sreg
),
4320 I(ImplicitOps
| Stack
| No64
| Src2SS
, em_pop_sreg
),
4322 F6ALU(Lock
, em_sbb
),
4323 I(ImplicitOps
| Stack
| No64
| Src2DS
, em_push_sreg
),
4324 I(ImplicitOps
| Stack
| No64
| Src2DS
, em_pop_sreg
),
4326 F6ALU(Lock
| PageTable
, em_and
), N
, N
,
4328 F6ALU(Lock
, em_sub
), N
, I(ByteOp
| DstAcc
| No64
, em_das
),
4330 F6ALU(Lock
, em_xor
), N
, N
,
4332 F6ALU(NoWrite
, em_cmp
), N
, N
,
4334 X8(F(DstReg
, em_inc
)), X8(F(DstReg
, em_dec
)),
4336 X8(I(SrcReg
| Stack
, em_push
)),
4338 X8(I(DstReg
| Stack
, em_pop
)),
4340 I(ImplicitOps
| Stack
| No64
, em_pusha
),
4341 I(ImplicitOps
| Stack
| No64
, em_popa
),
4342 N
, MD(ModRM
, &mode_dual_63
),
4345 I(SrcImm
| Mov
| Stack
, em_push
),
4346 I(DstReg
| SrcMem
| ModRM
| Src2Imm
, em_imul_3op
),
4347 I(SrcImmByte
| Mov
| Stack
, em_push
),
4348 I(DstReg
| SrcMem
| ModRM
| Src2ImmByte
, em_imul_3op
),
4349 I2bvIP(DstDI
| SrcDX
| Mov
| String
| Unaligned
, em_in
, ins
, check_perm_in
), /* insb, insw/insd */
4350 I2bvIP(SrcSI
| DstDX
| String
, em_out
, outs
, check_perm_out
), /* outsb, outsw/outsd */
4352 X16(D(SrcImmByte
| NearBranch
)),
4354 G(ByteOp
| DstMem
| SrcImm
, group1
),
4355 G(DstMem
| SrcImm
, group1
),
4356 G(ByteOp
| DstMem
| SrcImm
| No64
, group1
),
4357 G(DstMem
| SrcImmByte
, group1
),
4358 F2bv(DstMem
| SrcReg
| ModRM
| NoWrite
, em_test
),
4359 I2bv(DstMem
| SrcReg
| ModRM
| Lock
| PageTable
, em_xchg
),
4361 I2bv(DstMem
| SrcReg
| ModRM
| Mov
| PageTable
, em_mov
),
4362 I2bv(DstReg
| SrcMem
| ModRM
| Mov
, em_mov
),
4363 I(DstMem
| SrcNone
| ModRM
| Mov
| PageTable
, em_mov_rm_sreg
),
4364 D(ModRM
| SrcMem
| NoAccess
| DstReg
),
4365 I(ImplicitOps
| SrcMem16
| ModRM
, em_mov_sreg_rm
),
4368 DI(SrcAcc
| DstReg
, pause
), X7(D(SrcAcc
| DstReg
)),
4370 D(DstAcc
| SrcNone
), I(ImplicitOps
| SrcAcc
, em_cwd
),
4371 I(SrcImmFAddr
| No64
, em_call_far
), N
,
4372 II(ImplicitOps
| Stack
, em_pushf
, pushf
),
4373 II(ImplicitOps
| Stack
, em_popf
, popf
),
4374 I(ImplicitOps
, em_sahf
), I(ImplicitOps
, em_lahf
),
4376 I2bv(DstAcc
| SrcMem
| Mov
| MemAbs
, em_mov
),
4377 I2bv(DstMem
| SrcAcc
| Mov
| MemAbs
| PageTable
, em_mov
),
4378 I2bv(SrcSI
| DstDI
| Mov
| String
, em_mov
),
4379 F2bv(SrcSI
| DstDI
| String
| NoWrite
, em_cmp_r
),
4381 F2bv(DstAcc
| SrcImm
| NoWrite
, em_test
),
4382 I2bv(SrcAcc
| DstDI
| Mov
| String
, em_mov
),
4383 I2bv(SrcSI
| DstAcc
| Mov
| String
, em_mov
),
4384 F2bv(SrcAcc
| DstDI
| String
| NoWrite
, em_cmp_r
),
4386 X8(I(ByteOp
| DstReg
| SrcImm
| Mov
, em_mov
)),
4388 X8(I(DstReg
| SrcImm64
| Mov
, em_mov
)),
4390 G(ByteOp
| Src2ImmByte
, group2
), G(Src2ImmByte
, group2
),
4391 I(ImplicitOps
| NearBranch
| SrcImmU16
, em_ret_near_imm
),
4392 I(ImplicitOps
| NearBranch
, em_ret
),
4393 I(DstReg
| SrcMemFAddr
| ModRM
| No64
| Src2ES
, em_lseg
),
4394 I(DstReg
| SrcMemFAddr
| ModRM
| No64
| Src2DS
, em_lseg
),
4395 G(ByteOp
, group11
), G(0, group11
),
4397 I(Stack
| SrcImmU16
| Src2ImmByte
, em_enter
), I(Stack
, em_leave
),
4398 I(ImplicitOps
| SrcImmU16
, em_ret_far_imm
),
4399 I(ImplicitOps
, em_ret_far
),
4400 D(ImplicitOps
), DI(SrcImmByte
, intn
),
4401 D(ImplicitOps
| No64
), II(ImplicitOps
, em_iret
, iret
),
4403 G(Src2One
| ByteOp
, group2
), G(Src2One
, group2
),
4404 G(Src2CL
| ByteOp
, group2
), G(Src2CL
, group2
),
4405 I(DstAcc
| SrcImmUByte
| No64
, em_aam
),
4406 I(DstAcc
| SrcImmUByte
| No64
, em_aad
),
4407 F(DstAcc
| ByteOp
| No64
, em_salc
),
4408 I(DstAcc
| SrcXLat
| ByteOp
, em_mov
),
4410 N
, E(0, &escape_d9
), N
, E(0, &escape_db
), N
, E(0, &escape_dd
), N
, N
,
4412 X3(I(SrcImmByte
| NearBranch
, em_loop
)),
4413 I(SrcImmByte
| NearBranch
, em_jcxz
),
4414 I2bvIP(SrcImmUByte
| DstAcc
, em_in
, in
, check_perm_in
),
4415 I2bvIP(SrcAcc
| DstImmUByte
, em_out
, out
, check_perm_out
),
4417 I(SrcImm
| NearBranch
, em_call
), D(SrcImm
| ImplicitOps
| NearBranch
),
4418 I(SrcImmFAddr
| No64
, em_jmp_far
),
4419 D(SrcImmByte
| ImplicitOps
| NearBranch
),
4420 I2bvIP(SrcDX
| DstAcc
, em_in
, in
, check_perm_in
),
4421 I2bvIP(SrcAcc
| DstDX
, em_out
, out
, check_perm_out
),
4423 N
, DI(ImplicitOps
, icebp
), N
, N
,
4424 DI(ImplicitOps
| Priv
, hlt
), D(ImplicitOps
),
4425 G(ByteOp
, group3
), G(0, group3
),
4427 D(ImplicitOps
), D(ImplicitOps
),
4428 I(ImplicitOps
, em_cli
), I(ImplicitOps
, em_sti
),
4429 D(ImplicitOps
), D(ImplicitOps
), G(0, group4
), G(0, group5
),
4432 static const struct opcode twobyte_table[256] = {
4434 G(0, group6), GD(0, &group7), N, N,
4435 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4436 II(ImplicitOps | Priv, em_clts, clts), N,
4437 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4438 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4440 N, N, N, N, N, N, N, N,
4441 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4442 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4444 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4445 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4446 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4448 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4451 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4452 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4453 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4456 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4457 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4458 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4459 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4460 I(ImplicitOps | EmulateOnUD, em_sysenter),
4461 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4463 N, N, N, N, N, N, N, N,
4465 X16(D(DstReg | SrcMem | ModRM)),
4467 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4472 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4477 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4479 X16(D(SrcImm | NearBranch)),
4481 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4483 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4484 II(ImplicitOps, em_cpuid, cpuid),
4485 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4486 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4487 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4489 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4490 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4491 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4492 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4493 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4494 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4496 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4497 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4498 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4499 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4500 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4501 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4505 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4506 I(DstReg | SrcMem | ModRM, em_bsf_c),
4507 I(DstReg | SrcMem | ModRM, em_bsr_c),
4508 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4510 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4511 N, ID(0, &instr_dual_0f_c3),
4512 N, N, N, GD(0, &group9),
4514 X8(I(DstReg, em_bswap)),
4516 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4518 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4519 N, N, N, N, N, N, N, N,
4521 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
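/*
 * 0F 38 F0/F1 helpers. As laid out here, only the no-prefix, memory-operand
 * MOVBE forms are wired to em_movbe; the register forms and the prefixed
 * encodings (e.g. the F2-prefixed CRC32 variants) are left as N and are not
 * emulated.
 */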
4524 static const struct instr_dual instr_dual_0f_38_f0 = {
4525 I(DstReg | SrcMem | Mov, em_movbe), N
4528 static const struct instr_dual instr_dual_0f_38_f1 = {
4529 I(DstMem | SrcReg | Mov, em_movbe), N
4532 static const struct gprefix three_byte_0f_38_f0 = {
4533 ID(0, &instr_dual_0f_38_f0), N, N, N
4536 static const struct gprefix three_byte_0f_38_f1 = {
4537 ID(0, &instr_dual_0f_38_f1), N, N, N
4541 * Insns below are selected by the prefix, which is indexed by the third opcode byte.
4544 static const struct opcode opcode_map_0f_38[256] = {
4546 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4548 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4550 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4551 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
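/*
 * Immediate operands: imm_size() derives the immediate width from ByteOp
 * and the current operand size, and decode_imm() fetches that many bytes
 * from the instruction stream at ->_eip, sign- or zero-extending the result
 * as requested by the caller.
 */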
4572 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4576 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4582 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4583 unsigned size, bool sign_extension)
4585 int rc = X86EMUL_CONTINUE;
4589 op->addr.mem.ea = ctxt->_eip;
4590 /* NB. Immediates are sign-extended as necessary. */
4591 switch (op->bytes) {
4593 op->val = insn_fetch(s8, ctxt);
4596 op->val = insn_fetch(s16, ctxt);
4599 op->val = insn_fetch(s32, ctxt);
4602 op->val = insn_fetch(s64, ctxt);
4605 if (!sign_extension) {
4606 switch (op->bytes) {
4614 op->val &= 0xffffffff;
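/*
 * decode_operand() turns one of the OpXXX selectors packed into ctxt->d
 * (the Dst/Src/Src2 fields) into a concrete struct operand: a register
 * pointer, a segmented memory address, or an immediate fetched via
 * decode_imm().
 */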
4622 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4625 int rc = X86EMUL_CONTINUE;
4629 decode_register_operand(ctxt, op);
4632 rc = decode_imm(ctxt, op, 1, false);
4635 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4639 if (ctxt->d & BitOp)
4640 fetch_bit_operand(ctxt);
4641 op->orig_val = op->val;
4644 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4648 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4649 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4650 fetch_register_operand(op);
4651 op->orig_val = op->val;
4655 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4656 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4657 fetch_register_operand(op);
4658 op->orig_val = op->val;
4661 if (ctxt->d & ByteOp) {
4666 op->bytes = ctxt->op_bytes;
4667 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4668 fetch_register_operand(op);
4669 op->orig_val = op->val;
4673 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4675 register_address(ctxt, VCPU_REGS_RDI);
4676 op->addr.mem.seg = VCPU_SREG_ES;
4683 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4684 fetch_register_operand(op);
4689 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4692 rc = decode_imm(ctxt, op, 1, true);
4700 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4703 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4706 ctxt->memop.bytes = 1;
4707 if (ctxt->memop.type == OP_REG) {
4708 ctxt->memop.addr.reg = decode_register(ctxt,
4709 ctxt->modrm_rm, true);
4710 fetch_register_operand(&ctxt->memop);
4714 ctxt->memop.bytes = 2;
4717 ctxt->memop.bytes = 4;
4720 rc = decode_imm(ctxt, op, 2, false);
4723 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4727 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4729 register_address(ctxt, VCPU_REGS_RSI);
4730 op->addr.mem.seg = ctxt->seg_override;
4736 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4739 reg_read(ctxt, VCPU_REGS_RBX) +
4740 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4741 op->addr.mem.seg = ctxt->seg_override;
4746 op->addr.mem.ea = ctxt->_eip;
4747 op->bytes = ctxt->op_bytes + 2;
4748 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4751 ctxt->memop.bytes = ctxt->op_bytes + 2;
4755 op->val = VCPU_SREG_ES;
4759 op->val = VCPU_SREG_CS;
4763 op->val = VCPU_SREG_SS;
4767 op->val = VCPU_SREG_DS;
4771 op->val = VCPU_SREG_FS;
4775 op->val = VCPU_SREG_GS;
4778 /* Special instructions do their own operand decoding. */
4780 op->type = OP_NONE; /* Disable writeback. */
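/*
 * x86_decode_insn() is the decode front end: it copies the instruction
 * bytes, consumes legacy/REX prefixes, looks the opcode up in the tables
 * above, walks the Group/GroupDual/Prefix/Escape/InstrDual/ModeDual
 * indirections, and finally decodes the three operands.  A minimal sketch
 * of how a caller is expected to drive the emulator (hypothetical caller
 * code, not taken from this file):
 *
 *	r = x86_decode_insn(ctxt, insn_bytes, insn_len);
 *	if (r == EMULATION_OK)
 *		r = x86_emulate_insn(ctxt);
 *	// r is then EMULATION_OK, EMULATION_RESTART (string insn to be
 *	// re-entered), EMULATION_INTERCEPTED or EMULATION_FAILED.
 */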
4788 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4790 int rc = X86EMUL_CONTINUE;
4791 int mode = ctxt->mode;
4792 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4793 bool op_prefix = false;
4794 bool has_seg_override = false;
4795 struct opcode opcode;
4797 ctxt->memop.type = OP_NONE;
4798 ctxt->memopp = NULL;
4799 ctxt->_eip = ctxt->eip;
4800 ctxt->fetch.ptr = ctxt->fetch.data;
4801 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4802 ctxt->opcode_len = 1;
4804 memcpy(ctxt->fetch.data, insn, insn_len);
4806 rc = __do_insn_fetch_bytes(ctxt, 1);
4807 if (rc != X86EMUL_CONTINUE)
4812 case X86EMUL_MODE_REAL:
4813 case X86EMUL_MODE_VM86:
4814 case X86EMUL_MODE_PROT16:
4815 def_op_bytes = def_ad_bytes = 2;
4817 case X86EMUL_MODE_PROT32:
4818 def_op_bytes = def_ad_bytes = 4;
4820 #ifdef CONFIG_X86_64
4821 case X86EMUL_MODE_PROT64:
4827 return EMULATION_FAILED;
4830 ctxt->op_bytes = def_op_bytes;
4831 ctxt->ad_bytes = def_ad_bytes;
4833 /* Legacy prefixes. */
4835 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4836 case 0x66: /* operand-size override */
4838 /* switch between 2/4 bytes */
4839 ctxt->op_bytes = def_op_bytes ^ 6;
4841 case 0x67: /* address-size override */
4842 if (mode == X86EMUL_MODE_PROT64)
4843 /* switch between 4/8 bytes */
4844 ctxt->ad_bytes = def_ad_bytes ^ 12;
4846 /* switch between 2/4 bytes */
4847 ctxt->ad_bytes = def_ad_bytes ^ 6;
4849 case 0x26: /* ES override */
4850 case 0x2e: /* CS override */
4851 case 0x36: /* SS override */
4852 case 0x3e: /* DS override */
4853 has_seg_override = true;
4854 ctxt->seg_override = (ctxt->b >> 3) & 3;
4856 case 0x64: /* FS override */
4857 case 0x65: /* GS override */
4858 has_seg_override = true;
4859 ctxt->seg_override = ctxt->b & 7;
4861 case 0x40 ... 0x4f: /* REX */
4862 if (mode != X86EMUL_MODE_PROT64)
4864 ctxt->rex_prefix = ctxt->b;
4866 case 0xf0: /* LOCK */
4867 ctxt->lock_prefix = 1;
4869 case 0xf2: /* REPNE/REPNZ */
4870 case 0xf3: /* REP/REPE/REPZ */
4871 ctxt->rep_prefix = ctxt->b;
4877 /* Any legacy prefix after a REX prefix nullifies its effect. */
4879 ctxt->rex_prefix = 0;
4885 if (ctxt->rex_prefix & 8)
4886 ctxt->op_bytes = 8; /* REX.W */
4888 /* Opcode byte(s). */
4889 opcode = opcode_table[ctxt->b];
4890 /* Two-byte opcode? */
4891 if (ctxt->b == 0x0f) {
4892 ctxt->opcode_len = 2;
4893 ctxt->b = insn_fetch(u8, ctxt);
4894 opcode = twobyte_table[ctxt->b];
4896 /* 0F_38 opcode map */
4897 if (ctxt->b == 0x38) {
4898 ctxt->opcode_len = 3;
4899 ctxt->b = insn_fetch(u8, ctxt);
4900 opcode = opcode_map_0f_38[ctxt->b];
4903 ctxt->d = opcode.flags;
4905 if (ctxt->d & ModRM)
4906 ctxt->modrm = insn_fetch(u8, ctxt);
4908 /* vex-prefix instructions are not implemented */
4909 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4910 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4914 while (ctxt->d & GroupMask) {
4915 switch (ctxt->d & GroupMask) {
4917 goffset = (ctxt->modrm >> 3) & 7;
4918 opcode = opcode.u.group[goffset];
4921 goffset = (ctxt->modrm >> 3) & 7;
4922 if ((ctxt->modrm >> 6) == 3)
4923 opcode = opcode.u.gdual->mod3[goffset];
4925 opcode = opcode.u.gdual->mod012[goffset];
4928 goffset = ctxt->modrm & 7;
4929 opcode = opcode.u.group[goffset];
4932 if (ctxt->rep_prefix && op_prefix)
4933 return EMULATION_FAILED;
4934 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4935 switch (simd_prefix) {
4936 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4937 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4938 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4939 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4943 if (ctxt->modrm > 0xbf)
4944 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4946 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4949 if ((ctxt->modrm >> 6) == 3)
4950 opcode = opcode.u.idual->mod3;
4952 opcode = opcode.u.idual->mod012;
4955 if (ctxt->mode == X86EMUL_MODE_PROT64)
4956 opcode = opcode.u.mdual->mode64;
4958 opcode = opcode.u.mdual->mode32;
4961 return EMULATION_FAILED;
4964 ctxt->d &= ~(u64)GroupMask;
4965 ctxt->d |= opcode.flags;
4970 return EMULATION_FAILED;
4972 ctxt->execute = opcode.u.execute;
4974 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4975 return EMULATION_FAILED;
4977 if (unlikely(ctxt->d &
4978 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4981 * These are copied unconditionally here, and checked unconditionally
4982 * in x86_emulate_insn.
4984 ctxt->check_perm = opcode.check_perm;
4985 ctxt->intercept = opcode.intercept;
4987 if (ctxt->d & NotImpl)
4988 return EMULATION_FAILED;
4990 if (mode == X86EMUL_MODE_PROT64) {
4991 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4993 else if (ctxt->d & NearBranch)
4997 if (ctxt->d & Op3264) {
4998 if (mode == X86EMUL_MODE_PROT64)
5004 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5008 ctxt->op_bytes = 16;
5009 else if (ctxt->d & Mmx)
5013 /* ModRM and SIB bytes. */
5014 if (ctxt->d & ModRM) {
5015 rc = decode_modrm(ctxt, &ctxt->memop);
5016 if (!has_seg_override) {
5017 has_seg_override = true;
5018 ctxt->seg_override = ctxt->modrm_seg;
5020 } else if (ctxt->d & MemAbs)
5021 rc = decode_abs(ctxt, &ctxt->memop);
5022 if (rc != X86EMUL_CONTINUE)
5025 if (!has_seg_override)
5026 ctxt->seg_override = VCPU_SREG_DS;
5028 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5031 * Decode and fetch the source operand: register, memory
5034 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5035 if (rc != X86EMUL_CONTINUE)
5039 * Decode and fetch the second source operand: register, memory
5042 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5043 if (rc != X86EMUL_CONTINUE)
5046 /* Decode and fetch the destination operand: register or memory. */
5047 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5049 if (ctxt->rip_relative)
5050 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5051 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5054 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
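/*
 * x86_page_table_writing_insn() simply reports the PageTable flag attached
 * to the decoded entry (MOV/XCHG/CMPXCHG and the locked bit ops above), so
 * that callers outside the emulator, e.g. the page-fault retry logic, can
 * tell whether the instruction may be writing a guest page table.
 */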
5057 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5059 return ctxt->d & PageTable;
5062 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5064 /* The second termination condition only applies for REPE
5065 * and REPNE. Test if the repeat string operation prefix is
5066 * REPE/REPZ or REPNE/REPNZ and, if it is, check the
5067 * corresponding termination condition:
5068 * - if REPE/REPZ and ZF = 0 then done
5069 * - if REPNE/REPNZ and ZF = 1 then done
5071 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5072 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5073 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5074 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5075 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5076 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
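/*
 * flush_pending_x87_faults() runs fwait under an exception fixup: if a
 * deferred x87 fault fires, the fixup sets 'fault' and the pending #MF is
 * delivered to the guest instead of being taken on the host.
 */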
5082 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5086 ctxt->ops->get_fpu(ctxt);
5087 asm volatile("1: fwait \n\t"
5089 ".pushsection .fixup,\"ax\" \n\t"
5091 "movb $1, %[fault] \n\t"
5094 _ASM_EXTABLE(1b, 3b)
5095 : [fault]"+qm"(fault));
5096 ctxt->ops->put_fpu(ctxt);
5098 if (unlikely(fault))
5099 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5101 return X86EMUL_CONTINUE;
5104 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5107 if (op->type == OP_MM)
5108 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
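/*
 * fastop() calls into a table of flag-producing stubs: 'fop' points at the
 * byte-sized variant and each larger operand size lives FASTOP_SIZE bytes
 * further on, so __ffs(dst.bytes) picks the 2/4/8-byte entry.  Guest RFLAGS
 * are injected and recovered around the call with push/popf and pushf/pop.
 */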
5111 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5113 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5114 if (!(ctxt->d & ByteOp))
5115 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5116 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
5117 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5119 : "c"(ctxt->src2.val));
5120 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5121 if (!fop) /* exception is returned in fop variable */
5122 return emulate_de(ctxt);
5123 return X86EMUL_CONTINUE;
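/*
 * init_decode_cache() clears the per-instruction decode state in one go by
 * zeroing the ctxt fields from ->rip_relative up to (but not including)
 * ->modrm, then resets the I/O and memory read caches explicitly.
 */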
5126 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5128 memset(&ctxt->rip_relative, 0,
5129 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5131 ctxt->io_read.pos = 0;
5132 ctxt->io_read.end = 0;
5133 ctxt->mem_read.end = 0;
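/*
 * x86_emulate_insn() is the execution back end for an already-decoded
 * instruction: legality and privilege checks, the three guest intercept
 * points (PRE_EXCEPT, POST_EXCEPT, POST_MEMACCESS), operand memory reads,
 * dispatch through ->execute/fastop or the inline one-/two-byte special
 * cases, then writeback, string-instruction bookkeeping and the RIP update.
 */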
5136 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5138 const struct x86_emulate_ops *ops = ctxt->ops;
5139 int rc = X86EMUL_CONTINUE;
5140 int saved_dst_type = ctxt->dst.type;
5142 ctxt->mem_read.pos = 0;
5144 /* LOCK prefix is allowed only with some instructions */
5145 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5146 rc = emulate_ud(ctxt);
5150 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5151 rc = emulate_ud(ctxt);
5155 if (unlikely(ctxt->d &
5156 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5157 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5158 (ctxt->d & Undefined)) {
5159 rc = emulate_ud(ctxt);
5163 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5164 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5165 rc = emulate_ud(ctxt);
5169 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5170 rc = emulate_nm(ctxt);
5174 if (ctxt->d & Mmx) {
5175 rc = flush_pending_x87_faults(ctxt);
5176 if (rc != X86EMUL_CONTINUE)
5179 * Now that we know the fpu is exception safe, we can fetch
5182 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5183 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5184 if (!(ctxt->d & Mov))
5185 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5188 if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5189 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5190 X86_ICPT_PRE_EXCEPT);
5191 if (rc != X86EMUL_CONTINUE)
5195 /* Instruction can only be executed in protected mode */
5196 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5197 rc = emulate_ud(ctxt);
5201 /* Privileged instruction can be executed only in CPL=0 */
5202 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5203 if (ctxt->d & PrivUD)
5204 rc = emulate_ud(ctxt);
5206 rc = emulate_gp(ctxt, 0);
5210 /* Do instruction specific permission checks */
5211 if (ctxt->d & CheckPerm) {
5212 rc = ctxt->check_perm(ctxt);
5213 if (rc != X86EMUL_CONTINUE)
5217 if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5218 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5219 X86_ICPT_POST_EXCEPT);
5220 if (rc != X86EMUL_CONTINUE)
5224 if (ctxt->rep_prefix && (ctxt->d & String)) {
5225 /* All REP prefixes have the same first termination condition */
5226 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5227 string_registers_quirk(ctxt);
5228 ctxt->eip = ctxt->_eip;
5229 ctxt->eflags &= ~X86_EFLAGS_RF;
5235 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5236 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5237 ctxt->src.valptr, ctxt->src.bytes);
5238 if (rc != X86EMUL_CONTINUE)
5240 ctxt->src.orig_val64 = ctxt->src.val64;
5243 if (ctxt->src2.type == OP_MEM) {
5244 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5245 &ctxt->src2.val, ctxt->src2.bytes);
5246 if (rc != X86EMUL_CONTINUE)
5250 if ((ctxt->d & DstMask) == ImplicitOps)
5254 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5255 /* optimisation - avoid slow emulated read if Mov */
5256 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5257 &ctxt->dst.val, ctxt->dst.bytes);
5258 if (rc != X86EMUL_CONTINUE) {
5259 if (!(ctxt->d & NoWrite) &&
5260 rc == X86EMUL_PROPAGATE_FAULT &&
5261 ctxt->exception.vector == PF_VECTOR)
5262 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5266 /* Copy full 64-bit value for CMPXCHG8B. */
5267 ctxt->dst.orig_val64 = ctxt->dst.val64;
5271 if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5272 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5273 X86_ICPT_POST_MEMACCESS);
5274 if (rc != X86EMUL_CONTINUE)
5278 if (ctxt->rep_prefix && (ctxt->d & String))
5279 ctxt->eflags |= X86_EFLAGS_RF;
5281 ctxt->eflags &= ~X86_EFLAGS_RF;
5283 if (ctxt->execute) {
5284 if (ctxt->d & Fastop) {
5285 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5286 rc = fastop(ctxt, fop);
5287 if (rc != X86EMUL_CONTINUE)
5291 rc = ctxt->execute(ctxt);
5292 if (rc != X86EMUL_CONTINUE)
5297 if (ctxt->opcode_len == 2)
5299 else if (ctxt->opcode_len == 3)
5300 goto threebyte_insn;
5303 case 0x70 ... 0x7f: /* jcc (short) */
5304 if (test_cc(ctxt->b, ctxt->eflags))
5305 rc = jmp_rel(ctxt, ctxt->src.val);
5307 case 0x8d: /* lea r16/r32, m */
5308 ctxt->dst.val = ctxt->src.addr.mem.ea;
5310 case 0x90 ... 0x97: /* nop / xchg reg, rax */
5311 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5312 ctxt->dst.type = OP_NONE;
5316 case 0x98: /* cbw/cwde/cdqe */
5317 switch (ctxt->op_bytes) {
5318 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5319 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5320 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5323 case 0xcc: /* int3 */
5324 rc = emulate_int(ctxt, 3);
5326 case 0xcd: /* int n */
5327 rc = emulate_int(ctxt, ctxt->src.val);
5329 case 0xce: /* into */
5330 if (ctxt->eflags & X86_EFLAGS_OF)
5331 rc = emulate_int(ctxt, 4);
5333 case 0xe9: /* jmp rel */
5334 case 0xeb: /* jmp rel short */
5335 rc = jmp_rel(ctxt, ctxt->src.val);
5336 ctxt->dst.type = OP_NONE; /* Disable writeback. */
5338 case 0xf4: /* hlt */
5339 ctxt->ops->halt(ctxt);
5341 case 0xf5: /* cmc */
5342 /* complement carry flag from eflags reg */
5343 ctxt->eflags ^= X86_EFLAGS_CF;
5345 case 0xf8: /* clc */
5346 ctxt->eflags &= ~X86_EFLAGS_CF;
5348 case 0xf9: /* stc */
5349 ctxt->eflags |= X86_EFLAGS_CF;
5351 case 0xfc: /* cld */
5352 ctxt->eflags &= ~X86_EFLAGS_DF;
5354 case 0xfd: /* std */
5355 ctxt->eflags |= X86_EFLAGS_DF;
5358 goto cannot_emulate;
5361 if (rc != X86EMUL_CONTINUE)
5365 if (ctxt->d & SrcWrite) {
5366 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5367 rc = writeback(ctxt, &ctxt->src);
5368 if (rc != X86EMUL_CONTINUE)
5371 if (!(ctxt->d & NoWrite)) {
5372 rc = writeback(ctxt, &ctxt->dst);
5373 if (rc != X86EMUL_CONTINUE)
5378 * restore dst type in case the decoding will be reused
5379 * (happens for string instructions)
5381 ctxt->dst.type = saved_dst_type;
5383 if ((ctxt->d & SrcMask) == SrcSI)
5384 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5386 if ((ctxt->d & DstMask) == DstDI)
5387 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5389 if (ctxt->rep_prefix && (ctxt->d & String)) {
5391 struct read_cache *r = &ctxt->io_read;
5392 if ((ctxt->d & SrcMask) == SrcSI)
5393 count = ctxt->src.count;
5395 count = ctxt->dst.count;
5396 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5398 if (!string_insn_completed(ctxt)) {
5400 * Re-enter guest when pio read ahead buffer is empty
5401 * or, if it is not used, after each 1024 iteration.
5403 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5404 (r->end == 0 || r->end != r->pos)) {
5406 * Reset read cache. Usually happens before
5407 * decode, but since instruction is restarted
5408 * we have to do it here.
5410 ctxt->mem_read.end = 0;
5411 writeback_registers(ctxt);
5412 return EMULATION_RESTART;
5414 goto done; /* skip rip writeback */
5416 ctxt->eflags &= ~X86_EFLAGS_RF;
5419 ctxt->eip = ctxt->_eip;
5422 if (rc == X86EMUL_PROPAGATE_FAULT) {
5423 WARN_ON(ctxt->exception.vector > 0x1f);
5424 ctxt->have_exception = true;
5426 if (rc == X86EMUL_INTERCEPTED)
5427 return EMULATION_INTERCEPTED;
5429 if (rc == X86EMUL_CONTINUE)
5430 writeback_registers(ctxt);
5432 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5436 case 0x09: /* wbinvd */
5437 (ctxt->ops->wbinvd)(ctxt);
5439 case 0x08: /* invd */
5440 case 0x0d: /* GrpP (prefetch) */
5441 case 0x18: /* Grp16 (prefetch/nop) */
5442 case 0x1f: /* nop */
5444 case 0x20: /* mov cr, reg */
5445 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5447 case 0x21: /* mov from dr to reg */
5448 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5450 case 0x40 ... 0x4f: /* cmov */
5451 if (test_cc(ctxt->b, ctxt->eflags))
5452 ctxt->dst.val = ctxt->src.val;
5453 else if (ctxt->op_bytes != 4)
5454 ctxt->dst.type = OP_NONE; /* no writeback */
5456 case 0x80 ... 0x8f: /* jnz rel, etc*/
5457 if (test_cc(ctxt->b, ctxt->eflags))
5458 rc = jmp_rel(ctxt, ctxt->src.val);
5460 case 0x90 ... 0x9f: /* setcc r/m8 */
5461 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5463 case 0xb6 ... 0xb7: /* movzx */
5464 ctxt->dst.bytes = ctxt->op_bytes;
5465 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5466 : (u16) ctxt->src.val;
5468 case 0xbe ... 0xbf: /* movsx */
5469 ctxt->dst.bytes = ctxt->op_bytes;
5470 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5471 (s16) ctxt->src.val;
5474 goto cannot_emulate;
5479 if (rc != X86EMUL_CONTINUE)
5485 return EMULATION_FAILED;
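/*
 * The two helpers below expose the emulator's register cache to the rest of
 * KVM: one invalidates the cached GPR copies so they are re-read from the
 * vcpu, the other flushes dirtied values back (the same writeback_registers()
 * used on the successful-emulation path above).
 */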
5488 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5490 invalidate_registers(ctxt);
5493 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5495 writeback_registers(ctxt);