/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
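
/*
 * Illustrative note (not part of the original source): each operand slot
 * declared below (Dst*, Src*, Src2*) stores one of the Op* values above in
 * a 5-bit field of the 64-bit opcode flags word, so the decoder can recover
 * a field with a shift and mask, e.g.:
 *
 *	u64 dst_type = (ctxt->d >> DstShift) & OpMask;
 *	u64 src_type = (ctxt->d >> SrcShift) & OpMask;
 */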
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
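
/*
 * Minimal sketch (assumes the register convention above and FASTOP_SIZE
 * alignment; the actual dispatch is done by the forward-declared fastop()
 * helper): the entry point for a given operand size is reached by pointer
 * arithmetic on the em_<op> symbol rather than a jump table, e.g.
 *
 *	void (*fop)(struct fastop *) = em_add;       hypothetical table entry
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; 1/2/4/8 -> stub 0/1/2/3
 */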
struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};
struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}
static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}
static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}
static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}
static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
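
/*
 * Usage sketch (illustrative, not from the original file): the _regs[]
 * cache above lets the emulator batch guest register accesses instead of
 * going through ->read_gpr/->write_gpr on every touch; a caller does
 *
 *	*reg_write(ctxt, VCPU_REGS_RAX) = 0;	marks RAX valid and dirty
 *	writeback_registers(ctxt);		flushes only the dirty ones
 */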
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}
static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}
static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}
static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}
static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}
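
/*
 * Worked example (illustrative): with a 16-bit address size ad_mask() is
 * 0xffff, so register_address_increment() wraps the register the way a
 * real 16-bit string instruction would:
 *
 *	si = 0xffff;
 *	register_address_increment(ctxt, &si, 1);	now si == 0x0000
 */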
static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	register_address_increment(ctxt, &ctxt->_eip, rel);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
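
/*
 * Example: with the granularity bit set, a raw limit of 0xfffff scales to
 * 0xffffffff (page-granular, 4GB); with g == 0 the 20-bit byte-granular
 * limit is used as is.
 */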
static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
	ctxt->has_seg_override = true;
	ctxt->seg_override = seg;
}
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->has_seg_override)
		return 0;

	return ctxt->seg_override;
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}
static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}
static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
static int __linearize(struct x86_emulate_ctxt *ctxt,
		       struct segmented_address addr,
		       unsigned size, bool write, bool fetch,
		       ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, sel);
	else
		return emulate_gp(ctxt, sel);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}
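
/*
 * Usage sketch (hypothetical caller, mirroring the accessors below):
 * convert a seg:offset operand into a linear address before touching guest
 * memory, propagating any #GP/#SS as an emulated fault:
 *
 *	ulong linear;
 *
 *	rc = linearize(ctxt, addr, size, false, &linear);
 *	if (rc != X86EMUL_CONTINUE)
 *		return rc;
 */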
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
 * Fetch the next byte of the instruction being emulated which is pointed to
 * by ctxt->_eip, then increment ctxt->_eip.
 *
 * Also prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->fetch;
	int rc;
	int size, cur_size;

	if (ctxt->_eip == fc->end) {
		unsigned long linear;
		struct segmented_address addr = { .seg = VCPU_SREG_CS,
						  .ea  = fc->end };
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size,
			   PAGE_SIZE - offset_in_page(ctxt->_eip));
		rc = __linearize(ctxt, addr, size, false, true, &linear);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
				      size, &ctxt->exception);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		fc->end += size;
	}
	*dest = fc->data[ctxt->_eip - fc->start];
	ctxt->_eip++;
	return X86EMUL_CONTINUE;
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_insn_fetch_byte(ctxt, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({	rc = do_insn_fetch(_ctxt, _arr, (_size));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
})
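
/*
 * Usage sketch: both macros expand inside a decoder that provides a local
 * 'rc' and a 'done:' label, e.g. pulling an 8-bit displacement:
 *
 *	modrm_ea += insn_fetch(s8, ctxt);	may 'goto done' on a fault
 */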
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);
static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
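
/*
 * Note (illustrative): FOP_SETCC pads every setcc stub to 4 bytes, which
 * is why the table is indexed as em_setcc + 4 * (condition & 0xf); e.g.
 * condition 0x4 lands on the "sete %al; ret" stub (ZF set).
 */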
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}
static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}
static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}
static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (ctxt->rex_prefix) {
		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
	}

	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}
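
/*
 * Worked example (not from the original source): for the 32-bit form
 * "add %eax, 0x10(%ebx,%ecx,4)" the bytes are 01 44 8b 10, i.e. mod=01,
 * reg=000 (EAX), rm=100 (SIB present), SIB scale factor 4, index=ECX,
 * base=EBX, followed by an 8-bit displacement, so the code above computes
 * modrm_ea = EBX + (ECX << 2) + 0x10.
 */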
1197 static int decode_abs(struct x86_emulate_ctxt
*ctxt
,
1200 int rc
= X86EMUL_CONTINUE
;
1203 switch (ctxt
->ad_bytes
) {
1205 op
->addr
.mem
.ea
= insn_fetch(u16
, ctxt
);
1208 op
->addr
.mem
.ea
= insn_fetch(u32
, ctxt
);
1211 op
->addr
.mem
.ea
= insn_fetch(u64
, ctxt
);
1218 static void fetch_bit_operand(struct x86_emulate_ctxt
*ctxt
)
1222 if (ctxt
->dst
.type
== OP_MEM
&& ctxt
->src
.type
== OP_REG
) {
1223 mask
= ~((long)ctxt
->dst
.bytes
* 8 - 1);
1225 if (ctxt
->src
.bytes
== 2)
1226 sv
= (s16
)ctxt
->src
.val
& (s16
)mask
;
1227 else if (ctxt
->src
.bytes
== 4)
1228 sv
= (s32
)ctxt
->src
.val
& (s32
)mask
;
1230 sv
= (s64
)ctxt
->src
.val
& (s64
)mask
;
1232 ctxt
->dst
.addr
.mem
.ea
+= (sv
>> 3);
1235 /* only subword offset */
1236 ctxt
->src
.val
&= (ctxt
->dst
.bytes
<< 3) - 1;
1239 static int read_emulated(struct x86_emulate_ctxt
*ctxt
,
1240 unsigned long addr
, void *dest
, unsigned size
)
1243 struct read_cache
*mc
= &ctxt
->mem_read
;
1245 if (mc
->pos
< mc
->end
)
1248 WARN_ON((mc
->end
+ size
) >= sizeof(mc
->data
));
1250 rc
= ctxt
->ops
->read_emulated(ctxt
, addr
, mc
->data
+ mc
->end
, size
,
1252 if (rc
!= X86EMUL_CONTINUE
)
1258 memcpy(dest
, mc
->data
+ mc
->pos
, size
);
1260 return X86EMUL_CONTINUE
;
1263 static int segmented_read(struct x86_emulate_ctxt
*ctxt
,
1264 struct segmented_address addr
,
1271 rc
= linearize(ctxt
, addr
, size
, false, &linear
);
1272 if (rc
!= X86EMUL_CONTINUE
)
1274 return read_emulated(ctxt
, linear
, data
, size
);
1277 static int segmented_write(struct x86_emulate_ctxt
*ctxt
,
1278 struct segmented_address addr
,
1285 rc
= linearize(ctxt
, addr
, size
, true, &linear
);
1286 if (rc
!= X86EMUL_CONTINUE
)
1288 return ctxt
->ops
->write_emulated(ctxt
, linear
, data
, size
,
1292 static int segmented_cmpxchg(struct x86_emulate_ctxt
*ctxt
,
1293 struct segmented_address addr
,
1294 const void *orig_data
, const void *data
,
1300 rc
= linearize(ctxt
, addr
, size
, true, &linear
);
1301 if (rc
!= X86EMUL_CONTINUE
)
1303 return ctxt
->ops
->cmpxchg_emulated(ctxt
, linear
, orig_data
, data
,
1304 size
, &ctxt
->exception
);
1307 static int pio_in_emulated(struct x86_emulate_ctxt
*ctxt
,
1308 unsigned int size
, unsigned short port
,
1311 struct read_cache
*rc
= &ctxt
->io_read
;
1313 if (rc
->pos
== rc
->end
) { /* refill pio read ahead */
1314 unsigned int in_page
, n
;
1315 unsigned int count
= ctxt
->rep_prefix
?
1316 address_mask(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
)) : 1;
1317 in_page
= (ctxt
->eflags
& EFLG_DF
) ?
1318 offset_in_page(reg_read(ctxt
, VCPU_REGS_RDI
)) :
1319 PAGE_SIZE
- offset_in_page(reg_read(ctxt
, VCPU_REGS_RDI
));
1320 n
= min(min(in_page
, (unsigned int)sizeof(rc
->data
)) / size
,
1324 rc
->pos
= rc
->end
= 0;
1325 if (!ctxt
->ops
->pio_in_emulated(ctxt
, size
, port
, rc
->data
, n
))
1330 if (ctxt
->rep_prefix
&& (ctxt
->d
& String
) &&
1331 !(ctxt
->eflags
& EFLG_DF
)) {
1332 ctxt
->dst
.data
= rc
->data
+ rc
->pos
;
1333 ctxt
->dst
.type
= OP_MEM_STR
;
1334 ctxt
->dst
.count
= (rc
->end
- rc
->pos
) / size
;
1337 memcpy(dest
, rc
->data
+ rc
->pos
, size
);
1343 static int read_interrupt_descriptor(struct x86_emulate_ctxt
*ctxt
,
1344 u16 index
, struct desc_struct
*desc
)
1349 ctxt
->ops
->get_idt(ctxt
, &dt
);
1351 if (dt
.size
< index
* 8 + 7)
1352 return emulate_gp(ctxt
, index
<< 3 | 0x2);
1354 addr
= dt
.address
+ index
* 8;
1355 return ctxt
->ops
->read_std(ctxt
, addr
, desc
, sizeof *desc
,
1359 static void get_descriptor_table_ptr(struct x86_emulate_ctxt
*ctxt
,
1360 u16 selector
, struct desc_ptr
*dt
)
1362 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
1365 if (selector
& 1 << 2) {
1366 struct desc_struct desc
;
1369 memset (dt
, 0, sizeof *dt
);
1370 if (!ops
->get_segment(ctxt
, &sel
, &desc
, &base3
,
1374 dt
->size
= desc_limit_scaled(&desc
); /* what if limit > 65535? */
1375 dt
->address
= get_desc_base(&desc
) | ((u64
)base3
<< 32);
1377 ops
->get_gdt(ctxt
, dt
);
1380 /* allowed just for 8 bytes segments */
1381 static int read_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1382 u16 selector
, struct desc_struct
*desc
,
1386 u16 index
= selector
>> 3;
1389 get_descriptor_table_ptr(ctxt
, selector
, &dt
);
1391 if (dt
.size
< index
* 8 + 7)
1392 return emulate_gp(ctxt
, selector
& 0xfffc);
1394 *desc_addr_p
= addr
= dt
.address
+ index
* 8;
1395 return ctxt
->ops
->read_std(ctxt
, addr
, desc
, sizeof *desc
,
1399 /* allowed just for 8 bytes segments */
1400 static int write_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1401 u16 selector
, struct desc_struct
*desc
)
1404 u16 index
= selector
>> 3;
1407 get_descriptor_table_ptr(ctxt
, selector
, &dt
);
1409 if (dt
.size
< index
* 8 + 7)
1410 return emulate_gp(ctxt
, selector
& 0xfffc);
1412 addr
= dt
.address
+ index
* 8;
1413 return ctxt
->ops
->write_std(ctxt
, addr
, desc
, sizeof *desc
,
1417 /* Does not support long mode */
1418 static int __load_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1419 u16 selector
, int seg
, u8 cpl
, bool in_task_switch
)
1421 struct desc_struct seg_desc
, old_desc
;
1423 unsigned err_vec
= GP_VECTOR
;
1425 bool null_selector
= !(selector
& ~0x3); /* 0000-0003 are null */
1431 memset(&seg_desc
, 0, sizeof seg_desc
);
1433 if (ctxt
->mode
== X86EMUL_MODE_REAL
) {
1434 /* set real mode segment descriptor (keep limit etc. for
1436 ctxt
->ops
->get_segment(ctxt
, &dummy
, &seg_desc
, NULL
, seg
);
1437 set_desc_base(&seg_desc
, selector
<< 4);
1439 } else if (seg
<= VCPU_SREG_GS
&& ctxt
->mode
== X86EMUL_MODE_VM86
) {
1440 /* VM86 needs a clean new segment descriptor */
1441 set_desc_base(&seg_desc
, selector
<< 4);
1442 set_desc_limit(&seg_desc
, 0xffff);
1452 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1453 if ((seg
== VCPU_SREG_CS
1454 || (seg
== VCPU_SREG_SS
1455 && (ctxt
->mode
!= X86EMUL_MODE_PROT64
|| rpl
!= cpl
))
1456 || seg
== VCPU_SREG_TR
)
1460 /* TR should be in GDT only */
1461 if (seg
== VCPU_SREG_TR
&& (selector
& (1 << 2)))
1464 if (null_selector
) /* for NULL selector skip all following checks */
1467 ret
= read_segment_descriptor(ctxt
, selector
, &seg_desc
, &desc_addr
);
1468 if (ret
!= X86EMUL_CONTINUE
)
1471 err_code
= selector
& 0xfffc;
1472 err_vec
= GP_VECTOR
;
1474 /* can't load system descriptor into segment selector */
1475 if (seg
<= VCPU_SREG_GS
&& !seg_desc
.s
)
1479 err_vec
= (seg
== VCPU_SREG_SS
) ? SS_VECTOR
: NP_VECTOR
;
1488 * segment is not a writable data segment or segment
1489 * selector's RPL != CPL or segment selector's RPL != CPL
1491 if (rpl
!= cpl
|| (seg_desc
.type
& 0xa) != 0x2 || dpl
!= cpl
)
1495 if (in_task_switch
&& rpl
!= dpl
)
1498 if (!(seg_desc
.type
& 8))
1501 if (seg_desc
.type
& 4) {
1507 if (rpl
> cpl
|| dpl
!= cpl
)
1510 /* CS(RPL) <- CPL */
1511 selector
= (selector
& 0xfffc) | cpl
;
1514 if (seg_desc
.s
|| (seg_desc
.type
!= 1 && seg_desc
.type
!= 9))
1516 old_desc
= seg_desc
;
1517 seg_desc
.type
|= 2; /* busy */
1518 ret
= ctxt
->ops
->cmpxchg_emulated(ctxt
, desc_addr
, &old_desc
, &seg_desc
,
1519 sizeof(seg_desc
), &ctxt
->exception
);
1520 if (ret
!= X86EMUL_CONTINUE
)
1523 case VCPU_SREG_LDTR
:
1524 if (seg_desc
.s
|| seg_desc
.type
!= 2)
1527 default: /* DS, ES, FS, or GS */
1529 * segment is not a data or readable code segment or
1530 * ((segment is a data or nonconforming code segment)
1531 * and (both RPL and CPL > DPL))
1533 if ((seg_desc
.type
& 0xa) == 0x8 ||
1534 (((seg_desc
.type
& 0xc) != 0xc) &&
1535 (rpl
> dpl
&& cpl
> dpl
)))
1541 /* mark segment as accessed */
1543 ret
= write_segment_descriptor(ctxt
, selector
, &seg_desc
);
1544 if (ret
!= X86EMUL_CONTINUE
)
1546 } else if (ctxt
->mode
== X86EMUL_MODE_PROT64
) {
1547 ret
= ctxt
->ops
->read_std(ctxt
, desc_addr
+8, &base3
,
1548 sizeof(base3
), &ctxt
->exception
);
1549 if (ret
!= X86EMUL_CONTINUE
)
1553 ctxt
->ops
->set_segment(ctxt
, selector
, &seg_desc
, base3
, seg
);
1554 return X86EMUL_CONTINUE
;
1556 emulate_exception(ctxt
, err_vec
, err_code
, true);
1557 return X86EMUL_PROPAGATE_FAULT
;
1560 static int load_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1561 u16 selector
, int seg
)
1563 u8 cpl
= ctxt
->ops
->cpl(ctxt
);
1564 return __load_segment_descriptor(ctxt
, selector
, seg
, cpl
, false);
1567 static void write_register_operand(struct operand
*op
)
1569 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1570 switch (op
->bytes
) {
1572 *(u8
*)op
->addr
.reg
= (u8
)op
->val
;
1575 *(u16
*)op
->addr
.reg
= (u16
)op
->val
;
1578 *op
->addr
.reg
= (u32
)op
->val
;
1579 break; /* 64b: zero-extend */
1581 *op
->addr
.reg
= op
->val
;
1586 static int writeback(struct x86_emulate_ctxt
*ctxt
, struct operand
*op
)
1592 write_register_operand(op
);
1595 if (ctxt
->lock_prefix
)
1596 rc
= segmented_cmpxchg(ctxt
,
1602 rc
= segmented_write(ctxt
,
1606 if (rc
!= X86EMUL_CONTINUE
)
1610 rc
= segmented_write(ctxt
,
1613 op
->bytes
* op
->count
);
1614 if (rc
!= X86EMUL_CONTINUE
)
1618 write_sse_reg(ctxt
, &op
->vec_val
, op
->addr
.xmm
);
1621 write_mmx_reg(ctxt
, &op
->mm_val
, op
->addr
.mm
);
1629 return X86EMUL_CONTINUE
;
1632 static int push(struct x86_emulate_ctxt
*ctxt
, void *data
, int bytes
)
1634 struct segmented_address addr
;
1636 rsp_increment(ctxt
, -bytes
);
1637 addr
.ea
= reg_read(ctxt
, VCPU_REGS_RSP
) & stack_mask(ctxt
);
1638 addr
.seg
= VCPU_SREG_SS
;
1640 return segmented_write(ctxt
, addr
, data
, bytes
);
1643 static int em_push(struct x86_emulate_ctxt
*ctxt
)
1645 /* Disable writeback. */
1646 ctxt
->dst
.type
= OP_NONE
;
1647 return push(ctxt
, &ctxt
->src
.val
, ctxt
->op_bytes
);
1650 static int emulate_pop(struct x86_emulate_ctxt
*ctxt
,
1651 void *dest
, int len
)
1654 struct segmented_address addr
;
1656 addr
.ea
= reg_read(ctxt
, VCPU_REGS_RSP
) & stack_mask(ctxt
);
1657 addr
.seg
= VCPU_SREG_SS
;
1658 rc
= segmented_read(ctxt
, addr
, dest
, len
);
1659 if (rc
!= X86EMUL_CONTINUE
)
1662 rsp_increment(ctxt
, len
);
1666 static int em_pop(struct x86_emulate_ctxt
*ctxt
)
1668 return emulate_pop(ctxt
, &ctxt
->dst
.val
, ctxt
->op_bytes
);
1671 static int emulate_popf(struct x86_emulate_ctxt
*ctxt
,
1672 void *dest
, int len
)
1675 unsigned long val
, change_mask
;
1676 int iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> IOPL_SHIFT
;
1677 int cpl
= ctxt
->ops
->cpl(ctxt
);
1679 rc
= emulate_pop(ctxt
, &val
, len
);
1680 if (rc
!= X86EMUL_CONTINUE
)
1683 change_mask
= EFLG_CF
| EFLG_PF
| EFLG_AF
| EFLG_ZF
| EFLG_SF
| EFLG_OF
1684 | EFLG_TF
| EFLG_DF
| EFLG_NT
| EFLG_RF
| EFLG_AC
| EFLG_ID
;
1686 switch(ctxt
->mode
) {
1687 case X86EMUL_MODE_PROT64
:
1688 case X86EMUL_MODE_PROT32
:
1689 case X86EMUL_MODE_PROT16
:
1691 change_mask
|= EFLG_IOPL
;
1693 change_mask
|= EFLG_IF
;
1695 case X86EMUL_MODE_VM86
:
1697 return emulate_gp(ctxt
, 0);
1698 change_mask
|= EFLG_IF
;
1700 default: /* real mode */
1701 change_mask
|= (EFLG_IOPL
| EFLG_IF
);
1705 *(unsigned long *)dest
=
1706 (ctxt
->eflags
& ~change_mask
) | (val
& change_mask
);
1711 static int em_popf(struct x86_emulate_ctxt
*ctxt
)
1713 ctxt
->dst
.type
= OP_REG
;
1714 ctxt
->dst
.addr
.reg
= &ctxt
->eflags
;
1715 ctxt
->dst
.bytes
= ctxt
->op_bytes
;
1716 return emulate_popf(ctxt
, &ctxt
->dst
.val
, ctxt
->op_bytes
);
1719 static int em_enter(struct x86_emulate_ctxt
*ctxt
)
1722 unsigned frame_size
= ctxt
->src
.val
;
1723 unsigned nesting_level
= ctxt
->src2
.val
& 31;
1727 return X86EMUL_UNHANDLEABLE
;
1729 rbp
= reg_read(ctxt
, VCPU_REGS_RBP
);
1730 rc
= push(ctxt
, &rbp
, stack_size(ctxt
));
1731 if (rc
!= X86EMUL_CONTINUE
)
1733 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RBP
), reg_read(ctxt
, VCPU_REGS_RSP
),
1735 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RSP
),
1736 reg_read(ctxt
, VCPU_REGS_RSP
) - frame_size
,
1738 return X86EMUL_CONTINUE
;
1741 static int em_leave(struct x86_emulate_ctxt
*ctxt
)
1743 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RSP
), reg_read(ctxt
, VCPU_REGS_RBP
),
1745 return emulate_pop(ctxt
, reg_rmw(ctxt
, VCPU_REGS_RBP
), ctxt
->op_bytes
);
1748 static int em_push_sreg(struct x86_emulate_ctxt
*ctxt
)
1750 int seg
= ctxt
->src2
.val
;
1752 ctxt
->src
.val
= get_segment_selector(ctxt
, seg
);
1754 return em_push(ctxt
);
1757 static int em_pop_sreg(struct x86_emulate_ctxt
*ctxt
)
1759 int seg
= ctxt
->src2
.val
;
1760 unsigned long selector
;
1763 rc
= emulate_pop(ctxt
, &selector
, ctxt
->op_bytes
);
1764 if (rc
!= X86EMUL_CONTINUE
)
1767 if (ctxt
->modrm_reg
== VCPU_SREG_SS
)
1768 ctxt
->interruptibility
= KVM_X86_SHADOW_INT_MOV_SS
;
1770 rc
= load_segment_descriptor(ctxt
, (u16
)selector
, seg
);
1774 static int em_pusha(struct x86_emulate_ctxt
*ctxt
)
1776 unsigned long old_esp
= reg_read(ctxt
, VCPU_REGS_RSP
);
1777 int rc
= X86EMUL_CONTINUE
;
1778 int reg
= VCPU_REGS_RAX
;
1780 while (reg
<= VCPU_REGS_RDI
) {
1781 (reg
== VCPU_REGS_RSP
) ?
1782 (ctxt
->src
.val
= old_esp
) : (ctxt
->src
.val
= reg_read(ctxt
, reg
));
1785 if (rc
!= X86EMUL_CONTINUE
)
1794 static int em_pushf(struct x86_emulate_ctxt
*ctxt
)
1796 ctxt
->src
.val
= (unsigned long)ctxt
->eflags
;
1797 return em_push(ctxt
);
1800 static int em_popa(struct x86_emulate_ctxt
*ctxt
)
1802 int rc
= X86EMUL_CONTINUE
;
1803 int reg
= VCPU_REGS_RDI
;
1805 while (reg
>= VCPU_REGS_RAX
) {
1806 if (reg
== VCPU_REGS_RSP
) {
1807 rsp_increment(ctxt
, ctxt
->op_bytes
);
1811 rc
= emulate_pop(ctxt
, reg_rmw(ctxt
, reg
), ctxt
->op_bytes
);
1812 if (rc
!= X86EMUL_CONTINUE
)
1819 static int __emulate_int_real(struct x86_emulate_ctxt
*ctxt
, int irq
)
1821 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
1828 /* TODO: Add limit checks */
1829 ctxt
->src
.val
= ctxt
->eflags
;
1831 if (rc
!= X86EMUL_CONTINUE
)
1834 ctxt
->eflags
&= ~(EFLG_IF
| EFLG_TF
| EFLG_AC
);
1836 ctxt
->src
.val
= get_segment_selector(ctxt
, VCPU_SREG_CS
);
1838 if (rc
!= X86EMUL_CONTINUE
)
1841 ctxt
->src
.val
= ctxt
->_eip
;
1843 if (rc
!= X86EMUL_CONTINUE
)
1846 ops
->get_idt(ctxt
, &dt
);
1848 eip_addr
= dt
.address
+ (irq
<< 2);
1849 cs_addr
= dt
.address
+ (irq
<< 2) + 2;
1851 rc
= ops
->read_std(ctxt
, cs_addr
, &cs
, 2, &ctxt
->exception
);
1852 if (rc
!= X86EMUL_CONTINUE
)
1855 rc
= ops
->read_std(ctxt
, eip_addr
, &eip
, 2, &ctxt
->exception
);
1856 if (rc
!= X86EMUL_CONTINUE
)
1859 rc
= load_segment_descriptor(ctxt
, cs
, VCPU_SREG_CS
);
1860 if (rc
!= X86EMUL_CONTINUE
)
1868 int emulate_int_real(struct x86_emulate_ctxt
*ctxt
, int irq
)
1872 invalidate_registers(ctxt
);
1873 rc
= __emulate_int_real(ctxt
, irq
);
1874 if (rc
== X86EMUL_CONTINUE
)
1875 writeback_registers(ctxt
);
1879 static int emulate_int(struct x86_emulate_ctxt
*ctxt
, int irq
)
1881 switch(ctxt
->mode
) {
1882 case X86EMUL_MODE_REAL
:
1883 return __emulate_int_real(ctxt
, irq
);
1884 case X86EMUL_MODE_VM86
:
1885 case X86EMUL_MODE_PROT16
:
1886 case X86EMUL_MODE_PROT32
:
1887 case X86EMUL_MODE_PROT64
:
1889 /* Protected mode interrupts unimplemented yet */
1890 return X86EMUL_UNHANDLEABLE
;
1894 static int emulate_iret_real(struct x86_emulate_ctxt
*ctxt
)
1896 int rc
= X86EMUL_CONTINUE
;
1897 unsigned long temp_eip
= 0;
1898 unsigned long temp_eflags
= 0;
1899 unsigned long cs
= 0;
1900 unsigned long mask
= EFLG_CF
| EFLG_PF
| EFLG_AF
| EFLG_ZF
| EFLG_SF
| EFLG_TF
|
1901 EFLG_IF
| EFLG_DF
| EFLG_OF
| EFLG_IOPL
| EFLG_NT
| EFLG_RF
|
1902 EFLG_AC
| EFLG_ID
| (1 << 1); /* Last one is the reserved bit */
1903 unsigned long vm86_mask
= EFLG_VM
| EFLG_VIF
| EFLG_VIP
;
1905 /* TODO: Add stack limit check */
1907 rc
= emulate_pop(ctxt
, &temp_eip
, ctxt
->op_bytes
);
1909 if (rc
!= X86EMUL_CONTINUE
)
1912 if (temp_eip
& ~0xffff)
1913 return emulate_gp(ctxt
, 0);
1915 rc
= emulate_pop(ctxt
, &cs
, ctxt
->op_bytes
);
1917 if (rc
!= X86EMUL_CONTINUE
)
1920 rc
= emulate_pop(ctxt
, &temp_eflags
, ctxt
->op_bytes
);
1922 if (rc
!= X86EMUL_CONTINUE
)
1925 rc
= load_segment_descriptor(ctxt
, (u16
)cs
, VCPU_SREG_CS
);
1927 if (rc
!= X86EMUL_CONTINUE
)
1930 ctxt
->_eip
= temp_eip
;
1933 if (ctxt
->op_bytes
== 4)
1934 ctxt
->eflags
= ((temp_eflags
& mask
) | (ctxt
->eflags
& vm86_mask
));
1935 else if (ctxt
->op_bytes
== 2) {
1936 ctxt
->eflags
&= ~0xffff;
1937 ctxt
->eflags
|= temp_eflags
;
1940 ctxt
->eflags
&= ~EFLG_RESERVED_ZEROS_MASK
; /* Clear reserved zeros */
1941 ctxt
->eflags
|= EFLG_RESERVED_ONE_MASK
;
1946 static int em_iret(struct x86_emulate_ctxt
*ctxt
)
1948 switch(ctxt
->mode
) {
1949 case X86EMUL_MODE_REAL
:
1950 return emulate_iret_real(ctxt
);
1951 case X86EMUL_MODE_VM86
:
1952 case X86EMUL_MODE_PROT16
:
1953 case X86EMUL_MODE_PROT32
:
1954 case X86EMUL_MODE_PROT64
:
1956 /* iret from protected mode unimplemented yet */
1957 return X86EMUL_UNHANDLEABLE
;
1961 static int em_jmp_far(struct x86_emulate_ctxt
*ctxt
)
1966 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
1968 rc
= load_segment_descriptor(ctxt
, sel
, VCPU_SREG_CS
);
1969 if (rc
!= X86EMUL_CONTINUE
)
1973 memcpy(&ctxt
->_eip
, ctxt
->src
.valptr
, ctxt
->op_bytes
);
1974 return X86EMUL_CONTINUE
;
1977 static int em_grp45(struct x86_emulate_ctxt
*ctxt
)
1979 int rc
= X86EMUL_CONTINUE
;
1981 switch (ctxt
->modrm_reg
) {
1982 case 2: /* call near abs */ {
1984 old_eip
= ctxt
->_eip
;
1985 ctxt
->_eip
= ctxt
->src
.val
;
1986 ctxt
->src
.val
= old_eip
;
1990 case 4: /* jmp abs */
1991 ctxt
->_eip
= ctxt
->src
.val
;
1993 case 5: /* jmp far */
1994 rc
= em_jmp_far(ctxt
);
2003 static int em_cmpxchg8b(struct x86_emulate_ctxt
*ctxt
)
2005 u64 old
= ctxt
->dst
.orig_val64
;
2007 if (ctxt
->dst
.bytes
== 16)
2008 return X86EMUL_UNHANDLEABLE
;
2010 if (((u32
) (old
>> 0) != (u32
) reg_read(ctxt
, VCPU_REGS_RAX
)) ||
2011 ((u32
) (old
>> 32) != (u32
) reg_read(ctxt
, VCPU_REGS_RDX
))) {
2012 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
) (old
>> 0);
2013 *reg_write(ctxt
, VCPU_REGS_RDX
) = (u32
) (old
>> 32);
2014 ctxt
->eflags
&= ~EFLG_ZF
;
2016 ctxt
->dst
.val64
= ((u64
)reg_read(ctxt
, VCPU_REGS_RCX
) << 32) |
2017 (u32
) reg_read(ctxt
, VCPU_REGS_RBX
);
2019 ctxt
->eflags
|= EFLG_ZF
;
2021 return X86EMUL_CONTINUE
;
2024 static int em_ret(struct x86_emulate_ctxt
*ctxt
)
2026 ctxt
->dst
.type
= OP_REG
;
2027 ctxt
->dst
.addr
.reg
= &ctxt
->_eip
;
2028 ctxt
->dst
.bytes
= ctxt
->op_bytes
;
2029 return em_pop(ctxt
);
2032 static int em_ret_far(struct x86_emulate_ctxt
*ctxt
)
2036 int cpl
= ctxt
->ops
->cpl(ctxt
);
2038 rc
= emulate_pop(ctxt
, &ctxt
->_eip
, ctxt
->op_bytes
);
2039 if (rc
!= X86EMUL_CONTINUE
)
2041 if (ctxt
->op_bytes
== 4)
2042 ctxt
->_eip
= (u32
)ctxt
->_eip
;
2043 rc
= emulate_pop(ctxt
, &cs
, ctxt
->op_bytes
);
2044 if (rc
!= X86EMUL_CONTINUE
)
2046 /* Outer-privilege level return is not implemented */
2047 if (ctxt
->mode
>= X86EMUL_MODE_PROT16
&& (cs
& 3) > cpl
)
2048 return X86EMUL_UNHANDLEABLE
;
2049 rc
= load_segment_descriptor(ctxt
, (u16
)cs
, VCPU_SREG_CS
);
2053 static int em_ret_far_imm(struct x86_emulate_ctxt
*ctxt
)
2057 rc
= em_ret_far(ctxt
);
2058 if (rc
!= X86EMUL_CONTINUE
)
2060 rsp_increment(ctxt
, ctxt
->src
.val
);
2061 return X86EMUL_CONTINUE
;
2064 static int em_cmpxchg(struct x86_emulate_ctxt
*ctxt
)
2066 /* Save real source value, then compare EAX against destination. */
2067 ctxt
->dst
.orig_val
= ctxt
->dst
.val
;
2068 ctxt
->dst
.val
= reg_read(ctxt
, VCPU_REGS_RAX
);
2069 ctxt
->src
.orig_val
= ctxt
->src
.val
;
2070 ctxt
->src
.val
= ctxt
->dst
.orig_val
;
2071 fastop(ctxt
, em_cmp
);
2073 if (ctxt
->eflags
& EFLG_ZF
) {
2074 /* Success: write back to memory. */
2075 ctxt
->dst
.val
= ctxt
->src
.orig_val
;
2077 /* Failure: write the value we saw to EAX. */
2078 ctxt
->dst
.type
= OP_REG
;
2079 ctxt
->dst
.addr
.reg
= reg_rmw(ctxt
, VCPU_REGS_RAX
);
2080 ctxt
->dst
.val
= ctxt
->dst
.orig_val
;
2082 return X86EMUL_CONTINUE
;
2085 static int em_lseg(struct x86_emulate_ctxt
*ctxt
)
2087 int seg
= ctxt
->src2
.val
;
2091 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
2093 rc
= load_segment_descriptor(ctxt
, sel
, seg
);
2094 if (rc
!= X86EMUL_CONTINUE
)
2097 ctxt
->dst
.val
= ctxt
->src
.val
;
2102 setup_syscalls_segments(struct x86_emulate_ctxt
*ctxt
,
2103 struct desc_struct
*cs
, struct desc_struct
*ss
)
2105 cs
->l
= 0; /* will be adjusted later */
2106 set_desc_base(cs
, 0); /* flat segment */
2107 cs
->g
= 1; /* 4kb granularity */
2108 set_desc_limit(cs
, 0xfffff); /* 4GB limit */
2109 cs
->type
= 0x0b; /* Read, Execute, Accessed */
2111 cs
->dpl
= 0; /* will be adjusted later */
2116 set_desc_base(ss
, 0); /* flat segment */
2117 set_desc_limit(ss
, 0xfffff); /* 4GB limit */
2118 ss
->g
= 1; /* 4kb granularity */
2120 ss
->type
= 0x03; /* Read/Write, Accessed */
2121 ss
->d
= 1; /* 32bit stack segment */
2128 static bool vendor_intel(struct x86_emulate_ctxt
*ctxt
)
2130 u32 eax
, ebx
, ecx
, edx
;
2133 ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
);
2134 return ebx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2135 && ecx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2136 && edx
== X86EMUL_CPUID_VENDOR_GenuineIntel_edx
;
2139 static bool em_syscall_is_enabled(struct x86_emulate_ctxt
*ctxt
)
2141 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2142 u32 eax
, ebx
, ecx
, edx
;
2145 * syscall should always be enabled in longmode - so only become
2146 * vendor specific (cpuid) if other modes are active...
2148 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
2153 ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
);
2155 * Intel ("GenuineIntel")
2156 * remark: Intel CPUs only support "syscall" in 64bit
2157 * longmode. Also an 64bit guest with a
2158 * 32bit compat-app running will #UD !! While this
2159 * behaviour can be fixed (by emulating) into AMD
2160 * response - CPUs of AMD can't behave like Intel.
2162 if (ebx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
&&
2163 ecx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
&&
2164 edx
== X86EMUL_CPUID_VENDOR_GenuineIntel_edx
)
2167 /* AMD ("AuthenticAMD") */
2168 if (ebx
== X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx
&&
2169 ecx
== X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx
&&
2170 edx
== X86EMUL_CPUID_VENDOR_AuthenticAMD_edx
)
2173 /* AMD ("AMDisbetter!") */
2174 if (ebx
== X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx
&&
2175 ecx
== X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx
&&
2176 edx
== X86EMUL_CPUID_VENDOR_AMDisbetterI_edx
)
2179 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2183 static int em_syscall(struct x86_emulate_ctxt
*ctxt
)
2185 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2186 struct desc_struct cs
, ss
;
2191 /* syscall is not available in real mode */
2192 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
2193 ctxt
->mode
== X86EMUL_MODE_VM86
)
2194 return emulate_ud(ctxt
);
2196 if (!(em_syscall_is_enabled(ctxt
)))
2197 return emulate_ud(ctxt
);
2199 ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
2200 setup_syscalls_segments(ctxt
, &cs
, &ss
);
2202 if (!(efer
& EFER_SCE
))
2203 return emulate_ud(ctxt
);
2205 ops
->get_msr(ctxt
, MSR_STAR
, &msr_data
);
2207 cs_sel
= (u16
)(msr_data
& 0xfffc);
2208 ss_sel
= (u16
)(msr_data
+ 8);
2210 if (efer
& EFER_LMA
) {
2214 ops
->set_segment(ctxt
, cs_sel
, &cs
, 0, VCPU_SREG_CS
);
2215 ops
->set_segment(ctxt
, ss_sel
, &ss
, 0, VCPU_SREG_SS
);
2217 *reg_write(ctxt
, VCPU_REGS_RCX
) = ctxt
->_eip
;
2218 if (efer
& EFER_LMA
) {
2219 #ifdef CONFIG_X86_64
2220 *reg_write(ctxt
, VCPU_REGS_R11
) = ctxt
->eflags
& ~EFLG_RF
;
2223 ctxt
->mode
== X86EMUL_MODE_PROT64
?
2224 MSR_LSTAR
: MSR_CSTAR
, &msr_data
);
2225 ctxt
->_eip
= msr_data
;
2227 ops
->get_msr(ctxt
, MSR_SYSCALL_MASK
, &msr_data
);
2228 ctxt
->eflags
&= ~(msr_data
| EFLG_RF
);
2232 ops
->get_msr(ctxt
, MSR_STAR
, &msr_data
);
2233 ctxt
->_eip
= (u32
)msr_data
;
2235 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
2238 return X86EMUL_CONTINUE
;
2241 static int em_sysenter(struct x86_emulate_ctxt
*ctxt
)
2243 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2244 struct desc_struct cs
, ss
;
2249 ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
2250 /* inject #GP if in real mode */
2251 if (ctxt
->mode
== X86EMUL_MODE_REAL
)
2252 return emulate_gp(ctxt
, 0);
2255 * Not recognized on AMD in compat mode (but is recognized in legacy
2258 if ((ctxt
->mode
== X86EMUL_MODE_PROT32
) && (efer
& EFER_LMA
)
2259 && !vendor_intel(ctxt
))
2260 return emulate_ud(ctxt
);
2262 /* XXX sysenter/sysexit have not been tested in 64bit mode.
2263 * Therefore, we inject an #UD.
2265 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
2266 return emulate_ud(ctxt
);
2268 setup_syscalls_segments(ctxt
, &cs
, &ss
);
2270 ops
->get_msr(ctxt
, MSR_IA32_SYSENTER_CS
, &msr_data
);
2271 switch (ctxt
->mode
) {
2272 case X86EMUL_MODE_PROT32
:
2273 if ((msr_data
& 0xfffc) == 0x0)
2274 return emulate_gp(ctxt
, 0);
2276 case X86EMUL_MODE_PROT64
:
2277 if (msr_data
== 0x0)
2278 return emulate_gp(ctxt
, 0);
2284 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
2285 cs_sel
= (u16
)msr_data
;
2286 cs_sel
&= ~SELECTOR_RPL_MASK
;
2287 ss_sel
= cs_sel
+ 8;
2288 ss_sel
&= ~SELECTOR_RPL_MASK
;
2289 if (ctxt
->mode
== X86EMUL_MODE_PROT64
|| (efer
& EFER_LMA
)) {
2294 ops
->set_segment(ctxt
, cs_sel
, &cs
, 0, VCPU_SREG_CS
);
2295 ops
->set_segment(ctxt
, ss_sel
, &ss
, 0, VCPU_SREG_SS
);
2297 ops
->get_msr(ctxt
, MSR_IA32_SYSENTER_EIP
, &msr_data
);
2298 ctxt
->_eip
= msr_data
;
2300 ops
->get_msr(ctxt
, MSR_IA32_SYSENTER_ESP
, &msr_data
);
2301 *reg_write(ctxt
, VCPU_REGS_RSP
) = msr_data
;
2303 return X86EMUL_CONTINUE
;
2306 static int em_sysexit(struct x86_emulate_ctxt
*ctxt
)
2308 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2309 struct desc_struct cs
, ss
;
2312 u16 cs_sel
= 0, ss_sel
= 0;
2314 /* inject #GP if in real mode or Virtual 8086 mode */
2315 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
2316 ctxt
->mode
== X86EMUL_MODE_VM86
)
2317 return emulate_gp(ctxt
, 0);
2319 setup_syscalls_segments(ctxt
, &cs
, &ss
);
2321 if ((ctxt
->rex_prefix
& 0x8) != 0x0)
2322 usermode
= X86EMUL_MODE_PROT64
;
2324 usermode
= X86EMUL_MODE_PROT32
;
2328 ops
->get_msr(ctxt
, MSR_IA32_SYSENTER_CS
, &msr_data
);
2330 case X86EMUL_MODE_PROT32
:
2331 cs_sel
= (u16
)(msr_data
+ 16);
2332 if ((msr_data
& 0xfffc) == 0x0)
2333 return emulate_gp(ctxt
, 0);
2334 ss_sel
= (u16
)(msr_data
+ 24);
2336 case X86EMUL_MODE_PROT64
:
2337 cs_sel
= (u16
)(msr_data
+ 32);
2338 if (msr_data
== 0x0)
2339 return emulate_gp(ctxt
, 0);
2340 ss_sel
= cs_sel
+ 8;
2345 cs_sel
|= SELECTOR_RPL_MASK
;
2346 ss_sel
|= SELECTOR_RPL_MASK
;
2348 ops
->set_segment(ctxt
, cs_sel
, &cs
, 0, VCPU_SREG_CS
);
2349 ops
->set_segment(ctxt
, ss_sel
, &ss
, 0, VCPU_SREG_SS
);
2351 ctxt
->_eip
= reg_read(ctxt
, VCPU_REGS_RDX
);
2352 *reg_write(ctxt
, VCPU_REGS_RSP
) = reg_read(ctxt
, VCPU_REGS_RCX
);
2354 return X86EMUL_CONTINUE
;
2357 static bool emulator_bad_iopl(struct x86_emulate_ctxt
*ctxt
)
2360 if (ctxt
->mode
== X86EMUL_MODE_REAL
)
2362 if (ctxt
->mode
== X86EMUL_MODE_VM86
)
2364 iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> IOPL_SHIFT
;
2365 return ctxt
->ops
->cpl(ctxt
) > iopl
;
2368 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2371 	const struct x86_emulate_ops *ops = ctxt->ops;
2372 	struct desc_struct tr_seg;
2375 	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2376 	unsigned mask = (1 << len) - 1;
2379 	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2382 	if (desc_limit_scaled(&tr_seg) < 103)
2384 	base = get_desc_base(&tr_seg);
2385 #ifdef CONFIG_X86_64
2386 	base |= ((u64)base3) << 32;
2388 	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2389 	if (r != X86EMUL_CONTINUE)
2391 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2393 	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2394 	if (r != X86EMUL_CONTINUE)
2396 	if ((perm >> bit_idx) & mask)
2401 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2407 	if (emulator_bad_iopl(ctxt))
2408 		if (!emulator_io_port_access_allowed(ctxt, port, len))
2411 	ctxt->perm_ok = true;
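/*
 * Worked example for the I/O permission bitmap walk above (illustrative
 * only, port chosen arbitrarily): an "in ax, dx" with port 0x3fa and
 * len = 2 gives
 *
 *	bit_idx = 0x3fa & 0x7 = 2;	mask = (1 << 2) - 1 = 0x3;
 *	byte offset = 0x3fa / 8 = 0x7f;
 *
 * read_std() then fetches 16 bits at base + io_bitmap_ptr + 0x7f and the
 * access is denied if either of bits 2-3 of that word is set, i.e. if
 * (perm >> bit_idx) & mask is non-zero.
 */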
2416 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2417 				struct tss_segment_16 *tss)
2419 	tss->ip = ctxt->_eip;
2420 	tss->flag = ctxt->eflags;
2421 	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2422 	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2423 	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2424 	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2425 	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2426 	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2427 	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2428 	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2430 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2431 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2432 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2433 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2434 	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2437 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2438 				 struct tss_segment_16 *tss)
2443 	ctxt->_eip = tss->ip;
2444 	ctxt->eflags = tss->flag | 2;
2445 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2446 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2447 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2448 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2449 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2450 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2451 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2452 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2455 	 * SDM says that segment selectors are loaded before segment
2458 	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2459 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2460 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2461 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2462 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2467 	 * Now load segment descriptors. If fault happens at this stage
2468 	 * it is handled in a context of new task
2470 	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
2471 	if (ret != X86EMUL_CONTINUE)
2473 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
2474 	if (ret != X86EMUL_CONTINUE)
2476 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
2477 	if (ret != X86EMUL_CONTINUE)
2479 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
2480 	if (ret != X86EMUL_CONTINUE)
2482 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
2483 	if (ret != X86EMUL_CONTINUE)
2486 	return X86EMUL_CONTINUE;
2489 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2490 			  u16 tss_selector, u16 old_tss_sel,
2491 			  ulong old_tss_base, struct desc_struct *new_desc)
2493 	const struct x86_emulate_ops *ops = ctxt->ops;
2494 	struct tss_segment_16 tss_seg;
2496 	u32 new_tss_base = get_desc_base(new_desc);
2498 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2500 	if (ret != X86EMUL_CONTINUE)
2501 		/* FIXME: need to provide precise fault address */
2504 	save_state_to_tss16(ctxt, &tss_seg);
2506 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2508 	if (ret != X86EMUL_CONTINUE)
2509 		/* FIXME: need to provide precise fault address */
2512 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2514 	if (ret != X86EMUL_CONTINUE)
2515 		/* FIXME: need to provide precise fault address */
2518 	if (old_tss_sel != 0xffff) {
2519 		tss_seg.prev_task_link = old_tss_sel;
2521 		ret = ops->write_std(ctxt, new_tss_base,
2522 				     &tss_seg.prev_task_link,
2523 				     sizeof tss_seg.prev_task_link,
2525 		if (ret != X86EMUL_CONTINUE)
2526 			/* FIXME: need to provide precise fault address */
2530 	return load_state_from_tss16(ctxt, &tss_seg);
2533 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2534 				struct tss_segment_32 *tss)
2536 	/* CR3 and ldt selector are not saved intentionally */
2537 	tss->eip = ctxt->_eip;
2538 	tss->eflags = ctxt->eflags;
2539 	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2540 	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2541 	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2542 	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2543 	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2544 	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2545 	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2546 	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2548 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2549 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2550 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2551 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2552 	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2553 	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2556 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2557 				 struct tss_segment_32 *tss)
2562 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2563 		return emulate_gp(ctxt, 0);
2564 	ctxt->_eip = tss->eip;
2565 	ctxt->eflags = tss->eflags | 2;
2567 	/* General purpose registers */
2568 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2569 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2570 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2571 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2572 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2573 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2574 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2575 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2578 	 * SDM says that segment selectors are loaded before segment
2579 	 * descriptors. This is important because CPL checks will
2582 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2583 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2584 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2585 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2586 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2587 	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2588 	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2591 	 * If we're switching between Protected Mode and VM86, we need to make
2592 	 * sure to update the mode before loading the segment descriptors so
2593 	 * that the selectors are interpreted correctly.
2595 	if (ctxt->eflags & X86_EFLAGS_VM) {
2596 		ctxt->mode = X86EMUL_MODE_VM86;
2599 		ctxt->mode = X86EMUL_MODE_PROT32;
2604 	 * Now load segment descriptors. If fault happens at this stage
2605 	 * it is handled in a context of new task
2607 	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
2608 	if (ret != X86EMUL_CONTINUE)
2610 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
2611 	if (ret != X86EMUL_CONTINUE)
2613 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
2614 	if (ret != X86EMUL_CONTINUE)
2616 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
2617 	if (ret != X86EMUL_CONTINUE)
2619 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
2620 	if (ret != X86EMUL_CONTINUE)
2622 	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
2623 	if (ret != X86EMUL_CONTINUE)
2625 	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
2626 	if (ret != X86EMUL_CONTINUE)
2629 	return X86EMUL_CONTINUE;
2632 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2633 			  u16 tss_selector, u16 old_tss_sel,
2634 			  ulong old_tss_base, struct desc_struct *new_desc)
2636 	const struct x86_emulate_ops *ops = ctxt->ops;
2637 	struct tss_segment_32 tss_seg;
2639 	u32 new_tss_base = get_desc_base(new_desc);
2640 	u32 eip_offset = offsetof(struct tss_segment_32, eip);
2641 	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2643 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2645 	if (ret != X86EMUL_CONTINUE)
2646 		/* FIXME: need to provide precise fault address */
2649 	save_state_to_tss32(ctxt, &tss_seg);
2651 	/* Only GP registers and segment selectors are saved */
2652 	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2653 			     ldt_sel_offset - eip_offset, &ctxt->exception);
2654 	if (ret != X86EMUL_CONTINUE)
2655 		/* FIXME: need to provide precise fault address */
2658 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2660 	if (ret != X86EMUL_CONTINUE)
2661 		/* FIXME: need to provide precise fault address */
2664 	if (old_tss_sel != 0xffff) {
2665 		tss_seg.prev_task_link = old_tss_sel;
2667 		ret = ops->write_std(ctxt, new_tss_base,
2668 				     &tss_seg.prev_task_link,
2669 				     sizeof tss_seg.prev_task_link,
2671 		if (ret != X86EMUL_CONTINUE)
2672 			/* FIXME: need to provide precise fault address */
2676 	return load_state_from_tss32(ctxt, &tss_seg);
2679 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2680 				   u16 tss_selector, int idt_index, int reason,
2681 				   bool has_error_code, u32 error_code)
2683 	const struct x86_emulate_ops *ops = ctxt->ops;
2684 	struct desc_struct curr_tss_desc, next_tss_desc;
2686 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2687 	ulong old_tss_base =
2688 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2692 	/* FIXME: old_tss_base == ~0 ? */
2694 	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2695 	if (ret != X86EMUL_CONTINUE)
2697 	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2698 	if (ret != X86EMUL_CONTINUE)
2701 	/* FIXME: check that next_tss_desc is tss */
2704 	 * Check privileges. The three cases are task switch caused by...
2706 	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2707 	 * 2. Exception/IRQ/iret: No check is performed
2708 	 * 3. jmp/call to TSS: Check against DPL of the TSS
2710 	if (reason == TASK_SWITCH_GATE) {
2711 		if (idt_index != -1) {
2712 			/* Software interrupts */
2713 			struct desc_struct task_gate_desc;
2716 			ret = read_interrupt_descriptor(ctxt, idt_index,
2718 			if (ret != X86EMUL_CONTINUE)
2721 			dpl = task_gate_desc.dpl;
2722 			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2723 				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2725 	} else if (reason != TASK_SWITCH_IRET) {
2726 		int dpl = next_tss_desc.dpl;
2727 		if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2728 			return emulate_gp(ctxt, tss_selector);
2732 	desc_limit = desc_limit_scaled(&next_tss_desc);
2733 	if (!next_tss_desc.p ||
2734 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2735 	     desc_limit < 0x2b)) {
2736 		emulate_ts(ctxt, tss_selector & 0xfffc);
2737 		return X86EMUL_PROPAGATE_FAULT;
2740 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2741 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2742 		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2745 	if (reason == TASK_SWITCH_IRET)
2746 		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2748 	/* set back link to prev task only if NT bit is set in eflags
2749 	   note that old_tss_sel is not used after this point */
2750 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2751 		old_tss_sel = 0xffff;
2753 	if (next_tss_desc.type & 8)
2754 		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2755 				     old_tss_base, &next_tss_desc);
2757 		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2758 				     old_tss_base, &next_tss_desc);
2759 	if (ret != X86EMUL_CONTINUE)
2762 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2763 		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2765 	if (reason != TASK_SWITCH_IRET) {
2766 		next_tss_desc.type |= (1 << 1); /* set busy flag */
2767 		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2770 	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2771 	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2773 	if (has_error_code) {
2774 		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2775 		ctxt->lock_prefix = 0;
2776 		ctxt->src.val = (unsigned long) error_code;
2777 		ret = em_push(ctxt);
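/*
 * Summary of the bookkeeping above (explanatory note, not part of the
 * original file).  The reason for the switch decides what happens to the
 * TSS busy bits, EFLAGS.NT and the back link:
 *
 *	JMP:		old TSS busy cleared;	new TSS busy set;	no back link
 *	CALL/gate:	old TSS stays busy;	new TSS busy set;	back link written, NT set
 *	IRET:		old TSS busy cleared;	new TSS untouched;	NT cleared
 *
 * old_tss_sel doubling as the "write a back link" flag (0xffff means
 * don't) is what the reason != CALL && reason != GATE test implements.
 */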
2783 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2784 			 u16 tss_selector, int idt_index, int reason,
2785 			 bool has_error_code, u32 error_code)
2789 	invalidate_registers(ctxt);
2790 	ctxt->_eip = ctxt->eip;
2791 	ctxt->dst.type = OP_NONE;
2793 	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2794 				     has_error_code, error_code);
2796 	if (rc == X86EMUL_CONTINUE) {
2797 		ctxt->eip = ctxt->_eip;
2798 		writeback_registers(ctxt);
2801 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2804 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2807 	int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2809 	register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2810 	op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
2813 static int em_das(struct x86_emulate_ctxt *ctxt)
2816 	bool af, cf, old_cf;
2818 	cf = ctxt->eflags & X86_EFLAGS_CF;
2824 	af = ctxt->eflags & X86_EFLAGS_AF;
2825 	if ((al & 0x0f) > 9 || af) {
2827 		cf = old_cf | (al >= 250);
2832 	if (old_al > 0x99 || old_cf) {
2838 	/* Set PF, ZF, SF */
2839 	ctxt->src.type = OP_IMM;
2841 	ctxt->src.bytes = 1;
2842 	fastop(ctxt, em_or);
2843 	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2845 		ctxt->eflags |= X86_EFLAGS_CF;
2847 		ctxt->eflags |= X86_EFLAGS_AF;
2848 	return X86EMUL_CONTINUE;
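/*
 * Worked example for em_das() above (illustrative): DAS fixes up AL after
 * a packed-BCD subtraction.  With AL = 0x2b and AF set (e.g. 0x42 - 0x17):
 *
 *	(al & 0x0f) > 9			-> al -= 6;	al becomes 0x25
 *	old_al (0x2b) <= 0x99, !old_cf	-> no 0x60 correction
 *
 * leaving AL = 0x25, the BCD result of 42 - 17, with CF/AF updated by the
 * tail of the function.
 */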
2851 static int em_aam(struct x86_emulate_ctxt *ctxt)
2855 	if (ctxt->src.val == 0)
2856 		return emulate_de(ctxt);
2858 	al = ctxt->dst.val & 0xff;
2859 	ah = al / ctxt->src.val;
2860 	al %= ctxt->src.val;
2862 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2864 	/* Set PF, ZF, SF */
2865 	ctxt->src.type = OP_IMM;
2867 	ctxt->src.bytes = 1;
2868 	fastop(ctxt, em_or);
2870 	return X86EMUL_CONTINUE;
2873 static int em_aad(struct x86_emulate_ctxt *ctxt)
2875 	u8 al = ctxt->dst.val & 0xff;
2876 	u8 ah = (ctxt->dst.val >> 8) & 0xff;
2878 	al = (al + (ah * ctxt->src.val)) & 0xff;
2880 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2882 	/* Set PF, ZF, SF */
2883 	ctxt->src.type = OP_IMM;
2885 	ctxt->src.bytes = 1;
2886 	fastop(ctxt, em_or);
2888 	return X86EMUL_CONTINUE;
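/*
 * Worked example (illustrative): AAM/AAD convert between binary and
 * unpacked BCD using the immediate as the base, 10 by default.
 *
 *	AAM, AL = 123:		ah = 123 / 10 = 12;  al = 123 % 10 = 3;  AX = 0x0c03
 *	AAD, AX = 0x0c03:	al = 3 + 12 * 10 = 123;			 AX = 0x007b
 *
 * AAM with an immediate of 0 raises #DE, which is why em_aam() checks
 * src.val before dividing.
 */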
2891 static int em_call(struct x86_emulate_ctxt *ctxt)
2893 	long rel = ctxt->src.val;
2895 	ctxt->src.val = (unsigned long)ctxt->_eip;
2897 	return em_push(ctxt);
2900 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2906 	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2907 	old_eip = ctxt->_eip;
2909 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2910 	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
2911 		return X86EMUL_CONTINUE;
2914 	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
2916 	ctxt->src.val = old_cs;
2918 	if (rc != X86EMUL_CONTINUE)
2921 	ctxt->src.val = old_eip;
2922 	return em_push(ctxt);
2925 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2929 	ctxt->dst.type = OP_REG;
2930 	ctxt->dst.addr.reg = &ctxt->_eip;
2931 	ctxt->dst.bytes = ctxt->op_bytes;
2932 	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
2933 	if (rc != X86EMUL_CONTINUE)
2935 	rsp_increment(ctxt, ctxt->src.val);
2936 	return X86EMUL_CONTINUE;
2939 static int em_xchg(struct x86_emulate_ctxt *ctxt)
2941 	/* Write back the register source. */
2942 	ctxt->src.val = ctxt->dst.val;
2943 	write_register_operand(&ctxt->src);
2945 	/* Write back the memory destination with implicit LOCK prefix. */
2946 	ctxt->dst.val = ctxt->src.orig_val;
2947 	ctxt->lock_prefix = 1;
2948 	return X86EMUL_CONTINUE;
2951 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2953 	ctxt->dst.val = ctxt->src2.val;
2954 	return fastop(ctxt, em_imul);
2957 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2959 	ctxt->dst.type = OP_REG;
2960 	ctxt->dst.bytes = ctxt->src.bytes;
2961 	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
2962 	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2964 	return X86EMUL_CONTINUE;
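/*
 * Note on em_cwd() above (explanatory): the expression
 *
 *	~((src >> (bytes * 8 - 1)) - 1)
 *
 * replicates the sign bit of the source into every bit of DX/EDX/RDX.
 * For a 16-bit CWD with AX = 0x8000: src >> 15 = 1, 1 - 1 = 0, ~0 gives
 * all ones, so DX = 0xffff; with AX = 0x1234 the same steps give 0, i.e.
 * DX = 0.
 */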
2967 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2971 	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2972 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
2973 	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
2974 	return X86EMUL_CONTINUE;
2977 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
2981 	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
2982 		return emulate_gp(ctxt, 0);
2983 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
2984 	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
2985 	return X86EMUL_CONTINUE;
2988 static int em_mov(struct x86_emulate_ctxt *ctxt)
2990 	memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
2991 	return X86EMUL_CONTINUE;
2994 #define FFL(x) bit(X86_FEATURE_##x)
2996 static int em_movbe(struct x86_emulate_ctxt *ctxt)
2998 	u32 ebx, ecx, edx, eax = 1;
3002 	 * Check MOVBE is set in the guest-visible CPUID leaf.
3004 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3005 	if (!(ecx & FFL(MOVBE)))
3006 		return emulate_ud(ctxt);
3008 	switch (ctxt->op_bytes) {
3011 		 * From MOVBE definition: "...When the operand size is 16 bits,
3012 		 * the upper word of the destination register remains unchanged
3015 		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3016 		 * rules, so we have to do the operation almost by hand.
3018 		tmp = (u16)ctxt->src.val;
3019 		ctxt->dst.val &= ~0xffffUL;
3020 		ctxt->dst.val |= (unsigned long)swab16(tmp);
3023 		ctxt->dst.val = swab32((u32)ctxt->src.val);
3026 		ctxt->dst.val = swab64(ctxt->src.val);
3029 		return X86EMUL_PROPAGATE_FAULT;
3031 	return X86EMUL_CONTINUE;
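/*
 * Worked example for em_movbe() above (illustrative): with a 16-bit
 * operand size and src.val = 0x1234, swab16() yields 0x3412 and only the
 * low word of the destination is replaced:
 *
 *	dst.val = (dst.val & ~0xffffUL) | 0x3412;
 *
 * whereas the 32/64-bit cases are simply swab32()/swab64() of the whole
 * value, e.g. swab32(0x11223344) == 0x44332211.
 */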
3034 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3036 	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3037 		return emulate_gp(ctxt, 0);
3039 	/* Disable writeback. */
3040 	ctxt->dst.type = OP_NONE;
3041 	return X86EMUL_CONTINUE;
3044 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3048 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3049 		val = ctxt->src.val & ~0ULL;
3051 		val = ctxt->src.val & ~0U;
3053 	/* #UD condition is already handled. */
3054 	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3055 		return emulate_gp(ctxt, 0);
3057 	/* Disable writeback. */
3058 	ctxt->dst.type = OP_NONE;
3059 	return X86EMUL_CONTINUE;
3062 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3066 	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3067 		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3068 	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3069 		return emulate_gp(ctxt, 0);
3071 	return X86EMUL_CONTINUE;
3074 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3078 	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3079 		return emulate_gp(ctxt, 0);
3081 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3082 	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3083 	return X86EMUL_CONTINUE;
3086 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3088 	if (ctxt->modrm_reg > VCPU_SREG_GS)
3089 		return emulate_ud(ctxt);
3091 	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3092 	return X86EMUL_CONTINUE;
3095 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3097 	u16 sel = ctxt->src.val;
3099 	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3100 		return emulate_ud(ctxt);
3102 	if (ctxt->modrm_reg == VCPU_SREG_SS)
3103 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3105 	/* Disable writeback. */
3106 	ctxt->dst.type = OP_NONE;
3107 	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3110 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3112 	u16 sel = ctxt->src.val;
3114 	/* Disable writeback. */
3115 	ctxt->dst.type = OP_NONE;
3116 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3119 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3121 	u16 sel = ctxt->src.val;
3123 	/* Disable writeback. */
3124 	ctxt->dst.type = OP_NONE;
3125 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3128 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3133 	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3134 	if (rc == X86EMUL_CONTINUE)
3135 		ctxt->ops->invlpg(ctxt, linear);
3136 	/* Disable writeback. */
3137 	ctxt->dst.type = OP_NONE;
3138 	return X86EMUL_CONTINUE;
3141 static int em_clts(struct x86_emulate_ctxt *ctxt)
3145 	cr0 = ctxt->ops->get_cr(ctxt, 0);
3147 	ctxt->ops->set_cr(ctxt, 0, cr0);
3148 	return X86EMUL_CONTINUE;
3151 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3155 	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
3156 		return X86EMUL_UNHANDLEABLE;
3158 	rc = ctxt->ops->fix_hypercall(ctxt);
3159 	if (rc != X86EMUL_CONTINUE)
3162 	/* Let the processor re-execute the fixed hypercall */
3163 	ctxt->_eip = ctxt->eip;
3164 	/* Disable writeback. */
3165 	ctxt->dst.type = OP_NONE;
3166 	return X86EMUL_CONTINUE;
3169 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3170 				  void (*get)(struct x86_emulate_ctxt *ctxt,
3171 					      struct desc_ptr *ptr))
3173 	struct desc_ptr desc_ptr;
3175 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3177 	get(ctxt, &desc_ptr);
3178 	if (ctxt->op_bytes == 2) {
3180 		desc_ptr.address &= 0x00ffffff;
3182 	/* Disable writeback. */
3183 	ctxt->dst.type = OP_NONE;
3184 	return segmented_write(ctxt, ctxt->dst.addr.mem,
3185 			       &desc_ptr, 2 + ctxt->op_bytes);
3188 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3190 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3193 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3195 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3198 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3200 	struct desc_ptr desc_ptr;
3203 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3205 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3206 			     &desc_ptr.size, &desc_ptr.address,
3208 	if (rc != X86EMUL_CONTINUE)
3210 	ctxt->ops->set_gdt(ctxt, &desc_ptr);
3211 	/* Disable writeback. */
3212 	ctxt->dst.type = OP_NONE;
3213 	return X86EMUL_CONTINUE;
3216 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3220 	rc = ctxt->ops->fix_hypercall(ctxt);
3222 	/* Disable writeback. */
3223 	ctxt->dst.type = OP_NONE;
3227 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3229 	struct desc_ptr desc_ptr;
3232 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3234 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3235 			     &desc_ptr.size, &desc_ptr.address,
3237 	if (rc != X86EMUL_CONTINUE)
3239 	ctxt->ops->set_idt(ctxt, &desc_ptr);
3240 	/* Disable writeback. */
3241 	ctxt->dst.type = OP_NONE;
3242 	return X86EMUL_CONTINUE;
3245 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3247 	if (ctxt->dst.type == OP_MEM)
3248 		ctxt->dst.bytes = 2;
3249 	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3250 	return X86EMUL_CONTINUE;
3253 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3255 	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3256 			  | (ctxt->src.val & 0x0f));
3257 	ctxt->dst.type = OP_NONE;
3258 	return X86EMUL_CONTINUE;
3261 static int em_loop(struct x86_emulate_ctxt *ctxt)
3263 	register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3264 	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3265 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3266 		jmp_rel(ctxt, ctxt->src.val);
3268 	return X86EMUL_CONTINUE;
3271 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3273 	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3274 		jmp_rel(ctxt, ctxt->src.val);
3276 	return X86EMUL_CONTINUE;
3279 static int em_in(struct x86_emulate_ctxt *ctxt)
3281 	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3283 		return X86EMUL_IO_NEEDED;
3285 	return X86EMUL_CONTINUE;
3288 static int em_out(struct x86_emulate_ctxt *ctxt)
3290 	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3292 	/* Disable writeback. */
3293 	ctxt->dst.type = OP_NONE;
3294 	return X86EMUL_CONTINUE;
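/*
 * Note on em_loop() above (explanatory): LOOPNE is opcode 0xe0, LOOPE is
 * 0xe1 and plain LOOP is 0xe2.  XORing the opcode with 0x5 turns 0xe0
 * into a value whose low nibble is the JNE condition (5) and 0xe1 into
 * one ending in the JE condition (4), so test_cc(ctxt->b ^ 0x5, eflags)
 * evaluates exactly the ZF condition each LOOPcc variant requires, while
 * plain LOOP (0xe2) bypasses the test entirely.
 */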
3297 static int em_cli(struct x86_emulate_ctxt *ctxt)
3299 	if (emulator_bad_iopl(ctxt))
3300 		return emulate_gp(ctxt, 0);
3302 	ctxt->eflags &= ~X86_EFLAGS_IF;
3303 	return X86EMUL_CONTINUE;
3306 static int em_sti(struct x86_emulate_ctxt *ctxt)
3308 	if (emulator_bad_iopl(ctxt))
3309 		return emulate_gp(ctxt, 0);
3311 	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3312 	ctxt->eflags |= X86_EFLAGS_IF;
3313 	return X86EMUL_CONTINUE;
3316 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3318 	u32 eax, ebx, ecx, edx;
3320 	eax = reg_read(ctxt, VCPU_REGS_RAX);
3321 	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3322 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3323 	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
3324 	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3325 	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3326 	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
3327 	return X86EMUL_CONTINUE;
3330 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3334 	flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3335 	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3337 	ctxt->eflags &= ~0xffUL;
3338 	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3339 	return X86EMUL_CONTINUE;
3342 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3344 	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3345 	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3346 	return X86EMUL_CONTINUE;
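/*
 * Illustrative note for em_sahf()/em_lahf() above: AH and the low byte of
 * EFLAGS share the layout  SF:ZF:0:AF:0:PF:1:CF  (bit 7 down to bit 0).
 * For example, with only ZF and CF set:
 *
 *	LAHF:	AH = 0x43	(ZF | X86_EFLAGS_FIXED | CF)
 *	SAHF:	eflags = (eflags & ~0xffUL) | 0x40 | 0x01 | X86_EFLAGS_FIXED
 *
 * which matches the EFLG_CF|EFLG_PF|EFLG_AF|EFLG_ZF|EFLG_SF mask used in
 * em_sahf() and the (eflags & 0xff) << 8 shift in em_lahf().
 */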
3349 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3351 	switch (ctxt->op_bytes) {
3352 #ifdef CONFIG_X86_64
3354 		asm("bswap %0" : "+r"(ctxt->dst.val));
3358 		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3361 	return X86EMUL_CONTINUE;
3364 static bool valid_cr(int nr)
3376 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3378 	if (!valid_cr(ctxt->modrm_reg))
3379 		return emulate_ud(ctxt);
3381 	return X86EMUL_CONTINUE;
3384 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3386 	u64 new_val = ctxt->src.val64;
3387 	int cr = ctxt->modrm_reg;
3390 	static u64 cr_reserved_bits[] = {
3391 		0xffffffff00000000ULL,
3392 		0, 0, 0, /* CR3 checked later */
3399 		return emulate_ud(ctxt);
3401 	if (new_val & cr_reserved_bits[cr])
3402 		return emulate_gp(ctxt, 0);
3407 		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3408 		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3409 			return emulate_gp(ctxt, 0);
3411 		cr4 = ctxt->ops->get_cr(ctxt, 4);
3412 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3414 		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3415 		    !(cr4 & X86_CR4_PAE))
3416 			return emulate_gp(ctxt, 0);
3423 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3424 		if (efer & EFER_LMA)
3425 			rsvd = CR3_L_MODE_RESERVED_BITS;
3428 			return emulate_gp(ctxt, 0);
3433 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3435 		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3436 			return emulate_gp(ctxt, 0);
3442 	return X86EMUL_CONTINUE;
3445 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3449 	ctxt->ops->get_dr(ctxt, 7, &dr7);
3451 	/* Check if DR7.Global_Enable is set */
3452 	return dr7 & (1 << 13);
3455 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3457 	int dr = ctxt->modrm_reg;
3461 		return emulate_ud(ctxt);
3463 	cr4 = ctxt->ops->get_cr(ctxt, 4);
3464 	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3465 		return emulate_ud(ctxt);
3467 	if (check_dr7_gd(ctxt))
3468 		return emulate_db(ctxt);
3470 	return X86EMUL_CONTINUE;
3473 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3475 	u64 new_val = ctxt->src.val64;
3476 	int dr = ctxt->modrm_reg;
3478 	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3479 		return emulate_gp(ctxt, 0);
3481 	return check_dr_read(ctxt);
3484 static int check_svme(struct x86_emulate_ctxt *ctxt)
3488 	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3490 	if (!(efer & EFER_SVME))
3491 		return emulate_ud(ctxt);
3493 	return X86EMUL_CONTINUE;
3496 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3498 	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3500 	/* Valid physical address? */
3501 	if (rax & 0xffff000000000000ULL)
3502 		return emulate_gp(ctxt, 0);
3504 	return check_svme(ctxt);
3507 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3509 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3511 	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3512 		return emulate_ud(ctxt);
3514 	return X86EMUL_CONTINUE;
3517 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3519 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3520 	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3522 	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3523 	    ctxt->ops->check_pmc(ctxt, rcx))
3524 		return emulate_gp(ctxt, 0);
3526 	return X86EMUL_CONTINUE;
3529 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3531 	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3532 	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3533 		return emulate_gp(ctxt, 0);
3535 	return X86EMUL_CONTINUE;
3538 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3540 	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3541 	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3542 		return emulate_gp(ctxt, 0);
3544 	return X86EMUL_CONTINUE;
3547 #define D(_y) { .flags = (_y) }
3548 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
3549 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
3550 .check_perm = (_p) }
3551 #define N D(NotImpl)
3552 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3553 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3554 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3555 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3556 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3557 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3558 #define II(_f, _e, _i) \
3559 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
3560 #define IIP(_f, _e, _i, _p) \
3561 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3562 .check_perm = (_p) }
3563 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3565 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3566 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3567 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3568 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3569 #define I2bvIP(_f, _e, _i, _p) \
3570 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3572 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3573 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3574 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3576 static const struct opcode group7_rm1
[] = {
3577 DI(SrcNone
| Priv
, monitor
),
3578 DI(SrcNone
| Priv
, mwait
),
3582 static const struct opcode group7_rm3
[] = {
3583 DIP(SrcNone
| Prot
| Priv
, vmrun
, check_svme_pa
),
3584 II(SrcNone
| Prot
| EmulateOnUD
, em_vmmcall
, vmmcall
),
3585 DIP(SrcNone
| Prot
| Priv
, vmload
, check_svme_pa
),
3586 DIP(SrcNone
| Prot
| Priv
, vmsave
, check_svme_pa
),
3587 DIP(SrcNone
| Prot
| Priv
, stgi
, check_svme
),
3588 DIP(SrcNone
| Prot
| Priv
, clgi
, check_svme
),
3589 DIP(SrcNone
| Prot
| Priv
, skinit
, check_svme
),
3590 DIP(SrcNone
| Prot
| Priv
, invlpga
, check_svme
),
3593 static const struct opcode group7_rm7
[] = {
3595 DIP(SrcNone
, rdtscp
, check_rdtsc
),
3599 static const struct opcode group1
[] = {
3601 F(Lock
| PageTable
, em_or
),
3604 F(Lock
| PageTable
, em_and
),
3610 static const struct opcode group1A
[] = {
3611 I(DstMem
| SrcNone
| Mov
| Stack
, em_pop
), N
, N
, N
, N
, N
, N
, N
,
3614 static const struct opcode group2
[] = {
3615 F(DstMem
| ModRM
, em_rol
),
3616 F(DstMem
| ModRM
, em_ror
),
3617 F(DstMem
| ModRM
, em_rcl
),
3618 F(DstMem
| ModRM
, em_rcr
),
3619 F(DstMem
| ModRM
, em_shl
),
3620 F(DstMem
| ModRM
, em_shr
),
3621 F(DstMem
| ModRM
, em_shl
),
3622 F(DstMem
| ModRM
, em_sar
),
3625 static const struct opcode group3
[] = {
3626 F(DstMem
| SrcImm
| NoWrite
, em_test
),
3627 F(DstMem
| SrcImm
| NoWrite
, em_test
),
3628 F(DstMem
| SrcNone
| Lock
, em_not
),
3629 F(DstMem
| SrcNone
| Lock
, em_neg
),
3630 F(DstXacc
| Src2Mem
, em_mul_ex
),
3631 F(DstXacc
| Src2Mem
, em_imul_ex
),
3632 F(DstXacc
| Src2Mem
, em_div_ex
),
3633 F(DstXacc
| Src2Mem
, em_idiv_ex
),
3636 static const struct opcode group4
[] = {
3637 F(ByteOp
| DstMem
| SrcNone
| Lock
, em_inc
),
3638 F(ByteOp
| DstMem
| SrcNone
| Lock
, em_dec
),
3642 static const struct opcode group5
[] = {
3643 F(DstMem
| SrcNone
| Lock
, em_inc
),
3644 F(DstMem
| SrcNone
| Lock
, em_dec
),
3645 I(SrcMem
| Stack
, em_grp45
),
3646 I(SrcMemFAddr
| ImplicitOps
| Stack
, em_call_far
),
3647 I(SrcMem
| Stack
, em_grp45
),
3648 I(SrcMemFAddr
| ImplicitOps
, em_grp45
),
3649 I(SrcMem
| Stack
, em_grp45
), D(Undefined
),
3652 static const struct opcode group6
[] = {
3655 II(Prot
| Priv
| SrcMem16
, em_lldt
, lldt
),
3656 II(Prot
| Priv
| SrcMem16
, em_ltr
, ltr
),
3660 static const struct group_dual group7
= { {
3661 II(Mov
| DstMem
, em_sgdt
, sgdt
),
3662 II(Mov
| DstMem
, em_sidt
, sidt
),
3663 II(SrcMem
| Priv
, em_lgdt
, lgdt
),
3664 II(SrcMem
| Priv
, em_lidt
, lidt
),
3665 II(SrcNone
| DstMem
| Mov
, em_smsw
, smsw
), N
,
3666 II(SrcMem16
| Mov
| Priv
, em_lmsw
, lmsw
),
3667 II(SrcMem
| ByteOp
| Priv
| NoAccess
, em_invlpg
, invlpg
),
3669 I(SrcNone
| Priv
| EmulateOnUD
, em_vmcall
),
3671 N
, EXT(0, group7_rm3
),
3672 II(SrcNone
| DstMem
| Mov
, em_smsw
, smsw
), N
,
3673 II(SrcMem16
| Mov
| Priv
, em_lmsw
, lmsw
),
3677 static const struct opcode group8
[] = {
3679 F(DstMem
| SrcImmByte
| NoWrite
, em_bt
),
3680 F(DstMem
| SrcImmByte
| Lock
| PageTable
, em_bts
),
3681 F(DstMem
| SrcImmByte
| Lock
, em_btr
),
3682 F(DstMem
| SrcImmByte
| Lock
| PageTable
, em_btc
),
3685 static const struct group_dual group9
= { {
3686 N
, I(DstMem64
| Lock
| PageTable
, em_cmpxchg8b
), N
, N
, N
, N
, N
, N
,
3688 N
, N
, N
, N
, N
, N
, N
, N
,
3691 static const struct opcode group11
[] = {
3692 I(DstMem
| SrcImm
| Mov
| PageTable
, em_mov
),
3696 static const struct gprefix pfx_0f_6f_0f_7f
= {
3697 I(Mmx
, em_mov
), I(Sse
| Aligned
, em_mov
), N
, I(Sse
| Unaligned
, em_mov
),
3700 static const struct gprefix pfx_vmovntpx
= {
3701 I(0, em_mov
), N
, N
, N
,
3704 static const struct gprefix pfx_0f_28_0f_29
= {
3705 I(Aligned
, em_mov
), I(Aligned
, em_mov
), N
, N
,
3708 static const struct escape escape_d9
= { {
3709 N
, N
, N
, N
, N
, N
, N
, I(DstMem
, em_fnstcw
),
3712 N
, N
, N
, N
, N
, N
, N
, N
,
3714 N
, N
, N
, N
, N
, N
, N
, N
,
3716 N
, N
, N
, N
, N
, N
, N
, N
,
3718 N
, N
, N
, N
, N
, N
, N
, N
,
3720 N
, N
, N
, N
, N
, N
, N
, N
,
3722 N
, N
, N
, N
, N
, N
, N
, N
,
3724 N
, N
, N
, N
, N
, N
, N
, N
,
3726 N
, N
, N
, N
, N
, N
, N
, N
,
3729 static const struct escape escape_db
= { {
3730 N
, N
, N
, N
, N
, N
, N
, N
,
3733 N
, N
, N
, N
, N
, N
, N
, N
,
3735 N
, N
, N
, N
, N
, N
, N
, N
,
3737 N
, N
, N
, N
, N
, N
, N
, N
,
3739 N
, N
, N
, N
, N
, N
, N
, N
,
3741 N
, N
, N
, I(ImplicitOps
, em_fninit
), N
, N
, N
, N
,
3743 N
, N
, N
, N
, N
, N
, N
, N
,
3745 N
, N
, N
, N
, N
, N
, N
, N
,
3747 N
, N
, N
, N
, N
, N
, N
, N
,
3750 static const struct escape escape_dd
= { {
3751 N
, N
, N
, N
, N
, N
, N
, I(DstMem
, em_fnstsw
),
3754 N
, N
, N
, N
, N
, N
, N
, N
,
3756 N
, N
, N
, N
, N
, N
, N
, N
,
3758 N
, N
, N
, N
, N
, N
, N
, N
,
3760 N
, N
, N
, N
, N
, N
, N
, N
,
3762 N
, N
, N
, N
, N
, N
, N
, N
,
3764 N
, N
, N
, N
, N
, N
, N
, N
,
3766 N
, N
, N
, N
, N
, N
, N
, N
,
3768 N
, N
, N
, N
, N
, N
, N
, N
,
3771 static const struct opcode opcode_table
[256] = {
3773 F6ALU(Lock
, em_add
),
3774 I(ImplicitOps
| Stack
| No64
| Src2ES
, em_push_sreg
),
3775 I(ImplicitOps
| Stack
| No64
| Src2ES
, em_pop_sreg
),
3777 F6ALU(Lock
| PageTable
, em_or
),
3778 I(ImplicitOps
| Stack
| No64
| Src2CS
, em_push_sreg
),
3781 F6ALU(Lock
, em_adc
),
3782 I(ImplicitOps
| Stack
| No64
| Src2SS
, em_push_sreg
),
3783 I(ImplicitOps
| Stack
| No64
| Src2SS
, em_pop_sreg
),
3785 F6ALU(Lock
, em_sbb
),
3786 I(ImplicitOps
| Stack
| No64
| Src2DS
, em_push_sreg
),
3787 I(ImplicitOps
| Stack
| No64
| Src2DS
, em_pop_sreg
),
3789 F6ALU(Lock
| PageTable
, em_and
), N
, N
,
3791 F6ALU(Lock
, em_sub
), N
, I(ByteOp
| DstAcc
| No64
, em_das
),
3793 F6ALU(Lock
, em_xor
), N
, N
,
3795 F6ALU(NoWrite
, em_cmp
), N
, N
,
3797 X8(F(DstReg
, em_inc
)), X8(F(DstReg
, em_dec
)),
3799 X8(I(SrcReg
| Stack
, em_push
)),
3801 X8(I(DstReg
| Stack
, em_pop
)),
3803 I(ImplicitOps
| Stack
| No64
, em_pusha
),
3804 I(ImplicitOps
| Stack
| No64
, em_popa
),
3805 N
, D(DstReg
| SrcMem32
| ModRM
| Mov
) /* movsxd (x86/64) */ ,
3808 I(SrcImm
| Mov
| Stack
, em_push
),
3809 I(DstReg
| SrcMem
| ModRM
| Src2Imm
, em_imul_3op
),
3810 I(SrcImmByte
| Mov
| Stack
, em_push
),
3811 I(DstReg
| SrcMem
| ModRM
| Src2ImmByte
, em_imul_3op
),
3812 I2bvIP(DstDI
| SrcDX
| Mov
| String
| Unaligned
, em_in
, ins
, check_perm_in
), /* insb, insw/insd */
3813 I2bvIP(SrcSI
| DstDX
| String
, em_out
, outs
, check_perm_out
), /* outsb, outsw/outsd */
3817 G(ByteOp
| DstMem
| SrcImm
, group1
),
3818 G(DstMem
| SrcImm
, group1
),
3819 G(ByteOp
| DstMem
| SrcImm
| No64
, group1
),
3820 G(DstMem
| SrcImmByte
, group1
),
3821 F2bv(DstMem
| SrcReg
| ModRM
| NoWrite
, em_test
),
3822 I2bv(DstMem
| SrcReg
| ModRM
| Lock
| PageTable
, em_xchg
),
3824 I2bv(DstMem
| SrcReg
| ModRM
| Mov
| PageTable
, em_mov
),
3825 I2bv(DstReg
| SrcMem
| ModRM
| Mov
, em_mov
),
3826 I(DstMem
| SrcNone
| ModRM
| Mov
| PageTable
, em_mov_rm_sreg
),
3827 D(ModRM
| SrcMem
| NoAccess
| DstReg
),
3828 I(ImplicitOps
| SrcMem16
| ModRM
, em_mov_sreg_rm
),
3831 DI(SrcAcc
| DstReg
, pause
), X7(D(SrcAcc
| DstReg
)),
3833 D(DstAcc
| SrcNone
), I(ImplicitOps
| SrcAcc
, em_cwd
),
3834 I(SrcImmFAddr
| No64
, em_call_far
), N
,
3835 II(ImplicitOps
| Stack
, em_pushf
, pushf
),
3836 II(ImplicitOps
| Stack
, em_popf
, popf
),
3837 I(ImplicitOps
, em_sahf
), I(ImplicitOps
, em_lahf
),
3839 I2bv(DstAcc
| SrcMem
| Mov
| MemAbs
, em_mov
),
3840 I2bv(DstMem
| SrcAcc
| Mov
| MemAbs
| PageTable
, em_mov
),
3841 I2bv(SrcSI
| DstDI
| Mov
| String
, em_mov
),
3842 F2bv(SrcSI
| DstDI
| String
| NoWrite
, em_cmp
),
3844 F2bv(DstAcc
| SrcImm
| NoWrite
, em_test
),
3845 I2bv(SrcAcc
| DstDI
| Mov
| String
, em_mov
),
3846 I2bv(SrcSI
| DstAcc
| Mov
| String
, em_mov
),
3847 F2bv(SrcAcc
| DstDI
| String
| NoWrite
, em_cmp
),
3849 X8(I(ByteOp
| DstReg
| SrcImm
| Mov
, em_mov
)),
3851 X8(I(DstReg
| SrcImm64
| Mov
, em_mov
)),
3853 G(ByteOp
| Src2ImmByte
, group2
), G(Src2ImmByte
, group2
),
3854 I(ImplicitOps
| Stack
| SrcImmU16
, em_ret_near_imm
),
3855 I(ImplicitOps
| Stack
, em_ret
),
3856 I(DstReg
| SrcMemFAddr
| ModRM
| No64
| Src2ES
, em_lseg
),
3857 I(DstReg
| SrcMemFAddr
| ModRM
| No64
| Src2DS
, em_lseg
),
3858 G(ByteOp
, group11
), G(0, group11
),
3860 I(Stack
| SrcImmU16
| Src2ImmByte
, em_enter
), I(Stack
, em_leave
),
3861 I(ImplicitOps
| Stack
| SrcImmU16
, em_ret_far_imm
),
3862 I(ImplicitOps
| Stack
, em_ret_far
),
3863 D(ImplicitOps
), DI(SrcImmByte
, intn
),
3864 D(ImplicitOps
| No64
), II(ImplicitOps
, em_iret
, iret
),
3866 G(Src2One
| ByteOp
, group2
), G(Src2One
, group2
),
3867 G(Src2CL
| ByteOp
, group2
), G(Src2CL
, group2
),
3868 I(DstAcc
| SrcImmUByte
| No64
, em_aam
),
3869 I(DstAcc
| SrcImmUByte
| No64
, em_aad
),
3870 F(DstAcc
| ByteOp
| No64
, em_salc
),
3871 I(DstAcc
| SrcXLat
| ByteOp
, em_mov
),
3873 N
, E(0, &escape_d9
), N
, E(0, &escape_db
), N
, E(0, &escape_dd
), N
, N
,
3875 X3(I(SrcImmByte
, em_loop
)),
3876 I(SrcImmByte
, em_jcxz
),
3877 I2bvIP(SrcImmUByte
| DstAcc
, em_in
, in
, check_perm_in
),
3878 I2bvIP(SrcAcc
| DstImmUByte
, em_out
, out
, check_perm_out
),
3880 I(SrcImm
| Stack
, em_call
), D(SrcImm
| ImplicitOps
),
3881 I(SrcImmFAddr
| No64
, em_jmp_far
), D(SrcImmByte
| ImplicitOps
),
3882 I2bvIP(SrcDX
| DstAcc
, em_in
, in
, check_perm_in
),
3883 I2bvIP(SrcAcc
| DstDX
, em_out
, out
, check_perm_out
),
3885 N
, DI(ImplicitOps
, icebp
), N
, N
,
3886 DI(ImplicitOps
| Priv
, hlt
), D(ImplicitOps
),
3887 G(ByteOp
, group3
), G(0, group3
),
3889 D(ImplicitOps
), D(ImplicitOps
),
3890 I(ImplicitOps
, em_cli
), I(ImplicitOps
, em_sti
),
3891 D(ImplicitOps
), D(ImplicitOps
), G(0, group4
), G(0, group5
),
3894 static const struct opcode twobyte_table
[256] = {
3896 G(0, group6
), GD(0, &group7
), N
, N
,
3897 N
, I(ImplicitOps
| EmulateOnUD
, em_syscall
),
3898 II(ImplicitOps
| Priv
, em_clts
, clts
), N
,
3899 DI(ImplicitOps
| Priv
, invd
), DI(ImplicitOps
| Priv
, wbinvd
), N
, N
,
3900 N
, D(ImplicitOps
| ModRM
), N
, N
,
3902 N
, N
, N
, N
, N
, N
, N
, N
,
3903 D(ImplicitOps
| ModRM
), N
, N
, N
, N
, N
, N
, D(ImplicitOps
| ModRM
),
3905 DIP(ModRM
| DstMem
| Priv
| Op3264
| NoMod
, cr_read
, check_cr_read
),
3906 DIP(ModRM
| DstMem
| Priv
| Op3264
| NoMod
, dr_read
, check_dr_read
),
3907 IIP(ModRM
| SrcMem
| Priv
| Op3264
| NoMod
, em_cr_write
, cr_write
,
3909 IIP(ModRM
| SrcMem
| Priv
| Op3264
| NoMod
, em_dr_write
, dr_write
,
3912 GP(ModRM
| DstReg
| SrcMem
| Mov
| Sse
, &pfx_0f_28_0f_29
),
3913 GP(ModRM
| DstMem
| SrcReg
| Mov
| Sse
, &pfx_0f_28_0f_29
),
3914 N
, GP(ModRM
| DstMem
| SrcReg
| Sse
| Mov
| Aligned
, &pfx_vmovntpx
),
3917 II(ImplicitOps
| Priv
, em_wrmsr
, wrmsr
),
3918 IIP(ImplicitOps
, em_rdtsc
, rdtsc
, check_rdtsc
),
3919 II(ImplicitOps
| Priv
, em_rdmsr
, rdmsr
),
3920 IIP(ImplicitOps
, em_rdpmc
, rdpmc
, check_rdpmc
),
3921 I(ImplicitOps
| EmulateOnUD
, em_sysenter
),
3922 I(ImplicitOps
| Priv
| EmulateOnUD
, em_sysexit
),
3924 N
, N
, N
, N
, N
, N
, N
, N
,
3926 X16(D(DstReg
| SrcMem
| ModRM
)),
3928 N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
,
3933 N
, N
, N
, GP(SrcMem
| DstReg
| ModRM
| Mov
, &pfx_0f_6f_0f_7f
),
3938 N
, N
, N
, GP(SrcReg
| DstMem
| ModRM
| Mov
, &pfx_0f_6f_0f_7f
),
3942 X16(D(ByteOp
| DstMem
| SrcNone
| ModRM
| Mov
)),
3944 I(Stack
| Src2FS
, em_push_sreg
), I(Stack
| Src2FS
, em_pop_sreg
),
3945 II(ImplicitOps
, em_cpuid
, cpuid
),
3946 F(DstMem
| SrcReg
| ModRM
| BitOp
| NoWrite
, em_bt
),
3947 F(DstMem
| SrcReg
| Src2ImmByte
| ModRM
, em_shld
),
3948 F(DstMem
| SrcReg
| Src2CL
| ModRM
, em_shld
), N
, N
,
3950 I(Stack
| Src2GS
, em_push_sreg
), I(Stack
| Src2GS
, em_pop_sreg
),
3951 DI(ImplicitOps
, rsm
),
3952 F(DstMem
| SrcReg
| ModRM
| BitOp
| Lock
| PageTable
, em_bts
),
3953 F(DstMem
| SrcReg
| Src2ImmByte
| ModRM
, em_shrd
),
3954 F(DstMem
| SrcReg
| Src2CL
| ModRM
, em_shrd
),
3955 D(ModRM
), F(DstReg
| SrcMem
| ModRM
, em_imul
),
3957 I2bv(DstMem
| SrcReg
| ModRM
| Lock
| PageTable
, em_cmpxchg
),
3958 I(DstReg
| SrcMemFAddr
| ModRM
| Src2SS
, em_lseg
),
3959 F(DstMem
| SrcReg
| ModRM
| BitOp
| Lock
, em_btr
),
3960 I(DstReg
| SrcMemFAddr
| ModRM
| Src2FS
, em_lseg
),
3961 I(DstReg
| SrcMemFAddr
| ModRM
| Src2GS
, em_lseg
),
3962 D(DstReg
| SrcMem8
| ModRM
| Mov
), D(DstReg
| SrcMem16
| ModRM
| Mov
),
3966 F(DstMem
| SrcReg
| ModRM
| BitOp
| Lock
| PageTable
, em_btc
),
3967 F(DstReg
| SrcMem
| ModRM
, em_bsf
), F(DstReg
| SrcMem
| ModRM
, em_bsr
),
3968 D(DstReg
| SrcMem8
| ModRM
| Mov
), D(DstReg
| SrcMem16
| ModRM
| Mov
),
3970 F2bv(DstMem
| SrcReg
| ModRM
| SrcWrite
| Lock
, em_xadd
),
3971 N
, D(DstMem
| SrcReg
| ModRM
| Mov
),
3972 N
, N
, N
, GD(0, &group9
),
3974 X8(I(DstReg
, em_bswap
)),
3976 N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
,
3978 N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
,
3980 N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
, N
3983 static const struct gprefix three_byte_0f_38_f0
= {
3984 I(DstReg
| SrcMem
| Mov
, em_movbe
), N
, N
, N
3987 static const struct gprefix three_byte_0f_38_f1
= {
3988 I(DstMem
| SrcReg
| Mov
, em_movbe
), N
, N
, N
3992 * Insns below are selected by the prefix which indexed by the third opcode
3995 static const struct opcode opcode_map_0f_38
[256] = {
3997 X16(N
), X16(N
), X16(N
), X16(N
), X16(N
), X16(N
), X16(N
), X16(N
),
3999 X16(N
), X16(N
), X16(N
), X16(N
), X16(N
), X16(N
), X16(N
),
4001 GP(EmulateOnUD
| ModRM
| Prefix
, &three_byte_0f_38_f0
),
4002 GP(EmulateOnUD
| ModRM
| Prefix
, &three_byte_0f_38_f1
),
4021 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4025 	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4031 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4032 		      unsigned size, bool sign_extension)
4034 	int rc = X86EMUL_CONTINUE;
4038 	op->addr.mem.ea = ctxt->_eip;
4039 	/* NB. Immediates are sign-extended as necessary. */
4040 	switch (op->bytes) {
4042 		op->val = insn_fetch(s8, ctxt);
4045 		op->val = insn_fetch(s16, ctxt);
4048 		op->val = insn_fetch(s32, ctxt);
4051 		op->val = insn_fetch(s64, ctxt);
4054 	if (!sign_extension) {
4055 		switch (op->bytes) {
4063 			op->val &= 0xffffffff;
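/*
 * Illustrative example for decode_imm() above: the fetch always goes
 * through a signed type, so a one-byte immediate 0xf0 decodes as
 *
 *	sign_extension == true:		op->val = (s8)0xf0 = -16
 *	sign_extension == false:	the second switch masks it back,
 *					e.g. op->val &= 0xff  ->  0xf0
 *
 * which is the difference between, say, SrcImmByte (sign extended) and
 * SrcImmUByte (zero extended) in the operand tables.
 */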
4071 static int decode_operand(struct x86_emulate_ctxt
*ctxt
, struct operand
*op
,
4074 int rc
= X86EMUL_CONTINUE
;
4078 decode_register_operand(ctxt
, op
);
4081 rc
= decode_imm(ctxt
, op
, 1, false);
4084 ctxt
->memop
.bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
4088 if (ctxt
->d
& BitOp
)
4089 fetch_bit_operand(ctxt
);
4090 op
->orig_val
= op
->val
;
4093 ctxt
->memop
.bytes
= (ctxt
->op_bytes
== 8) ? 16 : 8;
4097 op
->bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
4098 op
->addr
.reg
= reg_rmw(ctxt
, VCPU_REGS_RAX
);
4099 fetch_register_operand(op
);
4100 op
->orig_val
= op
->val
;
4104 op
->bytes
= (ctxt
->d
& ByteOp
) ? 2 : ctxt
->op_bytes
;
4105 op
->addr
.reg
= reg_rmw(ctxt
, VCPU_REGS_RAX
);
4106 fetch_register_operand(op
);
4107 op
->orig_val
= op
->val
;
4110 if (ctxt
->d
& ByteOp
) {
4115 op
->bytes
= ctxt
->op_bytes
;
4116 op
->addr
.reg
= reg_rmw(ctxt
, VCPU_REGS_RDX
);
4117 fetch_register_operand(op
);
4118 op
->orig_val
= op
->val
;
4122 op
->bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
4124 register_address(ctxt
, reg_read(ctxt
, VCPU_REGS_RDI
));
4125 op
->addr
.mem
.seg
= VCPU_SREG_ES
;
4132 op
->addr
.reg
= reg_rmw(ctxt
, VCPU_REGS_RDX
);
4133 fetch_register_operand(op
);
4137 op
->val
= reg_read(ctxt
, VCPU_REGS_RCX
) & 0xff;
4140 rc
= decode_imm(ctxt
, op
, 1, true);
4147 rc
= decode_imm(ctxt
, op
, imm_size(ctxt
), true);
4150 rc
= decode_imm(ctxt
, op
, ctxt
->op_bytes
, true);
4153 ctxt
->memop
.bytes
= 1;
4154 if (ctxt
->memop
.type
== OP_REG
) {
4155 ctxt
->memop
.addr
.reg
= decode_register(ctxt
,
4156 ctxt
->modrm_rm
, true);
4157 fetch_register_operand(&ctxt
->memop
);
4161 ctxt
->memop
.bytes
= 2;
4164 ctxt
->memop
.bytes
= 4;
4167 rc
= decode_imm(ctxt
, op
, 2, false);
4170 rc
= decode_imm(ctxt
, op
, imm_size(ctxt
), false);
4174 op
->bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
4176 register_address(ctxt
, reg_read(ctxt
, VCPU_REGS_RSI
));
4177 op
->addr
.mem
.seg
= seg_override(ctxt
);
4183 op
->bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
4185 register_address(ctxt
,
4186 reg_read(ctxt
, VCPU_REGS_RBX
) +
4187 (reg_read(ctxt
, VCPU_REGS_RAX
) & 0xff));
4188 op
->addr
.mem
.seg
= seg_override(ctxt
);
4193 op
->addr
.mem
.ea
= ctxt
->_eip
;
4194 op
->bytes
= ctxt
->op_bytes
+ 2;
4195 insn_fetch_arr(op
->valptr
, op
->bytes
, ctxt
);
4198 ctxt
->memop
.bytes
= ctxt
->op_bytes
+ 2;
4201 op
->val
= VCPU_SREG_ES
;
4204 op
->val
= VCPU_SREG_CS
;
4207 op
->val
= VCPU_SREG_SS
;
4210 op
->val
= VCPU_SREG_DS
;
4213 op
->val
= VCPU_SREG_FS
;
4216 op
->val
= VCPU_SREG_GS
;
4219 /* Special instructions do their own operand decoding. */
4221 op
->type
= OP_NONE
; /* Disable writeback. */
4229 int x86_decode_insn(struct x86_emulate_ctxt
*ctxt
, void *insn
, int insn_len
)
4231 int rc
= X86EMUL_CONTINUE
;
4232 int mode
= ctxt
->mode
;
4233 int def_op_bytes
, def_ad_bytes
, goffset
, simd_prefix
;
4234 bool op_prefix
= false;
4235 struct opcode opcode
;
4237 ctxt
->memop
.type
= OP_NONE
;
4238 ctxt
->memopp
= NULL
;
4239 ctxt
->_eip
= ctxt
->eip
;
4240 ctxt
->fetch
.start
= ctxt
->_eip
;
4241 ctxt
->fetch
.end
= ctxt
->fetch
.start
+ insn_len
;
4242 ctxt
->opcode_len
= 1;
4244 memcpy(ctxt
->fetch
.data
, insn
, insn_len
);
4247 case X86EMUL_MODE_REAL
:
4248 case X86EMUL_MODE_VM86
:
4249 case X86EMUL_MODE_PROT16
:
4250 def_op_bytes
= def_ad_bytes
= 2;
4252 case X86EMUL_MODE_PROT32
:
4253 def_op_bytes
= def_ad_bytes
= 4;
4255 #ifdef CONFIG_X86_64
4256 case X86EMUL_MODE_PROT64
:
4262 return EMULATION_FAILED
;
4265 ctxt
->op_bytes
= def_op_bytes
;
4266 ctxt
->ad_bytes
= def_ad_bytes
;
4268 /* Legacy prefixes. */
4270 switch (ctxt
->b
= insn_fetch(u8
, ctxt
)) {
4271 case 0x66: /* operand-size override */
4273 /* switch between 2/4 bytes */
4274 ctxt
->op_bytes
= def_op_bytes
^ 6;
4276 case 0x67: /* address-size override */
4277 if (mode
== X86EMUL_MODE_PROT64
)
4278 /* switch between 4/8 bytes */
4279 ctxt
->ad_bytes
= def_ad_bytes
^ 12;
4281 /* switch between 2/4 bytes */
4282 ctxt
->ad_bytes
= def_ad_bytes
^ 6;
4284 case 0x26: /* ES override */
4285 case 0x2e: /* CS override */
4286 case 0x36: /* SS override */
4287 case 0x3e: /* DS override */
4288 set_seg_override(ctxt
, (ctxt
->b
>> 3) & 3);
4290 case 0x64: /* FS override */
4291 case 0x65: /* GS override */
4292 set_seg_override(ctxt
, ctxt
->b
& 7);
4294 case 0x40 ... 0x4f: /* REX */
4295 if (mode
!= X86EMUL_MODE_PROT64
)
4297 ctxt
->rex_prefix
= ctxt
->b
;
4299 case 0xf0: /* LOCK */
4300 ctxt
->lock_prefix
= 1;
4302 case 0xf2: /* REPNE/REPNZ */
4303 case 0xf3: /* REP/REPE/REPZ */
4304 ctxt
->rep_prefix
= ctxt
->b
;
4310 /* Any legacy prefix after a REX prefix nullifies its effect. */
4312 ctxt
->rex_prefix
= 0;
4318 if (ctxt
->rex_prefix
& 8)
4319 ctxt
->op_bytes
= 8; /* REX.W */
4321 /* Opcode byte(s). */
4322 opcode
= opcode_table
[ctxt
->b
];
4323 /* Two-byte opcode? */
4324 if (ctxt
->b
== 0x0f) {
4325 ctxt
->opcode_len
= 2;
4326 ctxt
->b
= insn_fetch(u8
, ctxt
);
4327 opcode
= twobyte_table
[ctxt
->b
];
4329 /* 0F_38 opcode map */
4330 if (ctxt
->b
== 0x38) {
4331 ctxt
->opcode_len
= 3;
4332 ctxt
->b
= insn_fetch(u8
, ctxt
);
4333 opcode
= opcode_map_0f_38
[ctxt
->b
];
4336 ctxt
->d
= opcode
.flags
;
4338 if (ctxt
->d
& ModRM
)
4339 ctxt
->modrm
= insn_fetch(u8
, ctxt
);
4341 /* vex-prefix instructions are not implemented */
4342 if (ctxt
->opcode_len
== 1 && (ctxt
->b
== 0xc5 || ctxt
->b
== 0xc4) &&
4343 (mode
== X86EMUL_MODE_PROT64
||
4344 (mode
>= X86EMUL_MODE_PROT16
&& (ctxt
->modrm
& 0x80)))) {
4348 while (ctxt
->d
& GroupMask
) {
4349 switch (ctxt
->d
& GroupMask
) {
4351 goffset
= (ctxt
->modrm
>> 3) & 7;
4352 opcode
= opcode
.u
.group
[goffset
];
4355 goffset
= (ctxt
->modrm
>> 3) & 7;
4356 if ((ctxt
->modrm
>> 6) == 3)
4357 opcode
= opcode
.u
.gdual
->mod3
[goffset
];
4359 opcode
= opcode
.u
.gdual
->mod012
[goffset
];
4362 goffset
= ctxt
->modrm
& 7;
4363 opcode
= opcode
.u
.group
[goffset
];
4366 if (ctxt
->rep_prefix
&& op_prefix
)
4367 return EMULATION_FAILED
;
4368 simd_prefix
= op_prefix
? 0x66 : ctxt
->rep_prefix
;
4369 switch (simd_prefix
) {
4370 case 0x00: opcode
= opcode
.u
.gprefix
->pfx_no
; break;
4371 case 0x66: opcode
= opcode
.u
.gprefix
->pfx_66
; break;
4372 case 0xf2: opcode
= opcode
.u
.gprefix
->pfx_f2
; break;
4373 case 0xf3: opcode
= opcode
.u
.gprefix
->pfx_f3
; break;
4377 if (ctxt
->modrm
> 0xbf)
4378 opcode
= opcode
.u
.esc
->high
[ctxt
->modrm
- 0xc0];
4380 opcode
= opcode
.u
.esc
->op
[(ctxt
->modrm
>> 3) & 7];
4383 return EMULATION_FAILED
;
4386 ctxt
->d
&= ~(u64
)GroupMask
;
4387 ctxt
->d
|= opcode
.flags
;
4390 ctxt
->execute
= opcode
.u
.execute
;
4391 ctxt
->check_perm
= opcode
.check_perm
;
4392 ctxt
->intercept
= opcode
.intercept
;
4395 if (ctxt
->d
== 0 || (ctxt
->d
& NotImpl
))
4396 return EMULATION_FAILED
;
4398 if (!(ctxt
->d
& EmulateOnUD
) && ctxt
->ud
)
4399 return EMULATION_FAILED
;
4401 if (mode
== X86EMUL_MODE_PROT64
&& (ctxt
->d
& Stack
))
4404 if (ctxt
->d
& Op3264
) {
4405 if (mode
== X86EMUL_MODE_PROT64
)
4412 ctxt
->op_bytes
= 16;
4413 else if (ctxt
->d
& Mmx
)
4416 /* ModRM and SIB bytes. */
4417 if (ctxt
->d
& ModRM
) {
4418 rc
= decode_modrm(ctxt
, &ctxt
->memop
);
4419 if (!ctxt
->has_seg_override
)
4420 set_seg_override(ctxt
, ctxt
->modrm_seg
);
4421 } else if (ctxt
->d
& MemAbs
)
4422 rc
= decode_abs(ctxt
, &ctxt
->memop
);
4423 if (rc
!= X86EMUL_CONTINUE
)
4426 if (!ctxt
->has_seg_override
)
4427 set_seg_override(ctxt
, VCPU_SREG_DS
);
4429 ctxt
->memop
.addr
.mem
.seg
= seg_override(ctxt
);
4431 if (ctxt
->memop
.type
== OP_MEM
&& ctxt
->ad_bytes
!= 8)
4432 ctxt
->memop
.addr
.mem
.ea
= (u32
)ctxt
->memop
.addr
.mem
.ea
;
4435 * Decode and fetch the source operand: register, memory
4438 rc
= decode_operand(ctxt
, &ctxt
->src
, (ctxt
->d
>> SrcShift
) & OpMask
);
4439 if (rc
!= X86EMUL_CONTINUE
)
4443 * Decode and fetch the second source operand: register, memory
4446 rc
= decode_operand(ctxt
, &ctxt
->src2
, (ctxt
->d
>> Src2Shift
) & OpMask
);
4447 if (rc
!= X86EMUL_CONTINUE
)
4450 /* Decode and fetch the destination operand: register or memory. */
4451 rc
= decode_operand(ctxt
, &ctxt
->dst
, (ctxt
->d
>> DstShift
) & OpMask
);
4454 if (ctxt
->memopp
&& ctxt
->memopp
->type
== OP_MEM
&& ctxt
->rip_relative
)
4455 ctxt
->memopp
->addr
.mem
.ea
+= ctxt
->_eip
;
4457 return (rc
!= X86EMUL_CONTINUE
) ? EMULATION_FAILED
: EMULATION_OK
;
4460 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4462 	return ctxt->d & PageTable;
4465 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4467 	/* The second termination condition only applies for REPE
4468 	 * and REPNE. Test if the repeat string operation prefix is
4469 	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
4470 	 * corresponding termination condition according to:
4471 	 * - if REPE/REPZ and ZF = 0 then done
4472 	 * - if REPNE/REPNZ and ZF = 1 then done
4474 	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4475 	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4476 	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
4477 		 ((ctxt->eflags & EFLG_ZF) == 0))
4478 		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
4479 		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4485 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4489 	ctxt->ops->get_fpu(ctxt);
4490 	asm volatile("1: fwait \n\t"
4492 		     ".pushsection .fixup,\"ax\" \n\t"
4494 		     "movb $1, %[fault] \n\t"
4497 		     _ASM_EXTABLE(1b, 3b)
4498 		     : [fault]"+qm"(fault));
4499 	ctxt->ops->put_fpu(ctxt);
4501 	if (unlikely(fault))
4502 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
4504 	return X86EMUL_CONTINUE;
4507 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4510 	if (op->type == OP_MM)
4511 		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4514 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4516 	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4517 	if (!(ctxt->d & ByteOp))
4518 		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4519 	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4520 	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4522 	    : "c"(ctxt->src2.val));
4523 	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4524 	if (!fop) /* exception is returned in fop variable */
4525 		return emulate_de(ctxt);
4526 	return X86EMUL_CONTINUE;
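/*
 * Note on the fastop() dispatch above (explanatory sketch): each fastop
 * family is assumed to be laid out as a run of FASTOP_SIZE-byte stubs,
 * one per operand width in the order 1, 2, 4, 8 bytes, as defined by the
 * FASTOP* macros earlier in this file.  __ffs(dst.bytes) gives 0, 1, 2 or
 * 3 for those widths, so for a 32-bit ADD the call lands at
 *
 *	fop = em_add + __ffs(4) * FASTOP_SIZE = em_add + 2 * FASTOP_SIZE;
 *
 * The asm block then materialises the guest's arithmetic flags around the
 * call and copies them back into ctxt->eflags afterwards.
 */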
4529 int x86_emulate_insn(struct x86_emulate_ctxt
*ctxt
)
4531 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
4532 int rc
= X86EMUL_CONTINUE
;
4533 int saved_dst_type
= ctxt
->dst
.type
;
4535 ctxt
->mem_read
.pos
= 0;
	if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
			(ctxt->d & Undefined)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
	    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
		rc = emulate_nm(ctxt);
		goto done;
	}

	if (ctxt->d & Mmx) {
		rc = flush_pending_x87_faults(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		/*
		 * Now that we know the FPU is exception safe, we can fetch
		 * operands from it.
		 */
		fetch_possible_mmx_operand(ctxt, &ctxt->src);
		fetch_possible_mmx_operand(ctxt, &ctxt->src2);
		if (!(ctxt->d & Mov))
			fetch_possible_mmx_operand(ctxt, &ctxt->dst);
	}
	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_PRE_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/* Privileged instructions can be executed only at CPL=0 */
	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

	/* Instruction can only be executed in protected mode */
	if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* Do instruction specific permission checks */
	if (ctxt->check_perm) {
		rc = ctxt->check_perm(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
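	/*
	 * A REP-prefixed string instruction with a (masked) count of zero
	 * performs no iterations at all: simply advance eip past it and
	 * finish without touching memory.
	 */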
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		/* All REP prefixes have the same first termination condition */
		if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
			ctxt->eip = ctxt->_eip;
			goto done;
		}
	}
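	/*
	 * Bring memory-resident source operands into the operand cache now;
	 * NoAccess instructions (e.g. lea-style address-only operands) skip
	 * the read on purpose.
	 */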
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}
	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:
	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
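	/*
	 * Dispatch: most opcodes have an ->execute callback installed at
	 * decode time.  For Fastop entries the callback field actually holds
	 * a pointer to one of the small asm stubs, so it is routed through
	 * fastop() to set up the register/flags calling convention.
	 */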
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
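	/*
	 * Legacy path: one-byte opcodes that have not been converted to
	 * ->execute callbacks are handled inline below; two- and three-byte
	 * opcodes jump to their own switch/label further down.
	 */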
	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}
	if (rc != X86EMUL_CONTINUE)
		goto done;
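	/*
	 * Writeback stage: commit the computed source (for the few SrcWrite
	 * instructions) and destination operands back to registers or guest
	 * memory, unless writeback is suppressed (NoWrite or OP_NONE).
	 */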
writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;
	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
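	/*
	 * For REP-prefixed string instructions, RCX is decremented by the
	 * number of iterations actually performed in this pass (string ops
	 * may batch several iterations per emulation round).
	 */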
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
				-count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
	}

	ctxt->eip = ctxt->_eip;
done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
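/*
 * Two-byte (0x0f-prefixed) opcodes without an ->execute callback are
 * handled here; control then returns to the common writeback path above.
 */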
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jcc (near) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xae:		/* clflush */
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
							(u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}
threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
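/*
 * The emulator works on a cached copy of the guest's general-purpose
 * registers (ctxt->_regs); these helpers let callers invalidate that cache
 * before emulation and flush any dirtied registers back to the vcpu
 * afterwards.
 */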
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}