// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operand */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
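
/*
 * Illustration (not part of the original file): the Dst*, Src* and misc
 * bits above are OR'ed into a single u64 descriptor per opcode in the
 * decode tables.  A plausible entry for a "mov reg, r/m" form would be
 *
 *	DstReg | SrcMem | ModRM | Mov
 *
 * i.e. the destination comes from the ModRM reg field, the source is
 * decoded as a ModRM memory operand, and the destination is write-only.
 */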
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
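
/*
 * A minimal sketch (not part of the original file) of how a caller reaches
 * the size-specific stub, assuming the b/w/l/q variants are emitted back to
 * back at FASTOP_SIZE-byte intervals as described above:
 *
 *	void (*fop)(struct fastop *) = ...;	<- points at the byte-size stub
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * so dst.bytes of 2/4/8 selects the stub at offset 1/2/3 * FASTOP_SIZE.
 */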
struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}

	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)
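
/*
 * For illustration (not from the original excerpt): a helper that runs with
 * a scratch copy of the guest flags merges only these bits back afterwards,
 * e.g.:
 *
 *	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
 */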
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define __FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"ret \n\t" \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_END \
	    ".popsection")

#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

#define FOPNOP() \
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))
#define FOP1E(op,  dst) \
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	__FOP_RET(#op)

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;
/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
	             "2:\n" \
	             ".pushsection .fixup, \"ax\"\n" \
	             "3: movl $1, %[_fault]\n" \
	             "   jmp  2b\n" \
	             ".popsection\n" \
	             _ASM_EXTABLE(1b, 3b) \
	             : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
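
/*
 * Example usage (a sketch, not part of the original excerpt): tolerate a
 * faulting instruction and report it as unhandleable instead of letting the
 * fault propagate, e.g. while waiting for pending x87 exceptions:
 *
 *	rc = asm_safe("fwait");
 */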
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}
static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;

	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}
/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})
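
/*
 * Example usage (a sketch, not part of the original excerpt): insn_fetch()
 * expands to a statement expression and relies on a local 'rc' and a
 * 'done:' label in the caller, e.g. in the ModRM decoder:
 *
 *	ctxt->modrm = insn_fetch(u8, ctxt);
 */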
955 * Given the 'reg' portion of a ModRM byte, and a register block, return a
956 * pointer into the block that addresses the relevant register.
957 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
959 static void *decode_register(struct x86_emulate_ctxt
*ctxt
, u8 modrm_reg
,
963 int highbyte_regs
= (ctxt
->rex_prefix
== 0) && byteop
;
965 if (highbyte_regs
&& modrm_reg
>= 4 && modrm_reg
< 8)
966 p
= (unsigned char *)reg_rmw(ctxt
, modrm_reg
& 3) + 1;
968 p
= reg_rmw(ctxt
, modrm_reg
);
972 static int read_descriptor(struct x86_emulate_ctxt
*ctxt
,
973 struct segmented_address addr
,
974 u16
*size
, unsigned long *address
, int op_bytes
)
981 rc
= segmented_read_std(ctxt
, addr
, size
, 2);
982 if (rc
!= X86EMUL_CONTINUE
)
985 rc
= segmented_read_std(ctxt
, addr
, address
, op_bytes
);
999 FASTOP1SRC2(mul
, mul_ex
);
1000 FASTOP1SRC2(imul
, imul_ex
);
1001 FASTOP1SRC2EX(div
, div_ex
);
1002 FASTOP1SRC2EX(idiv
, idiv_ex
);
1031 FASTOP2R(cmp
, cmp_r
);
1033 static int em_bsf_c(struct x86_emulate_ctxt
*ctxt
)
1035 /* If src is zero, do not writeback, but update flags */
1036 if (ctxt
->src
.val
== 0)
1037 ctxt
->dst
.type
= OP_NONE
;
1038 return fastop(ctxt
, em_bsf
);
1041 static int em_bsr_c(struct x86_emulate_ctxt
*ctxt
)
1043 /* If src is zero, do not writeback, but update flags */
1044 if (ctxt
->src
.val
== 0)
1045 ctxt
->dst
.type
= OP_NONE
;
1046 return fastop(ctxt
, em_bsr
);
1049 static __always_inline u8
test_cc(unsigned int condition
, unsigned long flags
)
1052 void (*fop
)(void) = (void *)em_setcc
+ 4 * (condition
& 0xf);
1054 flags
= (flags
& EFLAGS_MASK
) | X86_EFLAGS_IF
;
1055 asm("push %[flags]; popf; " CALL_NOSPEC
1056 : "=a"(rc
) : [thunk_target
]"r"(fop
), [flags
]"r"(flags
));
1060 static void fetch_register_operand(struct operand
*op
)
1062 switch (op
->bytes
) {
1064 op
->val
= *(u8
*)op
->addr
.reg
;
1067 op
->val
= *(u16
*)op
->addr
.reg
;
1070 op
->val
= *(u32
*)op
->addr
.reg
;
1073 op
->val
= *(u64
*)op
->addr
.reg
;
1078 static void read_sse_reg(struct x86_emulate_ctxt
*ctxt
, sse128_t
*data
, int reg
)
1081 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data
)); break;
1082 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data
)); break;
1083 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data
)); break;
1084 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data
)); break;
1085 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data
)); break;
1086 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data
)); break;
1087 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data
)); break;
1088 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data
)); break;
1089 #ifdef CONFIG_X86_64
1090 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data
)); break;
1091 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data
)); break;
1092 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data
)); break;
1093 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data
)); break;
1094 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data
)); break;
1095 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data
)); break;
1096 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data
)); break;
1097 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data
)); break;
1103 static void write_sse_reg(struct x86_emulate_ctxt
*ctxt
, sse128_t
*data
,
1107 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data
)); break;
1108 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data
)); break;
1109 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data
)); break;
1110 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data
)); break;
1111 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data
)); break;
1112 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data
)); break;
1113 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data
)); break;
1114 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data
)); break;
1115 #ifdef CONFIG_X86_64
1116 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data
)); break;
1117 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data
)); break;
1118 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data
)); break;
1119 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data
)); break;
1120 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data
)); break;
1121 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data
)); break;
1122 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data
)); break;
1123 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data
)); break;
1129 static void read_mmx_reg(struct x86_emulate_ctxt
*ctxt
, u64
*data
, int reg
)
1132 case 0: asm("movq %%mm0, %0" : "=m"(*data
)); break;
1133 case 1: asm("movq %%mm1, %0" : "=m"(*data
)); break;
1134 case 2: asm("movq %%mm2, %0" : "=m"(*data
)); break;
1135 case 3: asm("movq %%mm3, %0" : "=m"(*data
)); break;
1136 case 4: asm("movq %%mm4, %0" : "=m"(*data
)); break;
1137 case 5: asm("movq %%mm5, %0" : "=m"(*data
)); break;
1138 case 6: asm("movq %%mm6, %0" : "=m"(*data
)); break;
1139 case 7: asm("movq %%mm7, %0" : "=m"(*data
)); break;
1144 static void write_mmx_reg(struct x86_emulate_ctxt
*ctxt
, u64
*data
, int reg
)
1147 case 0: asm("movq %0, %%mm0" : : "m"(*data
)); break;
1148 case 1: asm("movq %0, %%mm1" : : "m"(*data
)); break;
1149 case 2: asm("movq %0, %%mm2" : : "m"(*data
)); break;
1150 case 3: asm("movq %0, %%mm3" : : "m"(*data
)); break;
1151 case 4: asm("movq %0, %%mm4" : : "m"(*data
)); break;
1152 case 5: asm("movq %0, %%mm5" : : "m"(*data
)); break;
1153 case 6: asm("movq %0, %%mm6" : : "m"(*data
)); break;
1154 case 7: asm("movq %0, %%mm7" : : "m"(*data
)); break;
1159 static int em_fninit(struct x86_emulate_ctxt
*ctxt
)
1161 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
1162 return emulate_nm(ctxt
);
1164 asm volatile("fninit");
1165 return X86EMUL_CONTINUE
;
1168 static int em_fnstcw(struct x86_emulate_ctxt
*ctxt
)
1172 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
1173 return emulate_nm(ctxt
);
1175 asm volatile("fnstcw %0": "+m"(fcw
));
1177 ctxt
->dst
.val
= fcw
;
1179 return X86EMUL_CONTINUE
;
1182 static int em_fnstsw(struct x86_emulate_ctxt
*ctxt
)
1186 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
1187 return emulate_nm(ctxt
);
1189 asm volatile("fnstsw %0": "+m"(fsw
));
1191 ctxt
->dst
.val
= fsw
;
1193 return X86EMUL_CONTINUE
;
1196 static void decode_register_operand(struct x86_emulate_ctxt
*ctxt
,
1199 unsigned reg
= ctxt
->modrm_reg
;
1201 if (!(ctxt
->d
& ModRM
))
1202 reg
= (ctxt
->b
& 7) | ((ctxt
->rex_prefix
& 1) << 3);
1204 if (ctxt
->d
& Sse
) {
1208 read_sse_reg(ctxt
, &op
->vec_val
, reg
);
1211 if (ctxt
->d
& Mmx
) {
1220 op
->bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
1221 op
->addr
.reg
= decode_register(ctxt
, reg
, ctxt
->d
& ByteOp
);
1223 fetch_register_operand(op
);
1224 op
->orig_val
= op
->val
;
1227 static void adjust_modrm_seg(struct x86_emulate_ctxt
*ctxt
, int base_reg
)
1229 if (base_reg
== VCPU_REGS_RSP
|| base_reg
== VCPU_REGS_RBP
)
1230 ctxt
->modrm_seg
= VCPU_SREG_SS
;
1233 static int decode_modrm(struct x86_emulate_ctxt
*ctxt
,
1237 int index_reg
, base_reg
, scale
;
1238 int rc
= X86EMUL_CONTINUE
;
1241 ctxt
->modrm_reg
= ((ctxt
->rex_prefix
<< 1) & 8); /* REX.R */
1242 index_reg
= (ctxt
->rex_prefix
<< 2) & 8; /* REX.X */
1243 base_reg
= (ctxt
->rex_prefix
<< 3) & 8; /* REX.B */
1245 ctxt
->modrm_mod
= (ctxt
->modrm
& 0xc0) >> 6;
1246 ctxt
->modrm_reg
|= (ctxt
->modrm
& 0x38) >> 3;
1247 ctxt
->modrm_rm
= base_reg
| (ctxt
->modrm
& 0x07);
1248 ctxt
->modrm_seg
= VCPU_SREG_DS
;
1250 if (ctxt
->modrm_mod
== 3 || (ctxt
->d
& NoMod
)) {
1252 op
->bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
1253 op
->addr
.reg
= decode_register(ctxt
, ctxt
->modrm_rm
,
1255 if (ctxt
->d
& Sse
) {
1258 op
->addr
.xmm
= ctxt
->modrm_rm
;
1259 read_sse_reg(ctxt
, &op
->vec_val
, ctxt
->modrm_rm
);
1262 if (ctxt
->d
& Mmx
) {
1265 op
->addr
.mm
= ctxt
->modrm_rm
& 7;
1268 fetch_register_operand(op
);
1274 if (ctxt
->ad_bytes
== 2) {
1275 unsigned bx
= reg_read(ctxt
, VCPU_REGS_RBX
);
1276 unsigned bp
= reg_read(ctxt
, VCPU_REGS_RBP
);
1277 unsigned si
= reg_read(ctxt
, VCPU_REGS_RSI
);
1278 unsigned di
= reg_read(ctxt
, VCPU_REGS_RDI
);
1280 /* 16-bit ModR/M decode. */
1281 switch (ctxt
->modrm_mod
) {
1283 if (ctxt
->modrm_rm
== 6)
1284 modrm_ea
+= insn_fetch(u16
, ctxt
);
1287 modrm_ea
+= insn_fetch(s8
, ctxt
);
1290 modrm_ea
+= insn_fetch(u16
, ctxt
);
1293 switch (ctxt
->modrm_rm
) {
1295 modrm_ea
+= bx
+ si
;
1298 modrm_ea
+= bx
+ di
;
1301 modrm_ea
+= bp
+ si
;
1304 modrm_ea
+= bp
+ di
;
1313 if (ctxt
->modrm_mod
!= 0)
1320 if (ctxt
->modrm_rm
== 2 || ctxt
->modrm_rm
== 3 ||
1321 (ctxt
->modrm_rm
== 6 && ctxt
->modrm_mod
!= 0))
1322 ctxt
->modrm_seg
= VCPU_SREG_SS
;
1323 modrm_ea
= (u16
)modrm_ea
;
1325 /* 32/64-bit ModR/M decode. */
1326 if ((ctxt
->modrm_rm
& 7) == 4) {
1327 sib
= insn_fetch(u8
, ctxt
);
1328 index_reg
|= (sib
>> 3) & 7;
1329 base_reg
|= sib
& 7;
1332 if ((base_reg
& 7) == 5 && ctxt
->modrm_mod
== 0)
1333 modrm_ea
+= insn_fetch(s32
, ctxt
);
1335 modrm_ea
+= reg_read(ctxt
, base_reg
);
1336 adjust_modrm_seg(ctxt
, base_reg
);
1337 /* Increment ESP on POP [ESP] */
1338 if ((ctxt
->d
& IncSP
) &&
1339 base_reg
== VCPU_REGS_RSP
)
1340 modrm_ea
+= ctxt
->op_bytes
;
1343 modrm_ea
+= reg_read(ctxt
, index_reg
) << scale
;
1344 } else if ((ctxt
->modrm_rm
& 7) == 5 && ctxt
->modrm_mod
== 0) {
1345 modrm_ea
+= insn_fetch(s32
, ctxt
);
1346 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
1347 ctxt
->rip_relative
= 1;
1349 base_reg
= ctxt
->modrm_rm
;
1350 modrm_ea
+= reg_read(ctxt
, base_reg
);
1351 adjust_modrm_seg(ctxt
, base_reg
);
1353 switch (ctxt
->modrm_mod
) {
1355 modrm_ea
+= insn_fetch(s8
, ctxt
);
1358 modrm_ea
+= insn_fetch(s32
, ctxt
);
1362 op
->addr
.mem
.ea
= modrm_ea
;
1363 if (ctxt
->ad_bytes
!= 8)
1364 ctxt
->memop
.addr
.mem
.ea
= (u32
)ctxt
->memop
.addr
.mem
.ea
;
1370 static int decode_abs(struct x86_emulate_ctxt
*ctxt
,
1373 int rc
= X86EMUL_CONTINUE
;
1376 switch (ctxt
->ad_bytes
) {
1378 op
->addr
.mem
.ea
= insn_fetch(u16
, ctxt
);
1381 op
->addr
.mem
.ea
= insn_fetch(u32
, ctxt
);
1384 op
->addr
.mem
.ea
= insn_fetch(u64
, ctxt
);
1391 static void fetch_bit_operand(struct x86_emulate_ctxt
*ctxt
)
1395 if (ctxt
->dst
.type
== OP_MEM
&& ctxt
->src
.type
== OP_REG
) {
1396 mask
= ~((long)ctxt
->dst
.bytes
* 8 - 1);
1398 if (ctxt
->src
.bytes
== 2)
1399 sv
= (s16
)ctxt
->src
.val
& (s16
)mask
;
1400 else if (ctxt
->src
.bytes
== 4)
1401 sv
= (s32
)ctxt
->src
.val
& (s32
)mask
;
1403 sv
= (s64
)ctxt
->src
.val
& (s64
)mask
;
1405 ctxt
->dst
.addr
.mem
.ea
= address_mask(ctxt
,
1406 ctxt
->dst
.addr
.mem
.ea
+ (sv
>> 3));
1409 /* only subword offset */
1410 ctxt
->src
.val
&= (ctxt
->dst
.bytes
<< 3) - 1;
1413 static int read_emulated(struct x86_emulate_ctxt
*ctxt
,
1414 unsigned long addr
, void *dest
, unsigned size
)
1417 struct read_cache
*mc
= &ctxt
->mem_read
;
1419 if (mc
->pos
< mc
->end
)
1422 WARN_ON((mc
->end
+ size
) >= sizeof(mc
->data
));
1424 rc
= ctxt
->ops
->read_emulated(ctxt
, addr
, mc
->data
+ mc
->end
, size
,
1426 if (rc
!= X86EMUL_CONTINUE
)
1432 memcpy(dest
, mc
->data
+ mc
->pos
, size
);
1434 return X86EMUL_CONTINUE
;
1437 static int segmented_read(struct x86_emulate_ctxt
*ctxt
,
1438 struct segmented_address addr
,
1445 rc
= linearize(ctxt
, addr
, size
, false, &linear
);
1446 if (rc
!= X86EMUL_CONTINUE
)
1448 return read_emulated(ctxt
, linear
, data
, size
);
1451 static int segmented_write(struct x86_emulate_ctxt
*ctxt
,
1452 struct segmented_address addr
,
1459 rc
= linearize(ctxt
, addr
, size
, true, &linear
);
1460 if (rc
!= X86EMUL_CONTINUE
)
1462 return ctxt
->ops
->write_emulated(ctxt
, linear
, data
, size
,
1466 static int segmented_cmpxchg(struct x86_emulate_ctxt
*ctxt
,
1467 struct segmented_address addr
,
1468 const void *orig_data
, const void *data
,
1474 rc
= linearize(ctxt
, addr
, size
, true, &linear
);
1475 if (rc
!= X86EMUL_CONTINUE
)
1477 return ctxt
->ops
->cmpxchg_emulated(ctxt
, linear
, orig_data
, data
,
1478 size
, &ctxt
->exception
);
1481 static int pio_in_emulated(struct x86_emulate_ctxt
*ctxt
,
1482 unsigned int size
, unsigned short port
,
1485 struct read_cache
*rc
= &ctxt
->io_read
;
1487 if (rc
->pos
== rc
->end
) { /* refill pio read ahead */
1488 unsigned int in_page
, n
;
1489 unsigned int count
= ctxt
->rep_prefix
?
1490 address_mask(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
)) : 1;
1491 in_page
= (ctxt
->eflags
& X86_EFLAGS_DF
) ?
1492 offset_in_page(reg_read(ctxt
, VCPU_REGS_RDI
)) :
1493 PAGE_SIZE
- offset_in_page(reg_read(ctxt
, VCPU_REGS_RDI
));
1494 n
= min3(in_page
, (unsigned int)sizeof(rc
->data
) / size
, count
);
1497 rc
->pos
= rc
->end
= 0;
1498 if (!ctxt
->ops
->pio_in_emulated(ctxt
, size
, port
, rc
->data
, n
))
1503 if (ctxt
->rep_prefix
&& (ctxt
->d
& String
) &&
1504 !(ctxt
->eflags
& X86_EFLAGS_DF
)) {
1505 ctxt
->dst
.data
= rc
->data
+ rc
->pos
;
1506 ctxt
->dst
.type
= OP_MEM_STR
;
1507 ctxt
->dst
.count
= (rc
->end
- rc
->pos
) / size
;
1510 memcpy(dest
, rc
->data
+ rc
->pos
, size
);
1516 static int read_interrupt_descriptor(struct x86_emulate_ctxt
*ctxt
,
1517 u16 index
, struct desc_struct
*desc
)
1522 ctxt
->ops
->get_idt(ctxt
, &dt
);
1524 if (dt
.size
< index
* 8 + 7)
1525 return emulate_gp(ctxt
, index
<< 3 | 0x2);
1527 addr
= dt
.address
+ index
* 8;
1528 return linear_read_system(ctxt
, addr
, desc
, sizeof(*desc
));
1531 static void get_descriptor_table_ptr(struct x86_emulate_ctxt
*ctxt
,
1532 u16 selector
, struct desc_ptr
*dt
)
1534 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
1537 if (selector
& 1 << 2) {
1538 struct desc_struct desc
;
1541 memset(dt
, 0, sizeof(*dt
));
1542 if (!ops
->get_segment(ctxt
, &sel
, &desc
, &base3
,
1546 dt
->size
= desc_limit_scaled(&desc
); /* what if limit > 65535? */
1547 dt
->address
= get_desc_base(&desc
) | ((u64
)base3
<< 32);
1549 ops
->get_gdt(ctxt
, dt
);
1552 static int get_descriptor_ptr(struct x86_emulate_ctxt
*ctxt
,
1553 u16 selector
, ulong
*desc_addr_p
)
1556 u16 index
= selector
>> 3;
1559 get_descriptor_table_ptr(ctxt
, selector
, &dt
);
1561 if (dt
.size
< index
* 8 + 7)
1562 return emulate_gp(ctxt
, selector
& 0xfffc);
1564 addr
= dt
.address
+ index
* 8;
1566 #ifdef CONFIG_X86_64
1567 if (addr
>> 32 != 0) {
1570 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
1571 if (!(efer
& EFER_LMA
))
1576 *desc_addr_p
= addr
;
1577 return X86EMUL_CONTINUE
;
1580 /* allowed just for 8 bytes segments */
1581 static int read_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1582 u16 selector
, struct desc_struct
*desc
,
1587 rc
= get_descriptor_ptr(ctxt
, selector
, desc_addr_p
);
1588 if (rc
!= X86EMUL_CONTINUE
)
1591 return linear_read_system(ctxt
, *desc_addr_p
, desc
, sizeof(*desc
));
1594 /* allowed just for 8 bytes segments */
1595 static int write_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1596 u16 selector
, struct desc_struct
*desc
)
1601 rc
= get_descriptor_ptr(ctxt
, selector
, &addr
);
1602 if (rc
!= X86EMUL_CONTINUE
)
1605 return linear_write_system(ctxt
, addr
, desc
, sizeof(*desc
));
1608 static int __load_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1609 u16 selector
, int seg
, u8 cpl
,
1610 enum x86_transfer_type transfer
,
1611 struct desc_struct
*desc
)
1613 struct desc_struct seg_desc
, old_desc
;
1615 unsigned err_vec
= GP_VECTOR
;
1617 bool null_selector
= !(selector
& ~0x3); /* 0000-0003 are null */
1623 memset(&seg_desc
, 0, sizeof(seg_desc
));
1625 if (ctxt
->mode
== X86EMUL_MODE_REAL
) {
1626 /* set real mode segment descriptor (keep limit etc. for
1628 ctxt
->ops
->get_segment(ctxt
, &dummy
, &seg_desc
, NULL
, seg
);
1629 set_desc_base(&seg_desc
, selector
<< 4);
1631 } else if (seg
<= VCPU_SREG_GS
&& ctxt
->mode
== X86EMUL_MODE_VM86
) {
1632 /* VM86 needs a clean new segment descriptor */
1633 set_desc_base(&seg_desc
, selector
<< 4);
1634 set_desc_limit(&seg_desc
, 0xffff);
1644 /* TR should be in GDT only */
1645 if (seg
== VCPU_SREG_TR
&& (selector
& (1 << 2)))
1648 /* NULL selector is not valid for TR, CS and (except for long mode) SS */
1649 if (null_selector
) {
1650 if (seg
== VCPU_SREG_CS
|| seg
== VCPU_SREG_TR
)
1653 if (seg
== VCPU_SREG_SS
) {
1654 if (ctxt
->mode
!= X86EMUL_MODE_PROT64
|| rpl
!= cpl
)
1658 * ctxt->ops->set_segment expects the CPL to be in
1659 * SS.DPL, so fake an expand-up 32-bit data segment.
1669 /* Skip all following checks */
1673 ret
= read_segment_descriptor(ctxt
, selector
, &seg_desc
, &desc_addr
);
1674 if (ret
!= X86EMUL_CONTINUE
)
1677 err_code
= selector
& 0xfffc;
1678 err_vec
= (transfer
== X86_TRANSFER_TASK_SWITCH
) ? TS_VECTOR
:
1681 /* can't load system descriptor into segment selector */
1682 if (seg
<= VCPU_SREG_GS
&& !seg_desc
.s
) {
1683 if (transfer
== X86_TRANSFER_CALL_JMP
)
1684 return X86EMUL_UNHANDLEABLE
;
1689 err_vec
= (seg
== VCPU_SREG_SS
) ? SS_VECTOR
: NP_VECTOR
;
1698 * segment is not a writable data segment or segment
1699 * selector's RPL != CPL or segment selector's RPL != CPL
1701 if (rpl
!= cpl
|| (seg_desc
.type
& 0xa) != 0x2 || dpl
!= cpl
)
1705 if (!(seg_desc
.type
& 8))
1708 if (seg_desc
.type
& 4) {
1714 if (rpl
> cpl
|| dpl
!= cpl
)
1717 /* in long-mode d/b must be clear if l is set */
1718 if (seg_desc
.d
&& seg_desc
.l
) {
1721 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
1722 if (efer
& EFER_LMA
)
1726 /* CS(RPL) <- CPL */
1727 selector
= (selector
& 0xfffc) | cpl
;
1730 if (seg_desc
.s
|| (seg_desc
.type
!= 1 && seg_desc
.type
!= 9))
1732 old_desc
= seg_desc
;
1733 seg_desc
.type
|= 2; /* busy */
1734 ret
= ctxt
->ops
->cmpxchg_emulated(ctxt
, desc_addr
, &old_desc
, &seg_desc
,
1735 sizeof(seg_desc
), &ctxt
->exception
);
1736 if (ret
!= X86EMUL_CONTINUE
)
1739 case VCPU_SREG_LDTR
:
1740 if (seg_desc
.s
|| seg_desc
.type
!= 2)
1743 default: /* DS, ES, FS, or GS */
1745 * segment is not a data or readable code segment or
1746 * ((segment is a data or nonconforming code segment)
1747 * and (both RPL and CPL > DPL))
1749 if ((seg_desc
.type
& 0xa) == 0x8 ||
1750 (((seg_desc
.type
& 0xc) != 0xc) &&
1751 (rpl
> dpl
&& cpl
> dpl
)))
1757 /* mark segment as accessed */
1758 if (!(seg_desc
.type
& 1)) {
1760 ret
= write_segment_descriptor(ctxt
, selector
,
1762 if (ret
!= X86EMUL_CONTINUE
)
1765 } else if (ctxt
->mode
== X86EMUL_MODE_PROT64
) {
1766 ret
= linear_read_system(ctxt
, desc_addr
+8, &base3
, sizeof(base3
));
1767 if (ret
!= X86EMUL_CONTINUE
)
1769 if (emul_is_noncanonical_address(get_desc_base(&seg_desc
) |
1770 ((u64
)base3
<< 32), ctxt
))
1771 return emulate_gp(ctxt
, 0);
1774 ctxt
->ops
->set_segment(ctxt
, selector
, &seg_desc
, base3
, seg
);
1777 return X86EMUL_CONTINUE
;
1779 return emulate_exception(ctxt
, err_vec
, err_code
, true);
1782 static int load_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1783 u16 selector
, int seg
)
1785 u8 cpl
= ctxt
->ops
->cpl(ctxt
);
1788 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1789 * they can load it at CPL<3 (Intel's manual says only LSS can,
1792 * However, the Intel manual says that putting IST=1/DPL=3 in
1793 * an interrupt gate will result in SS=3 (the AMD manual instead
1794 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1795 * and only forbid it here.
1797 if (seg
== VCPU_SREG_SS
&& selector
== 3 &&
1798 ctxt
->mode
== X86EMUL_MODE_PROT64
)
1799 return emulate_exception(ctxt
, GP_VECTOR
, 0, true);
1801 return __load_segment_descriptor(ctxt
, selector
, seg
, cpl
,
1802 X86_TRANSFER_NONE
, NULL
);
1805 static void write_register_operand(struct operand
*op
)
1807 return assign_register(op
->addr
.reg
, op
->val
, op
->bytes
);
1810 static int writeback(struct x86_emulate_ctxt
*ctxt
, struct operand
*op
)
1814 write_register_operand(op
);
1817 if (ctxt
->lock_prefix
)
1818 return segmented_cmpxchg(ctxt
,
1824 return segmented_write(ctxt
,
1830 return segmented_write(ctxt
,
1833 op
->bytes
* op
->count
);
1836 write_sse_reg(ctxt
, &op
->vec_val
, op
->addr
.xmm
);
1839 write_mmx_reg(ctxt
, &op
->mm_val
, op
->addr
.mm
);
1847 return X86EMUL_CONTINUE
;
1850 static int push(struct x86_emulate_ctxt
*ctxt
, void *data
, int bytes
)
1852 struct segmented_address addr
;
1854 rsp_increment(ctxt
, -bytes
);
1855 addr
.ea
= reg_read(ctxt
, VCPU_REGS_RSP
) & stack_mask(ctxt
);
1856 addr
.seg
= VCPU_SREG_SS
;
1858 return segmented_write(ctxt
, addr
, data
, bytes
);
1861 static int em_push(struct x86_emulate_ctxt
*ctxt
)
1863 /* Disable writeback. */
1864 ctxt
->dst
.type
= OP_NONE
;
1865 return push(ctxt
, &ctxt
->src
.val
, ctxt
->op_bytes
);
1868 static int emulate_pop(struct x86_emulate_ctxt
*ctxt
,
1869 void *dest
, int len
)
1872 struct segmented_address addr
;
1874 addr
.ea
= reg_read(ctxt
, VCPU_REGS_RSP
) & stack_mask(ctxt
);
1875 addr
.seg
= VCPU_SREG_SS
;
1876 rc
= segmented_read(ctxt
, addr
, dest
, len
);
1877 if (rc
!= X86EMUL_CONTINUE
)
1880 rsp_increment(ctxt
, len
);
1884 static int em_pop(struct x86_emulate_ctxt
*ctxt
)
1886 return emulate_pop(ctxt
, &ctxt
->dst
.val
, ctxt
->op_bytes
);
1889 static int emulate_popf(struct x86_emulate_ctxt
*ctxt
,
1890 void *dest
, int len
)
1893 unsigned long val
, change_mask
;
1894 int iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> X86_EFLAGS_IOPL_BIT
;
1895 int cpl
= ctxt
->ops
->cpl(ctxt
);
1897 rc
= emulate_pop(ctxt
, &val
, len
);
1898 if (rc
!= X86EMUL_CONTINUE
)
1901 change_mask
= X86_EFLAGS_CF
| X86_EFLAGS_PF
| X86_EFLAGS_AF
|
1902 X86_EFLAGS_ZF
| X86_EFLAGS_SF
| X86_EFLAGS_OF
|
1903 X86_EFLAGS_TF
| X86_EFLAGS_DF
| X86_EFLAGS_NT
|
1904 X86_EFLAGS_AC
| X86_EFLAGS_ID
;
1906 switch(ctxt
->mode
) {
1907 case X86EMUL_MODE_PROT64
:
1908 case X86EMUL_MODE_PROT32
:
1909 case X86EMUL_MODE_PROT16
:
1911 change_mask
|= X86_EFLAGS_IOPL
;
1913 change_mask
|= X86_EFLAGS_IF
;
1915 case X86EMUL_MODE_VM86
:
1917 return emulate_gp(ctxt
, 0);
1918 change_mask
|= X86_EFLAGS_IF
;
1920 default: /* real mode */
1921 change_mask
|= (X86_EFLAGS_IOPL
| X86_EFLAGS_IF
);
1925 *(unsigned long *)dest
=
1926 (ctxt
->eflags
& ~change_mask
) | (val
& change_mask
);
1931 static int em_popf(struct x86_emulate_ctxt
*ctxt
)
1933 ctxt
->dst
.type
= OP_REG
;
1934 ctxt
->dst
.addr
.reg
= &ctxt
->eflags
;
1935 ctxt
->dst
.bytes
= ctxt
->op_bytes
;
1936 return emulate_popf(ctxt
, &ctxt
->dst
.val
, ctxt
->op_bytes
);
1939 static int em_enter(struct x86_emulate_ctxt
*ctxt
)
1942 unsigned frame_size
= ctxt
->src
.val
;
1943 unsigned nesting_level
= ctxt
->src2
.val
& 31;
1947 return X86EMUL_UNHANDLEABLE
;
1949 rbp
= reg_read(ctxt
, VCPU_REGS_RBP
);
1950 rc
= push(ctxt
, &rbp
, stack_size(ctxt
));
1951 if (rc
!= X86EMUL_CONTINUE
)
1953 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RBP
), reg_read(ctxt
, VCPU_REGS_RSP
),
1955 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RSP
),
1956 reg_read(ctxt
, VCPU_REGS_RSP
) - frame_size
,
1958 return X86EMUL_CONTINUE
;
1961 static int em_leave(struct x86_emulate_ctxt
*ctxt
)
1963 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RSP
), reg_read(ctxt
, VCPU_REGS_RBP
),
1965 return emulate_pop(ctxt
, reg_rmw(ctxt
, VCPU_REGS_RBP
), ctxt
->op_bytes
);
1968 static int em_push_sreg(struct x86_emulate_ctxt
*ctxt
)
1970 int seg
= ctxt
->src2
.val
;
1972 ctxt
->src
.val
= get_segment_selector(ctxt
, seg
);
1973 if (ctxt
->op_bytes
== 4) {
1974 rsp_increment(ctxt
, -2);
1978 return em_push(ctxt
);
1981 static int em_pop_sreg(struct x86_emulate_ctxt
*ctxt
)
1983 int seg
= ctxt
->src2
.val
;
1984 unsigned long selector
;
1987 rc
= emulate_pop(ctxt
, &selector
, 2);
1988 if (rc
!= X86EMUL_CONTINUE
)
1991 if (ctxt
->modrm_reg
== VCPU_SREG_SS
)
1992 ctxt
->interruptibility
= KVM_X86_SHADOW_INT_MOV_SS
;
1993 if (ctxt
->op_bytes
> 2)
1994 rsp_increment(ctxt
, ctxt
->op_bytes
- 2);
1996 rc
= load_segment_descriptor(ctxt
, (u16
)selector
, seg
);
2000 static int em_pusha(struct x86_emulate_ctxt
*ctxt
)
2002 unsigned long old_esp
= reg_read(ctxt
, VCPU_REGS_RSP
);
2003 int rc
= X86EMUL_CONTINUE
;
2004 int reg
= VCPU_REGS_RAX
;
2006 while (reg
<= VCPU_REGS_RDI
) {
2007 (reg
== VCPU_REGS_RSP
) ?
2008 (ctxt
->src
.val
= old_esp
) : (ctxt
->src
.val
= reg_read(ctxt
, reg
));
2011 if (rc
!= X86EMUL_CONTINUE
)
2020 static int em_pushf(struct x86_emulate_ctxt
*ctxt
)
2022 ctxt
->src
.val
= (unsigned long)ctxt
->eflags
& ~X86_EFLAGS_VM
;
2023 return em_push(ctxt
);
2026 static int em_popa(struct x86_emulate_ctxt
*ctxt
)
2028 int rc
= X86EMUL_CONTINUE
;
2029 int reg
= VCPU_REGS_RDI
;
2032 while (reg
>= VCPU_REGS_RAX
) {
2033 if (reg
== VCPU_REGS_RSP
) {
2034 rsp_increment(ctxt
, ctxt
->op_bytes
);
2038 rc
= emulate_pop(ctxt
, &val
, ctxt
->op_bytes
);
2039 if (rc
!= X86EMUL_CONTINUE
)
2041 assign_register(reg_rmw(ctxt
, reg
), val
, ctxt
->op_bytes
);
2047 static int __emulate_int_real(struct x86_emulate_ctxt
*ctxt
, int irq
)
2049 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2056 /* TODO: Add limit checks */
2057 ctxt
->src
.val
= ctxt
->eflags
;
2059 if (rc
!= X86EMUL_CONTINUE
)
2062 ctxt
->eflags
&= ~(X86_EFLAGS_IF
| X86_EFLAGS_TF
| X86_EFLAGS_AC
);
2064 ctxt
->src
.val
= get_segment_selector(ctxt
, VCPU_SREG_CS
);
2066 if (rc
!= X86EMUL_CONTINUE
)
2069 ctxt
->src
.val
= ctxt
->_eip
;
2071 if (rc
!= X86EMUL_CONTINUE
)
2074 ops
->get_idt(ctxt
, &dt
);
2076 eip_addr
= dt
.address
+ (irq
<< 2);
2077 cs_addr
= dt
.address
+ (irq
<< 2) + 2;
2079 rc
= linear_read_system(ctxt
, cs_addr
, &cs
, 2);
2080 if (rc
!= X86EMUL_CONTINUE
)
2083 rc
= linear_read_system(ctxt
, eip_addr
, &eip
, 2);
2084 if (rc
!= X86EMUL_CONTINUE
)
2087 rc
= load_segment_descriptor(ctxt
, cs
, VCPU_SREG_CS
);
2088 if (rc
!= X86EMUL_CONTINUE
)
2096 int emulate_int_real(struct x86_emulate_ctxt
*ctxt
, int irq
)
2100 invalidate_registers(ctxt
);
2101 rc
= __emulate_int_real(ctxt
, irq
);
2102 if (rc
== X86EMUL_CONTINUE
)
2103 writeback_registers(ctxt
);
2107 static int emulate_int(struct x86_emulate_ctxt
*ctxt
, int irq
)
2109 switch(ctxt
->mode
) {
2110 case X86EMUL_MODE_REAL
:
2111 return __emulate_int_real(ctxt
, irq
);
2112 case X86EMUL_MODE_VM86
:
2113 case X86EMUL_MODE_PROT16
:
2114 case X86EMUL_MODE_PROT32
:
2115 case X86EMUL_MODE_PROT64
:
2117 /* Protected mode interrupts unimplemented yet */
2118 return X86EMUL_UNHANDLEABLE
;
2122 static int emulate_iret_real(struct x86_emulate_ctxt
*ctxt
)
2124 int rc
= X86EMUL_CONTINUE
;
2125 unsigned long temp_eip
= 0;
2126 unsigned long temp_eflags
= 0;
2127 unsigned long cs
= 0;
2128 unsigned long mask
= X86_EFLAGS_CF
| X86_EFLAGS_PF
| X86_EFLAGS_AF
|
2129 X86_EFLAGS_ZF
| X86_EFLAGS_SF
| X86_EFLAGS_TF
|
2130 X86_EFLAGS_IF
| X86_EFLAGS_DF
| X86_EFLAGS_OF
|
2131 X86_EFLAGS_IOPL
| X86_EFLAGS_NT
| X86_EFLAGS_RF
|
2132 X86_EFLAGS_AC
| X86_EFLAGS_ID
|
2134 unsigned long vm86_mask
= X86_EFLAGS_VM
| X86_EFLAGS_VIF
|
2137 /* TODO: Add stack limit check */
2139 rc
= emulate_pop(ctxt
, &temp_eip
, ctxt
->op_bytes
);
2141 if (rc
!= X86EMUL_CONTINUE
)
2144 if (temp_eip
& ~0xffff)
2145 return emulate_gp(ctxt
, 0);
2147 rc
= emulate_pop(ctxt
, &cs
, ctxt
->op_bytes
);
2149 if (rc
!= X86EMUL_CONTINUE
)
2152 rc
= emulate_pop(ctxt
, &temp_eflags
, ctxt
->op_bytes
);
2154 if (rc
!= X86EMUL_CONTINUE
)
2157 rc
= load_segment_descriptor(ctxt
, (u16
)cs
, VCPU_SREG_CS
);
2159 if (rc
!= X86EMUL_CONTINUE
)
2162 ctxt
->_eip
= temp_eip
;
2164 if (ctxt
->op_bytes
== 4)
2165 ctxt
->eflags
= ((temp_eflags
& mask
) | (ctxt
->eflags
& vm86_mask
));
2166 else if (ctxt
->op_bytes
== 2) {
2167 ctxt
->eflags
&= ~0xffff;
2168 ctxt
->eflags
|= temp_eflags
;
2171 ctxt
->eflags
&= ~EFLG_RESERVED_ZEROS_MASK
; /* Clear reserved zeros */
2172 ctxt
->eflags
|= X86_EFLAGS_FIXED
;
2173 ctxt
->ops
->set_nmi_mask(ctxt
, false);
2178 static int em_iret(struct x86_emulate_ctxt
*ctxt
)
2180 switch(ctxt
->mode
) {
2181 case X86EMUL_MODE_REAL
:
2182 return emulate_iret_real(ctxt
);
2183 case X86EMUL_MODE_VM86
:
2184 case X86EMUL_MODE_PROT16
:
2185 case X86EMUL_MODE_PROT32
:
2186 case X86EMUL_MODE_PROT64
:
2188 /* iret from protected mode unimplemented yet */
2189 return X86EMUL_UNHANDLEABLE
;
2193 static int em_jmp_far(struct x86_emulate_ctxt
*ctxt
)
2197 struct desc_struct new_desc
;
2198 u8 cpl
= ctxt
->ops
->cpl(ctxt
);
2200 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
2202 rc
= __load_segment_descriptor(ctxt
, sel
, VCPU_SREG_CS
, cpl
,
2203 X86_TRANSFER_CALL_JMP
,
2205 if (rc
!= X86EMUL_CONTINUE
)
2208 rc
= assign_eip_far(ctxt
, ctxt
->src
.val
, &new_desc
);
2209 /* Error handling is not implemented. */
2210 if (rc
!= X86EMUL_CONTINUE
)
2211 return X86EMUL_UNHANDLEABLE
;
2216 static int em_jmp_abs(struct x86_emulate_ctxt
*ctxt
)
2218 return assign_eip_near(ctxt
, ctxt
->src
.val
);
2221 static int em_call_near_abs(struct x86_emulate_ctxt
*ctxt
)
2226 old_eip
= ctxt
->_eip
;
2227 rc
= assign_eip_near(ctxt
, ctxt
->src
.val
);
2228 if (rc
!= X86EMUL_CONTINUE
)
2230 ctxt
->src
.val
= old_eip
;
2235 static int em_cmpxchg8b(struct x86_emulate_ctxt
*ctxt
)
2237 u64 old
= ctxt
->dst
.orig_val64
;
2239 if (ctxt
->dst
.bytes
== 16)
2240 return X86EMUL_UNHANDLEABLE
;
2242 if (((u32
) (old
>> 0) != (u32
) reg_read(ctxt
, VCPU_REGS_RAX
)) ||
2243 ((u32
) (old
>> 32) != (u32
) reg_read(ctxt
, VCPU_REGS_RDX
))) {
2244 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
) (old
>> 0);
2245 *reg_write(ctxt
, VCPU_REGS_RDX
) = (u32
) (old
>> 32);
2246 ctxt
->eflags
&= ~X86_EFLAGS_ZF
;
2248 ctxt
->dst
.val64
= ((u64
)reg_read(ctxt
, VCPU_REGS_RCX
) << 32) |
2249 (u32
) reg_read(ctxt
, VCPU_REGS_RBX
);
2251 ctxt
->eflags
|= X86_EFLAGS_ZF
;
2253 return X86EMUL_CONTINUE
;
2256 static int em_ret(struct x86_emulate_ctxt
*ctxt
)
2261 rc
= emulate_pop(ctxt
, &eip
, ctxt
->op_bytes
);
2262 if (rc
!= X86EMUL_CONTINUE
)
2265 return assign_eip_near(ctxt
, eip
);
2268 static int em_ret_far(struct x86_emulate_ctxt
*ctxt
)
2271 unsigned long eip
, cs
;
2272 int cpl
= ctxt
->ops
->cpl(ctxt
);
2273 struct desc_struct new_desc
;
2275 rc
= emulate_pop(ctxt
, &eip
, ctxt
->op_bytes
);
2276 if (rc
!= X86EMUL_CONTINUE
)
2278 rc
= emulate_pop(ctxt
, &cs
, ctxt
->op_bytes
);
2279 if (rc
!= X86EMUL_CONTINUE
)
2281 /* Outer-privilege level return is not implemented */
2282 if (ctxt
->mode
>= X86EMUL_MODE_PROT16
&& (cs
& 3) > cpl
)
2283 return X86EMUL_UNHANDLEABLE
;
2284 rc
= __load_segment_descriptor(ctxt
, (u16
)cs
, VCPU_SREG_CS
, cpl
,
2287 if (rc
!= X86EMUL_CONTINUE
)
2289 rc
= assign_eip_far(ctxt
, eip
, &new_desc
);
2290 /* Error handling is not implemented. */
2291 if (rc
!= X86EMUL_CONTINUE
)
2292 return X86EMUL_UNHANDLEABLE
;
2297 static int em_ret_far_imm(struct x86_emulate_ctxt
*ctxt
)
2301 rc
= em_ret_far(ctxt
);
2302 if (rc
!= X86EMUL_CONTINUE
)
2304 rsp_increment(ctxt
, ctxt
->src
.val
);
2305 return X86EMUL_CONTINUE
;
2308 static int em_cmpxchg(struct x86_emulate_ctxt
*ctxt
)
2310 /* Save real source value, then compare EAX against destination. */
2311 ctxt
->dst
.orig_val
= ctxt
->dst
.val
;
2312 ctxt
->dst
.val
= reg_read(ctxt
, VCPU_REGS_RAX
);
2313 ctxt
->src
.orig_val
= ctxt
->src
.val
;
2314 ctxt
->src
.val
= ctxt
->dst
.orig_val
;
2315 fastop(ctxt
, em_cmp
);
2317 if (ctxt
->eflags
& X86_EFLAGS_ZF
) {
2318 /* Success: write back to memory; no update of EAX */
2319 ctxt
->src
.type
= OP_NONE
;
2320 ctxt
->dst
.val
= ctxt
->src
.orig_val
;
2322 /* Failure: write the value we saw to EAX. */
2323 ctxt
->src
.type
= OP_REG
;
2324 ctxt
->src
.addr
.reg
= reg_rmw(ctxt
, VCPU_REGS_RAX
);
2325 ctxt
->src
.val
= ctxt
->dst
.orig_val
;
2326 /* Create write-cycle to dest by writing the same value */
2327 ctxt
->dst
.val
= ctxt
->dst
.orig_val
;
2329 return X86EMUL_CONTINUE
;
2332 static int em_lseg(struct x86_emulate_ctxt
*ctxt
)
2334 int seg
= ctxt
->src2
.val
;
2338 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
2340 rc
= load_segment_descriptor(ctxt
, sel
, seg
);
2341 if (rc
!= X86EMUL_CONTINUE
)
2344 ctxt
->dst
.val
= ctxt
->src
.val
;
2348 static int emulator_has_longmode(struct x86_emulate_ctxt
*ctxt
)
2350 #ifdef CONFIG_X86_64
2351 u32 eax
, ebx
, ecx
, edx
;
2355 ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
, false);
2356 return edx
& bit(X86_FEATURE_LM
);
2362 static void rsm_set_desc_flags(struct desc_struct
*desc
, u32 flags
)
2364 desc
->g
= (flags
>> 23) & 1;
2365 desc
->d
= (flags
>> 22) & 1;
2366 desc
->l
= (flags
>> 21) & 1;
2367 desc
->avl
= (flags
>> 20) & 1;
2368 desc
->p
= (flags
>> 15) & 1;
2369 desc
->dpl
= (flags
>> 13) & 3;
2370 desc
->s
= (flags
>> 12) & 1;
2371 desc
->type
= (flags
>> 8) & 15;
2374 static int rsm_load_seg_32(struct x86_emulate_ctxt
*ctxt
, const char *smstate
,
2377 struct desc_struct desc
;
2381 selector
= GET_SMSTATE(u32
, smstate
, 0x7fa8 + n
* 4);
2384 offset
= 0x7f84 + n
* 12;
2386 offset
= 0x7f2c + (n
- 3) * 12;
2388 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, offset
+ 8));
2389 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, offset
+ 4));
2390 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smstate
, offset
));
2391 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, 0, n
);
2392 return X86EMUL_CONTINUE
;
2395 #ifdef CONFIG_X86_64
2396 static int rsm_load_seg_64(struct x86_emulate_ctxt
*ctxt
, const char *smstate
,
2399 struct desc_struct desc
;
2404 offset
= 0x7e00 + n
* 16;
2406 selector
= GET_SMSTATE(u16
, smstate
, offset
);
2407 rsm_set_desc_flags(&desc
, GET_SMSTATE(u16
, smstate
, offset
+ 2) << 8);
2408 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, offset
+ 4));
2409 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, offset
+ 8));
2410 base3
= GET_SMSTATE(u32
, smstate
, offset
+ 12);
2412 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, base3
, n
);
2413 return X86EMUL_CONTINUE
;
2417 static int rsm_enter_protected_mode(struct x86_emulate_ctxt
*ctxt
,
2418 u64 cr0
, u64 cr3
, u64 cr4
)
2423 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2425 if (cr4
& X86_CR4_PCIDE
) {
2430 bad
= ctxt
->ops
->set_cr(ctxt
, 3, cr3
);
2432 return X86EMUL_UNHANDLEABLE
;
2435 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2436 * Then enable protected mode. However, PCID cannot be enabled
2437 * if EFER.LMA=0, so set it separately.
2439 bad
= ctxt
->ops
->set_cr(ctxt
, 4, cr4
& ~X86_CR4_PCIDE
);
2441 return X86EMUL_UNHANDLEABLE
;
2443 bad
= ctxt
->ops
->set_cr(ctxt
, 0, cr0
);
2445 return X86EMUL_UNHANDLEABLE
;
2447 if (cr4
& X86_CR4_PCIDE
) {
2448 bad
= ctxt
->ops
->set_cr(ctxt
, 4, cr4
);
2450 return X86EMUL_UNHANDLEABLE
;
2452 bad
= ctxt
->ops
->set_cr(ctxt
, 3, cr3
| pcid
);
2454 return X86EMUL_UNHANDLEABLE
;
2459 return X86EMUL_CONTINUE
;
2462 static int rsm_load_state_32(struct x86_emulate_ctxt
*ctxt
,
2463 const char *smstate
)
2465 struct desc_struct desc
;
2468 u32 val
, cr0
, cr3
, cr4
;
2471 cr0
= GET_SMSTATE(u32
, smstate
, 0x7ffc);
2472 cr3
= GET_SMSTATE(u32
, smstate
, 0x7ff8);
2473 ctxt
->eflags
= GET_SMSTATE(u32
, smstate
, 0x7ff4) | X86_EFLAGS_FIXED
;
2474 ctxt
->_eip
= GET_SMSTATE(u32
, smstate
, 0x7ff0);
2476 for (i
= 0; i
< 8; i
++)
2477 *reg_write(ctxt
, i
) = GET_SMSTATE(u32
, smstate
, 0x7fd0 + i
* 4);
2479 val
= GET_SMSTATE(u32
, smstate
, 0x7fcc);
2480 ctxt
->ops
->set_dr(ctxt
, 6, (val
& DR6_VOLATILE
) | DR6_FIXED_1
);
2481 val
= GET_SMSTATE(u32
, smstate
, 0x7fc8);
2482 ctxt
->ops
->set_dr(ctxt
, 7, (val
& DR7_VOLATILE
) | DR7_FIXED_1
);
2484 selector
= GET_SMSTATE(u32
, smstate
, 0x7fc4);
2485 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, 0x7f64));
2486 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, 0x7f60));
2487 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smstate
, 0x7f5c));
2488 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, 0, VCPU_SREG_TR
);
2490 selector
= GET_SMSTATE(u32
, smstate
, 0x7fc0);
2491 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, 0x7f80));
2492 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, 0x7f7c));
2493 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smstate
, 0x7f78));
2494 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, 0, VCPU_SREG_LDTR
);
2496 dt
.address
= GET_SMSTATE(u32
, smstate
, 0x7f74);
2497 dt
.size
= GET_SMSTATE(u32
, smstate
, 0x7f70);
2498 ctxt
->ops
->set_gdt(ctxt
, &dt
);
2500 dt
.address
= GET_SMSTATE(u32
, smstate
, 0x7f58);
2501 dt
.size
= GET_SMSTATE(u32
, smstate
, 0x7f54);
2502 ctxt
->ops
->set_idt(ctxt
, &dt
);
2504 for (i
= 0; i
< 6; i
++) {
2505 int r
= rsm_load_seg_32(ctxt
, smstate
, i
);
2506 if (r
!= X86EMUL_CONTINUE
)
2510 cr4
= GET_SMSTATE(u32
, smstate
, 0x7f14);
2512 ctxt
->ops
->set_smbase(ctxt
, GET_SMSTATE(u32
, smstate
, 0x7ef8));
2514 return rsm_enter_protected_mode(ctxt
, cr0
, cr3
, cr4
);
2517 #ifdef CONFIG_X86_64
2518 static int rsm_load_state_64(struct x86_emulate_ctxt
*ctxt
,
2519 const char *smstate
)
2521 struct desc_struct desc
;
2523 u64 val
, cr0
, cr3
, cr4
;
2528 for (i
= 0; i
< 16; i
++)
2529 *reg_write(ctxt
, i
) = GET_SMSTATE(u64
, smstate
, 0x7ff8 - i
* 8);
2531 ctxt
->_eip
= GET_SMSTATE(u64
, smstate
, 0x7f78);
2532 ctxt
->eflags
= GET_SMSTATE(u32
, smstate
, 0x7f70) | X86_EFLAGS_FIXED
;
2534 val
= GET_SMSTATE(u32
, smstate
, 0x7f68);
2535 ctxt
->ops
->set_dr(ctxt
, 6, (val
& DR6_VOLATILE
) | DR6_FIXED_1
);
2536 val
= GET_SMSTATE(u32
, smstate
, 0x7f60);
2537 ctxt
->ops
->set_dr(ctxt
, 7, (val
& DR7_VOLATILE
) | DR7_FIXED_1
);
2539 cr0
= GET_SMSTATE(u64
, smstate
, 0x7f58);
2540 cr3
= GET_SMSTATE(u64
, smstate
, 0x7f50);
2541 cr4
= GET_SMSTATE(u64
, smstate
, 0x7f48);
2542 ctxt
->ops
->set_smbase(ctxt
, GET_SMSTATE(u32
, smstate
, 0x7f00));
2543 val
= GET_SMSTATE(u64
, smstate
, 0x7ed0);
2544 ctxt
->ops
->set_msr(ctxt
, MSR_EFER
, val
& ~EFER_LMA
);
2546 selector
= GET_SMSTATE(u32
, smstate
, 0x7e90);
2547 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smstate
, 0x7e92) << 8);
2548 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, 0x7e94));
2549 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, 0x7e98));
2550 base3
= GET_SMSTATE(u32
, smstate
, 0x7e9c);
2551 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, base3
, VCPU_SREG_TR
);
2553 dt
.size
= GET_SMSTATE(u32
, smstate
, 0x7e84);
2554 dt
.address
= GET_SMSTATE(u64
, smstate
, 0x7e88);
2555 ctxt
->ops
->set_idt(ctxt
, &dt
);
2557 selector
= GET_SMSTATE(u32
, smstate
, 0x7e70);
2558 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smstate
, 0x7e72) << 8);
2559 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, 0x7e74));
2560 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, 0x7e78));
2561 base3
= GET_SMSTATE(u32
, smstate
, 0x7e7c);
2562 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, base3
, VCPU_SREG_LDTR
);
2564 dt
.size
= GET_SMSTATE(u32
, smstate
, 0x7e64);
2565 dt
.address
= GET_SMSTATE(u64
, smstate
, 0x7e68);
2566 ctxt
->ops
->set_gdt(ctxt
, &dt
);
2568 r
= rsm_enter_protected_mode(ctxt
, cr0
, cr3
, cr4
);
2569 if (r
!= X86EMUL_CONTINUE
)
2572 for (i
= 0; i
< 6; i
++) {
2573 r
= rsm_load_seg_64(ctxt
, smstate
, i
);
2574 if (r
!= X86EMUL_CONTINUE
)
2578 return X86EMUL_CONTINUE
;
2582 static int em_rsm(struct x86_emulate_ctxt
*ctxt
)
2584 unsigned long cr0
, cr4
, efer
;
2589 if ((ctxt
->ops
->get_hflags(ctxt
) & X86EMUL_SMM_MASK
) == 0)
2590 return emulate_ud(ctxt
);
2592 smbase
= ctxt
->ops
->get_smbase(ctxt
);
2594 ret
= ctxt
->ops
->read_phys(ctxt
, smbase
+ 0xfe00, buf
, sizeof(buf
));
2595 if (ret
!= X86EMUL_CONTINUE
)
2596 return X86EMUL_UNHANDLEABLE
;
2598 if ((ctxt
->ops
->get_hflags(ctxt
) & X86EMUL_SMM_INSIDE_NMI_MASK
) == 0)
2599 ctxt
->ops
->set_nmi_mask(ctxt
, false);
2601 ctxt
->ops
->set_hflags(ctxt
, ctxt
->ops
->get_hflags(ctxt
) &
2602 ~(X86EMUL_SMM_INSIDE_NMI_MASK
| X86EMUL_SMM_MASK
));
2605 * Get back to real mode, to prepare a safe state in which to load
2606 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2607 * supports long mode.
2609 if (emulator_has_longmode(ctxt
)) {
2610 struct desc_struct cs_desc
;
2612 /* Zero CR4.PCIDE before CR0.PG. */
2613 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
2614 if (cr4
& X86_CR4_PCIDE
)
2615 ctxt
->ops
->set_cr(ctxt
, 4, cr4
& ~X86_CR4_PCIDE
);
2617 /* A 32-bit code segment is required to clear EFER.LMA. */
2618 memset(&cs_desc
, 0, sizeof(cs_desc
));
2620 cs_desc
.s
= cs_desc
.g
= cs_desc
.p
= 1;
2621 ctxt
->ops
->set_segment(ctxt
, 0, &cs_desc
, 0, VCPU_SREG_CS
);
2624 /* For the 64-bit case, this will clear EFER.LMA. */
2625 cr0
= ctxt
->ops
->get_cr(ctxt
, 0);
2626 if (cr0
& X86_CR0_PE
)
2627 ctxt
->ops
->set_cr(ctxt
, 0, cr0
& ~(X86_CR0_PG
| X86_CR0_PE
));
2629 if (emulator_has_longmode(ctxt
)) {
2630 /* Clear CR4.PAE before clearing EFER.LME. */
2631 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
2632 if (cr4
& X86_CR4_PAE
)
2633 ctxt
->ops
->set_cr(ctxt
, 4, cr4
& ~X86_CR4_PAE
);
2635 /* And finally go back to 32-bit mode. */
2637 ctxt
->ops
->set_msr(ctxt
, MSR_EFER
, efer
);
2641 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2642 * vCPU state (e.g. enter guest mode) before loading state from the SMM
2645 if (ctxt
->ops
->pre_leave_smm(ctxt
, buf
))
2646 return X86EMUL_UNHANDLEABLE
;
2648 #ifdef CONFIG_X86_64
2649 if (emulator_has_longmode(ctxt
))
2650 ret
= rsm_load_state_64(ctxt
, buf
);
2653 ret
= rsm_load_state_32(ctxt
, buf
);
2655 if (ret
!= X86EMUL_CONTINUE
) {
2656 /* FIXME: should triple fault */
2657 return X86EMUL_UNHANDLEABLE
;
2660 ctxt
->ops
->post_leave_smm(ctxt
);
2662 return X86EMUL_CONTINUE
;
2666 setup_syscalls_segments(struct x86_emulate_ctxt
*ctxt
,
2667 struct desc_struct
*cs
, struct desc_struct
*ss
)
2669 cs
->l
= 0; /* will be adjusted later */
2670 set_desc_base(cs
, 0); /* flat segment */
2671 cs
->g
= 1; /* 4kb granularity */
2672 set_desc_limit(cs
, 0xfffff); /* 4GB limit */
2673 cs
->type
= 0x0b; /* Read, Execute, Accessed */
2675 cs
->dpl
= 0; /* will be adjusted later */
2680 set_desc_base(ss
, 0); /* flat segment */
2681 set_desc_limit(ss
, 0xfffff); /* 4GB limit */
2682 ss
->g
= 1; /* 4kb granularity */
2684 ss
->type
= 0x03; /* Read/Write, Accessed */
2685 ss
->d
= 1; /* 32bit stack segment */
2692 static bool vendor_intel(struct x86_emulate_ctxt
*ctxt
)
2694 u32 eax
, ebx
, ecx
, edx
;
2697 ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
, false);
2698 return ebx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2699 && ecx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2700 && edx
== X86EMUL_CPUID_VENDOR_GenuineIntel_edx
;
2703 static bool em_syscall_is_enabled(struct x86_emulate_ctxt
*ctxt
)
2705 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2706 u32 eax
, ebx
, ecx
, edx
;
2709 * syscall should always be enabled in longmode - so only become
2710 * vendor specific (cpuid) if other modes are active...
2712 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
2717 ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
, false);
2719 * Intel ("GenuineIntel")
2720 * remark: Intel CPUs only support "syscall" in 64bit
2721 * longmode. Also an 64bit guest with a
2722 * 32bit compat-app running will #UD !! While this
2723 * behaviour can be fixed (by emulating) into AMD
2724 * response - CPUs of AMD can't behave like Intel.
2726 if (ebx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
&&
2727 ecx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
&&
2728 edx
== X86EMUL_CPUID_VENDOR_GenuineIntel_edx
)
2731 /* AMD ("AuthenticAMD") */
2732 if (ebx
== X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx
&&
2733 ecx
== X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx
&&
2734 edx
== X86EMUL_CPUID_VENDOR_AuthenticAMD_edx
)
2737 /* AMD ("AMDisbetter!") */
2738 if (ebx
== X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx
&&
2739 ecx
== X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx
&&
2740 edx
== X86EMUL_CPUID_VENDOR_AMDisbetterI_edx
)
2743 /* Hygon ("HygonGenuine") */
2744 if (ebx
== X86EMUL_CPUID_VENDOR_HygonGenuine_ebx
&&
2745 ecx
== X86EMUL_CPUID_VENDOR_HygonGenuine_ecx
&&
2746 edx
== X86EMUL_CPUID_VENDOR_HygonGenuine_edx
)
2750 * default: (not Intel, not AMD, not Hygon), apply Intel's
2756 static int em_syscall(struct x86_emulate_ctxt
*ctxt
)
2758 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2759 struct desc_struct cs
, ss
;
2764 /* syscall is not available in real mode */
2765 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
2766 ctxt
->mode
== X86EMUL_MODE_VM86
)
2767 return emulate_ud(ctxt
);
2769 if (!(em_syscall_is_enabled(ctxt
)))
2770 return emulate_ud(ctxt
);
2772 ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
2773 setup_syscalls_segments(ctxt
, &cs
, &ss
);
2775 if (!(efer
& EFER_SCE
))
2776 return emulate_ud(ctxt
);
2778 ops
->get_msr(ctxt
, MSR_STAR
, &msr_data
);
2780 cs_sel
= (u16
)(msr_data
& 0xfffc);
2781 ss_sel
= (u16
)(msr_data
+ 8);
2783 if (efer
& EFER_LMA
) {
2787 ops
->set_segment(ctxt
, cs_sel
, &cs
, 0, VCPU_SREG_CS
);
2788 ops
->set_segment(ctxt
, ss_sel
, &ss
, 0, VCPU_SREG_SS
);
2790 *reg_write(ctxt
, VCPU_REGS_RCX
) = ctxt
->_eip
;
2791 if (efer
& EFER_LMA
) {
2792 #ifdef CONFIG_X86_64
2793 *reg_write(ctxt
, VCPU_REGS_R11
) = ctxt
->eflags
;
2796 ctxt
->mode
== X86EMUL_MODE_PROT64
?
2797 MSR_LSTAR
: MSR_CSTAR
, &msr_data
);
2798 ctxt
->_eip
= msr_data
;
2800 ops
->get_msr(ctxt
, MSR_SYSCALL_MASK
, &msr_data
);
2801 ctxt
->eflags
&= ~msr_data
;
2802 ctxt
->eflags
|= X86_EFLAGS_FIXED
;
2806 ops
->get_msr(ctxt
, MSR_STAR
, &msr_data
);
2807 ctxt
->_eip
= (u32
)msr_data
;
2809 ctxt
->eflags
&= ~(X86_EFLAGS_VM
| X86_EFLAGS_IF
);
2812 ctxt
->tf
= (ctxt
->eflags
& X86_EFLAGS_TF
) != 0;
2813 return X86EMUL_CONTINUE
;
2816 static int em_sysenter(struct x86_emulate_ctxt
*ctxt
)
2818 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2819 struct desc_struct cs
, ss
;
2824 ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
2825 /* inject #GP if in real mode */
2826 if (ctxt
->mode
== X86EMUL_MODE_REAL
)
2827 return emulate_gp(ctxt
, 0);
2830 * Not recognized on AMD in compat mode (but is recognized in legacy
2833 if ((ctxt
->mode
!= X86EMUL_MODE_PROT64
) && (efer
& EFER_LMA
)
2834 && !vendor_intel(ctxt
))
2835 return emulate_ud(ctxt
);
2837 /* sysenter/sysexit have not been tested in 64bit mode. */
2838 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
2839 return X86EMUL_UNHANDLEABLE
;
2841 setup_syscalls_segments(ctxt
, &cs
, &ss
);
2843 ops
->get_msr(ctxt
, MSR_IA32_SYSENTER_CS
, &msr_data
);
2844 if ((msr_data
& 0xfffc) == 0x0)
2845 return emulate_gp(ctxt
, 0);
2847 ctxt
->eflags
&= ~(X86_EFLAGS_VM
| X86_EFLAGS_IF
);
2848 cs_sel
= (u16
)msr_data
& ~SEGMENT_RPL_MASK
;
2849 ss_sel
= cs_sel
+ 8;
2850 if (efer
& EFER_LMA
) {
2855 ops
->set_segment(ctxt
, cs_sel
, &cs
, 0, VCPU_SREG_CS
);
2856 ops
->set_segment(ctxt
, ss_sel
, &ss
, 0, VCPU_SREG_SS
);
2858 ops
->get_msr(ctxt
, MSR_IA32_SYSENTER_EIP
, &msr_data
);
2859 ctxt
->_eip
= (efer
& EFER_LMA
) ? msr_data
: (u32
)msr_data
;
2861 ops
->get_msr(ctxt
, MSR_IA32_SYSENTER_ESP
, &msr_data
);
2862 *reg_write(ctxt
, VCPU_REGS_RSP
) = (efer
& EFER_LMA
) ? msr_data
:
2865 return X86EMUL_CONTINUE
;
2868 static int em_sysexit(struct x86_emulate_ctxt
*ctxt
)
2870 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2871 struct desc_struct cs
, ss
;
2872 u64 msr_data
, rcx
, rdx
;
2874 u16 cs_sel
= 0, ss_sel
= 0;
2876 /* inject #GP if in real mode or Virtual 8086 mode */
2877 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
2878 ctxt
->mode
== X86EMUL_MODE_VM86
)
2879 return emulate_gp(ctxt
, 0);
2881 setup_syscalls_segments(ctxt
, &cs
, &ss
);
2883 if ((ctxt
->rex_prefix
& 0x8) != 0x0)
2884 usermode
= X86EMUL_MODE_PROT64
;
2886 usermode
= X86EMUL_MODE_PROT32
;
2888 rcx
= reg_read(ctxt
, VCPU_REGS_RCX
);
2889 rdx
= reg_read(ctxt
, VCPU_REGS_RDX
);
2893 ops
->get_msr(ctxt
, MSR_IA32_SYSENTER_CS
, &msr_data
);
2895 case X86EMUL_MODE_PROT32
:
2896 cs_sel
= (u16
)(msr_data
+ 16);
2897 if ((msr_data
& 0xfffc) == 0x0)
2898 return emulate_gp(ctxt
, 0);
2899 ss_sel
= (u16
)(msr_data
+ 24);
2903 case X86EMUL_MODE_PROT64
:
2904 cs_sel
= (u16
)(msr_data
+ 32);
2905 if (msr_data
== 0x0)
2906 return emulate_gp(ctxt
, 0);
2907 ss_sel
= cs_sel
+ 8;
2910 if (emul_is_noncanonical_address(rcx
, ctxt
) ||
2911 emul_is_noncanonical_address(rdx
, ctxt
))
2912 return emulate_gp(ctxt
, 0);
2915 cs_sel
|= SEGMENT_RPL_MASK
;
2916 ss_sel
|= SEGMENT_RPL_MASK
;
2918 ops
->set_segment(ctxt
, cs_sel
, &cs
, 0, VCPU_SREG_CS
);
2919 ops
->set_segment(ctxt
, ss_sel
, &ss
, 0, VCPU_SREG_SS
);
2922 *reg_write(ctxt
, VCPU_REGS_RSP
) = rcx
;
2924 return X86EMUL_CONTINUE
;
2927 static bool emulator_bad_iopl(struct x86_emulate_ctxt
*ctxt
)
2930 if (ctxt
->mode
== X86EMUL_MODE_REAL
)
2932 if (ctxt
->mode
== X86EMUL_MODE_VM86
)
2934 iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> X86_EFLAGS_IOPL_BIT
;
2935 return ctxt
->ops
->cpl(ctxt
) > iopl
;
2938 #define VMWARE_PORT_VMPORT (0x5658)
2939 #define VMWARE_PORT_VMRPC (0x5659)
2941 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt
*ctxt
,
2944 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2945 struct desc_struct tr_seg
;
2948 u16 tr
, io_bitmap_ptr
, perm
, bit_idx
= port
& 0x7;
2949 unsigned mask
= (1 << len
) - 1;
2953 * VMware allows access to these ports even if denied
2954 * by TSS I/O permission bitmap. Mimic behavior.
2956 if (enable_vmware_backdoor
&&
2957 ((port
== VMWARE_PORT_VMPORT
) || (port
== VMWARE_PORT_VMRPC
)))
2960 ops
->get_segment(ctxt
, &tr
, &tr_seg
, &base3
, VCPU_SREG_TR
);
2963 if (desc_limit_scaled(&tr_seg
) < 103)
2965 base
= get_desc_base(&tr_seg
);
2966 #ifdef CONFIG_X86_64
2967 base
|= ((u64
)base3
) << 32;
2969 r
= ops
->read_std(ctxt
, base
+ 102, &io_bitmap_ptr
, 2, NULL
, true);
2970 if (r
!= X86EMUL_CONTINUE
)
2972 if (io_bitmap_ptr
+ port
/8 > desc_limit_scaled(&tr_seg
))
2974 r
= ops
->read_std(ctxt
, base
+ io_bitmap_ptr
+ port
/8, &perm
, 2, NULL
, true);
2975 if (r
!= X86EMUL_CONTINUE
)
2977 if ((perm
>> bit_idx
) & mask
)
2982 static bool emulator_io_permited(struct x86_emulate_ctxt
*ctxt
,
2988 if (emulator_bad_iopl(ctxt
))
2989 if (!emulator_io_port_access_allowed(ctxt
, port
, len
))
2992 ctxt
->perm_ok
= true;
2997 static void string_registers_quirk(struct x86_emulate_ctxt
*ctxt
)
3000 * Intel CPUs mask the counter and pointers in quite strange
3001 * manner when ECX is zero due to REP-string optimizations.
3003 #ifdef CONFIG_X86_64
3004 if (ctxt
->ad_bytes
!= 4 || !vendor_intel(ctxt
))
3007 *reg_write(ctxt
, VCPU_REGS_RCX
) = 0;
3010 case 0xa4: /* movsb */
3011 case 0xa5: /* movsd/w */
3012 *reg_rmw(ctxt
, VCPU_REGS_RSI
) &= (u32
)-1;
3014 case 0xaa: /* stosb */
3015 case 0xab: /* stosd/w */
3016 *reg_rmw(ctxt
, VCPU_REGS_RDI
) &= (u32
)-1;
3021 static void save_state_to_tss16(struct x86_emulate_ctxt
*ctxt
,
3022 struct tss_segment_16
*tss
)
3024 tss
->ip
= ctxt
->_eip
;
3025 tss
->flag
= ctxt
->eflags
;
3026 tss
->ax
= reg_read(ctxt
, VCPU_REGS_RAX
);
3027 tss
->cx
= reg_read(ctxt
, VCPU_REGS_RCX
);
3028 tss
->dx
= reg_read(ctxt
, VCPU_REGS_RDX
);
3029 tss
->bx
= reg_read(ctxt
, VCPU_REGS_RBX
);
3030 tss
->sp
= reg_read(ctxt
, VCPU_REGS_RSP
);
3031 tss
->bp
= reg_read(ctxt
, VCPU_REGS_RBP
);
3032 tss
->si
= reg_read(ctxt
, VCPU_REGS_RSI
);
3033 tss
->di
= reg_read(ctxt
, VCPU_REGS_RDI
);
3035 tss
->es
= get_segment_selector(ctxt
, VCPU_SREG_ES
);
3036 tss
->cs
= get_segment_selector(ctxt
, VCPU_SREG_CS
);
3037 tss
->ss
= get_segment_selector(ctxt
, VCPU_SREG_SS
);
3038 tss
->ds
= get_segment_selector(ctxt
, VCPU_SREG_DS
);
3039 tss
->ldt
= get_segment_selector(ctxt
, VCPU_SREG_LDTR
);
3042 static int load_state_from_tss16(struct x86_emulate_ctxt
*ctxt
,
3043 struct tss_segment_16
*tss
)
3048 ctxt
->_eip
= tss
->ip
;
3049 ctxt
->eflags
= tss
->flag
| 2;
3050 *reg_write(ctxt
, VCPU_REGS_RAX
) = tss
->ax
;
3051 *reg_write(ctxt
, VCPU_REGS_RCX
) = tss
->cx
;
3052 *reg_write(ctxt
, VCPU_REGS_RDX
) = tss
->dx
;
3053 *reg_write(ctxt
, VCPU_REGS_RBX
) = tss
->bx
;
3054 *reg_write(ctxt
, VCPU_REGS_RSP
) = tss
->sp
;
3055 *reg_write(ctxt
, VCPU_REGS_RBP
) = tss
->bp
;
3056 *reg_write(ctxt
, VCPU_REGS_RSI
) = tss
->si
;
3057 *reg_write(ctxt
, VCPU_REGS_RDI
) = tss
->di
;
3060 * SDM says that segment selectors are loaded before segment
3063 set_segment_selector(ctxt
, tss
->ldt
, VCPU_SREG_LDTR
);
3064 set_segment_selector(ctxt
, tss
->es
, VCPU_SREG_ES
);
3065 set_segment_selector(ctxt
, tss
->cs
, VCPU_SREG_CS
);
3066 set_segment_selector(ctxt
, tss
->ss
, VCPU_SREG_SS
);
3067 set_segment_selector(ctxt
, tss
->ds
, VCPU_SREG_DS
);
3072 * Now load segment descriptors. If fault happens at this stage
3073 * it is handled in a context of new task
3075 ret
= __load_segment_descriptor(ctxt
, tss
->ldt
, VCPU_SREG_LDTR
, cpl
,
3076 X86_TRANSFER_TASK_SWITCH
, NULL
);
3077 if (ret
!= X86EMUL_CONTINUE
)
3079 ret
= __load_segment_descriptor(ctxt
, tss
->es
, VCPU_SREG_ES
, cpl
,
3080 X86_TRANSFER_TASK_SWITCH
, NULL
);
3081 if (ret
!= X86EMUL_CONTINUE
)
3083 ret
= __load_segment_descriptor(ctxt
, tss
->cs
, VCPU_SREG_CS
, cpl
,
3084 X86_TRANSFER_TASK_SWITCH
, NULL
);
3085 if (ret
!= X86EMUL_CONTINUE
)
3087 ret
= __load_segment_descriptor(ctxt
, tss
->ss
, VCPU_SREG_SS
, cpl
,
3088 X86_TRANSFER_TASK_SWITCH
, NULL
);
3089 if (ret
!= X86EMUL_CONTINUE
)
3091 ret
= __load_segment_descriptor(ctxt
, tss
->ds
, VCPU_SREG_DS
, cpl
,
3092 X86_TRANSFER_TASK_SWITCH
, NULL
);
3093 if (ret
!= X86EMUL_CONTINUE
)
3096 return X86EMUL_CONTINUE
;
3099 static int task_switch_16(struct x86_emulate_ctxt
*ctxt
,
3100 u16 tss_selector
, u16 old_tss_sel
,
3101 ulong old_tss_base
, struct desc_struct
*new_desc
)
3103 struct tss_segment_16 tss_seg
;
3105 u32 new_tss_base
= get_desc_base(new_desc
);
3107 ret
= linear_read_system(ctxt
, old_tss_base
, &tss_seg
, sizeof(tss_seg
));
3108 if (ret
!= X86EMUL_CONTINUE
)
3111 save_state_to_tss16(ctxt
, &tss_seg
);
3113 ret
= linear_write_system(ctxt
, old_tss_base
, &tss_seg
, sizeof(tss_seg
));
3114 if (ret
!= X86EMUL_CONTINUE
)
3117 ret
= linear_read_system(ctxt
, new_tss_base
, &tss_seg
, sizeof(tss_seg
));
3118 if (ret
!= X86EMUL_CONTINUE
)
3121 if (old_tss_sel
!= 0xffff) {
3122 tss_seg
.prev_task_link
= old_tss_sel
;
3124 ret
= linear_write_system(ctxt
, new_tss_base
,
3125 &tss_seg
.prev_task_link
,
3126 sizeof(tss_seg
.prev_task_link
));
3127 if (ret
!= X86EMUL_CONTINUE
)
3131 return load_state_from_tss16(ctxt
, &tss_seg
);
3134 static void save_state_to_tss32(struct x86_emulate_ctxt
*ctxt
,
3135 struct tss_segment_32
*tss
)
3137 /* CR3 and ldt selector are not saved intentionally */
3138 tss
->eip
= ctxt
->_eip
;
3139 tss
->eflags
= ctxt
->eflags
;
3140 tss
->eax
= reg_read(ctxt
, VCPU_REGS_RAX
);
3141 tss
->ecx
= reg_read(ctxt
, VCPU_REGS_RCX
);
3142 tss
->edx
= reg_read(ctxt
, VCPU_REGS_RDX
);
3143 tss
->ebx
= reg_read(ctxt
, VCPU_REGS_RBX
);
3144 tss
->esp
= reg_read(ctxt
, VCPU_REGS_RSP
);
3145 tss
->ebp
= reg_read(ctxt
, VCPU_REGS_RBP
);
3146 tss
->esi
= reg_read(ctxt
, VCPU_REGS_RSI
);
3147 tss
->edi
= reg_read(ctxt
, VCPU_REGS_RDI
);
3149 tss
->es
= get_segment_selector(ctxt
, VCPU_SREG_ES
);
3150 tss
->cs
= get_segment_selector(ctxt
, VCPU_SREG_CS
);
3151 tss
->ss
= get_segment_selector(ctxt
, VCPU_SREG_SS
);
3152 tss
->ds
= get_segment_selector(ctxt
, VCPU_SREG_DS
);
3153 tss
->fs
= get_segment_selector(ctxt
, VCPU_SREG_FS
);
3154 tss
->gs
= get_segment_selector(ctxt
, VCPU_SREG_GS
);
3157 static int load_state_from_tss32(struct x86_emulate_ctxt
*ctxt
,
3158 struct tss_segment_32
*tss
)
3163 if (ctxt
->ops
->set_cr(ctxt
, 3, tss
->cr3
))
3164 return emulate_gp(ctxt
, 0);
3165 ctxt
->_eip
= tss
->eip
;
3166 ctxt
->eflags
= tss
->eflags
| 2;
3168 /* General purpose registers */
3169 *reg_write(ctxt
, VCPU_REGS_RAX
) = tss
->eax
;
3170 *reg_write(ctxt
, VCPU_REGS_RCX
) = tss
->ecx
;
3171 *reg_write(ctxt
, VCPU_REGS_RDX
) = tss
->edx
;
3172 *reg_write(ctxt
, VCPU_REGS_RBX
) = tss
->ebx
;
3173 *reg_write(ctxt
, VCPU_REGS_RSP
) = tss
->esp
;
3174 *reg_write(ctxt
, VCPU_REGS_RBP
) = tss
->ebp
;
3175 *reg_write(ctxt
, VCPU_REGS_RSI
) = tss
->esi
;
3176 *reg_write(ctxt
, VCPU_REGS_RDI
) = tss
->edi
;
3179 * SDM says that segment selectors are loaded before segment
3180 * descriptors. This is important because CPL checks will
3183 set_segment_selector(ctxt
, tss
->ldt_selector
, VCPU_SREG_LDTR
);
3184 set_segment_selector(ctxt
, tss
->es
, VCPU_SREG_ES
);
3185 set_segment_selector(ctxt
, tss
->cs
, VCPU_SREG_CS
);
3186 set_segment_selector(ctxt
, tss
->ss
, VCPU_SREG_SS
);
3187 set_segment_selector(ctxt
, tss
->ds
, VCPU_SREG_DS
);
3188 set_segment_selector(ctxt
, tss
->fs
, VCPU_SREG_FS
);
3189 set_segment_selector(ctxt
, tss
->gs
, VCPU_SREG_GS
);
3192 * If we're switching between Protected Mode and VM86, we need to make
3193 * sure to update the mode before loading the segment descriptors so
3194 * that the selectors are interpreted correctly.
3196 if (ctxt
->eflags
& X86_EFLAGS_VM
) {
3197 ctxt
->mode
= X86EMUL_MODE_VM86
;
3200 ctxt
->mode
= X86EMUL_MODE_PROT32
;
3205 * Now load segment descriptors. If fault happenes at this stage
3206 * it is handled in a context of new task
3208 ret
= __load_segment_descriptor(ctxt
, tss
->ldt_selector
, VCPU_SREG_LDTR
,
3209 cpl
, X86_TRANSFER_TASK_SWITCH
, NULL
);
3210 if (ret
!= X86EMUL_CONTINUE
)
3212 ret
= __load_segment_descriptor(ctxt
, tss
->es
, VCPU_SREG_ES
, cpl
,
3213 X86_TRANSFER_TASK_SWITCH
, NULL
);
3214 if (ret
!= X86EMUL_CONTINUE
)
3216 ret
= __load_segment_descriptor(ctxt
, tss
->cs
, VCPU_SREG_CS
, cpl
,
3217 X86_TRANSFER_TASK_SWITCH
, NULL
);
3218 if (ret
!= X86EMUL_CONTINUE
)
3220 ret
= __load_segment_descriptor(ctxt
, tss
->ss
, VCPU_SREG_SS
, cpl
,
3221 X86_TRANSFER_TASK_SWITCH
, NULL
);
3222 if (ret
!= X86EMUL_CONTINUE
)
3224 ret
= __load_segment_descriptor(ctxt
, tss
->ds
, VCPU_SREG_DS
, cpl
,
3225 X86_TRANSFER_TASK_SWITCH
, NULL
);
3226 if (ret
!= X86EMUL_CONTINUE
)
3228 ret
= __load_segment_descriptor(ctxt
, tss
->fs
, VCPU_SREG_FS
, cpl
,
3229 X86_TRANSFER_TASK_SWITCH
, NULL
);
3230 if (ret
!= X86EMUL_CONTINUE
)
3232 ret
= __load_segment_descriptor(ctxt
, tss
->gs
, VCPU_SREG_GS
, cpl
,
3233 X86_TRANSFER_TASK_SWITCH
, NULL
);
3238 static int task_switch_32(struct x86_emulate_ctxt
*ctxt
,
3239 u16 tss_selector
, u16 old_tss_sel
,
3240 ulong old_tss_base
, struct desc_struct
*new_desc
)
3242 struct tss_segment_32 tss_seg
;
3244 u32 new_tss_base
= get_desc_base(new_desc
);
3245 u32 eip_offset
= offsetof(struct tss_segment_32
, eip
);
3246 u32 ldt_sel_offset
= offsetof(struct tss_segment_32
, ldt_selector
);
3248 ret
= linear_read_system(ctxt
, old_tss_base
, &tss_seg
, sizeof(tss_seg
));
3249 if (ret
!= X86EMUL_CONTINUE
)
3252 save_state_to_tss32(ctxt
, &tss_seg
);
3254 /* Only GP registers and segment selectors are saved */
3255 ret
= linear_write_system(ctxt
, old_tss_base
+ eip_offset
, &tss_seg
.eip
,
3256 ldt_sel_offset
- eip_offset
);
3257 if (ret
!= X86EMUL_CONTINUE
)
3260 ret
= linear_read_system(ctxt
, new_tss_base
, &tss_seg
, sizeof(tss_seg
));
3261 if (ret
!= X86EMUL_CONTINUE
)
3264 if (old_tss_sel
!= 0xffff) {
3265 tss_seg
.prev_task_link
= old_tss_sel
;
3267 ret
= linear_write_system(ctxt
, new_tss_base
,
3268 &tss_seg
.prev_task_link
,
3269 sizeof(tss_seg
.prev_task_link
));
3270 if (ret
!= X86EMUL_CONTINUE
)
3274 return load_state_from_tss32(ctxt
, &tss_seg
);
3277 static int emulator_do_task_switch(struct x86_emulate_ctxt
*ctxt
,
3278 u16 tss_selector
, int idt_index
, int reason
,
3279 bool has_error_code
, u32 error_code
)
3281 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
3282 struct desc_struct curr_tss_desc
, next_tss_desc
;
3284 u16 old_tss_sel
= get_segment_selector(ctxt
, VCPU_SREG_TR
);
3285 ulong old_tss_base
=
3286 ops
->get_cached_segment_base(ctxt
, VCPU_SREG_TR
);
3288 ulong desc_addr
, dr7
;
3290 /* FIXME: old_tss_base == ~0 ? */
3292 ret
= read_segment_descriptor(ctxt
, tss_selector
, &next_tss_desc
, &desc_addr
);
3293 if (ret
!= X86EMUL_CONTINUE
)
3295 ret
= read_segment_descriptor(ctxt
, old_tss_sel
, &curr_tss_desc
, &desc_addr
);
3296 if (ret
!= X86EMUL_CONTINUE
)
3299 /* FIXME: check that next_tss_desc is tss */
3302 * Check privileges. The three cases are task switch caused by...
3304 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3305 * 2. Exception/IRQ/iret: No check is performed
3306 * 3. jmp/call to TSS/task-gate: No check is performed since the
3307 * hardware checks it before exiting.
3309 if (reason
== TASK_SWITCH_GATE
) {
3310 if (idt_index
!= -1) {
3311 /* Software interrupts */
3312 struct desc_struct task_gate_desc
;
3315 ret
= read_interrupt_descriptor(ctxt
, idt_index
,
3317 if (ret
!= X86EMUL_CONTINUE
)
3320 dpl
= task_gate_desc
.dpl
;
3321 if ((tss_selector
& 3) > dpl
|| ops
->cpl(ctxt
) > dpl
)
3322 return emulate_gp(ctxt
, (idt_index
<< 3) | 0x2);
3326 desc_limit
= desc_limit_scaled(&next_tss_desc
);
3327 if (!next_tss_desc
.p
||
3328 ((desc_limit
< 0x67 && (next_tss_desc
.type
& 8)) ||
3329 desc_limit
< 0x2b)) {
3330 return emulate_ts(ctxt
, tss_selector
& 0xfffc);
3333 if (reason
== TASK_SWITCH_IRET
|| reason
== TASK_SWITCH_JMP
) {
3334 curr_tss_desc
.type
&= ~(1 << 1); /* clear busy flag */
3335 write_segment_descriptor(ctxt
, old_tss_sel
, &curr_tss_desc
);
3338 if (reason
== TASK_SWITCH_IRET
)
3339 ctxt
->eflags
= ctxt
->eflags
& ~X86_EFLAGS_NT
;
3341 /* set back link to prev task only if NT bit is set in eflags
3342 note that old_tss_sel is not used after this point */
3343 if (reason
!= TASK_SWITCH_CALL
&& reason
!= TASK_SWITCH_GATE
)
3344 old_tss_sel
= 0xffff;
3346 if (next_tss_desc
.type
& 8)
3347 ret
= task_switch_32(ctxt
, tss_selector
, old_tss_sel
,
3348 old_tss_base
, &next_tss_desc
);
3350 ret
= task_switch_16(ctxt
, tss_selector
, old_tss_sel
,
3351 old_tss_base
, &next_tss_desc
);
3352 if (ret
!= X86EMUL_CONTINUE
)
3355 if (reason
== TASK_SWITCH_CALL
|| reason
== TASK_SWITCH_GATE
)
3356 ctxt
->eflags
= ctxt
->eflags
| X86_EFLAGS_NT
;
3358 if (reason
!= TASK_SWITCH_IRET
) {
3359 next_tss_desc
.type
|= (1 << 1); /* set busy flag */
3360 write_segment_descriptor(ctxt
, tss_selector
, &next_tss_desc
);
3363 ops
->set_cr(ctxt
, 0, ops
->get_cr(ctxt
, 0) | X86_CR0_TS
);
3364 ops
->set_segment(ctxt
, tss_selector
, &next_tss_desc
, 0, VCPU_SREG_TR
);
3366 if (has_error_code
) {
3367 ctxt
->op_bytes
= ctxt
->ad_bytes
= (next_tss_desc
.type
& 8) ? 4 : 2;
3368 ctxt
->lock_prefix
= 0;
3369 ctxt
->src
.val
= (unsigned long) error_code
;
3370 ret
= em_push(ctxt
);
3373 ops
->get_dr(ctxt
, 7, &dr7
);
3374 ops
->set_dr(ctxt
, 7, dr7
& ~(DR_LOCAL_ENABLE_MASK
| DR_LOCAL_SLOWDOWN
));
3379 int emulator_task_switch(struct x86_emulate_ctxt
*ctxt
,
3380 u16 tss_selector
, int idt_index
, int reason
,
3381 bool has_error_code
, u32 error_code
)
3385 invalidate_registers(ctxt
);
3386 ctxt
->_eip
= ctxt
->eip
;
3387 ctxt
->dst
.type
= OP_NONE
;
3389 rc
= emulator_do_task_switch(ctxt
, tss_selector
, idt_index
, reason
,
3390 has_error_code
, error_code
);
3392 if (rc
== X86EMUL_CONTINUE
) {
3393 ctxt
->eip
= ctxt
->_eip
;
3394 writeback_registers(ctxt
);
3397 return (rc
== X86EMUL_UNHANDLEABLE
) ? EMULATION_FAILED
: EMULATION_OK
;
3400 static void string_addr_inc(struct x86_emulate_ctxt
*ctxt
, int reg
,
3403 int df
= (ctxt
->eflags
& X86_EFLAGS_DF
) ? -op
->count
: op
->count
;
3405 register_address_increment(ctxt
, reg
, df
* op
->bytes
);
3406 op
->addr
.mem
.ea
= register_address(ctxt
, reg
);
3409 static int em_das(struct x86_emulate_ctxt
*ctxt
)
3412 bool af
, cf
, old_cf
;
3414 cf
= ctxt
->eflags
& X86_EFLAGS_CF
;
3420 af
= ctxt
->eflags
& X86_EFLAGS_AF
;
3421 if ((al
& 0x0f) > 9 || af
) {
3423 cf
= old_cf
| (al
>= 250);
3428 if (old_al
> 0x99 || old_cf
) {
3434 /* Set PF, ZF, SF */
3435 ctxt
->src
.type
= OP_IMM
;
3437 ctxt
->src
.bytes
= 1;
3438 fastop(ctxt
, em_or
);
3439 ctxt
->eflags
&= ~(X86_EFLAGS_AF
| X86_EFLAGS_CF
);
3441 ctxt
->eflags
|= X86_EFLAGS_CF
;
3443 ctxt
->eflags
|= X86_EFLAGS_AF
;
3444 return X86EMUL_CONTINUE
;
3447 static int em_aam(struct x86_emulate_ctxt
*ctxt
)
3451 if (ctxt
->src
.val
== 0)
3452 return emulate_de(ctxt
);
3454 al
= ctxt
->dst
.val
& 0xff;
3455 ah
= al
/ ctxt
->src
.val
;
3456 al
%= ctxt
->src
.val
;
3458 ctxt
->dst
.val
= (ctxt
->dst
.val
& 0xffff0000) | al
| (ah
<< 8);
3460 /* Set PF, ZF, SF */
3461 ctxt
->src
.type
= OP_IMM
;
3463 ctxt
->src
.bytes
= 1;
3464 fastop(ctxt
, em_or
);
3466 return X86EMUL_CONTINUE
;
3469 static int em_aad(struct x86_emulate_ctxt
*ctxt
)
3471 u8 al
= ctxt
->dst
.val
& 0xff;
3472 u8 ah
= (ctxt
->dst
.val
>> 8) & 0xff;
3474 al
= (al
+ (ah
* ctxt
->src
.val
)) & 0xff;
3476 ctxt
->dst
.val
= (ctxt
->dst
.val
& 0xffff0000) | al
;
3478 /* Set PF, ZF, SF */
3479 ctxt
->src
.type
= OP_IMM
;
3481 ctxt
->src
.bytes
= 1;
3482 fastop(ctxt
, em_or
);
3484 return X86EMUL_CONTINUE
;
3487 static int em_call(struct x86_emulate_ctxt
*ctxt
)
3490 long rel
= ctxt
->src
.val
;
3492 ctxt
->src
.val
= (unsigned long)ctxt
->_eip
;
3493 rc
= jmp_rel(ctxt
, rel
);
3494 if (rc
!= X86EMUL_CONTINUE
)
3496 return em_push(ctxt
);
3499 static int em_call_far(struct x86_emulate_ctxt
*ctxt
)
3504 struct desc_struct old_desc
, new_desc
;
3505 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
3506 int cpl
= ctxt
->ops
->cpl(ctxt
);
3507 enum x86emul_mode prev_mode
= ctxt
->mode
;
3509 old_eip
= ctxt
->_eip
;
3510 ops
->get_segment(ctxt
, &old_cs
, &old_desc
, NULL
, VCPU_SREG_CS
);
3512 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
3513 rc
= __load_segment_descriptor(ctxt
, sel
, VCPU_SREG_CS
, cpl
,
3514 X86_TRANSFER_CALL_JMP
, &new_desc
);
3515 if (rc
!= X86EMUL_CONTINUE
)
3518 rc
= assign_eip_far(ctxt
, ctxt
->src
.val
, &new_desc
);
3519 if (rc
!= X86EMUL_CONTINUE
)
3522 ctxt
->src
.val
= old_cs
;
3524 if (rc
!= X86EMUL_CONTINUE
)
3527 ctxt
->src
.val
= old_eip
;
3529 /* If we failed, we tainted the memory, but the very least we should
3531 if (rc
!= X86EMUL_CONTINUE
) {
3532 pr_warn_once("faulting far call emulation tainted memory\n");
3537 ops
->set_segment(ctxt
, old_cs
, &old_desc
, 0, VCPU_SREG_CS
);
3538 ctxt
->mode
= prev_mode
;
3543 static int em_ret_near_imm(struct x86_emulate_ctxt
*ctxt
)
3548 rc
= emulate_pop(ctxt
, &eip
, ctxt
->op_bytes
);
3549 if (rc
!= X86EMUL_CONTINUE
)
3551 rc
= assign_eip_near(ctxt
, eip
);
3552 if (rc
!= X86EMUL_CONTINUE
)
3554 rsp_increment(ctxt
, ctxt
->src
.val
);
3555 return X86EMUL_CONTINUE
;
3558 static int em_xchg(struct x86_emulate_ctxt
*ctxt
)
3560 /* Write back the register source. */
3561 ctxt
->src
.val
= ctxt
->dst
.val
;
3562 write_register_operand(&ctxt
->src
);
3564 /* Write back the memory destination with implicit LOCK prefix. */
3565 ctxt
->dst
.val
= ctxt
->src
.orig_val
;
3566 ctxt
->lock_prefix
= 1;
3567 return X86EMUL_CONTINUE
;
3570 static int em_imul_3op(struct x86_emulate_ctxt
*ctxt
)
3572 ctxt
->dst
.val
= ctxt
->src2
.val
;
3573 return fastop(ctxt
, em_imul
);
3576 static int em_cwd(struct x86_emulate_ctxt
*ctxt
)
3578 ctxt
->dst
.type
= OP_REG
;
3579 ctxt
->dst
.bytes
= ctxt
->src
.bytes
;
3580 ctxt
->dst
.addr
.reg
= reg_rmw(ctxt
, VCPU_REGS_RDX
);
3581 ctxt
->dst
.val
= ~((ctxt
->src
.val
>> (ctxt
->src
.bytes
* 8 - 1)) - 1);
3583 return X86EMUL_CONTINUE
;
3586 static int em_rdpid(struct x86_emulate_ctxt
*ctxt
)
3590 if (ctxt
->ops
->get_msr(ctxt
, MSR_TSC_AUX
, &tsc_aux
))
3591 return emulate_gp(ctxt
, 0);
3592 ctxt
->dst
.val
= tsc_aux
;
3593 return X86EMUL_CONTINUE
;
3596 static int em_rdtsc(struct x86_emulate_ctxt
*ctxt
)
3600 ctxt
->ops
->get_msr(ctxt
, MSR_IA32_TSC
, &tsc
);
3601 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
)tsc
;
3602 *reg_write(ctxt
, VCPU_REGS_RDX
) = tsc
>> 32;
3603 return X86EMUL_CONTINUE
;
3606 static int em_rdpmc(struct x86_emulate_ctxt
*ctxt
)
3610 if (ctxt
->ops
->read_pmc(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
), &pmc
))
3611 return emulate_gp(ctxt
, 0);
3612 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
)pmc
;
3613 *reg_write(ctxt
, VCPU_REGS_RDX
) = pmc
>> 32;
3614 return X86EMUL_CONTINUE
;
3617 static int em_mov(struct x86_emulate_ctxt
*ctxt
)
3619 memcpy(ctxt
->dst
.valptr
, ctxt
->src
.valptr
, sizeof(ctxt
->src
.valptr
));
3620 return X86EMUL_CONTINUE
;
3623 #define FFL(x) bit(X86_FEATURE_##x)
3625 static int em_movbe(struct x86_emulate_ctxt
*ctxt
)
3627 u32 ebx
, ecx
, edx
, eax
= 1;
3631 * Check MOVBE is set in the guest-visible CPUID leaf.
3633 ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
, false);
3634 if (!(ecx
& FFL(MOVBE
)))
3635 return emulate_ud(ctxt
);
3637 switch (ctxt
->op_bytes
) {
3640 * From MOVBE definition: "...When the operand size is 16 bits,
3641 * the upper word of the destination register remains unchanged
3644 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3645 * rules so we have to do the operation almost per hand.
3647 tmp
= (u16
)ctxt
->src
.val
;
3648 ctxt
->dst
.val
&= ~0xffffUL
;
3649 ctxt
->dst
.val
|= (unsigned long)swab16(tmp
);
3652 ctxt
->dst
.val
= swab32((u32
)ctxt
->src
.val
);
3655 ctxt
->dst
.val
= swab64(ctxt
->src
.val
);
3660 return X86EMUL_CONTINUE
;
3663 static int em_cr_write(struct x86_emulate_ctxt
*ctxt
)
3665 if (ctxt
->ops
->set_cr(ctxt
, ctxt
->modrm_reg
, ctxt
->src
.val
))
3666 return emulate_gp(ctxt
, 0);
3668 /* Disable writeback. */
3669 ctxt
->dst
.type
= OP_NONE
;
3670 return X86EMUL_CONTINUE
;
3673 static int em_dr_write(struct x86_emulate_ctxt
*ctxt
)
3677 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
3678 val
= ctxt
->src
.val
& ~0ULL;
3680 val
= ctxt
->src
.val
& ~0U;
3682 /* #UD condition is already handled. */
3683 if (ctxt
->ops
->set_dr(ctxt
, ctxt
->modrm_reg
, val
) < 0)
3684 return emulate_gp(ctxt
, 0);
3686 /* Disable writeback. */
3687 ctxt
->dst
.type
= OP_NONE
;
3688 return X86EMUL_CONTINUE
;
3691 static int em_wrmsr(struct x86_emulate_ctxt
*ctxt
)
3695 msr_data
= (u32
)reg_read(ctxt
, VCPU_REGS_RAX
)
3696 | ((u64
)reg_read(ctxt
, VCPU_REGS_RDX
) << 32);
3697 if (ctxt
->ops
->set_msr(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
), msr_data
))
3698 return emulate_gp(ctxt
, 0);
3700 return X86EMUL_CONTINUE
;
3703 static int em_rdmsr(struct x86_emulate_ctxt
*ctxt
)
3707 if (ctxt
->ops
->get_msr(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
), &msr_data
))
3708 return emulate_gp(ctxt
, 0);
3710 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
)msr_data
;
3711 *reg_write(ctxt
, VCPU_REGS_RDX
) = msr_data
>> 32;
3712 return X86EMUL_CONTINUE
;
3715 static int em_store_sreg(struct x86_emulate_ctxt
*ctxt
, int segment
)
3717 if (segment
> VCPU_SREG_GS
&&
3718 (ctxt
->ops
->get_cr(ctxt
, 4) & X86_CR4_UMIP
) &&
3719 ctxt
->ops
->cpl(ctxt
) > 0)
3720 return emulate_gp(ctxt
, 0);
3722 ctxt
->dst
.val
= get_segment_selector(ctxt
, segment
);
3723 if (ctxt
->dst
.bytes
== 4 && ctxt
->dst
.type
== OP_MEM
)
3724 ctxt
->dst
.bytes
= 2;
3725 return X86EMUL_CONTINUE
;
3728 static int em_mov_rm_sreg(struct x86_emulate_ctxt
*ctxt
)
3730 if (ctxt
->modrm_reg
> VCPU_SREG_GS
)
3731 return emulate_ud(ctxt
);
3733 return em_store_sreg(ctxt
, ctxt
->modrm_reg
);
3736 static int em_mov_sreg_rm(struct x86_emulate_ctxt
*ctxt
)
3738 u16 sel
= ctxt
->src
.val
;
3740 if (ctxt
->modrm_reg
== VCPU_SREG_CS
|| ctxt
->modrm_reg
> VCPU_SREG_GS
)
3741 return emulate_ud(ctxt
);
3743 if (ctxt
->modrm_reg
== VCPU_SREG_SS
)
3744 ctxt
->interruptibility
= KVM_X86_SHADOW_INT_MOV_SS
;
3746 /* Disable writeback. */
3747 ctxt
->dst
.type
= OP_NONE
;
3748 return load_segment_descriptor(ctxt
, sel
, ctxt
->modrm_reg
);
3751 static int em_sldt(struct x86_emulate_ctxt
*ctxt
)
3753 return em_store_sreg(ctxt
, VCPU_SREG_LDTR
);
3756 static int em_lldt(struct x86_emulate_ctxt
*ctxt
)
3758 u16 sel
= ctxt
->src
.val
;
3760 /* Disable writeback. */
3761 ctxt
->dst
.type
= OP_NONE
;
3762 return load_segment_descriptor(ctxt
, sel
, VCPU_SREG_LDTR
);
3765 static int em_str(struct x86_emulate_ctxt
*ctxt
)
3767 return em_store_sreg(ctxt
, VCPU_SREG_TR
);
3770 static int em_ltr(struct x86_emulate_ctxt
*ctxt
)
3772 u16 sel
= ctxt
->src
.val
;
3774 /* Disable writeback. */
3775 ctxt
->dst
.type
= OP_NONE
;
3776 return load_segment_descriptor(ctxt
, sel
, VCPU_SREG_TR
);
3779 static int em_invlpg(struct x86_emulate_ctxt
*ctxt
)
3784 rc
= linearize(ctxt
, ctxt
->src
.addr
.mem
, 1, false, &linear
);
3785 if (rc
== X86EMUL_CONTINUE
)
3786 ctxt
->ops
->invlpg(ctxt
, linear
);
3787 /* Disable writeback. */
3788 ctxt
->dst
.type
= OP_NONE
;
3789 return X86EMUL_CONTINUE
;
3792 static int em_clts(struct x86_emulate_ctxt
*ctxt
)
3796 cr0
= ctxt
->ops
->get_cr(ctxt
, 0);
3798 ctxt
->ops
->set_cr(ctxt
, 0, cr0
);
3799 return X86EMUL_CONTINUE
;
3802 static int em_hypercall(struct x86_emulate_ctxt
*ctxt
)
3804 int rc
= ctxt
->ops
->fix_hypercall(ctxt
);
3806 if (rc
!= X86EMUL_CONTINUE
)
3809 /* Let the processor re-execute the fixed hypercall */
3810 ctxt
->_eip
= ctxt
->eip
;
3811 /* Disable writeback. */
3812 ctxt
->dst
.type
= OP_NONE
;
3813 return X86EMUL_CONTINUE
;
3816 static int emulate_store_desc_ptr(struct x86_emulate_ctxt
*ctxt
,
3817 void (*get
)(struct x86_emulate_ctxt
*ctxt
,
3818 struct desc_ptr
*ptr
))
3820 struct desc_ptr desc_ptr
;
3822 if ((ctxt
->ops
->get_cr(ctxt
, 4) & X86_CR4_UMIP
) &&
3823 ctxt
->ops
->cpl(ctxt
) > 0)
3824 return emulate_gp(ctxt
, 0);
3826 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
3828 get(ctxt
, &desc_ptr
);
3829 if (ctxt
->op_bytes
== 2) {
3831 desc_ptr
.address
&= 0x00ffffff;
3833 /* Disable writeback. */
3834 ctxt
->dst
.type
= OP_NONE
;
3835 return segmented_write_std(ctxt
, ctxt
->dst
.addr
.mem
,
3836 &desc_ptr
, 2 + ctxt
->op_bytes
);
3839 static int em_sgdt(struct x86_emulate_ctxt
*ctxt
)
3841 return emulate_store_desc_ptr(ctxt
, ctxt
->ops
->get_gdt
);
3844 static int em_sidt(struct x86_emulate_ctxt
*ctxt
)
3846 return emulate_store_desc_ptr(ctxt
, ctxt
->ops
->get_idt
);
3849 static int em_lgdt_lidt(struct x86_emulate_ctxt
*ctxt
, bool lgdt
)
3851 struct desc_ptr desc_ptr
;
3854 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
3856 rc
= read_descriptor(ctxt
, ctxt
->src
.addr
.mem
,
3857 &desc_ptr
.size
, &desc_ptr
.address
,
3859 if (rc
!= X86EMUL_CONTINUE
)
3861 if (ctxt
->mode
== X86EMUL_MODE_PROT64
&&
3862 emul_is_noncanonical_address(desc_ptr
.address
, ctxt
))
3863 return emulate_gp(ctxt
, 0);
3865 ctxt
->ops
->set_gdt(ctxt
, &desc_ptr
);
3867 ctxt
->ops
->set_idt(ctxt
, &desc_ptr
);
3868 /* Disable writeback. */
3869 ctxt
->dst
.type
= OP_NONE
;
3870 return X86EMUL_CONTINUE
;
3873 static int em_lgdt(struct x86_emulate_ctxt
*ctxt
)
3875 return em_lgdt_lidt(ctxt
, true);
3878 static int em_lidt(struct x86_emulate_ctxt
*ctxt
)
3880 return em_lgdt_lidt(ctxt
, false);
3883 static int em_smsw(struct x86_emulate_ctxt
*ctxt
)
3885 if ((ctxt
->ops
->get_cr(ctxt
, 4) & X86_CR4_UMIP
) &&
3886 ctxt
->ops
->cpl(ctxt
) > 0)
3887 return emulate_gp(ctxt
, 0);
3889 if (ctxt
->dst
.type
== OP_MEM
)
3890 ctxt
->dst
.bytes
= 2;
3891 ctxt
->dst
.val
= ctxt
->ops
->get_cr(ctxt
, 0);
3892 return X86EMUL_CONTINUE
;
3895 static int em_lmsw(struct x86_emulate_ctxt
*ctxt
)
3897 ctxt
->ops
->set_cr(ctxt
, 0, (ctxt
->ops
->get_cr(ctxt
, 0) & ~0x0eul
)
3898 | (ctxt
->src
.val
& 0x0f));
3899 ctxt
->dst
.type
= OP_NONE
;
3900 return X86EMUL_CONTINUE
;
3903 static int em_loop(struct x86_emulate_ctxt
*ctxt
)
3905 int rc
= X86EMUL_CONTINUE
;
3907 register_address_increment(ctxt
, VCPU_REGS_RCX
, -1);
3908 if ((address_mask(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
)) != 0) &&
3909 (ctxt
->b
== 0xe2 || test_cc(ctxt
->b
^ 0x5, ctxt
->eflags
)))
3910 rc
= jmp_rel(ctxt
, ctxt
->src
.val
);
3915 static int em_jcxz(struct x86_emulate_ctxt
*ctxt
)
3917 int rc
= X86EMUL_CONTINUE
;
3919 if (address_mask(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
)) == 0)
3920 rc
= jmp_rel(ctxt
, ctxt
->src
.val
);
3925 static int em_in(struct x86_emulate_ctxt
*ctxt
)
3927 if (!pio_in_emulated(ctxt
, ctxt
->dst
.bytes
, ctxt
->src
.val
,
3929 return X86EMUL_IO_NEEDED
;
3931 return X86EMUL_CONTINUE
;
3934 static int em_out(struct x86_emulate_ctxt
*ctxt
)
3936 ctxt
->ops
->pio_out_emulated(ctxt
, ctxt
->src
.bytes
, ctxt
->dst
.val
,
3938 /* Disable writeback. */
3939 ctxt
->dst
.type
= OP_NONE
;
3940 return X86EMUL_CONTINUE
;
3943 static int em_cli(struct x86_emulate_ctxt
*ctxt
)
3945 if (emulator_bad_iopl(ctxt
))
3946 return emulate_gp(ctxt
, 0);
3948 ctxt
->eflags
&= ~X86_EFLAGS_IF
;
3949 return X86EMUL_CONTINUE
;
3952 static int em_sti(struct x86_emulate_ctxt
*ctxt
)
3954 if (emulator_bad_iopl(ctxt
))
3955 return emulate_gp(ctxt
, 0);
3957 ctxt
->interruptibility
= KVM_X86_SHADOW_INT_STI
;
3958 ctxt
->eflags
|= X86_EFLAGS_IF
;
3959 return X86EMUL_CONTINUE
;
3962 static int em_cpuid(struct x86_emulate_ctxt
*ctxt
)
3964 u32 eax
, ebx
, ecx
, edx
;
3967 ctxt
->ops
->get_msr(ctxt
, MSR_MISC_FEATURES_ENABLES
, &msr
);
3968 if (msr
& MSR_MISC_FEATURES_ENABLES_CPUID_FAULT
&&
3969 ctxt
->ops
->cpl(ctxt
)) {
3970 return emulate_gp(ctxt
, 0);
3973 eax
= reg_read(ctxt
, VCPU_REGS_RAX
);
3974 ecx
= reg_read(ctxt
, VCPU_REGS_RCX
);
3975 ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
, true);
3976 *reg_write(ctxt
, VCPU_REGS_RAX
) = eax
;
3977 *reg_write(ctxt
, VCPU_REGS_RBX
) = ebx
;
3978 *reg_write(ctxt
, VCPU_REGS_RCX
) = ecx
;
3979 *reg_write(ctxt
, VCPU_REGS_RDX
) = edx
;
3980 return X86EMUL_CONTINUE
;
3983 static int em_sahf(struct x86_emulate_ctxt
*ctxt
)
3987 flags
= X86_EFLAGS_CF
| X86_EFLAGS_PF
| X86_EFLAGS_AF
| X86_EFLAGS_ZF
|
3989 flags
&= *reg_rmw(ctxt
, VCPU_REGS_RAX
) >> 8;
3991 ctxt
->eflags
&= ~0xffUL
;
3992 ctxt
->eflags
|= flags
| X86_EFLAGS_FIXED
;
3993 return X86EMUL_CONTINUE
;
3996 static int em_lahf(struct x86_emulate_ctxt
*ctxt
)
3998 *reg_rmw(ctxt
, VCPU_REGS_RAX
) &= ~0xff00UL
;
3999 *reg_rmw(ctxt
, VCPU_REGS_RAX
) |= (ctxt
->eflags
& 0xff) << 8;
4000 return X86EMUL_CONTINUE
;
4003 static int em_bswap(struct x86_emulate_ctxt
*ctxt
)
4005 switch (ctxt
->op_bytes
) {
4006 #ifdef CONFIG_X86_64
4008 asm("bswap %0" : "+r"(ctxt
->dst
.val
));
4012 asm("bswap %0" : "+r"(*(u32
*)&ctxt
->dst
.val
));
4015 return X86EMUL_CONTINUE
;
4018 static int em_clflush(struct x86_emulate_ctxt
*ctxt
)
4020 /* emulating clflush regardless of cpuid */
4021 return X86EMUL_CONTINUE
;
4024 static int em_movsxd(struct x86_emulate_ctxt
*ctxt
)
4026 ctxt
->dst
.val
= (s32
) ctxt
->src
.val
;
4027 return X86EMUL_CONTINUE
;
4030 static int check_fxsr(struct x86_emulate_ctxt
*ctxt
)
4032 u32 eax
= 1, ebx
, ecx
= 0, edx
;
4034 ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
, false);
4035 if (!(edx
& FFL(FXSR
)))
4036 return emulate_ud(ctxt
);
4038 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
4039 return emulate_nm(ctxt
);
4042 * Don't emulate a case that should never be hit, instead of working
4043 * around a lack of fxsave64/fxrstor64 on old compilers.
4045 if (ctxt
->mode
>= X86EMUL_MODE_PROT64
)
4046 return X86EMUL_UNHANDLEABLE
;
4048 return X86EMUL_CONTINUE
;
4052 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
4053 * and restore MXCSR.
4055 static size_t __fxstate_size(int nregs
)
4057 return offsetof(struct fxregs_state
, xmm_space
[0]) + nregs
* 16;
4060 static inline size_t fxstate_size(struct x86_emulate_ctxt
*ctxt
)
4063 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
4064 return __fxstate_size(16);
4066 cr4_osfxsr
= ctxt
->ops
->get_cr(ctxt
, 4) & X86_CR4_OSFXSR
;
4067 return __fxstate_size(cr4_osfxsr
? 8 : 0);
4071 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
4074 * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
4075 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
4077 * 3) 64-bit mode with REX.W prefix
4078 * - like (2), but XMM 8-15 are being saved and restored
4079 * 4) 64-bit mode without REX.W prefix
4080 * - like (3), but FIP and FDP are 64 bit
4082 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4083 * desired result. (4) is not emulated.
4085 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4086 * and FPU DS) should match.
4088 static int em_fxsave(struct x86_emulate_ctxt
*ctxt
)
4090 struct fxregs_state fx_state
;
4093 rc
= check_fxsr(ctxt
);
4094 if (rc
!= X86EMUL_CONTINUE
)
4097 rc
= asm_safe("fxsave %[fx]", , [fx
] "+m"(fx_state
));
4099 if (rc
!= X86EMUL_CONTINUE
)
4102 return segmented_write_std(ctxt
, ctxt
->memop
.addr
.mem
, &fx_state
,
4103 fxstate_size(ctxt
));
4107 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4108 * in the host registers (via FXSAVE) instead, so they won't be modified.
4109 * (preemption has to stay disabled until FXRSTOR).
4111 * Use noinline to keep the stack for other functions called by callers small.
4113 static noinline
int fxregs_fixup(struct fxregs_state
*fx_state
,
4114 const size_t used_size
)
4116 struct fxregs_state fx_tmp
;
4119 rc
= asm_safe("fxsave %[fx]", , [fx
] "+m"(fx_tmp
));
4120 memcpy((void *)fx_state
+ used_size
, (void *)&fx_tmp
+ used_size
,
4121 __fxstate_size(16) - used_size
);
4126 static int em_fxrstor(struct x86_emulate_ctxt
*ctxt
)
4128 struct fxregs_state fx_state
;
4132 rc
= check_fxsr(ctxt
);
4133 if (rc
!= X86EMUL_CONTINUE
)
4136 size
= fxstate_size(ctxt
);
4137 rc
= segmented_read_std(ctxt
, ctxt
->memop
.addr
.mem
, &fx_state
, size
);
4138 if (rc
!= X86EMUL_CONTINUE
)
4141 if (size
< __fxstate_size(16)) {
4142 rc
= fxregs_fixup(&fx_state
, size
);
4143 if (rc
!= X86EMUL_CONTINUE
)
4147 if (fx_state
.mxcsr
>> 16) {
4148 rc
= emulate_gp(ctxt
, 0);
4152 if (rc
== X86EMUL_CONTINUE
)
4153 rc
= asm_safe("fxrstor %[fx]", : [fx
] "m"(fx_state
));
4159 static int em_xsetbv(struct x86_emulate_ctxt
*ctxt
)
4163 eax
= reg_read(ctxt
, VCPU_REGS_RAX
);
4164 edx
= reg_read(ctxt
, VCPU_REGS_RDX
);
4165 ecx
= reg_read(ctxt
, VCPU_REGS_RCX
);
4167 if (ctxt
->ops
->set_xcr(ctxt
, ecx
, ((u64
)edx
<< 32) | eax
))
4168 return emulate_gp(ctxt
, 0);
4170 return X86EMUL_CONTINUE
;
4173 static bool valid_cr(int nr
)
4185 static int check_cr_read(struct x86_emulate_ctxt
*ctxt
)
4187 if (!valid_cr(ctxt
->modrm_reg
))
4188 return emulate_ud(ctxt
);
4190 return X86EMUL_CONTINUE
;
4193 static int check_cr_write(struct x86_emulate_ctxt
*ctxt
)
4195 u64 new_val
= ctxt
->src
.val64
;
4196 int cr
= ctxt
->modrm_reg
;
4199 static u64 cr_reserved_bits
[] = {
4200 0xffffffff00000000ULL
,
4201 0, 0, 0, /* CR3 checked later */
4208 return emulate_ud(ctxt
);
4210 if (new_val
& cr_reserved_bits
[cr
])
4211 return emulate_gp(ctxt
, 0);
4216 if (((new_val
& X86_CR0_PG
) && !(new_val
& X86_CR0_PE
)) ||
4217 ((new_val
& X86_CR0_NW
) && !(new_val
& X86_CR0_CD
)))
4218 return emulate_gp(ctxt
, 0);
4220 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
4221 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
4223 if ((new_val
& X86_CR0_PG
) && (efer
& EFER_LME
) &&
4224 !(cr4
& X86_CR4_PAE
))
4225 return emulate_gp(ctxt
, 0);
4232 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
4233 if (efer
& EFER_LMA
) {
4235 u32 eax
, ebx
, ecx
, edx
;
4239 if (ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
,
4241 maxphyaddr
= eax
& 0xff;
4244 rsvd
= rsvd_bits(maxphyaddr
, 63);
4245 if (ctxt
->ops
->get_cr(ctxt
, 4) & X86_CR4_PCIDE
)
4246 rsvd
&= ~X86_CR3_PCID_NOFLUSH
;
4250 return emulate_gp(ctxt
, 0);
4255 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
4257 if ((efer
& EFER_LMA
) && !(new_val
& X86_CR4_PAE
))
4258 return emulate_gp(ctxt
, 0);
4264 return X86EMUL_CONTINUE
;
4267 static int check_dr7_gd(struct x86_emulate_ctxt
*ctxt
)
4271 ctxt
->ops
->get_dr(ctxt
, 7, &dr7
);
4273 /* Check if DR7.Global_Enable is set */
4274 return dr7
& (1 << 13);
4277 static int check_dr_read(struct x86_emulate_ctxt
*ctxt
)
4279 int dr
= ctxt
->modrm_reg
;
4283 return emulate_ud(ctxt
);
4285 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
4286 if ((cr4
& X86_CR4_DE
) && (dr
== 4 || dr
== 5))
4287 return emulate_ud(ctxt
);
4289 if (check_dr7_gd(ctxt
)) {
4292 ctxt
->ops
->get_dr(ctxt
, 6, &dr6
);
4293 dr6
&= ~DR_TRAP_BITS
;
4294 dr6
|= DR6_BD
| DR6_RTM
;
4295 ctxt
->ops
->set_dr(ctxt
, 6, dr6
);
4296 return emulate_db(ctxt
);
4299 return X86EMUL_CONTINUE
;
4302 static int check_dr_write(struct x86_emulate_ctxt
*ctxt
)
4304 u64 new_val
= ctxt
->src
.val64
;
4305 int dr
= ctxt
->modrm_reg
;
4307 if ((dr
== 6 || dr
== 7) && (new_val
& 0xffffffff00000000ULL
))
4308 return emulate_gp(ctxt
, 0);
4310 return check_dr_read(ctxt
);
4313 static int check_svme(struct x86_emulate_ctxt
*ctxt
)
4317 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
4319 if (!(efer
& EFER_SVME
))
4320 return emulate_ud(ctxt
);
4322 return X86EMUL_CONTINUE
;
4325 static int check_svme_pa(struct x86_emulate_ctxt
*ctxt
)
4327 u64 rax
= reg_read(ctxt
, VCPU_REGS_RAX
);
4329 /* Valid physical address? */
4330 if (rax
& 0xffff000000000000ULL
)
4331 return emulate_gp(ctxt
, 0);
4333 return check_svme(ctxt
);
4336 static int check_rdtsc(struct x86_emulate_ctxt
*ctxt
)
4338 u64 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
4340 if (cr4
& X86_CR4_TSD
&& ctxt
->ops
->cpl(ctxt
))
4341 return emulate_ud(ctxt
);
4343 return X86EMUL_CONTINUE
;
4346 static int check_rdpmc(struct x86_emulate_ctxt
*ctxt
)
4348 u64 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
4349 u64 rcx
= reg_read(ctxt
, VCPU_REGS_RCX
);
4352 * VMware allows access to these Pseduo-PMCs even when read via RDPMC
4353 * in Ring3 when CR4.PCE=0.
4355 if (enable_vmware_backdoor
&& is_vmware_backdoor_pmc(rcx
))
4356 return X86EMUL_CONTINUE
;
4358 if ((!(cr4
& X86_CR4_PCE
) && ctxt
->ops
->cpl(ctxt
)) ||
4359 ctxt
->ops
->check_pmc(ctxt
, rcx
))
4360 return emulate_gp(ctxt
, 0);
4362 return X86EMUL_CONTINUE
;
4365 static int check_perm_in(struct x86_emulate_ctxt
*ctxt
)
4367 ctxt
->dst
.bytes
= min(ctxt
->dst
.bytes
, 4u);
4368 if (!emulator_io_permited(ctxt
, ctxt
->src
.val
, ctxt
->dst
.bytes
))
4369 return emulate_gp(ctxt
, 0);
4371 return X86EMUL_CONTINUE
;
4374 static int check_perm_out(struct x86_emulate_ctxt
*ctxt
)
4376 ctxt
->src
.bytes
= min(ctxt
->src
.bytes
, 4u);
4377 if (!emulator_io_permited(ctxt
, ctxt
->dst
.val
, ctxt
->src
.bytes
))
4378 return emulate_gp(ctxt
, 0);
4380 return X86EMUL_CONTINUE
;
4383 #define D(_y) { .flags = (_y) }
4384 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4385 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4386 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4387 #define N D(NotImpl)
4388 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4389 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4390 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4391 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4392 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4393 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4394 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4395 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4396 #define II(_f, _e, _i) \
4397 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4398 #define IIP(_f, _e, _i, _p) \
4399 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4400 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4401 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4403 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4404 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4405 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4406 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4407 #define I2bvIP(_f, _e, _i, _p) \
4408 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4410 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4411 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4412 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4414 static const struct opcode group7_rm0[] = {
4416 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4420 static const struct opcode group7_rm1[] = {
4421 DI(SrcNone | Priv, monitor),
4422 DI(SrcNone | Priv, mwait),
4426 static const struct opcode group7_rm2[] = {
4428 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4432 static const struct opcode group7_rm3[] = {
4433 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4434 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4435 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4436 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4437 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4438 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4439 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4440 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4443 static const struct opcode group7_rm7[] = {
4445 DIP(SrcNone, rdtscp, check_rdtsc),
4449 static const struct opcode group1[] = {
4451 F(Lock | PageTable, em_or),
4454 F(Lock | PageTable, em_and),
4460 static const struct opcode group1A[] = {
4461 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4464 static const struct opcode group2[] = {
4465 F(DstMem | ModRM, em_rol),
4466 F(DstMem | ModRM, em_ror),
4467 F(DstMem | ModRM, em_rcl),
4468 F(DstMem | ModRM, em_rcr),
4469 F(DstMem | ModRM, em_shl),
4470 F(DstMem | ModRM, em_shr),
4471 F(DstMem | ModRM, em_shl),
4472 F(DstMem | ModRM, em_sar),
4475 static const struct opcode group3[] = {
4476 F(DstMem | SrcImm | NoWrite, em_test),
4477 F(DstMem | SrcImm | NoWrite, em_test),
4478 F(DstMem | SrcNone | Lock, em_not),
4479 F(DstMem | SrcNone | Lock, em_neg),
4480 F(DstXacc | Src2Mem, em_mul_ex),
4481 F(DstXacc | Src2Mem, em_imul_ex),
4482 F(DstXacc | Src2Mem, em_div_ex),
4483 F(DstXacc | Src2Mem, em_idiv_ex),
4486 static const struct opcode group4[] = {
4487 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4488 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4492 static const struct opcode group5[] = {
4493 F(DstMem | SrcNone | Lock, em_inc),
4494 F(DstMem | SrcNone | Lock, em_dec),
4495 I(SrcMem | NearBranch, em_call_near_abs),
4496 I(SrcMemFAddr | ImplicitOps, em_call_far),
4497 I(SrcMem | NearBranch, em_jmp_abs),
4498 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4499 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4502 static const struct opcode group6[] = {
4503 II(Prot | DstMem, em_sldt, sldt),
4504 II(Prot | DstMem, em_str, str),
4505 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4506 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4510 static const struct group_dual group7 = { {
4511 II(Mov | DstMem, em_sgdt, sgdt),
4512 II(Mov | DstMem, em_sidt, sidt),
4513 II(SrcMem | Priv, em_lgdt, lgdt),
4514 II(SrcMem | Priv, em_lidt, lidt),
4515 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4516 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4517 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4523 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4524 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4528 static const struct opcode group8[] = {
4530 F(DstMem | SrcImmByte | NoWrite, em_bt),
4531 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4532 F(DstMem | SrcImmByte | Lock, em_btr),
4533 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4537 * The "memory" destination is actually always a register, since we come
4538 * from the register case of group9.
4540 static const struct gprefix pfx_0f_c7_7 = {
4541 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
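/*
 * Note: this gprefix is only reached from the mod3 side of group9
 * (0f c7 /7), and the entry sits in the pfx_f3 slot, so "f3 0f c7 /7"
 * is what decodes as RDPID here; the intercept argument seemingly
 * reuses the rdtscp slot for it.
 */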
4545 static const struct group_dual group9 = { {
4546 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4548 N, N, N, N, N, N, N,
4549 GP(0, &pfx_0f_c7_7),
4552 static const struct opcode group11[] = {
4553 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4557 static const struct gprefix pfx_0f_ae_7 = {
4558 I(SrcMem | ByteOp, em_clflush), N, N, N,
4561 static const struct group_dual group15 = { {
4562 I(ModRM | Aligned16, em_fxsave),
4563 I(ModRM | Aligned16, em_fxrstor),
4564 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4566 N, N, N, N, N, N, N, N,
4569 static const struct gprefix pfx_0f_6f_0f_7f = {
4570 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4573 static const struct instr_dual instr_dual_0f_2b = {
4577 static const struct gprefix pfx_0f_2b = {
4578 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4581 static const struct gprefix pfx_0f_10_0f_11 = {
4582 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4585 static const struct gprefix pfx_0f_28_0f_29 = {
4586 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4589 static const struct gprefix pfx_0f_e7 = {
4590 N, I(Sse, em_mov), N, N,
4593 static const struct escape escape_d9 = { {
4594 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4597 N, N, N, N, N, N, N, N,
4599 N, N, N, N, N, N, N, N,
4601 N, N, N, N, N, N, N, N,
4603 N, N, N, N, N, N, N, N,
4605 N, N, N, N, N, N, N, N,
4607 N, N, N, N, N, N, N, N,
4609 N, N, N, N, N, N, N, N,
4611 N, N, N, N, N, N, N, N,
4614 static const struct escape escape_db = { {
4615 N, N, N, N, N, N, N, N,
4618 N, N, N, N, N, N, N, N,
4620 N, N, N, N, N, N, N, N,
4622 N, N, N, N, N, N, N, N,
4624 N, N, N, N, N, N, N, N,
4626 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4628 N, N, N, N, N, N, N, N,
4630 N, N, N, N, N, N, N, N,
4632 N, N, N, N, N, N, N, N,
4635 static const struct escape escape_dd = { {
4636 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4639 N, N, N, N, N, N, N, N,
4641 N, N, N, N, N, N, N, N,
4643 N, N, N, N, N, N, N, N,
4645 N, N, N, N, N, N, N, N,
4647 N, N, N, N, N, N, N, N,
4649 N, N, N, N, N, N, N, N,
4651 N, N, N, N, N, N, N, N,
4653 N, N, N, N, N, N, N, N,
4656 static const struct instr_dual instr_dual_0f_c3 = {
4657 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4660 static const struct mode_dual mode_dual_63 = {
4661 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4664 static const struct opcode opcode_table[256] = {
4666 F6ALU(Lock, em_add),
4667 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4668 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4670 F6ALU(Lock | PageTable, em_or),
4671 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4674 F6ALU(Lock, em_adc),
4675 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4676 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4678 F6ALU(Lock, em_sbb),
4679 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4680 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4682 F6ALU(Lock | PageTable, em_and), N, N,
4684 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4686 F6ALU(Lock, em_xor), N, N,
4688 F6ALU(NoWrite, em_cmp), N, N,
4690 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4692 X8(I(SrcReg | Stack, em_push)),
4694 X8(I(DstReg | Stack, em_pop)),
4696 I(ImplicitOps | Stack | No64, em_pusha),
4697 I(ImplicitOps | Stack | No64, em_popa),
4698 N, MD(ModRM, &mode_dual_63),
4701 I(SrcImm | Mov | Stack, em_push),
4702 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4703 I(SrcImmByte | Mov | Stack, em_push),
4704 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4705 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4706 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4708 X16(D(SrcImmByte | NearBranch)),
4710 G(ByteOp | DstMem | SrcImm, group1),
4711 G(DstMem | SrcImm, group1),
4712 G(ByteOp | DstMem | SrcImm | No64, group1),
4713 G(DstMem | SrcImmByte, group1),
4714 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4715 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4717 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4718 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4719 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4720 D(ModRM | SrcMem | NoAccess | DstReg),
4721 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4724 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4726 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4727 I(SrcImmFAddr | No64, em_call_far), N,
4728 II(ImplicitOps | Stack, em_pushf, pushf),
4729 II(ImplicitOps | Stack, em_popf, popf),
4730 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4732 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4733 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4734 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4735 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4737 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4738 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4739 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4740 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4742 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4744 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4746 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4747 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4748 I(ImplicitOps | NearBranch, em_ret),
4749 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4750 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4751 G(ByteOp, group11), G(0, group11),
4753 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4754 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4755 I(ImplicitOps, em_ret_far),
4756 D(ImplicitOps), DI(SrcImmByte, intn),
4757 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4759 G(Src2One | ByteOp, group2), G(Src2One, group2),
4760 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4761 I(DstAcc | SrcImmUByte | No64, em_aam),
4762 I(DstAcc | SrcImmUByte | No64, em_aad),
4763 F(DstAcc | ByteOp | No64, em_salc),
4764 I(DstAcc | SrcXLat | ByteOp, em_mov),
4766 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4768 X3(I(SrcImmByte | NearBranch, em_loop)),
4769 I(SrcImmByte | NearBranch, em_jcxz),
4770 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4771 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4773 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4774 I(SrcImmFAddr | No64, em_jmp_far),
4775 D(SrcImmByte | ImplicitOps | NearBranch),
4776 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4777 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4779 N, DI(ImplicitOps, icebp), N, N,
4780 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4781 G(ByteOp, group3), G(0, group3),
4783 D(ImplicitOps), D(ImplicitOps),
4784 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4785 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
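/*
 * Note: opcode_table[] is indexed directly by the primary opcode byte,
 * and the X3()/X7()/X8()/X16() wrappers appear to simply replicate one
 * entry across a run of consecutive opcodes (e.g. X8() for the
 * 0x50..0x57 push-reg row).
 */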
4788 static const struct opcode twobyte_table[256] = {
4790 G(0, group6), GD(0, &group7), N, N,
4791 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4792 II(ImplicitOps | Priv, em_clts, clts), N,
4793 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4794 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4796 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4797 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4799 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4800 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4802 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4803 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4804 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4806 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4809 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4810 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4811 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4814 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4815 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4816 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4817 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4818 I(ImplicitOps | EmulateOnUD, em_sysenter),
4819 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4821 N, N, N, N, N, N, N, N,
4823 X16(D(DstReg | SrcMem | ModRM)),
4825 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4830 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4835 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4837 X16(D(SrcImm | NearBranch)),
4839 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4841 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4842 II(ImplicitOps, em_cpuid, cpuid),
4843 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4844 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4845 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4847 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4848 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4849 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4850 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4851 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4852 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4854 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4855 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4856 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4857 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4858 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4859 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4863 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4864 I(DstReg | SrcMem | ModRM, em_bsf_c),
4865 I(DstReg | SrcMem | ModRM, em_bsr_c),
4866 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4868 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4869 N, ID(0, &instr_dual_0f_c3),
4870 N, N, N, GD(0, &group9),
4872 X8(I(DstReg, em_bswap)),
4874 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4876 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4877 N, N, N, N, N, N, N, N,
4879 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4882 static const struct instr_dual instr_dual_0f_38_f0 = {
4883 I(DstReg | SrcMem | Mov, em_movbe), N
4886 static const struct instr_dual instr_dual_0f_38_f1 = {
4887 I(DstMem | SrcReg | Mov, em_movbe), N
4890 static const struct gprefix three_byte_0f_38_f0 = {
4891 ID(0, &instr_dual_0f_38_f0), N, N, N
4894 static const struct gprefix three_byte_0f_38_f1 = {
4895 ID(0, &instr_dual_0f_38_f1), N, N, N
4899 * Insns below are selected by the prefix; the table is indexed by the third opcode byte.
4902 static const struct opcode opcode_map_0f_38[256] = {
4904 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4906 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4908 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4909 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
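/*
 * Note: only the 0f 38 f0/f1 rows carry real entries here; without a
 * mandatory prefix they decode as the MOVBE load and store forms (the
 * instr_dual wrappers rejecting register-only ModRM encodings), while
 * the f2-prefixed CRC32 encodings remain N and are not emulated.
 */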
4930 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4934 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4940 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4941 unsigned size, bool sign_extension)
4943 int rc = X86EMUL_CONTINUE;
4947 op->addr.mem.ea = ctxt->_eip;
4948 /* NB. Immediates are sign-extended as necessary. */
4949 switch (op->bytes) {
4951 op->val = insn_fetch(s8, ctxt);
4954 op->val = insn_fetch(s16, ctxt);
4957 op->val = insn_fetch(s32, ctxt);
4960 op->val = insn_fetch(s64, ctxt);
4963 if (!sign_extension) {
4964 switch (op->bytes) {
4972 op->val &= 0xffffffff;
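/*
 * Note: insn_fetch() above always pulls the immediate through a signed
 * type, so the !sign_extension path only has to mask the value back
 * down; e.g. an imm8 of 0x80 is read as -128 and then masked to 0x80
 * when a zero-extended immediate (OpImmUByte and friends) was asked for.
 */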
4980 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4983 int rc = X86EMUL_CONTINUE;
4987 decode_register_operand(ctxt, op);
4990 rc = decode_imm(ctxt, op, 1, false);
4993 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4997 if (ctxt->d & BitOp)
4998 fetch_bit_operand(ctxt);
4999 op->orig_val = op->val;
5002 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
5006 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5007 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5008 fetch_register_operand(op);
5009 op->orig_val = op->val;
5013 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
5014 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5015 fetch_register_operand(op);
5016 op->orig_val = op->val;
5019 if (ctxt->d & ByteOp) {
5024 op->bytes = ctxt->op_bytes;
5025 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5026 fetch_register_operand(op);
5027 op->orig_val = op->val;
5031 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5033 register_address(ctxt, VCPU_REGS_RDI);
5034 op->addr.mem.seg = VCPU_SREG_ES;
5041 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5042 fetch_register_operand(op);
5047 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
5050 rc = decode_imm(ctxt, op, 1, true);
5058 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5061 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5064 ctxt->memop.bytes = 1;
5065 if (ctxt->memop.type == OP_REG) {
5066 ctxt->memop.addr.reg = decode_register(ctxt,
5067 ctxt->modrm_rm, true);
5068 fetch_register_operand(&ctxt->memop);
5072 ctxt->memop.bytes = 2;
5075 ctxt->memop.bytes = 4;
5078 rc = decode_imm(ctxt, op, 2, false);
5081 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5085 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5087 register_address(ctxt, VCPU_REGS_RSI);
5088 op->addr.mem.seg = ctxt->seg_override;
5094 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5097 reg_read(ctxt, VCPU_REGS_RBX) +
5098 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5099 op->addr.mem.seg = ctxt->seg_override;
5104 op->addr.mem.ea = ctxt->_eip;
5105 op->bytes = ctxt->op_bytes + 2;
5106 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5109 ctxt->memop.bytes = ctxt->op_bytes + 2;
5113 op->val = VCPU_SREG_ES;
5117 op->val = VCPU_SREG_CS;
5121 op->val = VCPU_SREG_SS;
5125 op->val = VCPU_SREG_DS;
5129 op->val = VCPU_SREG_FS;
5133 op->val = VCPU_SREG_GS;
5136 /* Special instructions do their own operand decoding. */
5138 op->type = OP_NONE; /* Disable writeback. */
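/*
 * Note: any operand type not handled above falls through to OP_NONE,
 * so it is simply skipped at writeback time; OpImplicit appears to take
 * the same path, which is what the "do their own operand decoding"
 * comment above refers to.
 */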
5146 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5148 int rc = X86EMUL_CONTINUE;
5149 int mode = ctxt->mode;
5150 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5151 bool op_prefix = false;
5152 bool has_seg_override = false;
5153 struct opcode opcode;
5155 struct desc_struct desc;
5157 ctxt->memop.type = OP_NONE;
5158 ctxt->memopp = NULL;
5159 ctxt->_eip = ctxt->eip;
5160 ctxt->fetch.ptr = ctxt->fetch.data;
5161 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5162 ctxt->opcode_len = 1;
5164 memcpy(ctxt->fetch.data, insn, insn_len);
5166 rc = __do_insn_fetch_bytes(ctxt, 1);
5167 if (rc != X86EMUL_CONTINUE)
5172 case X86EMUL_MODE_REAL:
5173 case X86EMUL_MODE_VM86:
5174 def_op_bytes = def_ad_bytes = 2;
5175 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5177 def_op_bytes = def_ad_bytes = 4;
5179 case X86EMUL_MODE_PROT16:
5180 def_op_bytes = def_ad_bytes = 2;
5182 case X86EMUL_MODE_PROT32:
5183 def_op_bytes = def_ad_bytes = 4;
5185 #ifdef CONFIG_X86_64
5186 case X86EMUL_MODE_PROT64:
5192 return EMULATION_FAILED;
5195 ctxt->op_bytes = def_op_bytes;
5196 ctxt->ad_bytes = def_ad_bytes;
5198 /* Legacy prefixes. */
5200 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5201 case 0x66: /* operand-size override */
5203 /* switch between 2/4 bytes */
5204 ctxt->op_bytes = def_op_bytes ^ 6;
5206 case 0x67: /* address-size override */
5207 if (mode == X86EMUL_MODE_PROT64)
5208 /* switch between 4/8 bytes */
5209 ctxt->ad_bytes = def_ad_bytes ^ 12;
5211 /* switch between 2/4 bytes */
5212 ctxt->ad_bytes = def_ad_bytes ^ 6;
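/*
 * Note: the XOR trick just toggles between the two legal widths in the
 * 0x66/0x67 cases: 2 ^ 6 == 4 and 4 ^ 6 == 2 for the 16/32-bit sizes,
 * and 4 ^ 12 == 8 / 8 ^ 12 == 4 for the 64-bit address-size case above.
 */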
5214 case 0x26: /* ES override */
5215 case 0x2e: /* CS override */
5216 case 0x36: /* SS override */
5217 case 0x3e: /* DS override */
5218 has_seg_override = true;
5219 ctxt->seg_override = (ctxt->b >> 3) & 3;
5221 case 0x64: /* FS override */
5222 case 0x65: /* GS override */
5223 has_seg_override = true;
5224 ctxt->seg_override = ctxt->b & 7;
5226 case 0x40 ... 0x4f: /* REX */
5227 if (mode != X86EMUL_MODE_PROT64)
5229 ctxt->rex_prefix = ctxt->b;
5231 case 0xf0: /* LOCK */
5232 ctxt->lock_prefix = 1;
5234 case 0xf2: /* REPNE/REPNZ */
5235 case 0xf3: /* REP/REPE/REPZ */
5236 ctxt->rep_prefix = ctxt->b;
5242 /* Any legacy prefix after a REX prefix nullifies its effect. */
5244 ctxt->rex_prefix = 0;
5250 if (ctxt->rex_prefix & 8)
5251 ctxt->op_bytes = 8; /* REX.W */
5253 /* Opcode byte(s). */
5254 opcode = opcode_table[ctxt->b];
5255 /* Two-byte opcode? */
5256 if (ctxt->b == 0x0f) {
5257 ctxt->opcode_len = 2;
5258 ctxt->b = insn_fetch(u8, ctxt);
5259 opcode = twobyte_table[ctxt->b];
5261 /* 0F_38 opcode map */
5262 if (ctxt->b == 0x38) {
5263 ctxt->opcode_len = 3;
5264 ctxt->b = insn_fetch(u8, ctxt);
5265 opcode = opcode_map_0f_38[ctxt->b];
5268 ctxt->d = opcode.flags;
5270 if (ctxt->d & ModRM)
5271 ctxt->modrm = insn_fetch(u8, ctxt);
5273 /* vex-prefix instructions are not implemented */
5274 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5275 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5279 while (ctxt->d & GroupMask) {
5280 switch (ctxt->d & GroupMask) {
5282 goffset = (ctxt->modrm >> 3) & 7;
5283 opcode = opcode.u.group[goffset];
5286 goffset = (ctxt->modrm >> 3) & 7;
5287 if ((ctxt->modrm >> 6) == 3)
5288 opcode = opcode.u.gdual->mod3[goffset];
5290 opcode = opcode.u.gdual->mod012[goffset];
5293 goffset = ctxt->modrm & 7;
5294 opcode = opcode.u.group[goffset];
5297 if (ctxt->rep_prefix && op_prefix)
5298 return EMULATION_FAILED;
5299 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5300 switch (simd_prefix) {
5301 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5302 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5303 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5304 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
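/*
 * Note: for GroupPrefix entries the 0x66/0xf2/0xf3 bytes act as
 * mandatory-prefix opcode selectors rather than size/rep overrides,
 * which is why combining an operand-size prefix with a rep prefix is
 * rejected as EMULATION_FAILED just above.
 */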
5308 if (ctxt->modrm > 0xbf)
5309 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5311 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5314 if ((ctxt->modrm >> 6) == 3)
5315 opcode = opcode.u.idual->mod3;
5317 opcode = opcode.u.idual->mod012;
5320 if (ctxt->mode == X86EMUL_MODE_PROT64)
5321 opcode = opcode.u.mdual->mode64;
5323 opcode = opcode.u.mdual->mode32;
5326 return EMULATION_FAILED;
5329 ctxt->d &= ~(u64)GroupMask;
5330 ctxt->d |= opcode.flags;
5335 return EMULATION_FAILED;
5337 ctxt->execute = opcode.u.execute;
5339 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5340 return EMULATION_FAILED;
5342 if (unlikely(ctxt->d &
5343 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5346 * These are copied unconditionally here, and checked unconditionally
5347 * in x86_emulate_insn.
5349 ctxt->check_perm = opcode.check_perm;
5350 ctxt->intercept = opcode.intercept;
5352 if (ctxt->d & NotImpl)
5353 return EMULATION_FAILED;
5355 if (mode == X86EMUL_MODE_PROT64) {
5356 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5358 else if (ctxt->d & NearBranch)
5362 if (ctxt->d & Op3264) {
5363 if (mode == X86EMUL_MODE_PROT64)
5369 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5373 ctxt->op_bytes = 16;
5374 else if (ctxt->d & Mmx)
5378 /* ModRM and SIB bytes. */
5379 if (ctxt->d & ModRM) {
5380 rc = decode_modrm(ctxt, &ctxt->memop);
5381 if (!has_seg_override) {
5382 has_seg_override = true;
5383 ctxt->seg_override = ctxt->modrm_seg;
5385 } else if (ctxt->d & MemAbs)
5386 rc = decode_abs(ctxt, &ctxt->memop);
5387 if (rc != X86EMUL_CONTINUE)
5390 if (!has_seg_override)
5391 ctxt->seg_override = VCPU_SREG_DS;
5393 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5396 * Decode and fetch the source operand: register, memory
5399 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5400 if (rc != X86EMUL_CONTINUE)
5404 * Decode and fetch the second source operand: register, memory
5407 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5408 if (rc != X86EMUL_CONTINUE)
5411 /* Decode and fetch the destination operand: register or memory. */
5412 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5414 if (ctxt->rip_relative && likely(ctxt->memopp))
5415 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5416 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5419 if (rc == X86EMUL_PROPAGATE_FAULT)
5420 ctxt->have_exception = true;
5421 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5424 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5426 return ctxt->d & PageTable;
5429 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5431 /* The second termination condition only applies to REPE
5432 * and REPNE.  Test if the repeat string operation prefix is
5433 * REPE/REPZ or REPNE/REPNZ and, if it is, check the
5434 * corresponding termination condition according to:
5435 * - if REPE/REPZ and ZF = 0 then done
5436 * - if REPNE/REPNZ and ZF = 1 then done
5438 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5439 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5440 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5441 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5442 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5443 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
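/*
 * Note: 0xa6/0xa7 are CMPS and 0xae/0xaf are SCAS, the only string ops
 * that consume ZF; e.g. "repe cmpsb" stops as soon as a byte pair
 * differs (ZF cleared), even though RCX is still non-zero.
 */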
5449 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5453 rc = asm_safe("fwait");
5455 if (unlikely(rc != X86EMUL_CONTINUE))
5456 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5458 return X86EMUL_CONTINUE;
5461 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5464 if (op->type == OP_MM)
5465 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5468 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5470 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5472 if (!(ctxt->d & ByteOp))
5473 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5475 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5476 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5477 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5478 : "c"(ctxt->src2.val));
5480 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5481 if (!fop) /* exception is returned in fop variable */
5482 return emulate_de(ctxt);
5483 return X86EMUL_CONTINUE;
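/*
 * Note: the fastop stubs are generated in size order (b/w/l/q), each
 * FASTOP_SIZE apart, so the __ffs(dst.bytes) * FASTOP_SIZE adjustment
 * above selects the 2/4/8-byte variant (__ffs gives 1, 2 or 3) while
 * ByteOp uses the stub as-is; the asm wrapper round-trips the guest
 * flags through host EFLAGS around the CALL_NOSPEC indirect call.
 */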
5486 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5488 memset(&ctxt->rip_relative, 0,
5489 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5491 ctxt->io_read.pos = 0;
5492 ctxt->io_read.end = 0;
5493 ctxt->mem_read.end = 0;
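/*
 * Note: the memset relies on the per-instruction decode fields being
 * laid out contiguously in struct x86_emulate_ctxt, from rip_relative
 * up to (but not including) modrm; the io_read/mem_read caches are
 * reset separately, presumably because they live outside that range.
 */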
5496 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5498 const struct x86_emulate_ops *ops = ctxt->ops;
5499 int rc = X86EMUL_CONTINUE;
5500 int saved_dst_type = ctxt->dst.type;
5501 unsigned emul_flags;
5503 ctxt->mem_read.pos = 0;
5505 /* LOCK prefix is allowed only with some instructions */
5506 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5507 rc = emulate_ud(ctxt);
5511 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5512 rc = emulate_ud(ctxt);
5516 emul_flags = ctxt->ops->get_hflags(ctxt);
5517 if (unlikely(ctxt->d &
5518 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5519 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5520 (ctxt->d & Undefined)) {
5521 rc = emulate_ud(ctxt);
5525 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5526 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5527 rc = emulate_ud(ctxt);
5531 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5532 rc = emulate_nm(ctxt);
5536 if (ctxt->d & Mmx) {
5537 rc = flush_pending_x87_faults(ctxt);
5538 if (rc != X86EMUL_CONTINUE)
5541 * Now that we know the fpu is exception safe, we can fetch
5544 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5545 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5546 if (!(ctxt->d & Mov))
5547 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5550 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5551 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5552 X86_ICPT_PRE_EXCEPT);
5553 if (rc != X86EMUL_CONTINUE)
5557 /* Instruction can only be executed in protected mode */
5558 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5559 rc = emulate_ud(ctxt);
5563 /* Privileged instruction can be executed only in CPL=0 */
5564 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5565 if (ctxt->d & PrivUD)
5566 rc = emulate_ud(ctxt);
5568 rc = emulate_gp(ctxt, 0);
5572 /* Do instruction specific permission checks */
5573 if (ctxt->d & CheckPerm) {
5574 rc = ctxt->check_perm(ctxt);
5575 if (rc != X86EMUL_CONTINUE)
5579 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5580 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5581 X86_ICPT_POST_EXCEPT);
5582 if (rc != X86EMUL_CONTINUE)
5586 if (ctxt->rep_prefix && (ctxt->d & String)) {
5587 /* All REP prefixes have the same first termination condition */
5588 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5589 string_registers_quirk(ctxt);
5590 ctxt->eip = ctxt->_eip;
5591 ctxt->eflags &= ~X86_EFLAGS_RF;
5597 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5598 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5599 ctxt->src.valptr, ctxt->src.bytes);
5600 if (rc != X86EMUL_CONTINUE)
5602 ctxt->src.orig_val64 = ctxt->src.val64;
5605 if (ctxt->src2.type == OP_MEM) {
5606 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5607 &ctxt->src2.val, ctxt->src2.bytes);
5608 if (rc != X86EMUL_CONTINUE)
5612 if ((ctxt->d & DstMask) == ImplicitOps)
5616 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5617 /* optimisation - avoid slow emulated read if Mov */
5618 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5619 &ctxt->dst.val, ctxt->dst.bytes);
5620 if (rc != X86EMUL_CONTINUE) {
5621 if (!(ctxt->d & NoWrite) &&
5622 rc == X86EMUL_PROPAGATE_FAULT &&
5623 ctxt->exception.vector == PF_VECTOR)
5624 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5628 /* Copy full 64-bit value for CMPXCHG8B. */
5629 ctxt->dst.orig_val64 = ctxt->dst.val64;
5633 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5634 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5635 X86_ICPT_POST_MEMACCESS);
5636 if (rc != X86EMUL_CONTINUE)
5640 if (ctxt->rep_prefix && (ctxt->d & String))
5641 ctxt->eflags |= X86_EFLAGS_RF;
5643 ctxt->eflags &= ~X86_EFLAGS_RF;
5645 if (ctxt->execute) {
5646 if (ctxt->d & Fastop) {
5647 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5648 rc = fastop(ctxt, fop);
5649 if (rc != X86EMUL_CONTINUE)
5653 rc = ctxt->execute(ctxt);
5654 if (rc != X86EMUL_CONTINUE)
5659 if (ctxt->opcode_len == 2)
5661 else if (ctxt->opcode_len == 3)
5662 goto threebyte_insn;
5665 case 0x70 ... 0x7f: /* jcc (short) */
5666 if (test_cc(ctxt->b, ctxt->eflags))
5667 rc = jmp_rel(ctxt, ctxt->src.val);
5669 case 0x8d: /* lea r16/r32, m */
5670 ctxt->dst.val = ctxt->src.addr.mem.ea;
5672 case 0x90 ... 0x97: /* nop / xchg reg, rax */
5673 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5674 ctxt->dst.type = OP_NONE;
5678 case 0x98: /* cbw/cwde/cdqe */
5679 switch (ctxt->op_bytes) {
5680 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5681 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5682 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
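/*
 * Note: the destination here is rAX itself, so casting the low half to
 * a signed type performs the widening: op_bytes == 2 sign-extends AL
 * into AX, 4 extends AX into EAX, and 8 extends EAX into RAX.
 */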
5685 case 0xcc: /* int3 */
5686 rc = emulate_int(ctxt, 3);
5688 case 0xcd: /* int n */
5689 rc = emulate_int(ctxt, ctxt->src.val);
5691 case 0xce: /* into */
5692 if (ctxt->eflags & X86_EFLAGS_OF)
5693 rc = emulate_int(ctxt, 4);
5695 case 0xe9: /* jmp rel */
5696 case 0xeb: /* jmp rel short */
5697 rc = jmp_rel(ctxt, ctxt->src.val);
5698 ctxt->dst.type = OP_NONE; /* Disable writeback. */
5700 case 0xf4: /* hlt */
5701 ctxt->ops->halt(ctxt);
5703 case 0xf5: /* cmc */
5704 /* complement carry flag from eflags reg */
5705 ctxt->eflags ^= X86_EFLAGS_CF;
5707 case 0xf8: /* clc */
5708 ctxt->eflags &= ~X86_EFLAGS_CF;
5710 case 0xf9: /* stc */
5711 ctxt->eflags |= X86_EFLAGS_CF;
5713 case 0xfc: /* cld */
5714 ctxt->eflags &= ~X86_EFLAGS_DF;
5716 case 0xfd: /* std */
5717 ctxt->eflags |= X86_EFLAGS_DF;
5720 goto cannot_emulate;
5723 if (rc != X86EMUL_CONTINUE)
5727 if (ctxt->d & SrcWrite) {
5728 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5729 rc = writeback(ctxt, &ctxt->src);
5730 if (rc != X86EMUL_CONTINUE)
5733 if (!(ctxt->d & NoWrite)) {
5734 rc = writeback(ctxt, &ctxt->dst);
5735 if (rc != X86EMUL_CONTINUE)
5740 * restore dst type in case the decoding will be reused
5741 * (happens for string instructions)
5743 ctxt->dst.type = saved_dst_type;
5745 if ((ctxt->d & SrcMask) == SrcSI)
5746 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5748 if ((ctxt->d & DstMask) == DstDI)
5749 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5751 if (ctxt->rep_prefix && (ctxt->d & String)) {
5753 struct read_cache *r = &ctxt->io_read;
5754 if ((ctxt->d & SrcMask) == SrcSI)
5755 count = ctxt->src.count;
5757 count = ctxt->dst.count;
5758 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5760 if (!string_insn_completed(ctxt)) {
5762 * Re-enter guest when pio read ahead buffer is empty
5763 * or, if it is not used, after every 1024 iterations.
5765 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5766 (r->end == 0 || r->end != r->pos)) {
5768 * Reset read cache. Usually happens before
5769 * decode, but since instruction is restarted
5770 * we have to do it here.
5772 ctxt->mem_read.end = 0;
5773 writeback_registers(ctxt);
5774 return EMULATION_RESTART;
5776 goto done; /* skip rip writeback */
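/*
 * Note: 0x3ff masks the low ten bits of RCX, so without a PIO
 * read-ahead buffer the loop drops back to the guest once every 1024
 * iterations; with read-ahead in use it keeps restarting the insn
 * until the buffered data is consumed (r->end == r->pos) and only then
 * re-enters the guest.
 */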
5778 ctxt->eflags &= ~X86_EFLAGS_RF;
5781 ctxt->eip = ctxt->_eip;
5784 if (rc == X86EMUL_PROPAGATE_FAULT) {
5785 WARN_ON(ctxt->exception.vector > 0x1f);
5786 ctxt->have_exception = true;
5788 if (rc == X86EMUL_INTERCEPTED)
5789 return EMULATION_INTERCEPTED;
5791 if (rc == X86EMUL_CONTINUE)
5792 writeback_registers(ctxt);
5794 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5798 case 0x09: /* wbinvd */
5799 (ctxt->ops->wbinvd)(ctxt);
5801 case 0x08: /* invd */
5802 case 0x0d: /* GrpP (prefetch) */
5803 case 0x18: /* Grp16 (prefetch/nop) */
5804 case 0x1f: /* nop */
5806 case 0x20: /* mov cr, reg */
5807 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5809 case 0x21: /* mov from dr to reg */
5810 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5812 case 0x40 ... 0x4f: /* cmov */
5813 if (test_cc(ctxt->b, ctxt->eflags))
5814 ctxt->dst.val = ctxt->src.val;
5815 else if (ctxt->op_bytes != 4)
5816 ctxt->dst.type = OP_NONE; /* no writeback */
5818 case 0x80 ... 0x8f: /* jnz rel, etc*/
5819 if (test_cc(ctxt->b, ctxt->eflags))
5820 rc = jmp_rel(ctxt, ctxt->src.val);
5822 case 0x90 ... 0x9f: /* setcc r/m8 */
5823 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5825 case 0xb6 ... 0xb7: /* movzx */
5826 ctxt->dst.bytes = ctxt->op_bytes;
5827 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5828 : (u16) ctxt->src.val;
5830 case 0xbe ... 0xbf: /* movsx */
5831 ctxt->dst.bytes = ctxt->op_bytes;
5832 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5833 (s16) ctxt->src.val;
5836 goto cannot_emulate;
5841 if (rc != X86EMUL_CONTINUE)
5847 return EMULATION_FAILED;
5850 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5852 invalidate_registers(ctxt);
5855 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5857 writeback_registers(ctxt);
5860 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5862 if (ctxt->rep_prefix && (ctxt->d & String))
5865 if (ctxt->d & TwoMemOp)