// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/fpu/api.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)  /* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
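/*
 * For example, a descriptor such as DstReg | SrcMem | ModRM packs one Op*
 * value per operand slot; the decoder recovers each operand type with
 * (ctxt->d >> DstShift) & OpMask, (ctxt->d >> SrcShift) & OpMask, and so on.
 */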
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
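/*
 * The X*() helpers simply repeat an initializer: X4(DstReg) expands to
 * "DstReg, DstReg, DstReg, DstReg".  The opcode tables use them to fill
 * runs of identical entries compactly.
 */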
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
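/*
 * For example, an op emitted by FASTOP2() below consists of consecutive
 * FASTOP_SIZE-byte stubs, one per operand size; roughly, fastop() advances
 * the function pointer by __ffs(ctxt->dst.bytes) * FASTOP_SIZE for the
 * non-byte forms before making the indirect call.
 */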
struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};
struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}
static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}
static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}
static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}
static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)
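/*
 * e.g. after an emulated arithmetic op, only the bits in EFLAGS_MASK are
 * copied from the fastop result back into ctxt->eflags.
 */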
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

typedef void (*fastop_t)(struct fastop *);

static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
#define __FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"ret \n\t" \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_END \
	    ".popsection")

#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

#define FOPNOP() \
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))
#define FOP1E(op,  dst) \
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END
#define FOP2E(op,  dst, src) \
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
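/*
 * FASTOP2(add), for instance, emits em_add as a sequence of stubs
 * (addb/addw/addl and, on 64-bit, addq) operating on al/ax/eax/rax and
 * dl/dx/edx/rdx, each padded to FASTOP_SIZE bytes so the size variants
 * sit at fixed offsets from em_add.
 */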
/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	__FOP_RET(#op)

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;
/*
 * XXX: inoutclob user must know where the argument is being expanded.
 * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
	             "2:\n" \
	             ".pushsection .fixup, \"ax\"\n" \
	             "3: movl $1, %[_fault]\n" \
	             "   jmp  2b\n" \
	             ".popsection\n" \
	             _ASM_EXTABLE(1b, 3b) \
	             : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
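/*
 * Typical use: asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state)) executes the
 * instruction and yields X86EMUL_UNHANDLEABLE instead of an unhandled fault
 * if it raises an exception.
 */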
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}
static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}
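/*
 * For example, a 4-byte write of 0x1 to RAX through assign_register()
 * clears the upper 32 bits (matching hardware zero-extension), while
 * 1- and 2-byte writes leave the remaining bytes untouched.
 */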
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}
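/*
 * e.g. with a 16-bit address size (ad_bytes == 2), only the low 16 bits of
 * the register contribute to the effective address, so an ESI value of
 * 0x1ffff is treated as SI == 0xffff.
 */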
static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
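/*
 * e.g. a raw limit of 0xf with the granularity bit set scales to
 * (0xf << 12) | 0xfff == 0xffff, i.e. the limit is interpreted in 4KiB
 * pages instead of bytes.
 */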
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}
static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
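/*
 * e.g. a 16-byte MOVDQA operand must be 16-byte aligned while MOVDQU is
 * exempt; __linearize() enforces this by raising #GP when
 * la & (insn_alignment(ctxt, size) - 1) is non-zero.
 */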
685 static unsigned insn_alignment(struct x86_emulate_ctxt
*ctxt
, unsigned size
)
687 u64 alignment
= ctxt
->d
& AlignMask
;
689 if (likely(size
< 16))
704 static __always_inline
int __linearize(struct x86_emulate_ctxt
*ctxt
,
705 struct segmented_address addr
,
706 unsigned *max_size
, unsigned size
,
707 bool write
, bool fetch
,
708 enum x86emul_mode mode
, ulong
*linear
)
710 struct desc_struct desc
;
717 la
= seg_base(ctxt
, addr
.seg
) + addr
.ea
;
720 case X86EMUL_MODE_PROT64
:
722 va_bits
= ctxt_virt_addr_bits(ctxt
);
723 if (get_canonical(la
, va_bits
) != la
)
726 *max_size
= min_t(u64
, ~0u, (1ull << va_bits
) - la
);
727 if (size
> *max_size
)
731 *linear
= la
= (u32
)la
;
732 usable
= ctxt
->ops
->get_segment(ctxt
, &sel
, &desc
, NULL
,
736 /* code segment in protected mode or read-only data segment */
737 if ((((ctxt
->mode
!= X86EMUL_MODE_REAL
) && (desc
.type
& 8))
738 || !(desc
.type
& 2)) && write
)
740 /* unreadable code segment */
741 if (!fetch
&& (desc
.type
& 8) && !(desc
.type
& 2))
743 lim
= desc_limit_scaled(&desc
);
744 if (!(desc
.type
& 8) && (desc
.type
& 4)) {
745 /* expand-down segment */
748 lim
= desc
.d
? 0xffffffff : 0xffff;
752 if (lim
== 0xffffffff)
755 *max_size
= (u64
)lim
+ 1 - addr
.ea
;
756 if (size
> *max_size
)
761 if (la
& (insn_alignment(ctxt
, size
) - 1))
762 return emulate_gp(ctxt
, 0);
763 return X86EMUL_CONTINUE
;
765 if (addr
.seg
== VCPU_SREG_SS
)
766 return emulate_ss(ctxt
, 0);
768 return emulate_gp(ctxt
, 0);
771 static int linearize(struct x86_emulate_ctxt
*ctxt
,
772 struct segmented_address addr
,
773 unsigned size
, bool write
,
777 return __linearize(ctxt
, addr
, &max_size
, size
, write
, false,
781 static inline int assign_eip(struct x86_emulate_ctxt
*ctxt
, ulong dst
,
782 enum x86emul_mode mode
)
787 struct segmented_address addr
= { .seg
= VCPU_SREG_CS
,
790 if (ctxt
->op_bytes
!= sizeof(unsigned long))
791 addr
.ea
= dst
& ((1UL << (ctxt
->op_bytes
<< 3)) - 1);
792 rc
= __linearize(ctxt
, addr
, &max_size
, 1, false, true, mode
, &linear
);
793 if (rc
== X86EMUL_CONTINUE
)
794 ctxt
->_eip
= addr
.ea
;
798 static inline int assign_eip_near(struct x86_emulate_ctxt
*ctxt
, ulong dst
)
800 return assign_eip(ctxt
, dst
, ctxt
->mode
);
803 static int assign_eip_far(struct x86_emulate_ctxt
*ctxt
, ulong dst
,
804 const struct desc_struct
*cs_desc
)
806 enum x86emul_mode mode
= ctxt
->mode
;
810 if (ctxt
->mode
>= X86EMUL_MODE_PROT16
) {
814 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
816 mode
= X86EMUL_MODE_PROT64
;
818 mode
= X86EMUL_MODE_PROT32
; /* temporary value */
821 if (mode
== X86EMUL_MODE_PROT16
|| mode
== X86EMUL_MODE_PROT32
)
822 mode
= cs_desc
->d
? X86EMUL_MODE_PROT32
: X86EMUL_MODE_PROT16
;
823 rc
= assign_eip(ctxt
, dst
, mode
);
824 if (rc
== X86EMUL_CONTINUE
)
829 static inline int jmp_rel(struct x86_emulate_ctxt
*ctxt
, int rel
)
831 return assign_eip_near(ctxt
, ctxt
->_eip
+ rel
);
834 static int linear_read_system(struct x86_emulate_ctxt
*ctxt
, ulong linear
,
835 void *data
, unsigned size
)
837 return ctxt
->ops
->read_std(ctxt
, linear
, data
, size
, &ctxt
->exception
, true);
840 static int linear_write_system(struct x86_emulate_ctxt
*ctxt
,
841 ulong linear
, void *data
,
844 return ctxt
->ops
->write_std(ctxt
, linear
, data
, size
, &ctxt
->exception
, true);
847 static int segmented_read_std(struct x86_emulate_ctxt
*ctxt
,
848 struct segmented_address addr
,
855 rc
= linearize(ctxt
, addr
, size
, false, &linear
);
856 if (rc
!= X86EMUL_CONTINUE
)
858 return ctxt
->ops
->read_std(ctxt
, linear
, data
, size
, &ctxt
->exception
, false);
861 static int segmented_write_std(struct x86_emulate_ctxt
*ctxt
,
862 struct segmented_address addr
,
869 rc
= linearize(ctxt
, addr
, size
, true, &linear
);
870 if (rc
!= X86EMUL_CONTINUE
)
872 return ctxt
->ops
->write_std(ctxt
, linear
, data
, size
, &ctxt
->exception
, false);
876 * Prefetch the remaining bytes of the instruction without crossing page
877 * boundary if they are not in fetch_cache yet.
879 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt
*ctxt
, int op_size
)
882 unsigned size
, max_size
;
883 unsigned long linear
;
884 int cur_size
= ctxt
->fetch
.end
- ctxt
->fetch
.data
;
885 struct segmented_address addr
= { .seg
= VCPU_SREG_CS
,
886 .ea
= ctxt
->eip
+ cur_size
};
889 * We do not know exactly how many bytes will be needed, and
890 * __linearize is expensive, so fetch as much as possible. We
891 * just have to avoid going beyond the 15 byte limit, the end
892 * of the segment, or the end of the page.
894 * __linearize is called with size 0 so that it does not do any
895 * boundary check itself. Instead, we use max_size to check
898 rc
= __linearize(ctxt
, addr
, &max_size
, 0, false, true, ctxt
->mode
,
900 if (unlikely(rc
!= X86EMUL_CONTINUE
))
903 size
= min_t(unsigned, 15UL ^ cur_size
, max_size
);
904 size
= min_t(unsigned, size
, PAGE_SIZE
- offset_in_page(linear
));
907 * One instruction can only straddle two pages,
908 * and one has been loaded at the beginning of
909 * x86_decode_insn. So, if not enough bytes
910 * still, we must have hit the 15-byte boundary.
912 if (unlikely(size
< op_size
))
913 return emulate_gp(ctxt
, 0);
915 rc
= ctxt
->ops
->fetch(ctxt
, linear
, ctxt
->fetch
.end
,
916 size
, &ctxt
->exception
);
917 if (unlikely(rc
!= X86EMUL_CONTINUE
))
919 ctxt
->fetch
.end
+= size
;
920 return X86EMUL_CONTINUE
;
923 static __always_inline
int do_insn_fetch_bytes(struct x86_emulate_ctxt
*ctxt
,
926 unsigned done_size
= ctxt
->fetch
.end
- ctxt
->fetch
.ptr
;
928 if (unlikely(done_size
< size
))
929 return __do_insn_fetch_bytes(ctxt
, size
- done_size
);
931 return X86EMUL_CONTINUE
;
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})
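/*
 * Example: "ctxt->b = insn_fetch(u8, ctxt);" pulls one opcode byte from the
 * prefetch cache, advancing both _eip and fetch.ptr; on a fetch failure the
 * macro jumps to the caller's "done" label with rc already set.
 */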
958 * Given the 'reg' portion of a ModRM byte, and a register block, return a
959 * pointer into the block that addresses the relevant register.
960 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
962 static void *decode_register(struct x86_emulate_ctxt
*ctxt
, u8 modrm_reg
,
966 int highbyte_regs
= (ctxt
->rex_prefix
== 0) && byteop
;
968 if (highbyte_regs
&& modrm_reg
>= 4 && modrm_reg
< 8)
969 p
= (unsigned char *)reg_rmw(ctxt
, modrm_reg
& 3) + 1;
971 p
= reg_rmw(ctxt
, modrm_reg
);
975 static int read_descriptor(struct x86_emulate_ctxt
*ctxt
,
976 struct segmented_address addr
,
977 u16
*size
, unsigned long *address
, int op_bytes
)
984 rc
= segmented_read_std(ctxt
, addr
, size
, 2);
985 if (rc
!= X86EMUL_CONTINUE
)
988 rc
= segmented_read_std(ctxt
, addr
, address
, op_bytes
);
1002 FASTOP1SRC2(mul
, mul_ex
);
1003 FASTOP1SRC2(imul
, imul_ex
);
1004 FASTOP1SRC2EX(div
, div_ex
);
1005 FASTOP1SRC2EX(idiv
, idiv_ex
);
1034 FASTOP2R(cmp
, cmp_r
);
1036 static int em_bsf_c(struct x86_emulate_ctxt
*ctxt
)
1038 /* If src is zero, do not writeback, but update flags */
1039 if (ctxt
->src
.val
== 0)
1040 ctxt
->dst
.type
= OP_NONE
;
1041 return fastop(ctxt
, em_bsf
);
1044 static int em_bsr_c(struct x86_emulate_ctxt
*ctxt
)
1046 /* If src is zero, do not writeback, but update flags */
1047 if (ctxt
->src
.val
== 0)
1048 ctxt
->dst
.type
= OP_NONE
;
1049 return fastop(ctxt
, em_bsr
);
1052 static __always_inline u8
test_cc(unsigned int condition
, unsigned long flags
)
1055 void (*fop
)(void) = (void *)em_setcc
+ 4 * (condition
& 0xf);
1057 flags
= (flags
& EFLAGS_MASK
) | X86_EFLAGS_IF
;
1058 asm("push %[flags]; popf; " CALL_NOSPEC
1059 : "=a"(rc
) : [thunk_target
]"r"(fop
), [flags
]"r"(flags
));
1063 static void fetch_register_operand(struct operand
*op
)
1065 switch (op
->bytes
) {
1067 op
->val
= *(u8
*)op
->addr
.reg
;
1070 op
->val
= *(u16
*)op
->addr
.reg
;
1073 op
->val
= *(u32
*)op
->addr
.reg
;
1076 op
->val
= *(u64
*)op
->addr
.reg
;
1081 static void emulator_get_fpu(void)
1085 fpregs_assert_state_consistent();
1086 if (test_thread_flag(TIF_NEED_FPU_LOAD
))
1087 switch_fpu_return();
1090 static void emulator_put_fpu(void)
1095 static void read_sse_reg(sse128_t
*data
, int reg
)
1099 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data
)); break;
1100 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data
)); break;
1101 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data
)); break;
1102 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data
)); break;
1103 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data
)); break;
1104 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data
)); break;
1105 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data
)); break;
1106 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data
)); break;
1107 #ifdef CONFIG_X86_64
1108 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data
)); break;
1109 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data
)); break;
1110 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data
)); break;
1111 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data
)); break;
1112 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data
)); break;
1113 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data
)); break;
1114 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data
)); break;
1115 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data
)); break;
1122 static void write_sse_reg(sse128_t
*data
, int reg
)
1126 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data
)); break;
1127 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data
)); break;
1128 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data
)); break;
1129 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data
)); break;
1130 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data
)); break;
1131 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data
)); break;
1132 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data
)); break;
1133 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data
)); break;
1134 #ifdef CONFIG_X86_64
1135 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data
)); break;
1136 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data
)); break;
1137 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data
)); break;
1138 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data
)); break;
1139 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data
)); break;
1140 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data
)); break;
1141 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data
)); break;
1142 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data
)); break;
1149 static void read_mmx_reg(u64
*data
, int reg
)
1153 case 0: asm("movq %%mm0, %0" : "=m"(*data
)); break;
1154 case 1: asm("movq %%mm1, %0" : "=m"(*data
)); break;
1155 case 2: asm("movq %%mm2, %0" : "=m"(*data
)); break;
1156 case 3: asm("movq %%mm3, %0" : "=m"(*data
)); break;
1157 case 4: asm("movq %%mm4, %0" : "=m"(*data
)); break;
1158 case 5: asm("movq %%mm5, %0" : "=m"(*data
)); break;
1159 case 6: asm("movq %%mm6, %0" : "=m"(*data
)); break;
1160 case 7: asm("movq %%mm7, %0" : "=m"(*data
)); break;
1166 static void write_mmx_reg(u64
*data
, int reg
)
1170 case 0: asm("movq %0, %%mm0" : : "m"(*data
)); break;
1171 case 1: asm("movq %0, %%mm1" : : "m"(*data
)); break;
1172 case 2: asm("movq %0, %%mm2" : : "m"(*data
)); break;
1173 case 3: asm("movq %0, %%mm3" : : "m"(*data
)); break;
1174 case 4: asm("movq %0, %%mm4" : : "m"(*data
)); break;
1175 case 5: asm("movq %0, %%mm5" : : "m"(*data
)); break;
1176 case 6: asm("movq %0, %%mm6" : : "m"(*data
)); break;
1177 case 7: asm("movq %0, %%mm7" : : "m"(*data
)); break;
1183 static int em_fninit(struct x86_emulate_ctxt
*ctxt
)
1185 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
1186 return emulate_nm(ctxt
);
1189 asm volatile("fninit");
1191 return X86EMUL_CONTINUE
;
1194 static int em_fnstcw(struct x86_emulate_ctxt
*ctxt
)
1198 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
1199 return emulate_nm(ctxt
);
1202 asm volatile("fnstcw %0": "+m"(fcw
));
1205 ctxt
->dst
.val
= fcw
;
1207 return X86EMUL_CONTINUE
;
1210 static int em_fnstsw(struct x86_emulate_ctxt
*ctxt
)
1214 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
1215 return emulate_nm(ctxt
);
1218 asm volatile("fnstsw %0": "+m"(fsw
));
1221 ctxt
->dst
.val
= fsw
;
1223 return X86EMUL_CONTINUE
;
1226 static void decode_register_operand(struct x86_emulate_ctxt
*ctxt
,
1229 unsigned reg
= ctxt
->modrm_reg
;
1231 if (!(ctxt
->d
& ModRM
))
1232 reg
= (ctxt
->b
& 7) | ((ctxt
->rex_prefix
& 1) << 3);
1234 if (ctxt
->d
& Sse
) {
1238 read_sse_reg(&op
->vec_val
, reg
);
1241 if (ctxt
->d
& Mmx
) {
1250 op
->bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
1251 op
->addr
.reg
= decode_register(ctxt
, reg
, ctxt
->d
& ByteOp
);
1253 fetch_register_operand(op
);
1254 op
->orig_val
= op
->val
;
1257 static void adjust_modrm_seg(struct x86_emulate_ctxt
*ctxt
, int base_reg
)
1259 if (base_reg
== VCPU_REGS_RSP
|| base_reg
== VCPU_REGS_RBP
)
1260 ctxt
->modrm_seg
= VCPU_SREG_SS
;
1263 static int decode_modrm(struct x86_emulate_ctxt
*ctxt
,
1267 int index_reg
, base_reg
, scale
;
1268 int rc
= X86EMUL_CONTINUE
;
1271 ctxt
->modrm_reg
= ((ctxt
->rex_prefix
<< 1) & 8); /* REX.R */
1272 index_reg
= (ctxt
->rex_prefix
<< 2) & 8; /* REX.X */
1273 base_reg
= (ctxt
->rex_prefix
<< 3) & 8; /* REX.B */
1275 ctxt
->modrm_mod
= (ctxt
->modrm
& 0xc0) >> 6;
1276 ctxt
->modrm_reg
|= (ctxt
->modrm
& 0x38) >> 3;
1277 ctxt
->modrm_rm
= base_reg
| (ctxt
->modrm
& 0x07);
1278 ctxt
->modrm_seg
= VCPU_SREG_DS
;
1280 if (ctxt
->modrm_mod
== 3 || (ctxt
->d
& NoMod
)) {
1282 op
->bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
1283 op
->addr
.reg
= decode_register(ctxt
, ctxt
->modrm_rm
,
1285 if (ctxt
->d
& Sse
) {
1288 op
->addr
.xmm
= ctxt
->modrm_rm
;
1289 read_sse_reg(&op
->vec_val
, ctxt
->modrm_rm
);
1292 if (ctxt
->d
& Mmx
) {
1295 op
->addr
.mm
= ctxt
->modrm_rm
& 7;
1298 fetch_register_operand(op
);
1304 if (ctxt
->ad_bytes
== 2) {
1305 unsigned bx
= reg_read(ctxt
, VCPU_REGS_RBX
);
1306 unsigned bp
= reg_read(ctxt
, VCPU_REGS_RBP
);
1307 unsigned si
= reg_read(ctxt
, VCPU_REGS_RSI
);
1308 unsigned di
= reg_read(ctxt
, VCPU_REGS_RDI
);
1310 /* 16-bit ModR/M decode. */
1311 switch (ctxt
->modrm_mod
) {
1313 if (ctxt
->modrm_rm
== 6)
1314 modrm_ea
+= insn_fetch(u16
, ctxt
);
1317 modrm_ea
+= insn_fetch(s8
, ctxt
);
1320 modrm_ea
+= insn_fetch(u16
, ctxt
);
1323 switch (ctxt
->modrm_rm
) {
1325 modrm_ea
+= bx
+ si
;
1328 modrm_ea
+= bx
+ di
;
1331 modrm_ea
+= bp
+ si
;
1334 modrm_ea
+= bp
+ di
;
1343 if (ctxt
->modrm_mod
!= 0)
1350 if (ctxt
->modrm_rm
== 2 || ctxt
->modrm_rm
== 3 ||
1351 (ctxt
->modrm_rm
== 6 && ctxt
->modrm_mod
!= 0))
1352 ctxt
->modrm_seg
= VCPU_SREG_SS
;
1353 modrm_ea
= (u16
)modrm_ea
;
1355 /* 32/64-bit ModR/M decode. */
1356 if ((ctxt
->modrm_rm
& 7) == 4) {
1357 sib
= insn_fetch(u8
, ctxt
);
1358 index_reg
|= (sib
>> 3) & 7;
1359 base_reg
|= sib
& 7;
1362 if ((base_reg
& 7) == 5 && ctxt
->modrm_mod
== 0)
1363 modrm_ea
+= insn_fetch(s32
, ctxt
);
1365 modrm_ea
+= reg_read(ctxt
, base_reg
);
1366 adjust_modrm_seg(ctxt
, base_reg
);
1367 /* Increment ESP on POP [ESP] */
1368 if ((ctxt
->d
& IncSP
) &&
1369 base_reg
== VCPU_REGS_RSP
)
1370 modrm_ea
+= ctxt
->op_bytes
;
1373 modrm_ea
+= reg_read(ctxt
, index_reg
) << scale
;
1374 } else if ((ctxt
->modrm_rm
& 7) == 5 && ctxt
->modrm_mod
== 0) {
1375 modrm_ea
+= insn_fetch(s32
, ctxt
);
1376 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
1377 ctxt
->rip_relative
= 1;
1379 base_reg
= ctxt
->modrm_rm
;
1380 modrm_ea
+= reg_read(ctxt
, base_reg
);
1381 adjust_modrm_seg(ctxt
, base_reg
);
1383 switch (ctxt
->modrm_mod
) {
1385 modrm_ea
+= insn_fetch(s8
, ctxt
);
1388 modrm_ea
+= insn_fetch(s32
, ctxt
);
1392 op
->addr
.mem
.ea
= modrm_ea
;
1393 if (ctxt
->ad_bytes
!= 8)
1394 ctxt
->memop
.addr
.mem
.ea
= (u32
)ctxt
->memop
.addr
.mem
.ea
;
1400 static int decode_abs(struct x86_emulate_ctxt
*ctxt
,
1403 int rc
= X86EMUL_CONTINUE
;
1406 switch (ctxt
->ad_bytes
) {
1408 op
->addr
.mem
.ea
= insn_fetch(u16
, ctxt
);
1411 op
->addr
.mem
.ea
= insn_fetch(u32
, ctxt
);
1414 op
->addr
.mem
.ea
= insn_fetch(u64
, ctxt
);
1421 static void fetch_bit_operand(struct x86_emulate_ctxt
*ctxt
)
1425 if (ctxt
->dst
.type
== OP_MEM
&& ctxt
->src
.type
== OP_REG
) {
1426 mask
= ~((long)ctxt
->dst
.bytes
* 8 - 1);
1428 if (ctxt
->src
.bytes
== 2)
1429 sv
= (s16
)ctxt
->src
.val
& (s16
)mask
;
1430 else if (ctxt
->src
.bytes
== 4)
1431 sv
= (s32
)ctxt
->src
.val
& (s32
)mask
;
1433 sv
= (s64
)ctxt
->src
.val
& (s64
)mask
;
1435 ctxt
->dst
.addr
.mem
.ea
= address_mask(ctxt
,
1436 ctxt
->dst
.addr
.mem
.ea
+ (sv
>> 3));
1439 /* only subword offset */
1440 ctxt
->src
.val
&= (ctxt
->dst
.bytes
<< 3) - 1;
1443 static int read_emulated(struct x86_emulate_ctxt
*ctxt
,
1444 unsigned long addr
, void *dest
, unsigned size
)
1447 struct read_cache
*mc
= &ctxt
->mem_read
;
1449 if (mc
->pos
< mc
->end
)
1452 WARN_ON((mc
->end
+ size
) >= sizeof(mc
->data
));
1454 rc
= ctxt
->ops
->read_emulated(ctxt
, addr
, mc
->data
+ mc
->end
, size
,
1456 if (rc
!= X86EMUL_CONTINUE
)
1462 memcpy(dest
, mc
->data
+ mc
->pos
, size
);
1464 return X86EMUL_CONTINUE
;
1467 static int segmented_read(struct x86_emulate_ctxt
*ctxt
,
1468 struct segmented_address addr
,
1475 rc
= linearize(ctxt
, addr
, size
, false, &linear
);
1476 if (rc
!= X86EMUL_CONTINUE
)
1478 return read_emulated(ctxt
, linear
, data
, size
);
1481 static int segmented_write(struct x86_emulate_ctxt
*ctxt
,
1482 struct segmented_address addr
,
1489 rc
= linearize(ctxt
, addr
, size
, true, &linear
);
1490 if (rc
!= X86EMUL_CONTINUE
)
1492 return ctxt
->ops
->write_emulated(ctxt
, linear
, data
, size
,
1496 static int segmented_cmpxchg(struct x86_emulate_ctxt
*ctxt
,
1497 struct segmented_address addr
,
1498 const void *orig_data
, const void *data
,
1504 rc
= linearize(ctxt
, addr
, size
, true, &linear
);
1505 if (rc
!= X86EMUL_CONTINUE
)
1507 return ctxt
->ops
->cmpxchg_emulated(ctxt
, linear
, orig_data
, data
,
1508 size
, &ctxt
->exception
);
1511 static int pio_in_emulated(struct x86_emulate_ctxt
*ctxt
,
1512 unsigned int size
, unsigned short port
,
1515 struct read_cache
*rc
= &ctxt
->io_read
;
1517 if (rc
->pos
== rc
->end
) { /* refill pio read ahead */
1518 unsigned int in_page
, n
;
1519 unsigned int count
= ctxt
->rep_prefix
?
1520 address_mask(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
)) : 1;
1521 in_page
= (ctxt
->eflags
& X86_EFLAGS_DF
) ?
1522 offset_in_page(reg_read(ctxt
, VCPU_REGS_RDI
)) :
1523 PAGE_SIZE
- offset_in_page(reg_read(ctxt
, VCPU_REGS_RDI
));
1524 n
= min3(in_page
, (unsigned int)sizeof(rc
->data
) / size
, count
);
1527 rc
->pos
= rc
->end
= 0;
1528 if (!ctxt
->ops
->pio_in_emulated(ctxt
, size
, port
, rc
->data
, n
))
1533 if (ctxt
->rep_prefix
&& (ctxt
->d
& String
) &&
1534 !(ctxt
->eflags
& X86_EFLAGS_DF
)) {
1535 ctxt
->dst
.data
= rc
->data
+ rc
->pos
;
1536 ctxt
->dst
.type
= OP_MEM_STR
;
1537 ctxt
->dst
.count
= (rc
->end
- rc
->pos
) / size
;
1540 memcpy(dest
, rc
->data
+ rc
->pos
, size
);
1546 static int read_interrupt_descriptor(struct x86_emulate_ctxt
*ctxt
,
1547 u16 index
, struct desc_struct
*desc
)
1552 ctxt
->ops
->get_idt(ctxt
, &dt
);
1554 if (dt
.size
< index
* 8 + 7)
1555 return emulate_gp(ctxt
, index
<< 3 | 0x2);
1557 addr
= dt
.address
+ index
* 8;
1558 return linear_read_system(ctxt
, addr
, desc
, sizeof(*desc
));
1561 static void get_descriptor_table_ptr(struct x86_emulate_ctxt
*ctxt
,
1562 u16 selector
, struct desc_ptr
*dt
)
1564 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
1567 if (selector
& 1 << 2) {
1568 struct desc_struct desc
;
1571 memset(dt
, 0, sizeof(*dt
));
1572 if (!ops
->get_segment(ctxt
, &sel
, &desc
, &base3
,
1576 dt
->size
= desc_limit_scaled(&desc
); /* what if limit > 65535? */
1577 dt
->address
= get_desc_base(&desc
) | ((u64
)base3
<< 32);
1579 ops
->get_gdt(ctxt
, dt
);
1582 static int get_descriptor_ptr(struct x86_emulate_ctxt
*ctxt
,
1583 u16 selector
, ulong
*desc_addr_p
)
1586 u16 index
= selector
>> 3;
1589 get_descriptor_table_ptr(ctxt
, selector
, &dt
);
1591 if (dt
.size
< index
* 8 + 7)
1592 return emulate_gp(ctxt
, selector
& 0xfffc);
1594 addr
= dt
.address
+ index
* 8;
1596 #ifdef CONFIG_X86_64
1597 if (addr
>> 32 != 0) {
1600 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
1601 if (!(efer
& EFER_LMA
))
1606 *desc_addr_p
= addr
;
1607 return X86EMUL_CONTINUE
;
1610 /* allowed just for 8 bytes segments */
1611 static int read_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1612 u16 selector
, struct desc_struct
*desc
,
1617 rc
= get_descriptor_ptr(ctxt
, selector
, desc_addr_p
);
1618 if (rc
!= X86EMUL_CONTINUE
)
1621 return linear_read_system(ctxt
, *desc_addr_p
, desc
, sizeof(*desc
));
1624 /* allowed just for 8 bytes segments */
1625 static int write_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1626 u16 selector
, struct desc_struct
*desc
)
1631 rc
= get_descriptor_ptr(ctxt
, selector
, &addr
);
1632 if (rc
!= X86EMUL_CONTINUE
)
1635 return linear_write_system(ctxt
, addr
, desc
, sizeof(*desc
));
1638 static int __load_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1639 u16 selector
, int seg
, u8 cpl
,
1640 enum x86_transfer_type transfer
,
1641 struct desc_struct
*desc
)
1643 struct desc_struct seg_desc
, old_desc
;
1645 unsigned err_vec
= GP_VECTOR
;
1647 bool null_selector
= !(selector
& ~0x3); /* 0000-0003 are null */
1653 memset(&seg_desc
, 0, sizeof(seg_desc
));
1655 if (ctxt
->mode
== X86EMUL_MODE_REAL
) {
1656 /* set real mode segment descriptor (keep limit etc. for
1658 ctxt
->ops
->get_segment(ctxt
, &dummy
, &seg_desc
, NULL
, seg
);
1659 set_desc_base(&seg_desc
, selector
<< 4);
1661 } else if (seg
<= VCPU_SREG_GS
&& ctxt
->mode
== X86EMUL_MODE_VM86
) {
1662 /* VM86 needs a clean new segment descriptor */
1663 set_desc_base(&seg_desc
, selector
<< 4);
1664 set_desc_limit(&seg_desc
, 0xffff);
1674 /* TR should be in GDT only */
1675 if (seg
== VCPU_SREG_TR
&& (selector
& (1 << 2)))
1678 /* NULL selector is not valid for TR, CS and (except for long mode) SS */
1679 if (null_selector
) {
1680 if (seg
== VCPU_SREG_CS
|| seg
== VCPU_SREG_TR
)
1683 if (seg
== VCPU_SREG_SS
) {
1684 if (ctxt
->mode
!= X86EMUL_MODE_PROT64
|| rpl
!= cpl
)
1688 * ctxt->ops->set_segment expects the CPL to be in
1689 * SS.DPL, so fake an expand-up 32-bit data segment.
1699 /* Skip all following checks */
1703 ret
= read_segment_descriptor(ctxt
, selector
, &seg_desc
, &desc_addr
);
1704 if (ret
!= X86EMUL_CONTINUE
)
1707 err_code
= selector
& 0xfffc;
1708 err_vec
= (transfer
== X86_TRANSFER_TASK_SWITCH
) ? TS_VECTOR
:
1711 /* can't load system descriptor into segment selector */
1712 if (seg
<= VCPU_SREG_GS
&& !seg_desc
.s
) {
1713 if (transfer
== X86_TRANSFER_CALL_JMP
)
1714 return X86EMUL_UNHANDLEABLE
;
1719 err_vec
= (seg
== VCPU_SREG_SS
) ? SS_VECTOR
: NP_VECTOR
;
1728 * segment is not a writable data segment or segment
1729 * selector's RPL != CPL or segment selector's RPL != CPL
1731 if (rpl
!= cpl
|| (seg_desc
.type
& 0xa) != 0x2 || dpl
!= cpl
)
1735 if (!(seg_desc
.type
& 8))
1738 if (seg_desc
.type
& 4) {
1744 if (rpl
> cpl
|| dpl
!= cpl
)
1747 /* in long-mode d/b must be clear if l is set */
1748 if (seg_desc
.d
&& seg_desc
.l
) {
1751 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
1752 if (efer
& EFER_LMA
)
1756 /* CS(RPL) <- CPL */
1757 selector
= (selector
& 0xfffc) | cpl
;
1760 if (seg_desc
.s
|| (seg_desc
.type
!= 1 && seg_desc
.type
!= 9))
1762 old_desc
= seg_desc
;
1763 seg_desc
.type
|= 2; /* busy */
1764 ret
= ctxt
->ops
->cmpxchg_emulated(ctxt
, desc_addr
, &old_desc
, &seg_desc
,
1765 sizeof(seg_desc
), &ctxt
->exception
);
1766 if (ret
!= X86EMUL_CONTINUE
)
1769 case VCPU_SREG_LDTR
:
1770 if (seg_desc
.s
|| seg_desc
.type
!= 2)
1773 default: /* DS, ES, FS, or GS */
1775 * segment is not a data or readable code segment or
1776 * ((segment is a data or nonconforming code segment)
1777 * and (both RPL and CPL > DPL))
1779 if ((seg_desc
.type
& 0xa) == 0x8 ||
1780 (((seg_desc
.type
& 0xc) != 0xc) &&
1781 (rpl
> dpl
&& cpl
> dpl
)))
1787 /* mark segment as accessed */
1788 if (!(seg_desc
.type
& 1)) {
1790 ret
= write_segment_descriptor(ctxt
, selector
,
1792 if (ret
!= X86EMUL_CONTINUE
)
1795 } else if (ctxt
->mode
== X86EMUL_MODE_PROT64
) {
1796 ret
= linear_read_system(ctxt
, desc_addr
+8, &base3
, sizeof(base3
));
1797 if (ret
!= X86EMUL_CONTINUE
)
1799 if (emul_is_noncanonical_address(get_desc_base(&seg_desc
) |
1800 ((u64
)base3
<< 32), ctxt
))
1801 return emulate_gp(ctxt
, 0);
1804 ctxt
->ops
->set_segment(ctxt
, selector
, &seg_desc
, base3
, seg
);
1807 return X86EMUL_CONTINUE
;
1809 return emulate_exception(ctxt
, err_vec
, err_code
, true);
1812 static int load_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1813 u16 selector
, int seg
)
1815 u8 cpl
= ctxt
->ops
->cpl(ctxt
);
1818 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1819 * they can load it at CPL<3 (Intel's manual says only LSS can,
1822 * However, the Intel manual says that putting IST=1/DPL=3 in
1823 * an interrupt gate will result in SS=3 (the AMD manual instead
1824 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1825 * and only forbid it here.
1827 if (seg
== VCPU_SREG_SS
&& selector
== 3 &&
1828 ctxt
->mode
== X86EMUL_MODE_PROT64
)
1829 return emulate_exception(ctxt
, GP_VECTOR
, 0, true);
1831 return __load_segment_descriptor(ctxt
, selector
, seg
, cpl
,
1832 X86_TRANSFER_NONE
, NULL
);
1835 static void write_register_operand(struct operand
*op
)
1837 return assign_register(op
->addr
.reg
, op
->val
, op
->bytes
);
1840 static int writeback(struct x86_emulate_ctxt
*ctxt
, struct operand
*op
)
1844 write_register_operand(op
);
1847 if (ctxt
->lock_prefix
)
1848 return segmented_cmpxchg(ctxt
,
1854 return segmented_write(ctxt
,
1860 return segmented_write(ctxt
,
1863 op
->bytes
* op
->count
);
1866 write_sse_reg(&op
->vec_val
, op
->addr
.xmm
);
1869 write_mmx_reg(&op
->mm_val
, op
->addr
.mm
);
1877 return X86EMUL_CONTINUE
;
1880 static int push(struct x86_emulate_ctxt
*ctxt
, void *data
, int bytes
)
1882 struct segmented_address addr
;
1884 rsp_increment(ctxt
, -bytes
);
1885 addr
.ea
= reg_read(ctxt
, VCPU_REGS_RSP
) & stack_mask(ctxt
);
1886 addr
.seg
= VCPU_SREG_SS
;
1888 return segmented_write(ctxt
, addr
, data
, bytes
);
1891 static int em_push(struct x86_emulate_ctxt
*ctxt
)
1893 /* Disable writeback. */
1894 ctxt
->dst
.type
= OP_NONE
;
1895 return push(ctxt
, &ctxt
->src
.val
, ctxt
->op_bytes
);
1898 static int emulate_pop(struct x86_emulate_ctxt
*ctxt
,
1899 void *dest
, int len
)
1902 struct segmented_address addr
;
1904 addr
.ea
= reg_read(ctxt
, VCPU_REGS_RSP
) & stack_mask(ctxt
);
1905 addr
.seg
= VCPU_SREG_SS
;
1906 rc
= segmented_read(ctxt
, addr
, dest
, len
);
1907 if (rc
!= X86EMUL_CONTINUE
)
1910 rsp_increment(ctxt
, len
);
1914 static int em_pop(struct x86_emulate_ctxt
*ctxt
)
1916 return emulate_pop(ctxt
, &ctxt
->dst
.val
, ctxt
->op_bytes
);
1919 static int emulate_popf(struct x86_emulate_ctxt
*ctxt
,
1920 void *dest
, int len
)
1923 unsigned long val
, change_mask
;
1924 int iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> X86_EFLAGS_IOPL_BIT
;
1925 int cpl
= ctxt
->ops
->cpl(ctxt
);
1927 rc
= emulate_pop(ctxt
, &val
, len
);
1928 if (rc
!= X86EMUL_CONTINUE
)
1931 change_mask
= X86_EFLAGS_CF
| X86_EFLAGS_PF
| X86_EFLAGS_AF
|
1932 X86_EFLAGS_ZF
| X86_EFLAGS_SF
| X86_EFLAGS_OF
|
1933 X86_EFLAGS_TF
| X86_EFLAGS_DF
| X86_EFLAGS_NT
|
1934 X86_EFLAGS_AC
| X86_EFLAGS_ID
;
1936 switch(ctxt
->mode
) {
1937 case X86EMUL_MODE_PROT64
:
1938 case X86EMUL_MODE_PROT32
:
1939 case X86EMUL_MODE_PROT16
:
1941 change_mask
|= X86_EFLAGS_IOPL
;
1943 change_mask
|= X86_EFLAGS_IF
;
1945 case X86EMUL_MODE_VM86
:
1947 return emulate_gp(ctxt
, 0);
1948 change_mask
|= X86_EFLAGS_IF
;
1950 default: /* real mode */
1951 change_mask
|= (X86_EFLAGS_IOPL
| X86_EFLAGS_IF
);
1955 *(unsigned long *)dest
=
1956 (ctxt
->eflags
& ~change_mask
) | (val
& change_mask
);
1961 static int em_popf(struct x86_emulate_ctxt
*ctxt
)
1963 ctxt
->dst
.type
= OP_REG
;
1964 ctxt
->dst
.addr
.reg
= &ctxt
->eflags
;
1965 ctxt
->dst
.bytes
= ctxt
->op_bytes
;
1966 return emulate_popf(ctxt
, &ctxt
->dst
.val
, ctxt
->op_bytes
);
1969 static int em_enter(struct x86_emulate_ctxt
*ctxt
)
1972 unsigned frame_size
= ctxt
->src
.val
;
1973 unsigned nesting_level
= ctxt
->src2
.val
& 31;
1977 return X86EMUL_UNHANDLEABLE
;
1979 rbp
= reg_read(ctxt
, VCPU_REGS_RBP
);
1980 rc
= push(ctxt
, &rbp
, stack_size(ctxt
));
1981 if (rc
!= X86EMUL_CONTINUE
)
1983 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RBP
), reg_read(ctxt
, VCPU_REGS_RSP
),
1985 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RSP
),
1986 reg_read(ctxt
, VCPU_REGS_RSP
) - frame_size
,
1988 return X86EMUL_CONTINUE
;
1991 static int em_leave(struct x86_emulate_ctxt
*ctxt
)
1993 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RSP
), reg_read(ctxt
, VCPU_REGS_RBP
),
1995 return emulate_pop(ctxt
, reg_rmw(ctxt
, VCPU_REGS_RBP
), ctxt
->op_bytes
);
1998 static int em_push_sreg(struct x86_emulate_ctxt
*ctxt
)
2000 int seg
= ctxt
->src2
.val
;
2002 ctxt
->src
.val
= get_segment_selector(ctxt
, seg
);
2003 if (ctxt
->op_bytes
== 4) {
2004 rsp_increment(ctxt
, -2);
2008 return em_push(ctxt
);
2011 static int em_pop_sreg(struct x86_emulate_ctxt
*ctxt
)
2013 int seg
= ctxt
->src2
.val
;
2014 unsigned long selector
;
2017 rc
= emulate_pop(ctxt
, &selector
, 2);
2018 if (rc
!= X86EMUL_CONTINUE
)
2021 if (ctxt
->modrm_reg
== VCPU_SREG_SS
)
2022 ctxt
->interruptibility
= KVM_X86_SHADOW_INT_MOV_SS
;
2023 if (ctxt
->op_bytes
> 2)
2024 rsp_increment(ctxt
, ctxt
->op_bytes
- 2);
2026 rc
= load_segment_descriptor(ctxt
, (u16
)selector
, seg
);
2030 static int em_pusha(struct x86_emulate_ctxt
*ctxt
)
2032 unsigned long old_esp
= reg_read(ctxt
, VCPU_REGS_RSP
);
2033 int rc
= X86EMUL_CONTINUE
;
2034 int reg
= VCPU_REGS_RAX
;
2036 while (reg
<= VCPU_REGS_RDI
) {
2037 (reg
== VCPU_REGS_RSP
) ?
2038 (ctxt
->src
.val
= old_esp
) : (ctxt
->src
.val
= reg_read(ctxt
, reg
));
2041 if (rc
!= X86EMUL_CONTINUE
)
2050 static int em_pushf(struct x86_emulate_ctxt
*ctxt
)
2052 ctxt
->src
.val
= (unsigned long)ctxt
->eflags
& ~X86_EFLAGS_VM
;
2053 return em_push(ctxt
);
2056 static int em_popa(struct x86_emulate_ctxt
*ctxt
)
2058 int rc
= X86EMUL_CONTINUE
;
2059 int reg
= VCPU_REGS_RDI
;
2062 while (reg
>= VCPU_REGS_RAX
) {
2063 if (reg
== VCPU_REGS_RSP
) {
2064 rsp_increment(ctxt
, ctxt
->op_bytes
);
2068 rc
= emulate_pop(ctxt
, &val
, ctxt
->op_bytes
);
2069 if (rc
!= X86EMUL_CONTINUE
)
2071 assign_register(reg_rmw(ctxt
, reg
), val
, ctxt
->op_bytes
);
2077 static int __emulate_int_real(struct x86_emulate_ctxt
*ctxt
, int irq
)
2079 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2086 /* TODO: Add limit checks */
2087 ctxt
->src
.val
= ctxt
->eflags
;
2089 if (rc
!= X86EMUL_CONTINUE
)
2092 ctxt
->eflags
&= ~(X86_EFLAGS_IF
| X86_EFLAGS_TF
| X86_EFLAGS_AC
);
2094 ctxt
->src
.val
= get_segment_selector(ctxt
, VCPU_SREG_CS
);
2096 if (rc
!= X86EMUL_CONTINUE
)
2099 ctxt
->src
.val
= ctxt
->_eip
;
2101 if (rc
!= X86EMUL_CONTINUE
)
2104 ops
->get_idt(ctxt
, &dt
);
2106 eip_addr
= dt
.address
+ (irq
<< 2);
2107 cs_addr
= dt
.address
+ (irq
<< 2) + 2;
2109 rc
= linear_read_system(ctxt
, cs_addr
, &cs
, 2);
2110 if (rc
!= X86EMUL_CONTINUE
)
2113 rc
= linear_read_system(ctxt
, eip_addr
, &eip
, 2);
2114 if (rc
!= X86EMUL_CONTINUE
)
2117 rc
= load_segment_descriptor(ctxt
, cs
, VCPU_SREG_CS
);
2118 if (rc
!= X86EMUL_CONTINUE
)
2126 int emulate_int_real(struct x86_emulate_ctxt
*ctxt
, int irq
)
2130 invalidate_registers(ctxt
);
2131 rc
= __emulate_int_real(ctxt
, irq
);
2132 if (rc
== X86EMUL_CONTINUE
)
2133 writeback_registers(ctxt
);
2137 static int emulate_int(struct x86_emulate_ctxt
*ctxt
, int irq
)
2139 switch(ctxt
->mode
) {
2140 case X86EMUL_MODE_REAL
:
2141 return __emulate_int_real(ctxt
, irq
);
2142 case X86EMUL_MODE_VM86
:
2143 case X86EMUL_MODE_PROT16
:
2144 case X86EMUL_MODE_PROT32
:
2145 case X86EMUL_MODE_PROT64
:
2147 /* Protected mode interrupts unimplemented yet */
2148 return X86EMUL_UNHANDLEABLE
;
2152 static int emulate_iret_real(struct x86_emulate_ctxt
*ctxt
)
2154 int rc
= X86EMUL_CONTINUE
;
2155 unsigned long temp_eip
= 0;
2156 unsigned long temp_eflags
= 0;
2157 unsigned long cs
= 0;
2158 unsigned long mask
= X86_EFLAGS_CF
| X86_EFLAGS_PF
| X86_EFLAGS_AF
|
2159 X86_EFLAGS_ZF
| X86_EFLAGS_SF
| X86_EFLAGS_TF
|
2160 X86_EFLAGS_IF
| X86_EFLAGS_DF
| X86_EFLAGS_OF
|
2161 X86_EFLAGS_IOPL
| X86_EFLAGS_NT
| X86_EFLAGS_RF
|
2162 X86_EFLAGS_AC
| X86_EFLAGS_ID
|
2164 unsigned long vm86_mask
= X86_EFLAGS_VM
| X86_EFLAGS_VIF
|
2167 /* TODO: Add stack limit check */
2169 rc
= emulate_pop(ctxt
, &temp_eip
, ctxt
->op_bytes
);
2171 if (rc
!= X86EMUL_CONTINUE
)
2174 if (temp_eip
& ~0xffff)
2175 return emulate_gp(ctxt
, 0);
2177 rc
= emulate_pop(ctxt
, &cs
, ctxt
->op_bytes
);
2179 if (rc
!= X86EMUL_CONTINUE
)
2182 rc
= emulate_pop(ctxt
, &temp_eflags
, ctxt
->op_bytes
);
2184 if (rc
!= X86EMUL_CONTINUE
)
2187 rc
= load_segment_descriptor(ctxt
, (u16
)cs
, VCPU_SREG_CS
);
2189 if (rc
!= X86EMUL_CONTINUE
)
2192 ctxt
->_eip
= temp_eip
;
2194 if (ctxt
->op_bytes
== 4)
2195 ctxt
->eflags
= ((temp_eflags
& mask
) | (ctxt
->eflags
& vm86_mask
));
2196 else if (ctxt
->op_bytes
== 2) {
2197 ctxt
->eflags
&= ~0xffff;
2198 ctxt
->eflags
|= temp_eflags
;
2201 ctxt
->eflags
&= ~EFLG_RESERVED_ZEROS_MASK
; /* Clear reserved zeros */
2202 ctxt
->eflags
|= X86_EFLAGS_FIXED
;
2203 ctxt
->ops
->set_nmi_mask(ctxt
, false);
2208 static int em_iret(struct x86_emulate_ctxt
*ctxt
)
2210 switch(ctxt
->mode
) {
2211 case X86EMUL_MODE_REAL
:
2212 return emulate_iret_real(ctxt
);
2213 case X86EMUL_MODE_VM86
:
2214 case X86EMUL_MODE_PROT16
:
2215 case X86EMUL_MODE_PROT32
:
2216 case X86EMUL_MODE_PROT64
:
2218 /* iret from protected mode unimplemented yet */
2219 return X86EMUL_UNHANDLEABLE
;
2223 static int em_jmp_far(struct x86_emulate_ctxt
*ctxt
)
2227 struct desc_struct new_desc
;
2228 u8 cpl
= ctxt
->ops
->cpl(ctxt
);
2230 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
2232 rc
= __load_segment_descriptor(ctxt
, sel
, VCPU_SREG_CS
, cpl
,
2233 X86_TRANSFER_CALL_JMP
,
2235 if (rc
!= X86EMUL_CONTINUE
)
2238 rc
= assign_eip_far(ctxt
, ctxt
->src
.val
, &new_desc
);
2239 /* Error handling is not implemented. */
2240 if (rc
!= X86EMUL_CONTINUE
)
2241 return X86EMUL_UNHANDLEABLE
;
2246 static int em_jmp_abs(struct x86_emulate_ctxt
*ctxt
)
2248 return assign_eip_near(ctxt
, ctxt
->src
.val
);
2251 static int em_call_near_abs(struct x86_emulate_ctxt
*ctxt
)
2256 old_eip
= ctxt
->_eip
;
2257 rc
= assign_eip_near(ctxt
, ctxt
->src
.val
);
2258 if (rc
!= X86EMUL_CONTINUE
)
2260 ctxt
->src
.val
= old_eip
;
2265 static int em_cmpxchg8b(struct x86_emulate_ctxt
*ctxt
)
2267 u64 old
= ctxt
->dst
.orig_val64
;
2269 if (ctxt
->dst
.bytes
== 16)
2270 return X86EMUL_UNHANDLEABLE
;
2272 if (((u32
) (old
>> 0) != (u32
) reg_read(ctxt
, VCPU_REGS_RAX
)) ||
2273 ((u32
) (old
>> 32) != (u32
) reg_read(ctxt
, VCPU_REGS_RDX
))) {
2274 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
) (old
>> 0);
2275 *reg_write(ctxt
, VCPU_REGS_RDX
) = (u32
) (old
>> 32);
2276 ctxt
->eflags
&= ~X86_EFLAGS_ZF
;
2278 ctxt
->dst
.val64
= ((u64
)reg_read(ctxt
, VCPU_REGS_RCX
) << 32) |
2279 (u32
) reg_read(ctxt
, VCPU_REGS_RBX
);
2281 ctxt
->eflags
|= X86_EFLAGS_ZF
;
2283 return X86EMUL_CONTINUE
;
2286 static int em_ret(struct x86_emulate_ctxt
*ctxt
)
2291 rc
= emulate_pop(ctxt
, &eip
, ctxt
->op_bytes
);
2292 if (rc
!= X86EMUL_CONTINUE
)
2295 return assign_eip_near(ctxt
, eip
);
2298 static int em_ret_far(struct x86_emulate_ctxt
*ctxt
)
2301 unsigned long eip
, cs
;
2302 int cpl
= ctxt
->ops
->cpl(ctxt
);
2303 struct desc_struct new_desc
;
2305 rc
= emulate_pop(ctxt
, &eip
, ctxt
->op_bytes
);
2306 if (rc
!= X86EMUL_CONTINUE
)
2308 rc
= emulate_pop(ctxt
, &cs
, ctxt
->op_bytes
);
2309 if (rc
!= X86EMUL_CONTINUE
)
2311 /* Outer-privilege level return is not implemented */
2312 if (ctxt
->mode
>= X86EMUL_MODE_PROT16
&& (cs
& 3) > cpl
)
2313 return X86EMUL_UNHANDLEABLE
;
2314 rc
= __load_segment_descriptor(ctxt
, (u16
)cs
, VCPU_SREG_CS
, cpl
,
2317 if (rc
!= X86EMUL_CONTINUE
)
2319 rc
= assign_eip_far(ctxt
, eip
, &new_desc
);
2320 /* Error handling is not implemented. */
2321 if (rc
!= X86EMUL_CONTINUE
)
2322 return X86EMUL_UNHANDLEABLE
;
2327 static int em_ret_far_imm(struct x86_emulate_ctxt
*ctxt
)
2331 rc
= em_ret_far(ctxt
);
2332 if (rc
!= X86EMUL_CONTINUE
)
2334 rsp_increment(ctxt
, ctxt
->src
.val
);
2335 return X86EMUL_CONTINUE
;
2338 static int em_cmpxchg(struct x86_emulate_ctxt
*ctxt
)
2340 /* Save real source value, then compare EAX against destination. */
2341 ctxt
->dst
.orig_val
= ctxt
->dst
.val
;
2342 ctxt
->dst
.val
= reg_read(ctxt
, VCPU_REGS_RAX
);
2343 ctxt
->src
.orig_val
= ctxt
->src
.val
;
2344 ctxt
->src
.val
= ctxt
->dst
.orig_val
;
2345 fastop(ctxt
, em_cmp
);
2347 if (ctxt
->eflags
& X86_EFLAGS_ZF
) {
2348 /* Success: write back to memory; no update of EAX */
2349 ctxt
->src
.type
= OP_NONE
;
2350 ctxt
->dst
.val
= ctxt
->src
.orig_val
;
2352 /* Failure: write the value we saw to EAX. */
2353 ctxt
->src
.type
= OP_REG
;
2354 ctxt
->src
.addr
.reg
= reg_rmw(ctxt
, VCPU_REGS_RAX
);
2355 ctxt
->src
.val
= ctxt
->dst
.orig_val
;
2356 /* Create write-cycle to dest by writing the same value */
2357 ctxt
->dst
.val
= ctxt
->dst
.orig_val
;
2359 return X86EMUL_CONTINUE
;
2362 static int em_lseg(struct x86_emulate_ctxt
*ctxt
)
2364 int seg
= ctxt
->src2
.val
;
2368 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
2370 rc
= load_segment_descriptor(ctxt
, sel
, seg
);
2371 if (rc
!= X86EMUL_CONTINUE
)
2374 ctxt
->dst
.val
= ctxt
->src
.val
;
2378 static int emulator_has_longmode(struct x86_emulate_ctxt
*ctxt
)
2380 #ifdef CONFIG_X86_64
2381 return ctxt
->ops
->guest_has_long_mode(ctxt
);
2387 static void rsm_set_desc_flags(struct desc_struct
*desc
, u32 flags
)
2389 desc
->g
= (flags
>> 23) & 1;
2390 desc
->d
= (flags
>> 22) & 1;
2391 desc
->l
= (flags
>> 21) & 1;
2392 desc
->avl
= (flags
>> 20) & 1;
2393 desc
->p
= (flags
>> 15) & 1;
2394 desc
->dpl
= (flags
>> 13) & 3;
2395 desc
->s
= (flags
>> 12) & 1;
2396 desc
->type
= (flags
>> 8) & 15;
2399 static int rsm_load_seg_32(struct x86_emulate_ctxt
*ctxt
, const char *smstate
,
2402 struct desc_struct desc
;
2406 selector
= GET_SMSTATE(u32
, smstate
, 0x7fa8 + n
* 4);
2409 offset
= 0x7f84 + n
* 12;
2411 offset
= 0x7f2c + (n
- 3) * 12;
2413 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, offset
+ 8));
2414 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, offset
+ 4));
2415 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smstate
, offset
));
2416 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, 0, n
);
2417 return X86EMUL_CONTINUE
;
2420 #ifdef CONFIG_X86_64
2421 static int rsm_load_seg_64(struct x86_emulate_ctxt
*ctxt
, const char *smstate
,
2424 struct desc_struct desc
;
2429 offset
= 0x7e00 + n
* 16;
2431 selector
= GET_SMSTATE(u16
, smstate
, offset
);
2432 rsm_set_desc_flags(&desc
, GET_SMSTATE(u16
, smstate
, offset
+ 2) << 8);
2433 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, offset
+ 4));
2434 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, offset
+ 8));
2435 base3
= GET_SMSTATE(u32
, smstate
, offset
+ 12);
2437 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, base3
, n
);
2438 return X86EMUL_CONTINUE
;
2442 static int rsm_enter_protected_mode(struct x86_emulate_ctxt
*ctxt
,
2443 u64 cr0
, u64 cr3
, u64 cr4
)
2448 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2450 if (cr4
& X86_CR4_PCIDE
) {
2455 bad
= ctxt
->ops
->set_cr(ctxt
, 3, cr3
);
2457 return X86EMUL_UNHANDLEABLE
;
2460 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2461 * Then enable protected mode. However, PCID cannot be enabled
2462 * if EFER.LMA=0, so set it separately.
2464 bad
= ctxt
->ops
->set_cr(ctxt
, 4, cr4
& ~X86_CR4_PCIDE
);
2466 return X86EMUL_UNHANDLEABLE
;
2468 bad
= ctxt
->ops
->set_cr(ctxt
, 0, cr0
);
2470 return X86EMUL_UNHANDLEABLE
;
2472 if (cr4
& X86_CR4_PCIDE
) {
2473 bad
= ctxt
->ops
->set_cr(ctxt
, 4, cr4
);
2475 return X86EMUL_UNHANDLEABLE
;
2477 bad
= ctxt
->ops
->set_cr(ctxt
, 3, cr3
| pcid
);
2479 return X86EMUL_UNHANDLEABLE
;
2484 return X86EMUL_CONTINUE
;
2487 static int rsm_load_state_32(struct x86_emulate_ctxt
*ctxt
,
2488 const char *smstate
)
2490 struct desc_struct desc
;
2493 u32 val
, cr0
, cr3
, cr4
;
2496 cr0
= GET_SMSTATE(u32
, smstate
, 0x7ffc);
2497 cr3
= GET_SMSTATE(u32
, smstate
, 0x7ff8);
2498 ctxt
->eflags
= GET_SMSTATE(u32
, smstate
, 0x7ff4) | X86_EFLAGS_FIXED
;
2499 ctxt
->_eip
= GET_SMSTATE(u32
, smstate
, 0x7ff0);
2501 for (i
= 0; i
< 8; i
++)
2502 *reg_write(ctxt
, i
) = GET_SMSTATE(u32
, smstate
, 0x7fd0 + i
* 4);
2504 val
= GET_SMSTATE(u32
, smstate
, 0x7fcc);
2505 ctxt
->ops
->set_dr(ctxt
, 6, (val
& DR6_VOLATILE
) | DR6_FIXED_1
);
2506 val
= GET_SMSTATE(u32
, smstate
, 0x7fc8);
2507 ctxt
->ops
->set_dr(ctxt
, 7, (val
& DR7_VOLATILE
) | DR7_FIXED_1
);
2509 selector
= GET_SMSTATE(u32
, smstate
, 0x7fc4);
2510 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, 0x7f64));
2511 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, 0x7f60));
2512 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smstate
, 0x7f5c));
2513 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, 0, VCPU_SREG_TR
);
2515 selector
= GET_SMSTATE(u32
, smstate
, 0x7fc0);
2516 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, 0x7f80));
2517 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, 0x7f7c));
2518 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smstate
, 0x7f78));
2519 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, 0, VCPU_SREG_LDTR
);
2521 dt
.address
= GET_SMSTATE(u32
, smstate
, 0x7f74);
2522 dt
.size
= GET_SMSTATE(u32
, smstate
, 0x7f70);
2523 ctxt
->ops
->set_gdt(ctxt
, &dt
);
2525 dt
.address
= GET_SMSTATE(u32
, smstate
, 0x7f58);
2526 dt
.size
= GET_SMSTATE(u32
, smstate
, 0x7f54);
2527 ctxt
->ops
->set_idt(ctxt
, &dt
);
2529 for (i
= 0; i
< 6; i
++) {
2530 int r
= rsm_load_seg_32(ctxt
, smstate
, i
);
2531 if (r
!= X86EMUL_CONTINUE
)
2535 cr4
= GET_SMSTATE(u32
, smstate
, 0x7f14);
2537 ctxt
->ops
->set_smbase(ctxt
, GET_SMSTATE(u32
, smstate
, 0x7ef8));
2539 return rsm_enter_protected_mode(ctxt
, cr0
, cr3
, cr4
);
2542 #ifdef CONFIG_X86_64
2543 static int rsm_load_state_64(struct x86_emulate_ctxt
*ctxt
,
2544 const char *smstate
)
2546 struct desc_struct desc
;
2548 u64 val
, cr0
, cr3
, cr4
;
2553 for (i
= 0; i
< 16; i
++)
2554 *reg_write(ctxt
, i
) = GET_SMSTATE(u64
, smstate
, 0x7ff8 - i
* 8);
2556 ctxt
->_eip
= GET_SMSTATE(u64
, smstate
, 0x7f78);
2557 ctxt
->eflags
= GET_SMSTATE(u32
, smstate
, 0x7f70) | X86_EFLAGS_FIXED
;
2559 val
= GET_SMSTATE(u32
, smstate
, 0x7f68);
2560 ctxt
->ops
->set_dr(ctxt
, 6, (val
& DR6_VOLATILE
) | DR6_FIXED_1
);
2561 val
= GET_SMSTATE(u32
, smstate
, 0x7f60);
2562 ctxt
->ops
->set_dr(ctxt
, 7, (val
& DR7_VOLATILE
) | DR7_FIXED_1
);
2564 cr0
= GET_SMSTATE(u64
, smstate
, 0x7f58);
2565 cr3
= GET_SMSTATE(u64
, smstate
, 0x7f50);
2566 cr4
= GET_SMSTATE(u64
, smstate
, 0x7f48);
2567 ctxt
->ops
->set_smbase(ctxt
, GET_SMSTATE(u32
, smstate
, 0x7f00));
2568 val
= GET_SMSTATE(u64
, smstate
, 0x7ed0);
2569 ctxt
->ops
->set_msr(ctxt
, MSR_EFER
, val
& ~EFER_LMA
);
2571 selector
= GET_SMSTATE(u32
, smstate
, 0x7e90);
2572 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smstate
, 0x7e92) << 8);
2573 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, 0x7e94));
2574 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, 0x7e98));
2575 base3
= GET_SMSTATE(u32
, smstate
, 0x7e9c);
2576 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, base3
, VCPU_SREG_TR
);
2578 dt
.size
= GET_SMSTATE(u32
, smstate
, 0x7e84);
2579 dt
.address
= GET_SMSTATE(u64
, smstate
, 0x7e88);
2580 ctxt
->ops
->set_idt(ctxt
, &dt
);
2582 selector
= GET_SMSTATE(u32
, smstate
, 0x7e70);
2583 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smstate
, 0x7e72) << 8);
2584 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, 0x7e74));
2585 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, 0x7e78));
2586 base3
= GET_SMSTATE(u32
, smstate
, 0x7e7c);
2587 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, base3
, VCPU_SREG_LDTR
);
2589 dt
.size
= GET_SMSTATE(u32
, smstate
, 0x7e64);
2590 dt
.address
= GET_SMSTATE(u64
, smstate
, 0x7e68);
2591 ctxt
->ops
->set_gdt(ctxt
, &dt
);
2593 r
= rsm_enter_protected_mode(ctxt
, cr0
, cr3
, cr4
);
2594 if (r
!= X86EMUL_CONTINUE
)
2597 for (i
= 0; i
< 6; i
++) {
2598 r
= rsm_load_seg_64(ctxt
, smstate
, i
);
2599 if (r
!= X86EMUL_CONTINUE
)
2603 return X86EMUL_CONTINUE
;
2607 static int em_rsm(struct x86_emulate_ctxt
*ctxt
)
2609 unsigned long cr0
, cr4
, efer
;
2614 if ((ctxt
->ops
->get_hflags(ctxt
) & X86EMUL_SMM_MASK
) == 0)
2615 return emulate_ud(ctxt
);
2617 smbase
= ctxt
->ops
->get_smbase(ctxt
);
2619 ret
= ctxt
->ops
->read_phys(ctxt
, smbase
+ 0xfe00, buf
, sizeof(buf
));
2620 if (ret
!= X86EMUL_CONTINUE
)
2621 return X86EMUL_UNHANDLEABLE
;
2623 if ((ctxt
->ops
->get_hflags(ctxt
) & X86EMUL_SMM_INSIDE_NMI_MASK
) == 0)
2624 ctxt
->ops
->set_nmi_mask(ctxt
, false);
2626 ctxt
->ops
->set_hflags(ctxt
, ctxt
->ops
->get_hflags(ctxt
) &
2627 ~(X86EMUL_SMM_INSIDE_NMI_MASK
| X86EMUL_SMM_MASK
));
2630 * Get back to real mode, to prepare a safe state in which to load
2631 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2632 * supports long mode.
2634 if (emulator_has_longmode(ctxt
)) {
2635 struct desc_struct cs_desc
;
2637 /* Zero CR4.PCIDE before CR0.PG. */
2638 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
2639 if (cr4
& X86_CR4_PCIDE
)
2640 ctxt
->ops
->set_cr(ctxt
, 4, cr4
& ~X86_CR4_PCIDE
);
2642 /* A 32-bit code segment is required to clear EFER.LMA. */
2643 memset(&cs_desc
, 0, sizeof(cs_desc
));
2645 cs_desc
.s
= cs_desc
.g
= cs_desc
.p
= 1;
2646 ctxt
->ops
->set_segment(ctxt
, 0, &cs_desc
, 0, VCPU_SREG_CS
);
2649 /* For the 64-bit case, this will clear EFER.LMA. */
2650 cr0
= ctxt
->ops
->get_cr(ctxt
, 0);
2651 if (cr0
& X86_CR0_PE
)
2652 ctxt
->ops
->set_cr(ctxt
, 0, cr0
& ~(X86_CR0_PG
| X86_CR0_PE
));
2654 if (emulator_has_longmode(ctxt
)) {
2655 /* Clear CR4.PAE before clearing EFER.LME. */
2656 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
2657 if (cr4
& X86_CR4_PAE
)
2658 ctxt
->ops
->set_cr(ctxt
, 4, cr4
& ~X86_CR4_PAE
);
2660 /* And finally go back to 32-bit mode. */
2662 ctxt
->ops
->set_msr(ctxt
, MSR_EFER
, efer
);
2666 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2667 * vCPU state (e.g. enter guest mode) before loading state from the SMM
2670 if (ctxt
->ops
->pre_leave_smm(ctxt
, buf
))
2671 return X86EMUL_UNHANDLEABLE
;
2673 #ifdef CONFIG_X86_64
2674 if (emulator_has_longmode(ctxt
))
2675 ret
= rsm_load_state_64(ctxt
, buf
);
2678 ret
= rsm_load_state_32(ctxt
, buf
);
2680 if (ret
!= X86EMUL_CONTINUE
) {
2681 /* FIXME: should triple fault */
2682 return X86EMUL_UNHANDLEABLE
;
2685 ctxt
->ops
->post_leave_smm(ctxt
);
2687 return X86EMUL_CONTINUE
;
2691 setup_syscalls_segments(struct x86_emulate_ctxt
*ctxt
,
2692 struct desc_struct
*cs
, struct desc_struct
*ss
)
2694 cs
->l
= 0; /* will be adjusted later */
2695 set_desc_base(cs
, 0); /* flat segment */
2696 cs
->g
= 1; /* 4kb granularity */
2697 set_desc_limit(cs
, 0xfffff); /* 4GB limit */
2698 cs
->type
= 0x0b; /* Read, Execute, Accessed */
2700 cs
->dpl
= 0; /* will be adjusted later */
2705 set_desc_base(ss
, 0); /* flat segment */
2706 set_desc_limit(ss
, 0xfffff); /* 4GB limit */
2707 ss
->g
= 1; /* 4kb granularity */
2709 ss
->type
= 0x03; /* Read/Write, Accessed */
2710 ss
->d
= 1; /* 32bit stack segment */
2717 static bool vendor_intel(struct x86_emulate_ctxt
*ctxt
)
2719 u32 eax
, ebx
, ecx
, edx
;
2722 ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
, false);
2723 return ebx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2724 && ecx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2725 && edx
== X86EMUL_CPUID_VENDOR_GenuineIntel_edx
;
2728 static bool em_syscall_is_enabled(struct x86_emulate_ctxt
*ctxt
)
2730 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2731 u32 eax
, ebx
, ecx
, edx
;
2734 * syscall should always be enabled in longmode - so only become
2735 * vendor specific (cpuid) if other modes are active...
2737 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
2742 ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
, false);
2744 * Intel ("GenuineIntel")
2745 * remark: Intel CPUs only support "syscall" in 64bit
2746 * longmode. Also an 64bit guest with a
2747 * 32bit compat-app running will #UD !! While this
2748 * behaviour can be fixed (by emulating) into AMD
2749 * response - CPUs of AMD can't behave like Intel.
2751 if (ebx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
&&
2752 ecx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
&&
2753 edx
== X86EMUL_CPUID_VENDOR_GenuineIntel_edx
)
2756 /* AMD ("AuthenticAMD") */
2757 if (ebx
== X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx
&&
2758 ecx
== X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx
&&
2759 edx
== X86EMUL_CPUID_VENDOR_AuthenticAMD_edx
)
2762 /* AMD ("AMDisbetter!") */
2763 if (ebx
== X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx
&&
2764 ecx
== X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx
&&
2765 edx
== X86EMUL_CPUID_VENDOR_AMDisbetterI_edx
)
2768 /* Hygon ("HygonGenuine") */
2769 if (ebx
== X86EMUL_CPUID_VENDOR_HygonGenuine_ebx
&&
2770 ecx
== X86EMUL_CPUID_VENDOR_HygonGenuine_ecx
&&
2771 edx
== X86EMUL_CPUID_VENDOR_HygonGenuine_edx
)
2775 * default: (not Intel, not AMD, not Hygon), apply Intel's
2781 static int em_syscall(struct x86_emulate_ctxt
*ctxt
)
2783 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2784 struct desc_struct cs
, ss
;
2789 /* syscall is not available in real mode */
2790 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
2791 ctxt
->mode
== X86EMUL_MODE_VM86
)
2792 return emulate_ud(ctxt
);
2794 if (!(em_syscall_is_enabled(ctxt
)))
2795 return emulate_ud(ctxt
);
2797 ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
2798 if (!(efer
& EFER_SCE
))
2799 return emulate_ud(ctxt
);
2801 setup_syscalls_segments(ctxt
, &cs
, &ss
);
2802 ops
->get_msr(ctxt
, MSR_STAR
, &msr_data
);
2804 cs_sel
= (u16
)(msr_data
& 0xfffc);
2805 ss_sel
= (u16
)(msr_data
+ 8);
2807 if (efer
& EFER_LMA
) {
2811 ops
->set_segment(ctxt
, cs_sel
, &cs
, 0, VCPU_SREG_CS
);
2812 ops
->set_segment(ctxt
, ss_sel
, &ss
, 0, VCPU_SREG_SS
);
2814 *reg_write(ctxt
, VCPU_REGS_RCX
) = ctxt
->_eip
;
2815 if (efer
& EFER_LMA
) {
2816 #ifdef CONFIG_X86_64
2817 *reg_write(ctxt
, VCPU_REGS_R11
) = ctxt
->eflags
;
2820 ctxt
->mode
== X86EMUL_MODE_PROT64
?
2821 MSR_LSTAR
: MSR_CSTAR
, &msr_data
);
2822 ctxt
->_eip
= msr_data
;
2824 ops
->get_msr(ctxt
, MSR_SYSCALL_MASK
, &msr_data
);
2825 ctxt
->eflags
&= ~msr_data
;
2826 ctxt
->eflags
|= X86_EFLAGS_FIXED
;
2830 ops
->get_msr(ctxt
, MSR_STAR
, &msr_data
);
2831 ctxt
->_eip
= (u32
)msr_data
;
2833 ctxt
->eflags
&= ~(X86_EFLAGS_VM
| X86_EFLAGS_IF
);
2836 ctxt
->tf
= (ctxt
->eflags
& X86_EFLAGS_TF
) != 0;
2837 return X86EMUL_CONTINUE
;
2840 static int em_sysenter(struct x86_emulate_ctxt
*ctxt
)
2842 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2843 struct desc_struct cs
, ss
;
2848 ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
2849 /* inject #GP if in real mode */
2850 if (ctxt
->mode
== X86EMUL_MODE_REAL
)
2851 return emulate_gp(ctxt
, 0);
2854 * Not recognized on AMD in compat mode (but is recognized in legacy
2857 if ((ctxt
->mode
!= X86EMUL_MODE_PROT64
) && (efer
& EFER_LMA
)
2858 && !vendor_intel(ctxt
))
2859 return emulate_ud(ctxt
);
2861 /* sysenter/sysexit have not been tested in 64bit mode. */
2862 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
2863 return X86EMUL_UNHANDLEABLE
;
2865 ops
->get_msr(ctxt
, MSR_IA32_SYSENTER_CS
, &msr_data
);
2866 if ((msr_data
& 0xfffc) == 0x0)
2867 return emulate_gp(ctxt
, 0);
2869 setup_syscalls_segments(ctxt
, &cs
, &ss
);
2870 ctxt
->eflags
&= ~(X86_EFLAGS_VM
| X86_EFLAGS_IF
);
2871 cs_sel
= (u16
)msr_data
& ~SEGMENT_RPL_MASK
;
2872 ss_sel
= cs_sel
+ 8;
2873 if (efer
& EFER_LMA
) {
2878 ops
->set_segment(ctxt
, cs_sel
, &cs
, 0, VCPU_SREG_CS
);
2879 ops
->set_segment(ctxt
, ss_sel
, &ss
, 0, VCPU_SREG_SS
);
2881 ops
->get_msr(ctxt
, MSR_IA32_SYSENTER_EIP
, &msr_data
);
2882 ctxt
->_eip
= (efer
& EFER_LMA
) ? msr_data
: (u32
)msr_data
;
2884 ops
->get_msr(ctxt
, MSR_IA32_SYSENTER_ESP
, &msr_data
);
2885 *reg_write(ctxt
, VCPU_REGS_RSP
) = (efer
& EFER_LMA
) ? msr_data
:
2888 return X86EMUL_CONTINUE
;
2891 static int em_sysexit(struct x86_emulate_ctxt
*ctxt
)
2893 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2894 struct desc_struct cs
, ss
;
2895 u64 msr_data
, rcx
, rdx
;
2897 u16 cs_sel
= 0, ss_sel
= 0;
2899 /* inject #GP if in real mode or Virtual 8086 mode */
2900 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
2901 ctxt
->mode
== X86EMUL_MODE_VM86
)
2902 return emulate_gp(ctxt
, 0);
2904 setup_syscalls_segments(ctxt
, &cs
, &ss
);
2906 if ((ctxt
->rex_prefix
& 0x8) != 0x0)
2907 usermode
= X86EMUL_MODE_PROT64
;
2909 usermode
= X86EMUL_MODE_PROT32
;
2911 rcx
= reg_read(ctxt
, VCPU_REGS_RCX
);
2912 rdx
= reg_read(ctxt
, VCPU_REGS_RDX
);
2916 ops
->get_msr(ctxt
, MSR_IA32_SYSENTER_CS
, &msr_data
);
2918 case X86EMUL_MODE_PROT32
:
2919 cs_sel
= (u16
)(msr_data
+ 16);
2920 if ((msr_data
& 0xfffc) == 0x0)
2921 return emulate_gp(ctxt
, 0);
2922 ss_sel
= (u16
)(msr_data
+ 24);
2926 case X86EMUL_MODE_PROT64
:
2927 cs_sel
= (u16
)(msr_data
+ 32);
2928 if (msr_data
== 0x0)
2929 return emulate_gp(ctxt
, 0);
2930 ss_sel
= cs_sel
+ 8;
2933 if (emul_is_noncanonical_address(rcx
, ctxt
) ||
2934 emul_is_noncanonical_address(rdx
, ctxt
))
2935 return emulate_gp(ctxt
, 0);
2938 cs_sel
|= SEGMENT_RPL_MASK
;
2939 ss_sel
|= SEGMENT_RPL_MASK
;
2941 ops
->set_segment(ctxt
, cs_sel
, &cs
, 0, VCPU_SREG_CS
);
2942 ops
->set_segment(ctxt
, ss_sel
, &ss
, 0, VCPU_SREG_SS
);
2945 *reg_write(ctxt
, VCPU_REGS_RSP
) = rcx
;
2947 return X86EMUL_CONTINUE
;
2950 static bool emulator_bad_iopl(struct x86_emulate_ctxt
*ctxt
)
2953 if (ctxt
->mode
== X86EMUL_MODE_REAL
)
2955 if (ctxt
->mode
== X86EMUL_MODE_VM86
)
2957 iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> X86_EFLAGS_IOPL_BIT
;
2958 return ctxt
->ops
->cpl(ctxt
) > iopl
;
2961 #define VMWARE_PORT_VMPORT (0x5658)
2962 #define VMWARE_PORT_VMRPC (0x5659)
2964 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt
*ctxt
,
2967 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2968 struct desc_struct tr_seg
;
2971 u16 tr
, io_bitmap_ptr
, perm
, bit_idx
= port
& 0x7;
2972 unsigned mask
= (1 << len
) - 1;
2976 * VMware allows access to these ports even if denied
2977 * by TSS I/O permission bitmap. Mimic behavior.
2979 if (enable_vmware_backdoor
&&
2980 ((port
== VMWARE_PORT_VMPORT
) || (port
== VMWARE_PORT_VMRPC
)))
2983 ops
->get_segment(ctxt
, &tr
, &tr_seg
, &base3
, VCPU_SREG_TR
);
2986 if (desc_limit_scaled(&tr_seg
) < 103)
2988 base
= get_desc_base(&tr_seg
);
2989 #ifdef CONFIG_X86_64
2990 base
|= ((u64
)base3
) << 32;
2992 r
= ops
->read_std(ctxt
, base
+ 102, &io_bitmap_ptr
, 2, NULL
, true);
2993 if (r
!= X86EMUL_CONTINUE
)
2995 if (io_bitmap_ptr
+ port
/8 > desc_limit_scaled(&tr_seg
))
2997 r
= ops
->read_std(ctxt
, base
+ io_bitmap_ptr
+ port
/8, &perm
, 2, NULL
, true);
2998 if (r
!= X86EMUL_CONTINUE
)
3000 if ((perm
>> bit_idx
) & mask
)
3005 static bool emulator_io_permited(struct x86_emulate_ctxt
*ctxt
,
3011 if (emulator_bad_iopl(ctxt
))
3012 if (!emulator_io_port_access_allowed(ctxt
, port
, len
))
3015 ctxt
->perm_ok
= true;
3020 static void string_registers_quirk(struct x86_emulate_ctxt
*ctxt
)
3023 * Intel CPUs mask the counter and pointers in quite strange
3024 * manner when ECX is zero due to REP-string optimizations.
3026 #ifdef CONFIG_X86_64
3027 if (ctxt
->ad_bytes
!= 4 || !vendor_intel(ctxt
))
3030 *reg_write(ctxt
, VCPU_REGS_RCX
) = 0;
3033 case 0xa4: /* movsb */
3034 case 0xa5: /* movsd/w */
3035 *reg_rmw(ctxt
, VCPU_REGS_RSI
) &= (u32
)-1;
3037 case 0xaa: /* stosb */
3038 case 0xab: /* stosd/w */
3039 *reg_rmw(ctxt
, VCPU_REGS_RDI
) &= (u32
)-1;
3044 static void save_state_to_tss16(struct x86_emulate_ctxt
*ctxt
,
3045 struct tss_segment_16
*tss
)
3047 tss
->ip
= ctxt
->_eip
;
3048 tss
->flag
= ctxt
->eflags
;
3049 tss
->ax
= reg_read(ctxt
, VCPU_REGS_RAX
);
3050 tss
->cx
= reg_read(ctxt
, VCPU_REGS_RCX
);
3051 tss
->dx
= reg_read(ctxt
, VCPU_REGS_RDX
);
3052 tss
->bx
= reg_read(ctxt
, VCPU_REGS_RBX
);
3053 tss
->sp
= reg_read(ctxt
, VCPU_REGS_RSP
);
3054 tss
->bp
= reg_read(ctxt
, VCPU_REGS_RBP
);
3055 tss
->si
= reg_read(ctxt
, VCPU_REGS_RSI
);
3056 tss
->di
= reg_read(ctxt
, VCPU_REGS_RDI
);
3058 tss
->es
= get_segment_selector(ctxt
, VCPU_SREG_ES
);
3059 tss
->cs
= get_segment_selector(ctxt
, VCPU_SREG_CS
);
3060 tss
->ss
= get_segment_selector(ctxt
, VCPU_SREG_SS
);
3061 tss
->ds
= get_segment_selector(ctxt
, VCPU_SREG_DS
);
3062 tss
->ldt
= get_segment_selector(ctxt
, VCPU_SREG_LDTR
);
3065 static int load_state_from_tss16(struct x86_emulate_ctxt
*ctxt
,
3066 struct tss_segment_16
*tss
)
3071 ctxt
->_eip
= tss
->ip
;
3072 ctxt
->eflags
= tss
->flag
| 2;
3073 *reg_write(ctxt
, VCPU_REGS_RAX
) = tss
->ax
;
3074 *reg_write(ctxt
, VCPU_REGS_RCX
) = tss
->cx
;
3075 *reg_write(ctxt
, VCPU_REGS_RDX
) = tss
->dx
;
3076 *reg_write(ctxt
, VCPU_REGS_RBX
) = tss
->bx
;
3077 *reg_write(ctxt
, VCPU_REGS_RSP
) = tss
->sp
;
3078 *reg_write(ctxt
, VCPU_REGS_RBP
) = tss
->bp
;
3079 *reg_write(ctxt
, VCPU_REGS_RSI
) = tss
->si
;
3080 *reg_write(ctxt
, VCPU_REGS_RDI
) = tss
->di
;
3083 * SDM says that segment selectors are loaded before segment
3086 set_segment_selector(ctxt
, tss
->ldt
, VCPU_SREG_LDTR
);
3087 set_segment_selector(ctxt
, tss
->es
, VCPU_SREG_ES
);
3088 set_segment_selector(ctxt
, tss
->cs
, VCPU_SREG_CS
);
3089 set_segment_selector(ctxt
, tss
->ss
, VCPU_SREG_SS
);
3090 set_segment_selector(ctxt
, tss
->ds
, VCPU_SREG_DS
);
3095 * Now load segment descriptors. If fault happens at this stage
3096 * it is handled in a context of new task
3098 ret
= __load_segment_descriptor(ctxt
, tss
->ldt
, VCPU_SREG_LDTR
, cpl
,
3099 X86_TRANSFER_TASK_SWITCH
, NULL
);
3100 if (ret
!= X86EMUL_CONTINUE
)
3102 ret
= __load_segment_descriptor(ctxt
, tss
->es
, VCPU_SREG_ES
, cpl
,
3103 X86_TRANSFER_TASK_SWITCH
, NULL
);
3104 if (ret
!= X86EMUL_CONTINUE
)
3106 ret
= __load_segment_descriptor(ctxt
, tss
->cs
, VCPU_SREG_CS
, cpl
,
3107 X86_TRANSFER_TASK_SWITCH
, NULL
);
3108 if (ret
!= X86EMUL_CONTINUE
)
3110 ret
= __load_segment_descriptor(ctxt
, tss
->ss
, VCPU_SREG_SS
, cpl
,
3111 X86_TRANSFER_TASK_SWITCH
, NULL
);
3112 if (ret
!= X86EMUL_CONTINUE
)
3114 ret
= __load_segment_descriptor(ctxt
, tss
->ds
, VCPU_SREG_DS
, cpl
,
3115 X86_TRANSFER_TASK_SWITCH
, NULL
);
3116 if (ret
!= X86EMUL_CONTINUE
)
3119 return X86EMUL_CONTINUE
;
3122 static int task_switch_16(struct x86_emulate_ctxt
*ctxt
,
3123 u16 tss_selector
, u16 old_tss_sel
,
3124 ulong old_tss_base
, struct desc_struct
*new_desc
)
3126 struct tss_segment_16 tss_seg
;
3128 u32 new_tss_base
= get_desc_base(new_desc
);
3130 ret
= linear_read_system(ctxt
, old_tss_base
, &tss_seg
, sizeof(tss_seg
));
3131 if (ret
!= X86EMUL_CONTINUE
)
3134 save_state_to_tss16(ctxt
, &tss_seg
);
3136 ret
= linear_write_system(ctxt
, old_tss_base
, &tss_seg
, sizeof(tss_seg
));
3137 if (ret
!= X86EMUL_CONTINUE
)
3140 ret
= linear_read_system(ctxt
, new_tss_base
, &tss_seg
, sizeof(tss_seg
));
3141 if (ret
!= X86EMUL_CONTINUE
)
3144 if (old_tss_sel
!= 0xffff) {
3145 tss_seg
.prev_task_link
= old_tss_sel
;
3147 ret
= linear_write_system(ctxt
, new_tss_base
,
3148 &tss_seg
.prev_task_link
,
3149 sizeof(tss_seg
.prev_task_link
));
3150 if (ret
!= X86EMUL_CONTINUE
)
3154 return load_state_from_tss16(ctxt
, &tss_seg
);
3157 static void save_state_to_tss32(struct x86_emulate_ctxt
*ctxt
,
3158 struct tss_segment_32
*tss
)
3160 /* CR3 and ldt selector are not saved intentionally */
3161 tss
->eip
= ctxt
->_eip
;
3162 tss
->eflags
= ctxt
->eflags
;
3163 tss
->eax
= reg_read(ctxt
, VCPU_REGS_RAX
);
3164 tss
->ecx
= reg_read(ctxt
, VCPU_REGS_RCX
);
3165 tss
->edx
= reg_read(ctxt
, VCPU_REGS_RDX
);
3166 tss
->ebx
= reg_read(ctxt
, VCPU_REGS_RBX
);
3167 tss
->esp
= reg_read(ctxt
, VCPU_REGS_RSP
);
3168 tss
->ebp
= reg_read(ctxt
, VCPU_REGS_RBP
);
3169 tss
->esi
= reg_read(ctxt
, VCPU_REGS_RSI
);
3170 tss
->edi
= reg_read(ctxt
, VCPU_REGS_RDI
);
3172 tss
->es
= get_segment_selector(ctxt
, VCPU_SREG_ES
);
3173 tss
->cs
= get_segment_selector(ctxt
, VCPU_SREG_CS
);
3174 tss
->ss
= get_segment_selector(ctxt
, VCPU_SREG_SS
);
3175 tss
->ds
= get_segment_selector(ctxt
, VCPU_SREG_DS
);
3176 tss
->fs
= get_segment_selector(ctxt
, VCPU_SREG_FS
);
3177 tss
->gs
= get_segment_selector(ctxt
, VCPU_SREG_GS
);
3180 static int load_state_from_tss32(struct x86_emulate_ctxt
*ctxt
,
3181 struct tss_segment_32
*tss
)
3186 if (ctxt
->ops
->set_cr(ctxt
, 3, tss
->cr3
))
3187 return emulate_gp(ctxt
, 0);
3188 ctxt
->_eip
= tss
->eip
;
3189 ctxt
->eflags
= tss
->eflags
| 2;
3191 /* General purpose registers */
3192 *reg_write(ctxt
, VCPU_REGS_RAX
) = tss
->eax
;
3193 *reg_write(ctxt
, VCPU_REGS_RCX
) = tss
->ecx
;
3194 *reg_write(ctxt
, VCPU_REGS_RDX
) = tss
->edx
;
3195 *reg_write(ctxt
, VCPU_REGS_RBX
) = tss
->ebx
;
3196 *reg_write(ctxt
, VCPU_REGS_RSP
) = tss
->esp
;
3197 *reg_write(ctxt
, VCPU_REGS_RBP
) = tss
->ebp
;
3198 *reg_write(ctxt
, VCPU_REGS_RSI
) = tss
->esi
;
3199 *reg_write(ctxt
, VCPU_REGS_RDI
) = tss
->edi
;
3202 * SDM says that segment selectors are loaded before segment
3203 * descriptors. This is important because CPL checks will
3206 set_segment_selector(ctxt
, tss
->ldt_selector
, VCPU_SREG_LDTR
);
3207 set_segment_selector(ctxt
, tss
->es
, VCPU_SREG_ES
);
3208 set_segment_selector(ctxt
, tss
->cs
, VCPU_SREG_CS
);
3209 set_segment_selector(ctxt
, tss
->ss
, VCPU_SREG_SS
);
3210 set_segment_selector(ctxt
, tss
->ds
, VCPU_SREG_DS
);
3211 set_segment_selector(ctxt
, tss
->fs
, VCPU_SREG_FS
);
3212 set_segment_selector(ctxt
, tss
->gs
, VCPU_SREG_GS
);
3215 * If we're switching between Protected Mode and VM86, we need to make
3216 * sure to update the mode before loading the segment descriptors so
3217 * that the selectors are interpreted correctly.
3219 if (ctxt
->eflags
& X86_EFLAGS_VM
) {
3220 ctxt
->mode
= X86EMUL_MODE_VM86
;
3223 ctxt
->mode
= X86EMUL_MODE_PROT32
;
3228 * Now load segment descriptors. If fault happenes at this stage
3229 * it is handled in a context of new task
3231 ret
= __load_segment_descriptor(ctxt
, tss
->ldt_selector
, VCPU_SREG_LDTR
,
3232 cpl
, X86_TRANSFER_TASK_SWITCH
, NULL
);
3233 if (ret
!= X86EMUL_CONTINUE
)
3235 ret
= __load_segment_descriptor(ctxt
, tss
->es
, VCPU_SREG_ES
, cpl
,
3236 X86_TRANSFER_TASK_SWITCH
, NULL
);
3237 if (ret
!= X86EMUL_CONTINUE
)
3239 ret
= __load_segment_descriptor(ctxt
, tss
->cs
, VCPU_SREG_CS
, cpl
,
3240 X86_TRANSFER_TASK_SWITCH
, NULL
);
3241 if (ret
!= X86EMUL_CONTINUE
)
3243 ret
= __load_segment_descriptor(ctxt
, tss
->ss
, VCPU_SREG_SS
, cpl
,
3244 X86_TRANSFER_TASK_SWITCH
, NULL
);
3245 if (ret
!= X86EMUL_CONTINUE
)
3247 ret
= __load_segment_descriptor(ctxt
, tss
->ds
, VCPU_SREG_DS
, cpl
,
3248 X86_TRANSFER_TASK_SWITCH
, NULL
);
3249 if (ret
!= X86EMUL_CONTINUE
)
3251 ret
= __load_segment_descriptor(ctxt
, tss
->fs
, VCPU_SREG_FS
, cpl
,
3252 X86_TRANSFER_TASK_SWITCH
, NULL
);
3253 if (ret
!= X86EMUL_CONTINUE
)
3255 ret
= __load_segment_descriptor(ctxt
, tss
->gs
, VCPU_SREG_GS
, cpl
,
3256 X86_TRANSFER_TASK_SWITCH
, NULL
);
3261 static int task_switch_32(struct x86_emulate_ctxt
*ctxt
,
3262 u16 tss_selector
, u16 old_tss_sel
,
3263 ulong old_tss_base
, struct desc_struct
*new_desc
)
3265 struct tss_segment_32 tss_seg
;
3267 u32 new_tss_base
= get_desc_base(new_desc
);
3268 u32 eip_offset
= offsetof(struct tss_segment_32
, eip
);
3269 u32 ldt_sel_offset
= offsetof(struct tss_segment_32
, ldt_selector
);
3271 ret
= linear_read_system(ctxt
, old_tss_base
, &tss_seg
, sizeof(tss_seg
));
3272 if (ret
!= X86EMUL_CONTINUE
)
3275 save_state_to_tss32(ctxt
, &tss_seg
);
3277 /* Only GP registers and segment selectors are saved */
3278 ret
= linear_write_system(ctxt
, old_tss_base
+ eip_offset
, &tss_seg
.eip
,
3279 ldt_sel_offset
- eip_offset
);
3280 if (ret
!= X86EMUL_CONTINUE
)
3283 ret
= linear_read_system(ctxt
, new_tss_base
, &tss_seg
, sizeof(tss_seg
));
3284 if (ret
!= X86EMUL_CONTINUE
)
3287 if (old_tss_sel
!= 0xffff) {
3288 tss_seg
.prev_task_link
= old_tss_sel
;
3290 ret
= linear_write_system(ctxt
, new_tss_base
,
3291 &tss_seg
.prev_task_link
,
3292 sizeof(tss_seg
.prev_task_link
));
3293 if (ret
!= X86EMUL_CONTINUE
)
3297 return load_state_from_tss32(ctxt
, &tss_seg
);
3300 static int emulator_do_task_switch(struct x86_emulate_ctxt
*ctxt
,
3301 u16 tss_selector
, int idt_index
, int reason
,
3302 bool has_error_code
, u32 error_code
)
3304 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
3305 struct desc_struct curr_tss_desc
, next_tss_desc
;
3307 u16 old_tss_sel
= get_segment_selector(ctxt
, VCPU_SREG_TR
);
3308 ulong old_tss_base
=
3309 ops
->get_cached_segment_base(ctxt
, VCPU_SREG_TR
);
3311 ulong desc_addr
, dr7
;
3313 /* FIXME: old_tss_base == ~0 ? */
3315 ret
= read_segment_descriptor(ctxt
, tss_selector
, &next_tss_desc
, &desc_addr
);
3316 if (ret
!= X86EMUL_CONTINUE
)
3318 ret
= read_segment_descriptor(ctxt
, old_tss_sel
, &curr_tss_desc
, &desc_addr
);
3319 if (ret
!= X86EMUL_CONTINUE
)
3322 /* FIXME: check that next_tss_desc is tss */
3325 * Check privileges. The three cases are task switch caused by...
3327 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3328 * 2. Exception/IRQ/iret: No check is performed
3329 * 3. jmp/call to TSS/task-gate: No check is performed since the
3330 * hardware checks it before exiting.
3332 if (reason
== TASK_SWITCH_GATE
) {
3333 if (idt_index
!= -1) {
3334 /* Software interrupts */
3335 struct desc_struct task_gate_desc
;
3338 ret
= read_interrupt_descriptor(ctxt
, idt_index
,
3340 if (ret
!= X86EMUL_CONTINUE
)
3343 dpl
= task_gate_desc
.dpl
;
3344 if ((tss_selector
& 3) > dpl
|| ops
->cpl(ctxt
) > dpl
)
3345 return emulate_gp(ctxt
, (idt_index
<< 3) | 0x2);
3349 desc_limit
= desc_limit_scaled(&next_tss_desc
);
3350 if (!next_tss_desc
.p
||
3351 ((desc_limit
< 0x67 && (next_tss_desc
.type
& 8)) ||
3352 desc_limit
< 0x2b)) {
3353 return emulate_ts(ctxt
, tss_selector
& 0xfffc);
3356 if (reason
== TASK_SWITCH_IRET
|| reason
== TASK_SWITCH_JMP
) {
3357 curr_tss_desc
.type
&= ~(1 << 1); /* clear busy flag */
3358 write_segment_descriptor(ctxt
, old_tss_sel
, &curr_tss_desc
);
3361 if (reason
== TASK_SWITCH_IRET
)
3362 ctxt
->eflags
= ctxt
->eflags
& ~X86_EFLAGS_NT
;
3364 /* set back link to prev task only if NT bit is set in eflags
3365 note that old_tss_sel is not used after this point */
3366 if (reason
!= TASK_SWITCH_CALL
&& reason
!= TASK_SWITCH_GATE
)
3367 old_tss_sel
= 0xffff;
3369 if (next_tss_desc
.type
& 8)
3370 ret
= task_switch_32(ctxt
, tss_selector
, old_tss_sel
,
3371 old_tss_base
, &next_tss_desc
);
3373 ret
= task_switch_16(ctxt
, tss_selector
, old_tss_sel
,
3374 old_tss_base
, &next_tss_desc
);
3375 if (ret
!= X86EMUL_CONTINUE
)
3378 if (reason
== TASK_SWITCH_CALL
|| reason
== TASK_SWITCH_GATE
)
3379 ctxt
->eflags
= ctxt
->eflags
| X86_EFLAGS_NT
;
3381 if (reason
!= TASK_SWITCH_IRET
) {
3382 next_tss_desc
.type
|= (1 << 1); /* set busy flag */
3383 write_segment_descriptor(ctxt
, tss_selector
, &next_tss_desc
);
3386 ops
->set_cr(ctxt
, 0, ops
->get_cr(ctxt
, 0) | X86_CR0_TS
);
3387 ops
->set_segment(ctxt
, tss_selector
, &next_tss_desc
, 0, VCPU_SREG_TR
);
3389 if (has_error_code
) {
3390 ctxt
->op_bytes
= ctxt
->ad_bytes
= (next_tss_desc
.type
& 8) ? 4 : 2;
3391 ctxt
->lock_prefix
= 0;
3392 ctxt
->src
.val
= (unsigned long) error_code
;
3393 ret
= em_push(ctxt
);
3396 ops
->get_dr(ctxt
, 7, &dr7
);
3397 ops
->set_dr(ctxt
, 7, dr7
& ~(DR_LOCAL_ENABLE_MASK
| DR_LOCAL_SLOWDOWN
));
3402 int emulator_task_switch(struct x86_emulate_ctxt
*ctxt
,
3403 u16 tss_selector
, int idt_index
, int reason
,
3404 bool has_error_code
, u32 error_code
)
3408 invalidate_registers(ctxt
);
3409 ctxt
->_eip
= ctxt
->eip
;
3410 ctxt
->dst
.type
= OP_NONE
;
3412 rc
= emulator_do_task_switch(ctxt
, tss_selector
, idt_index
, reason
,
3413 has_error_code
, error_code
);
3415 if (rc
== X86EMUL_CONTINUE
) {
3416 ctxt
->eip
= ctxt
->_eip
;
3417 writeback_registers(ctxt
);
3420 return (rc
== X86EMUL_UNHANDLEABLE
) ? EMULATION_FAILED
: EMULATION_OK
;
3423 static void string_addr_inc(struct x86_emulate_ctxt
*ctxt
, int reg
,
3426 int df
= (ctxt
->eflags
& X86_EFLAGS_DF
) ? -op
->count
: op
->count
;
3428 register_address_increment(ctxt
, reg
, df
* op
->bytes
);
3429 op
->addr
.mem
.ea
= register_address(ctxt
, reg
);
3432 static int em_das(struct x86_emulate_ctxt
*ctxt
)
3435 bool af
, cf
, old_cf
;
3437 cf
= ctxt
->eflags
& X86_EFLAGS_CF
;
3443 af
= ctxt
->eflags
& X86_EFLAGS_AF
;
3444 if ((al
& 0x0f) > 9 || af
) {
3446 cf
= old_cf
| (al
>= 250);
3451 if (old_al
> 0x99 || old_cf
) {
3457 /* Set PF, ZF, SF */
3458 ctxt
->src
.type
= OP_IMM
;
3460 ctxt
->src
.bytes
= 1;
3461 fastop(ctxt
, em_or
);
3462 ctxt
->eflags
&= ~(X86_EFLAGS_AF
| X86_EFLAGS_CF
);
3464 ctxt
->eflags
|= X86_EFLAGS_CF
;
3466 ctxt
->eflags
|= X86_EFLAGS_AF
;
3467 return X86EMUL_CONTINUE
;
3470 static int em_aam(struct x86_emulate_ctxt
*ctxt
)
3474 if (ctxt
->src
.val
== 0)
3475 return emulate_de(ctxt
);
3477 al
= ctxt
->dst
.val
& 0xff;
3478 ah
= al
/ ctxt
->src
.val
;
3479 al
%= ctxt
->src
.val
;
3481 ctxt
->dst
.val
= (ctxt
->dst
.val
& 0xffff0000) | al
| (ah
<< 8);
3483 /* Set PF, ZF, SF */
3484 ctxt
->src
.type
= OP_IMM
;
3486 ctxt
->src
.bytes
= 1;
3487 fastop(ctxt
, em_or
);
3489 return X86EMUL_CONTINUE
;
3492 static int em_aad(struct x86_emulate_ctxt
*ctxt
)
3494 u8 al
= ctxt
->dst
.val
& 0xff;
3495 u8 ah
= (ctxt
->dst
.val
>> 8) & 0xff;
3497 al
= (al
+ (ah
* ctxt
->src
.val
)) & 0xff;
3499 ctxt
->dst
.val
= (ctxt
->dst
.val
& 0xffff0000) | al
;
3501 /* Set PF, ZF, SF */
3502 ctxt
->src
.type
= OP_IMM
;
3504 ctxt
->src
.bytes
= 1;
3505 fastop(ctxt
, em_or
);
3507 return X86EMUL_CONTINUE
;
3510 static int em_call(struct x86_emulate_ctxt
*ctxt
)
3513 long rel
= ctxt
->src
.val
;
3515 ctxt
->src
.val
= (unsigned long)ctxt
->_eip
;
3516 rc
= jmp_rel(ctxt
, rel
);
3517 if (rc
!= X86EMUL_CONTINUE
)
3519 return em_push(ctxt
);
3522 static int em_call_far(struct x86_emulate_ctxt
*ctxt
)
3527 struct desc_struct old_desc
, new_desc
;
3528 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
3529 int cpl
= ctxt
->ops
->cpl(ctxt
);
3530 enum x86emul_mode prev_mode
= ctxt
->mode
;
3532 old_eip
= ctxt
->_eip
;
3533 ops
->get_segment(ctxt
, &old_cs
, &old_desc
, NULL
, VCPU_SREG_CS
);
3535 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
3536 rc
= __load_segment_descriptor(ctxt
, sel
, VCPU_SREG_CS
, cpl
,
3537 X86_TRANSFER_CALL_JMP
, &new_desc
);
3538 if (rc
!= X86EMUL_CONTINUE
)
3541 rc
= assign_eip_far(ctxt
, ctxt
->src
.val
, &new_desc
);
3542 if (rc
!= X86EMUL_CONTINUE
)
3545 ctxt
->src
.val
= old_cs
;
3547 if (rc
!= X86EMUL_CONTINUE
)
3550 ctxt
->src
.val
= old_eip
;
3552 /* If we failed, we tainted the memory, but the very least we should
3554 if (rc
!= X86EMUL_CONTINUE
) {
3555 pr_warn_once("faulting far call emulation tainted memory\n");
3560 ops
->set_segment(ctxt
, old_cs
, &old_desc
, 0, VCPU_SREG_CS
);
3561 ctxt
->mode
= prev_mode
;
3566 static int em_ret_near_imm(struct x86_emulate_ctxt
*ctxt
)
3571 rc
= emulate_pop(ctxt
, &eip
, ctxt
->op_bytes
);
3572 if (rc
!= X86EMUL_CONTINUE
)
3574 rc
= assign_eip_near(ctxt
, eip
);
3575 if (rc
!= X86EMUL_CONTINUE
)
3577 rsp_increment(ctxt
, ctxt
->src
.val
);
3578 return X86EMUL_CONTINUE
;
3581 static int em_xchg(struct x86_emulate_ctxt
*ctxt
)
3583 /* Write back the register source. */
3584 ctxt
->src
.val
= ctxt
->dst
.val
;
3585 write_register_operand(&ctxt
->src
);
3587 /* Write back the memory destination with implicit LOCK prefix. */
3588 ctxt
->dst
.val
= ctxt
->src
.orig_val
;
3589 ctxt
->lock_prefix
= 1;
3590 return X86EMUL_CONTINUE
;
3593 static int em_imul_3op(struct x86_emulate_ctxt
*ctxt
)
3595 ctxt
->dst
.val
= ctxt
->src2
.val
;
3596 return fastop(ctxt
, em_imul
);
3599 static int em_cwd(struct x86_emulate_ctxt
*ctxt
)
3601 ctxt
->dst
.type
= OP_REG
;
3602 ctxt
->dst
.bytes
= ctxt
->src
.bytes
;
3603 ctxt
->dst
.addr
.reg
= reg_rmw(ctxt
, VCPU_REGS_RDX
);
3604 ctxt
->dst
.val
= ~((ctxt
->src
.val
>> (ctxt
->src
.bytes
* 8 - 1)) - 1);
3606 return X86EMUL_CONTINUE
;
3609 static int em_rdpid(struct x86_emulate_ctxt
*ctxt
)
3613 if (ctxt
->ops
->get_msr(ctxt
, MSR_TSC_AUX
, &tsc_aux
))
3614 return emulate_gp(ctxt
, 0);
3615 ctxt
->dst
.val
= tsc_aux
;
3616 return X86EMUL_CONTINUE
;
3619 static int em_rdtsc(struct x86_emulate_ctxt
*ctxt
)
3623 ctxt
->ops
->get_msr(ctxt
, MSR_IA32_TSC
, &tsc
);
3624 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
)tsc
;
3625 *reg_write(ctxt
, VCPU_REGS_RDX
) = tsc
>> 32;
3626 return X86EMUL_CONTINUE
;
3629 static int em_rdpmc(struct x86_emulate_ctxt
*ctxt
)
3633 if (ctxt
->ops
->read_pmc(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
), &pmc
))
3634 return emulate_gp(ctxt
, 0);
3635 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
)pmc
;
3636 *reg_write(ctxt
, VCPU_REGS_RDX
) = pmc
>> 32;
3637 return X86EMUL_CONTINUE
;
3640 static int em_mov(struct x86_emulate_ctxt
*ctxt
)
3642 memcpy(ctxt
->dst
.valptr
, ctxt
->src
.valptr
, sizeof(ctxt
->src
.valptr
));
3643 return X86EMUL_CONTINUE
;
3646 static int em_movbe(struct x86_emulate_ctxt
*ctxt
)
3650 if (!ctxt
->ops
->guest_has_movbe(ctxt
))
3651 return emulate_ud(ctxt
);
3653 switch (ctxt
->op_bytes
) {
3656 * From MOVBE definition: "...When the operand size is 16 bits,
3657 * the upper word of the destination register remains unchanged
3660 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3661 * rules so we have to do the operation almost per hand.
3663 tmp
= (u16
)ctxt
->src
.val
;
3664 ctxt
->dst
.val
&= ~0xffffUL
;
3665 ctxt
->dst
.val
|= (unsigned long)swab16(tmp
);
3668 ctxt
->dst
.val
= swab32((u32
)ctxt
->src
.val
);
3671 ctxt
->dst
.val
= swab64(ctxt
->src
.val
);
3676 return X86EMUL_CONTINUE
;
3679 static int em_cr_write(struct x86_emulate_ctxt
*ctxt
)
3681 if (ctxt
->ops
->set_cr(ctxt
, ctxt
->modrm_reg
, ctxt
->src
.val
))
3682 return emulate_gp(ctxt
, 0);
3684 /* Disable writeback. */
3685 ctxt
->dst
.type
= OP_NONE
;
3686 return X86EMUL_CONTINUE
;
3689 static int em_dr_write(struct x86_emulate_ctxt
*ctxt
)
3693 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
3694 val
= ctxt
->src
.val
& ~0ULL;
3696 val
= ctxt
->src
.val
& ~0U;
3698 /* #UD condition is already handled. */
3699 if (ctxt
->ops
->set_dr(ctxt
, ctxt
->modrm_reg
, val
) < 0)
3700 return emulate_gp(ctxt
, 0);
3702 /* Disable writeback. */
3703 ctxt
->dst
.type
= OP_NONE
;
3704 return X86EMUL_CONTINUE
;
3707 static int em_wrmsr(struct x86_emulate_ctxt
*ctxt
)
3711 msr_data
= (u32
)reg_read(ctxt
, VCPU_REGS_RAX
)
3712 | ((u64
)reg_read(ctxt
, VCPU_REGS_RDX
) << 32);
3713 if (ctxt
->ops
->set_msr(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
), msr_data
))
3714 return emulate_gp(ctxt
, 0);
3716 return X86EMUL_CONTINUE
;
3719 static int em_rdmsr(struct x86_emulate_ctxt
*ctxt
)
3723 if (ctxt
->ops
->get_msr(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
), &msr_data
))
3724 return emulate_gp(ctxt
, 0);
3726 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
)msr_data
;
3727 *reg_write(ctxt
, VCPU_REGS_RDX
) = msr_data
>> 32;
3728 return X86EMUL_CONTINUE
;
3731 static int em_store_sreg(struct x86_emulate_ctxt
*ctxt
, int segment
)
3733 if (segment
> VCPU_SREG_GS
&&
3734 (ctxt
->ops
->get_cr(ctxt
, 4) & X86_CR4_UMIP
) &&
3735 ctxt
->ops
->cpl(ctxt
) > 0)
3736 return emulate_gp(ctxt
, 0);
3738 ctxt
->dst
.val
= get_segment_selector(ctxt
, segment
);
3739 if (ctxt
->dst
.bytes
== 4 && ctxt
->dst
.type
== OP_MEM
)
3740 ctxt
->dst
.bytes
= 2;
3741 return X86EMUL_CONTINUE
;
3744 static int em_mov_rm_sreg(struct x86_emulate_ctxt
*ctxt
)
3746 if (ctxt
->modrm_reg
> VCPU_SREG_GS
)
3747 return emulate_ud(ctxt
);
3749 return em_store_sreg(ctxt
, ctxt
->modrm_reg
);
3752 static int em_mov_sreg_rm(struct x86_emulate_ctxt
*ctxt
)
3754 u16 sel
= ctxt
->src
.val
;
3756 if (ctxt
->modrm_reg
== VCPU_SREG_CS
|| ctxt
->modrm_reg
> VCPU_SREG_GS
)
3757 return emulate_ud(ctxt
);
3759 if (ctxt
->modrm_reg
== VCPU_SREG_SS
)
3760 ctxt
->interruptibility
= KVM_X86_SHADOW_INT_MOV_SS
;
3762 /* Disable writeback. */
3763 ctxt
->dst
.type
= OP_NONE
;
3764 return load_segment_descriptor(ctxt
, sel
, ctxt
->modrm_reg
);
3767 static int em_sldt(struct x86_emulate_ctxt
*ctxt
)
3769 return em_store_sreg(ctxt
, VCPU_SREG_LDTR
);
3772 static int em_lldt(struct x86_emulate_ctxt
*ctxt
)
3774 u16 sel
= ctxt
->src
.val
;
3776 /* Disable writeback. */
3777 ctxt
->dst
.type
= OP_NONE
;
3778 return load_segment_descriptor(ctxt
, sel
, VCPU_SREG_LDTR
);
3781 static int em_str(struct x86_emulate_ctxt
*ctxt
)
3783 return em_store_sreg(ctxt
, VCPU_SREG_TR
);
3786 static int em_ltr(struct x86_emulate_ctxt
*ctxt
)
3788 u16 sel
= ctxt
->src
.val
;
3790 /* Disable writeback. */
3791 ctxt
->dst
.type
= OP_NONE
;
3792 return load_segment_descriptor(ctxt
, sel
, VCPU_SREG_TR
);
3795 static int em_invlpg(struct x86_emulate_ctxt
*ctxt
)
3800 rc
= linearize(ctxt
, ctxt
->src
.addr
.mem
, 1, false, &linear
);
3801 if (rc
== X86EMUL_CONTINUE
)
3802 ctxt
->ops
->invlpg(ctxt
, linear
);
3803 /* Disable writeback. */
3804 ctxt
->dst
.type
= OP_NONE
;
3805 return X86EMUL_CONTINUE
;
3808 static int em_clts(struct x86_emulate_ctxt
*ctxt
)
3812 cr0
= ctxt
->ops
->get_cr(ctxt
, 0);
3814 ctxt
->ops
->set_cr(ctxt
, 0, cr0
);
3815 return X86EMUL_CONTINUE
;
3818 static int em_hypercall(struct x86_emulate_ctxt
*ctxt
)
3820 int rc
= ctxt
->ops
->fix_hypercall(ctxt
);
3822 if (rc
!= X86EMUL_CONTINUE
)
3825 /* Let the processor re-execute the fixed hypercall */
3826 ctxt
->_eip
= ctxt
->eip
;
3827 /* Disable writeback. */
3828 ctxt
->dst
.type
= OP_NONE
;
3829 return X86EMUL_CONTINUE
;
3832 static int emulate_store_desc_ptr(struct x86_emulate_ctxt
*ctxt
,
3833 void (*get
)(struct x86_emulate_ctxt
*ctxt
,
3834 struct desc_ptr
*ptr
))
3836 struct desc_ptr desc_ptr
;
3838 if ((ctxt
->ops
->get_cr(ctxt
, 4) & X86_CR4_UMIP
) &&
3839 ctxt
->ops
->cpl(ctxt
) > 0)
3840 return emulate_gp(ctxt
, 0);
3842 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
3844 get(ctxt
, &desc_ptr
);
3845 if (ctxt
->op_bytes
== 2) {
3847 desc_ptr
.address
&= 0x00ffffff;
3849 /* Disable writeback. */
3850 ctxt
->dst
.type
= OP_NONE
;
3851 return segmented_write_std(ctxt
, ctxt
->dst
.addr
.mem
,
3852 &desc_ptr
, 2 + ctxt
->op_bytes
);
3855 static int em_sgdt(struct x86_emulate_ctxt
*ctxt
)
3857 return emulate_store_desc_ptr(ctxt
, ctxt
->ops
->get_gdt
);
3860 static int em_sidt(struct x86_emulate_ctxt
*ctxt
)
3862 return emulate_store_desc_ptr(ctxt
, ctxt
->ops
->get_idt
);
3865 static int em_lgdt_lidt(struct x86_emulate_ctxt
*ctxt
, bool lgdt
)
3867 struct desc_ptr desc_ptr
;
3870 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
3872 rc
= read_descriptor(ctxt
, ctxt
->src
.addr
.mem
,
3873 &desc_ptr
.size
, &desc_ptr
.address
,
3875 if (rc
!= X86EMUL_CONTINUE
)
3877 if (ctxt
->mode
== X86EMUL_MODE_PROT64
&&
3878 emul_is_noncanonical_address(desc_ptr
.address
, ctxt
))
3879 return emulate_gp(ctxt
, 0);
3881 ctxt
->ops
->set_gdt(ctxt
, &desc_ptr
);
3883 ctxt
->ops
->set_idt(ctxt
, &desc_ptr
);
3884 /* Disable writeback. */
3885 ctxt
->dst
.type
= OP_NONE
;
3886 return X86EMUL_CONTINUE
;
3889 static int em_lgdt(struct x86_emulate_ctxt
*ctxt
)
3891 return em_lgdt_lidt(ctxt
, true);
3894 static int em_lidt(struct x86_emulate_ctxt
*ctxt
)
3896 return em_lgdt_lidt(ctxt
, false);
3899 static int em_smsw(struct x86_emulate_ctxt
*ctxt
)
3901 if ((ctxt
->ops
->get_cr(ctxt
, 4) & X86_CR4_UMIP
) &&
3902 ctxt
->ops
->cpl(ctxt
) > 0)
3903 return emulate_gp(ctxt
, 0);
3905 if (ctxt
->dst
.type
== OP_MEM
)
3906 ctxt
->dst
.bytes
= 2;
3907 ctxt
->dst
.val
= ctxt
->ops
->get_cr(ctxt
, 0);
3908 return X86EMUL_CONTINUE
;
3911 static int em_lmsw(struct x86_emulate_ctxt
*ctxt
)
3913 ctxt
->ops
->set_cr(ctxt
, 0, (ctxt
->ops
->get_cr(ctxt
, 0) & ~0x0eul
)
3914 | (ctxt
->src
.val
& 0x0f));
3915 ctxt
->dst
.type
= OP_NONE
;
3916 return X86EMUL_CONTINUE
;
3919 static int em_loop(struct x86_emulate_ctxt
*ctxt
)
3921 int rc
= X86EMUL_CONTINUE
;
3923 register_address_increment(ctxt
, VCPU_REGS_RCX
, -1);
3924 if ((address_mask(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
)) != 0) &&
3925 (ctxt
->b
== 0xe2 || test_cc(ctxt
->b
^ 0x5, ctxt
->eflags
)))
3926 rc
= jmp_rel(ctxt
, ctxt
->src
.val
);
3931 static int em_jcxz(struct x86_emulate_ctxt
*ctxt
)
3933 int rc
= X86EMUL_CONTINUE
;
3935 if (address_mask(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
)) == 0)
3936 rc
= jmp_rel(ctxt
, ctxt
->src
.val
);
3941 static int em_in(struct x86_emulate_ctxt
*ctxt
)
3943 if (!pio_in_emulated(ctxt
, ctxt
->dst
.bytes
, ctxt
->src
.val
,
3945 return X86EMUL_IO_NEEDED
;
3947 return X86EMUL_CONTINUE
;
3950 static int em_out(struct x86_emulate_ctxt
*ctxt
)
3952 ctxt
->ops
->pio_out_emulated(ctxt
, ctxt
->src
.bytes
, ctxt
->dst
.val
,
3954 /* Disable writeback. */
3955 ctxt
->dst
.type
= OP_NONE
;
3956 return X86EMUL_CONTINUE
;
3959 static int em_cli(struct x86_emulate_ctxt
*ctxt
)
3961 if (emulator_bad_iopl(ctxt
))
3962 return emulate_gp(ctxt
, 0);
3964 ctxt
->eflags
&= ~X86_EFLAGS_IF
;
3965 return X86EMUL_CONTINUE
;
3968 static int em_sti(struct x86_emulate_ctxt
*ctxt
)
3970 if (emulator_bad_iopl(ctxt
))
3971 return emulate_gp(ctxt
, 0);
3973 ctxt
->interruptibility
= KVM_X86_SHADOW_INT_STI
;
3974 ctxt
->eflags
|= X86_EFLAGS_IF
;
3975 return X86EMUL_CONTINUE
;
3978 static int em_cpuid(struct x86_emulate_ctxt
*ctxt
)
3980 u32 eax
, ebx
, ecx
, edx
;
3983 ctxt
->ops
->get_msr(ctxt
, MSR_MISC_FEATURES_ENABLES
, &msr
);
3984 if (msr
& MSR_MISC_FEATURES_ENABLES_CPUID_FAULT
&&
3985 ctxt
->ops
->cpl(ctxt
)) {
3986 return emulate_gp(ctxt
, 0);
3989 eax
= reg_read(ctxt
, VCPU_REGS_RAX
);
3990 ecx
= reg_read(ctxt
, VCPU_REGS_RCX
);
3991 ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
, true);
3992 *reg_write(ctxt
, VCPU_REGS_RAX
) = eax
;
3993 *reg_write(ctxt
, VCPU_REGS_RBX
) = ebx
;
3994 *reg_write(ctxt
, VCPU_REGS_RCX
) = ecx
;
3995 *reg_write(ctxt
, VCPU_REGS_RDX
) = edx
;
3996 return X86EMUL_CONTINUE
;
3999 static int em_sahf(struct x86_emulate_ctxt
*ctxt
)
4003 flags
= X86_EFLAGS_CF
| X86_EFLAGS_PF
| X86_EFLAGS_AF
| X86_EFLAGS_ZF
|
4005 flags
&= *reg_rmw(ctxt
, VCPU_REGS_RAX
) >> 8;
4007 ctxt
->eflags
&= ~0xffUL
;
4008 ctxt
->eflags
|= flags
| X86_EFLAGS_FIXED
;
4009 return X86EMUL_CONTINUE
;
4012 static int em_lahf(struct x86_emulate_ctxt
*ctxt
)
4014 *reg_rmw(ctxt
, VCPU_REGS_RAX
) &= ~0xff00UL
;
4015 *reg_rmw(ctxt
, VCPU_REGS_RAX
) |= (ctxt
->eflags
& 0xff) << 8;
4016 return X86EMUL_CONTINUE
;
4019 static int em_bswap(struct x86_emulate_ctxt
*ctxt
)
4021 switch (ctxt
->op_bytes
) {
4022 #ifdef CONFIG_X86_64
4024 asm("bswap %0" : "+r"(ctxt
->dst
.val
));
4028 asm("bswap %0" : "+r"(*(u32
*)&ctxt
->dst
.val
));
4031 return X86EMUL_CONTINUE
;
4034 static int em_clflush(struct x86_emulate_ctxt
*ctxt
)
4036 /* emulating clflush regardless of cpuid */
4037 return X86EMUL_CONTINUE
;
4040 static int em_movsxd(struct x86_emulate_ctxt
*ctxt
)
4042 ctxt
->dst
.val
= (s32
) ctxt
->src
.val
;
4043 return X86EMUL_CONTINUE
;
4046 static int check_fxsr(struct x86_emulate_ctxt
*ctxt
)
4048 if (!ctxt
->ops
->guest_has_fxsr(ctxt
))
4049 return emulate_ud(ctxt
);
4051 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
4052 return emulate_nm(ctxt
);
4055 * Don't emulate a case that should never be hit, instead of working
4056 * around a lack of fxsave64/fxrstor64 on old compilers.
4058 if (ctxt
->mode
>= X86EMUL_MODE_PROT64
)
4059 return X86EMUL_UNHANDLEABLE
;
4061 return X86EMUL_CONTINUE
;
4065 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
4066 * and restore MXCSR.
4068 static size_t __fxstate_size(int nregs
)
4070 return offsetof(struct fxregs_state
, xmm_space
[0]) + nregs
* 16;
4073 static inline size_t fxstate_size(struct x86_emulate_ctxt
*ctxt
)
4076 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
4077 return __fxstate_size(16);
4079 cr4_osfxsr
= ctxt
->ops
->get_cr(ctxt
, 4) & X86_CR4_OSFXSR
;
4080 return __fxstate_size(cr4_osfxsr
? 8 : 0);
4084 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
4087 * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
4088 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
4090 * 3) 64-bit mode with REX.W prefix
4091 * - like (2), but XMM 8-15 are being saved and restored
4092 * 4) 64-bit mode without REX.W prefix
4093 * - like (3), but FIP and FDP are 64 bit
4095 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4096 * desired result. (4) is not emulated.
4098 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4099 * and FPU DS) should match.
4101 static int em_fxsave(struct x86_emulate_ctxt
*ctxt
)
4103 struct fxregs_state fx_state
;
4106 rc
= check_fxsr(ctxt
);
4107 if (rc
!= X86EMUL_CONTINUE
)
4112 rc
= asm_safe("fxsave %[fx]", , [fx
] "+m"(fx_state
));
4116 if (rc
!= X86EMUL_CONTINUE
)
4119 return segmented_write_std(ctxt
, ctxt
->memop
.addr
.mem
, &fx_state
,
4120 fxstate_size(ctxt
));
4124 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4125 * in the host registers (via FXSAVE) instead, so they won't be modified.
4126 * (preemption has to stay disabled until FXRSTOR).
4128 * Use noinline to keep the stack for other functions called by callers small.
4130 static noinline
int fxregs_fixup(struct fxregs_state
*fx_state
,
4131 const size_t used_size
)
4133 struct fxregs_state fx_tmp
;
4136 rc
= asm_safe("fxsave %[fx]", , [fx
] "+m"(fx_tmp
));
4137 memcpy((void *)fx_state
+ used_size
, (void *)&fx_tmp
+ used_size
,
4138 __fxstate_size(16) - used_size
);
4143 static int em_fxrstor(struct x86_emulate_ctxt
*ctxt
)
4145 struct fxregs_state fx_state
;
4149 rc
= check_fxsr(ctxt
);
4150 if (rc
!= X86EMUL_CONTINUE
)
4153 size
= fxstate_size(ctxt
);
4154 rc
= segmented_read_std(ctxt
, ctxt
->memop
.addr
.mem
, &fx_state
, size
);
4155 if (rc
!= X86EMUL_CONTINUE
)
4160 if (size
< __fxstate_size(16)) {
4161 rc
= fxregs_fixup(&fx_state
, size
);
4162 if (rc
!= X86EMUL_CONTINUE
)
4166 if (fx_state
.mxcsr
>> 16) {
4167 rc
= emulate_gp(ctxt
, 0);
4171 if (rc
== X86EMUL_CONTINUE
)
4172 rc
= asm_safe("fxrstor %[fx]", : [fx
] "m"(fx_state
));
4180 static int em_xsetbv(struct x86_emulate_ctxt
*ctxt
)
4184 eax
= reg_read(ctxt
, VCPU_REGS_RAX
);
4185 edx
= reg_read(ctxt
, VCPU_REGS_RDX
);
4186 ecx
= reg_read(ctxt
, VCPU_REGS_RCX
);
4188 if (ctxt
->ops
->set_xcr(ctxt
, ecx
, ((u64
)edx
<< 32) | eax
))
4189 return emulate_gp(ctxt
, 0);
4191 return X86EMUL_CONTINUE
;
4194 static bool valid_cr(int nr
)
4206 static int check_cr_read(struct x86_emulate_ctxt
*ctxt
)
4208 if (!valid_cr(ctxt
->modrm_reg
))
4209 return emulate_ud(ctxt
);
4211 return X86EMUL_CONTINUE
;
4214 static int check_cr_write(struct x86_emulate_ctxt
*ctxt
)
4216 u64 new_val
= ctxt
->src
.val64
;
4217 int cr
= ctxt
->modrm_reg
;
4220 static u64 cr_reserved_bits
[] = {
4221 0xffffffff00000000ULL
,
4222 0, 0, 0, /* CR3 checked later */
4229 return emulate_ud(ctxt
);
4231 if (new_val
& cr_reserved_bits
[cr
])
4232 return emulate_gp(ctxt
, 0);
4237 if (((new_val
& X86_CR0_PG
) && !(new_val
& X86_CR0_PE
)) ||
4238 ((new_val
& X86_CR0_NW
) && !(new_val
& X86_CR0_CD
)))
4239 return emulate_gp(ctxt
, 0);
4241 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
4242 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
4244 if ((new_val
& X86_CR0_PG
) && (efer
& EFER_LME
) &&
4245 !(cr4
& X86_CR4_PAE
))
4246 return emulate_gp(ctxt
, 0);
4253 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
4254 if (efer
& EFER_LMA
) {
4256 u32 eax
, ebx
, ecx
, edx
;
4260 if (ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
,
4262 maxphyaddr
= eax
& 0xff;
4265 rsvd
= rsvd_bits(maxphyaddr
, 63);
4266 if (ctxt
->ops
->get_cr(ctxt
, 4) & X86_CR4_PCIDE
)
4267 rsvd
&= ~X86_CR3_PCID_NOFLUSH
;
4271 return emulate_gp(ctxt
, 0);
4276 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
4278 if ((efer
& EFER_LMA
) && !(new_val
& X86_CR4_PAE
))
4279 return emulate_gp(ctxt
, 0);
4285 return X86EMUL_CONTINUE
;
4288 static int check_dr7_gd(struct x86_emulate_ctxt
*ctxt
)
4292 ctxt
->ops
->get_dr(ctxt
, 7, &dr7
);
4294 /* Check if DR7.Global_Enable is set */
4295 return dr7
& (1 << 13);
4298 static int check_dr_read(struct x86_emulate_ctxt
*ctxt
)
4300 int dr
= ctxt
->modrm_reg
;
4304 return emulate_ud(ctxt
);
4306 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
4307 if ((cr4
& X86_CR4_DE
) && (dr
== 4 || dr
== 5))
4308 return emulate_ud(ctxt
);
4310 if (check_dr7_gd(ctxt
)) {
4313 ctxt
->ops
->get_dr(ctxt
, 6, &dr6
);
4314 dr6
&= ~DR_TRAP_BITS
;
4315 dr6
|= DR6_BD
| DR6_RTM
;
4316 ctxt
->ops
->set_dr(ctxt
, 6, dr6
);
4317 return emulate_db(ctxt
);
4320 return X86EMUL_CONTINUE
;
4323 static int check_dr_write(struct x86_emulate_ctxt
*ctxt
)
4325 u64 new_val
= ctxt
->src
.val64
;
4326 int dr
= ctxt
->modrm_reg
;
4328 if ((dr
== 6 || dr
== 7) && (new_val
& 0xffffffff00000000ULL
))
4329 return emulate_gp(ctxt
, 0);
4331 return check_dr_read(ctxt
);
4334 static int check_svme(struct x86_emulate_ctxt
*ctxt
)
4338 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
4340 if (!(efer
& EFER_SVME
))
4341 return emulate_ud(ctxt
);
4343 return X86EMUL_CONTINUE
;
4346 static int check_svme_pa(struct x86_emulate_ctxt
*ctxt
)
4348 u64 rax
= reg_read(ctxt
, VCPU_REGS_RAX
);
4350 /* Valid physical address? */
4351 if (rax
& 0xffff000000000000ULL
)
4352 return emulate_gp(ctxt
, 0);
4354 return check_svme(ctxt
);
4357 static int check_rdtsc(struct x86_emulate_ctxt
*ctxt
)
4359 u64 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
4361 if (cr4
& X86_CR4_TSD
&& ctxt
->ops
->cpl(ctxt
))
4362 return emulate_ud(ctxt
);
4364 return X86EMUL_CONTINUE
;
4367 static int check_rdpmc(struct x86_emulate_ctxt
*ctxt
)
4369 u64 cr4
= ctxt
->ops
->get_cr(ctxt
, 4);
4370 u64 rcx
= reg_read(ctxt
, VCPU_REGS_RCX
);
4373 * VMware allows access to these Pseduo-PMCs even when read via RDPMC
4374 * in Ring3 when CR4.PCE=0.
4376 if (enable_vmware_backdoor
&& is_vmware_backdoor_pmc(rcx
))
4377 return X86EMUL_CONTINUE
;
4379 if ((!(cr4
& X86_CR4_PCE
) && ctxt
->ops
->cpl(ctxt
)) ||
4380 ctxt
->ops
->check_pmc(ctxt
, rcx
))
4381 return emulate_gp(ctxt
, 0);
4383 return X86EMUL_CONTINUE
;
4386 static int check_perm_in(struct x86_emulate_ctxt
*ctxt
)
4388 ctxt
->dst
.bytes
= min(ctxt
->dst
.bytes
, 4u);
4389 if (!emulator_io_permited(ctxt
, ctxt
->src
.val
, ctxt
->dst
.bytes
))
4390 return emulate_gp(ctxt
, 0);
4392 return X86EMUL_CONTINUE
;
4395 static int check_perm_out(struct x86_emulate_ctxt
*ctxt
)
4397 ctxt
->src
.bytes
= min(ctxt
->src
.bytes
, 4u);
4398 if (!emulator_io_permited(ctxt
, ctxt
->dst
.val
, ctxt
->src
.bytes
))
4399 return emulate_gp(ctxt
, 0);
4401 return X86EMUL_CONTINUE
;
4404 #define D(_y) { .flags = (_y) }
4405 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4406 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4407 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4408 #define N D(NotImpl)
4409 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4410 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4411 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4412 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4413 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4414 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4415 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4416 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4417 #define II(_f, _e, _i) \
4418 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4419 #define IIP(_f, _e, _i, _p) \
4420 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4421 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4422 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4424 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4425 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4426 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4427 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4428 #define I2bvIP(_f, _e, _i, _p) \
4429 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4431 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4432 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4433 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4435 static const struct opcode group7_rm0
[] = {
4437 I(SrcNone
| Priv
| EmulateOnUD
, em_hypercall
),
4441 static const struct opcode group7_rm1
[] = {
4442 DI(SrcNone
| Priv
, monitor
),
4443 DI(SrcNone
| Priv
, mwait
),
4447 static const struct opcode group7_rm2
[] = {
4449 II(ImplicitOps
| Priv
, em_xsetbv
, xsetbv
),
4453 static const struct opcode group7_rm3
[] = {
4454 DIP(SrcNone
| Prot
| Priv
, vmrun
, check_svme_pa
),
4455 II(SrcNone
| Prot
| EmulateOnUD
, em_hypercall
, vmmcall
),
4456 DIP(SrcNone
| Prot
| Priv
, vmload
, check_svme_pa
),
4457 DIP(SrcNone
| Prot
| Priv
, vmsave
, check_svme_pa
),
4458 DIP(SrcNone
| Prot
| Priv
, stgi
, check_svme
),
4459 DIP(SrcNone
| Prot
| Priv
, clgi
, check_svme
),
4460 DIP(SrcNone
| Prot
| Priv
, skinit
, check_svme
),
4461 DIP(SrcNone
| Prot
| Priv
, invlpga
, check_svme
),
4464 static const struct opcode group7_rm7
[] = {
4466 DIP(SrcNone
, rdtscp
, check_rdtsc
),
4470 static const struct opcode group1
[] = {
4472 F(Lock
| PageTable
, em_or
),
4475 F(Lock
| PageTable
, em_and
),
4481 static const struct opcode group1A
[] = {
4482 I(DstMem
| SrcNone
| Mov
| Stack
| IncSP
| TwoMemOp
, em_pop
), N
, N
, N
, N
, N
, N
, N
,
4485 static const struct opcode group2
[] = {
4486 F(DstMem
| ModRM
, em_rol
),
4487 F(DstMem
| ModRM
, em_ror
),
4488 F(DstMem
| ModRM
, em_rcl
),
4489 F(DstMem
| ModRM
, em_rcr
),
4490 F(DstMem
| ModRM
, em_shl
),
4491 F(DstMem
| ModRM
, em_shr
),
4492 F(DstMem
| ModRM
, em_shl
),
4493 F(DstMem
| ModRM
, em_sar
),
4496 static const struct opcode group3
[] = {
4497 F(DstMem
| SrcImm
| NoWrite
, em_test
),
4498 F(DstMem
| SrcImm
| NoWrite
, em_test
),
4499 F(DstMem
| SrcNone
| Lock
, em_not
),
4500 F(DstMem
| SrcNone
| Lock
, em_neg
),
4501 F(DstXacc
| Src2Mem
, em_mul_ex
),
4502 F(DstXacc
| Src2Mem
, em_imul_ex
),
4503 F(DstXacc
| Src2Mem
, em_div_ex
),
4504 F(DstXacc
| Src2Mem
, em_idiv_ex
),
4507 static const struct opcode group4
[] = {
4508 F(ByteOp
| DstMem
| SrcNone
| Lock
, em_inc
),
4509 F(ByteOp
| DstMem
| SrcNone
| Lock
, em_dec
),
4513 static const struct opcode group5
[] = {
4514 F(DstMem
| SrcNone
| Lock
, em_inc
),
4515 F(DstMem
| SrcNone
| Lock
, em_dec
),
4516 I(SrcMem
| NearBranch
, em_call_near_abs
),
4517 I(SrcMemFAddr
| ImplicitOps
, em_call_far
),
4518 I(SrcMem
| NearBranch
, em_jmp_abs
),
4519 I(SrcMemFAddr
| ImplicitOps
, em_jmp_far
),
4520 I(SrcMem
| Stack
| TwoMemOp
, em_push
), D(Undefined
),
4523 static const struct opcode group6
[] = {
4524 II(Prot
| DstMem
, em_sldt
, sldt
),
4525 II(Prot
| DstMem
, em_str
, str
),
4526 II(Prot
| Priv
| SrcMem16
, em_lldt
, lldt
),
4527 II(Prot
| Priv
| SrcMem16
, em_ltr
, ltr
),
4531 static const struct group_dual group7
= { {
4532 II(Mov
| DstMem
, em_sgdt
, sgdt
),
4533 II(Mov
| DstMem
, em_sidt
, sidt
),
4534 II(SrcMem
| Priv
, em_lgdt
, lgdt
),
4535 II(SrcMem
| Priv
, em_lidt
, lidt
),
4536 II(SrcNone
| DstMem
| Mov
, em_smsw
, smsw
), N
,
4537 II(SrcMem16
| Mov
| Priv
, em_lmsw
, lmsw
),
4538 II(SrcMem
| ByteOp
| Priv
| NoAccess
, em_invlpg
, invlpg
),
4544 II(SrcNone
| DstMem
| Mov
, em_smsw
, smsw
), N
,
4545 II(SrcMem16
| Mov
| Priv
, em_lmsw
, lmsw
),
4549 static const struct opcode group8
[] = {
4551 F(DstMem
| SrcImmByte
| NoWrite
, em_bt
),
4552 F(DstMem
| SrcImmByte
| Lock
| PageTable
, em_bts
),
4553 F(DstMem
| SrcImmByte
| Lock
, em_btr
),
4554 F(DstMem
| SrcImmByte
| Lock
| PageTable
, em_btc
),
/*
 * The "memory" destination is actually always a register, since we come
 * from the register case of group9.
 */
static const struct gprefix pfx_0f_c7_7 = {
	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
	N, N, N, N, N, N, N,
	GP(0, &pfx_0f_c7_7),
static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), N, N, N,
static const struct group_dual group15 = { {
	I(ModRM | Aligned16, em_fxsave),
	I(ModRM | Aligned16, em_fxrstor),
	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
	N, N, N, N, N, N, N, N,
static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),

static const struct instr_dual instr_dual_0f_2b = {

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,

static const struct gprefix pfx_0f_10_0f_11 = {
	I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N

static const struct mode_dual mode_dual_63 = {
	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
static const struct opcode opcode_table[256] = {
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	F6ALU(Lock | PageTable, em_and), N, N,
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	F6ALU(Lock, em_xor), N, N,
	F6ALU(NoWrite, em_cmp), N, N,
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	X8(I(SrcReg | Stack, em_push)),
	X8(I(DstReg | Stack, em_pop)),
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, MD(ModRM, &mode_dual_63),
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	X16(D(SrcImmByte | NearBranch)),
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
static const struct opcode twobyte_table[256] = {
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N, N, N, N, N, N, N,
	X16(D(DstReg | SrcMem | ModRM)),
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	X16(D(SrcImm | NearBranch)),
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	X8(I(DstReg, em_bswap)),
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
/*
 * Insns below are selected by the prefix which is indexed by the third opcode
 * byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
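
/*
 * Illustrative sketch (not part of the original source): a gprefix node such
 * as three_byte_0f_38_f0 above holds four alternatives, and the decoder picks
 * one using the mandatory SIMD prefix (none, 0x66, 0xf2 or 0xf3), mirroring
 * the prefix handling in x86_decode_insn() below.  The types and names here
 * are hypothetical stand-ins.
 */
struct demo_opcode { int handler_id; };
struct demo_gprefix { struct demo_opcode pfx_no, pfx_66, pfx_f2, pfx_f3; };

static struct demo_opcode demo_pick_by_prefix(const struct demo_gprefix *g,
					      int has_op_size_prefix,
					      unsigned char rep_prefix)
{
	unsigned char simd_prefix = has_op_size_prefix ? 0x66 : rep_prefix;

	switch (simd_prefix) {
	case 0x66: return g->pfx_66;
	case 0xf2: return g->pfx_f2;
	case 0xf3: return g->pfx_f3;
	default:   return g->pfx_no;	/* no mandatory prefix */
	}
}
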
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
	int rc = X86EMUL_CONTINUE;

	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
		op->val = insn_fetch(s8, ctxt);
		op->val = insn_fetch(s16, ctxt);
		op->val = insn_fetch(s32, ctxt);
		op->val = insn_fetch(s64, ctxt);
	if (!sign_extension) {
		switch (op->bytes) {
			op->val &= 0xffffffff;
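
/*
 * Illustrative sketch (not part of the original source): decode_imm() above
 * fetches the immediate as a signed value of the requested width; when
 * sign_extension is false it then masks the value back down so the result is
 * zero-extended instead (the 4-byte case is visible above).  A stand-alone
 * version of that masking step, with the 1- and 2-byte masks assumed:
 */
static unsigned long long demo_extend_imm(long long fetched, unsigned int bytes,
					  int sign_extension)
{
	unsigned long long val = (unsigned long long)fetched;

	if (!sign_extension) {
		switch (bytes) {
		case 1: val &= 0xff;	   break;
		case 2: val &= 0xffff;	   break;
		case 4: val &= 0xffffffff; break;
		}
	}
	return val;
}
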
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
	int rc = X86EMUL_CONTINUE;

		decode_register_operand(ctxt, op);
		rc = decode_imm(ctxt, op, 1, false);
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		if (ctxt->d & ByteOp) {
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		rc = decode_imm(ctxt, op, 1, true);
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
						ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		ctxt->memop.bytes = 2;
		ctxt->memop.bytes = 4;
		rc = decode_imm(ctxt, op, 2, false);
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		op->val = VCPU_SREG_ES;
		op->val = VCPU_SREG_CS;
		op->val = VCPU_SREG_SS;
		op->val = VCPU_SREG_DS;
		op->val = VCPU_SREG_FS;
		op->val = VCPU_SREG_GS;
		/* Special instructions do their own operand decoding. */
		op->type = OP_NONE; /* Disable writeback. */
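
/*
 * Illustrative sketch (not part of the original source): decode_operand() is
 * driven by a small operand-kind field packed into the instruction flags
 * word; x86_decode_insn() below shifts the field down (DstShift, SrcShift,
 * Src2Shift) and masks it with OpMask before each call.  A generic version of
 * that extraction, with shift and mask passed in rather than hard-coded:
 */
static unsigned int demo_operand_kind(unsigned long long flags,
				      unsigned int shift,
				      unsigned long long mask)
{
	return (unsigned int)((flags >> shift) & mask);
}
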
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;
	struct desc_struct desc;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	memcpy(ctxt->fetch.data, insn, insn_len);
	rc = __do_insn_fetch_bytes(ctxt, 1);
	if (rc != X86EMUL_CONTINUE)

	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
		def_op_bytes = def_ad_bytes = 2;
		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
			def_op_bytes = def_ad_bytes = 4;
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		return EMULATION_FAILED;

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
		case 0x26:	/* ES override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_ES;
		case 0x2e:	/* CS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_CS;
		case 0x36:	/* SS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_SS;
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_DS;
		case 0x64:	/* FS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_FS;
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_GS;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
			ctxt->rex_prefix = ctxt->b;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;

		/* Any legacy prefix after a REX prefix nullifies its effect. */
		ctxt->rex_prefix = 0;

	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
				opcode = opcode.u.gdual->mod012[goffset];
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			if (ctxt->modrm > 0xbf) {
				size_t size = ARRAY_SIZE(opcode.u.esc->high);
				u32 index = array_index_nospec(
					ctxt->modrm - 0xc0, size);

				opcode = opcode.u.esc->high[index];
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
				opcode = opcode.u.idual->mod012;
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
				opcode = opcode.u.mdual->mode32;
			return EMULATION_FAILED;

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;

		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
			else if (ctxt->d & NearBranch)

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)

			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)

	/*
	 * Decode and fetch the second source operand: register, memory
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
	return ctxt->d & PageTable;
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
	/* The second termination condition only applies for REPE
	 * and REPNE. Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
	 * corresponding termination condition according to:
	 *     - if REPE/REPZ and ZF = 0 then done
	 *     - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
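
/*
 * Illustrative sketch (not part of the original source): the second REP
 * termination test above, written as a stand-alone predicate.  The opcode
 * bytes (CMPS/SCAS) and prefix bytes (0xf3 REPE, 0xf2 REPNE) match the values
 * used above; DEMO_ZF stands in for X86_EFLAGS_ZF (bit 6 of EFLAGS).
 */
#define DEMO_ZF		(1u << 6)
#define DEMO_REPE	0xf3
#define DEMO_REPNE	0xf2

static int demo_rep_cmp_done(unsigned char opcode_byte, unsigned char rep_prefix,
			     unsigned int eflags)
{
	int is_cmps_or_scas = (opcode_byte == 0xa6) || (opcode_byte == 0xa7) ||
			      (opcode_byte == 0xae) || (opcode_byte == 0xaf);

	if (!is_cmps_or_scas)
		return 0;
	if (rep_prefix == DEMO_REPE && !(eflags & DEMO_ZF))
		return 1;	/* REPE/REPZ stops when ZF == 0 */
	if (rep_prefix == DEMO_REPNE && (eflags & DEMO_ZF))
		return 1;	/* REPNE/REPNZ stops when ZF == 1 */
	return 0;
}
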
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
	rc = asm_safe("fwait");

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;

static void fetch_possible_mmx_operand(struct operand *op)
	if (op->type == OP_MM)
		read_mmx_reg(&op->mm_val, op->addr.mm);
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
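
/*
 * Illustrative sketch (not part of the original source): fastop() above runs
 * the arithmetic stub on a scratch copy of the guest flags and then merges
 * only the arithmetic status bits back into ctxt->eflags.  DEMO_EFLAGS_MASK
 * is a hypothetical stand-in for EFLAGS_MASK (OF|SF|ZF|AF|PF|CF assumed).
 */
#define DEMO_EFLAGS_MASK	0x8d5u

static unsigned long demo_merge_flags(unsigned long guest_eflags,
				      unsigned long flags_after_op)
{
	return (guest_eflags & ~DEMO_EFLAGS_MASK) |
	       (flags_after_op & DEMO_EFLAGS_MASK);
}
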
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);

	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			fetch_possible_mmx_operand(&ctxt->src);
			fetch_possible_mmx_operand(&ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(&ctxt->dst);

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
				rc = emulate_gp(ctxt, 0);

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
		ctxt->src.orig_val64 = ctxt->src.val64;

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)

	if ((ctxt->d & DstMask) == ImplicitOps)

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;

	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop)
			rc = fastop(ctxt, (fastop_t)ctxt->execute);
			rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)

	if (ctxt->opcode_len == 2)
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
	case 0xce:		/* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		goto cannot_emulate;

	if (rc != X86EMUL_CONTINUE)

	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instruction)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iteration.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			goto done; /* skip rip writeback */
		ctxt->eflags &= ~X86_EFLAGS_RF;

	ctxt->eip = ctxt->_eip;

	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		goto cannot_emulate;

	if (rc != X86EMUL_CONTINUE)

	return EMULATION_FAILED;
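
/*
 * Illustrative sketch (not part of the original source): the string-loop tail
 * of x86_emulate_insn() above decrements RCX by the number of elements
 * processed and keeps iterating inside the emulator only while the low ten
 * bits of RCX are non-zero (or while the PIO read-ahead cache still holds
 * data), so a long REP re-enters the guest at least every 1024 iterations.
 * Stand-alone version of that decision:
 */
static int demo_keep_iterating_in_emulator(unsigned long long rcx,
					   unsigned int cache_pos,
					   unsigned int cache_end)
{
	if (cache_end == 0 && !(rcx & 0x3ff))
		return 0;	/* 1024-iteration boundary: re-enter the guest */
	if (cache_end != 0 && cache_end == cache_pos)
		return 0;	/* read-ahead cache drained: re-enter the guest */
	return 1;
}
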
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
	invalidate_registers(ctxt);

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
	writeback_registers(ctxt);

bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
	if (ctxt->rep_prefix && (ctxt->d & String))

	if (ctxt->d & TwoMemOp)