 * Copyright (C) 2004-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
register unsigned long pc asm("%r12");
register unsigned long pc asm("%esi");
#include "kqemu_int.h"

 * - do not use cs.base for CS64 code
 * - test all segment limits in 16/32 bit mode

//#define DEBUG_INTERP
static inline uint32_t lduw_kernel1(struct kqemu_state *s, unsigned long addr)
    if (likely(s->cpu_state.cpl != 3)) {
        return lduw_mem(s, addr);
    return lduw_fast(s, addr, 0);

static inline uint32_t ldl_kernel1(struct kqemu_state *s, unsigned long addr)
    if (likely(s->cpu_state.cpl != 3)) {
        return ldl_mem(s, addr);
    return ldl_fast(s, addr, 0);

#if defined (__x86_64__)
static inline uint64_t ldq_kernel1(struct kqemu_state *s, unsigned long addr)
    if (likely(s->cpu_state.cpl != 3)) {
        return ldq_mem(s, addr);
    return ldq_fast(s, addr, 0);

static inline void stw_kernel1(struct kqemu_state *s, unsigned long addr, uint32_t val)
    if (likely(s->cpu_state.cpl != 3)) {
        return stw_mem(s, addr, val);
    return stw_fast(s, addr, val, 0);

static inline void stl_kernel1(struct kqemu_state *s, unsigned long addr, uint32_t val)
    if (likely(s->cpu_state.cpl != 3)) {
        return stl_mem(s, addr, val);
    return stl_fast(s, addr, val, 0);

#if defined (__x86_64__)
static inline void stq_kernel1(struct kqemu_state *s, unsigned long addr, uint64_t val)
    if (likely(s->cpu_state.cpl != 3)) {
        return stq_mem(s, addr, val);
    return stq_fast(s, addr, val, 0);
#define ldq_kernel(addr) ldq_kernel1(s, addr)
#define ldl_kernel(addr) ldl_kernel1(s, addr)
#define lduw_kernel(addr) lduw_kernel1(s, addr)
#define stq_kernel(addr, val) stq_kernel1(s, addr, val)
#define stl_kernel(addr, val) stl_kernel1(s, addr, val)
#define stw_kernel(addr, val) stw_kernel1(s, addr, val)

#define ldub(s, addr) ldub_mem(s, addr)
#define lduw(s, addr) lduw_mem(s, addr)
#define ldl(s, addr) ldl_mem(s, addr)
#define ldq(s, addr) ldq_mem(s, addr)
#define stb(s, addr, val) stb_mem(s, addr, val)
#define stw(s, addr, val) stw_mem(s, addr, val)
#define stl(s, addr, val) stl_mem(s, addr, val)
#define stq(s, addr, val) stq_mem(s, addr, val)

#define ldq_kernel(addr) ldq_fast(s, addr, 0)
#define ldl_kernel(addr) ldl_fast(s, addr, 0)
#define lduw_kernel(addr) lduw_fast(s, addr, 0)
#define stq_kernel(addr, val) stq_fast(s, addr, val, 0)
#define stl_kernel(addr, val) stl_fast(s, addr, val, 0)
#define stw_kernel(addr, val) stw_fast(s, addr, val, 0)

#define ldub(s, addr) ldub_fast(s, addr, (s->cpu_state.cpl == 3))
#define lduw(s, addr) lduw_fast(s, addr, (s->cpu_state.cpl == 3))
#define ldl(s, addr) ldl_fast(s, addr, (s->cpu_state.cpl == 3))
#define ldq(s, addr) ldq_fast(s, addr, (s->cpu_state.cpl == 3))
#define stb(s, addr, val) stb_fast(s, addr, val, (s->cpu_state.cpl == 3))
#define stw(s, addr, val) stw_fast(s, addr, val, (s->cpu_state.cpl == 3))
#define stl(s, addr, val) stl_fast(s, addr, val, (s->cpu_state.cpl == 3))
#define stq(s, addr, val) stq_fast(s, addr, val, (s->cpu_state.cpl == 3))
#endif /* !USE_HARD_MMU */
#define CODE64(s) ((s)->cpu_state.segs[R_CS].flags & DESC_L_MASK)
#define REX_R(s) ((s)->rex_r)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_REX    0x08
static inline unsigned int get_sp_mask(unsigned int e2)
    if (e2 & DESC_B_MASK)
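/* Added commentary (not from the original source): DESC_B_MASK tests the
   "B" (big) bit of the SS descriptor flags.  When set, the stack segment is
   32-bit and the mask returned is presumably 0xffffffff; when clear, the
   stack pointer wraps at 64K and the mask is 0xffff.  The mask is then used
   below as `sp & sp_mask` whenever a stack address is formed. */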
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\

#define PUSHL(ssp, sp, sp_mask, val)\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\

#define POPW(ssp, sp, sp_mask, val)\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\

#define POPL(ssp, sp, sp_mask, val)\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\

#define PUSHQ(sp, val)\
    stq_kernel(sp, (val));\

#define POPQ(sp, val)\
    val = ldq_kernel(sp);\

#define ESP (s->regs1.esp)
#define EIP (s->regs1.eip)
static inline unsigned int get_seg_sel(struct kqemu_state *s, int seg_reg)
        val = (s->regs1.cs_sel & ~3) | s->cpu_state.cpl;
        val = (s->regs1.ss_sel & ~3) | s->cpu_state.cpl;
        asm volatile ("mov %%ds, %0" : "=r" (val));
        val &= 0xffff; /* XXX: see if it is really necessary */
        asm volatile ("mov %%es, %0" : "=r" (val));
        val &= 0xffff; /* XXX: see if it is really necessary */
        val = s->regs1.ds_sel;
        val = s->regs1.es_sel;
        asm volatile ("mov %%fs, %0" : "=r" (val));
        val &= 0xffff; /* XXX: see if it is really necessary */
        asm volatile ("mov %%gs, %0" : "=r" (val));
        val &= 0xffff; /* XXX: see if it is really necessary */
static inline void set_seg_desc_cache(struct kqemu_state *s,
    struct kqemu_segment_cache *sc;
    unsigned long base, limit;
    sc = &s->cpu_state.segs[seg_reg];
    e2 = (sc->flags & 0x0070ff00) | (3 << DESC_DPL_SHIFT) |
        DESC_S_MASK | DESC_A_MASK;
    if (limit > 0xfffff) {
    e1 = (base << 16) | (limit & 0xffff);
    e2 |= ((base >> 16) & 0xff) | (base & 0xff000000) | (limit & 0x000f0000);
    s->seg_desc_cache[seg_reg][0] = e1;
    s->seg_desc_cache[seg_reg][1] = e2;
/* seg_reg must be R_CS or R_SS */
static inline void set_descriptor_entry(struct kqemu_state *s,
                                        int seg_reg, int selector)
    /* reset the previous one */
    sel = s->seg_desc_entries[seg_reg - R_CS];
    ptr = (uint8_t *)s->dt_table + sel;
    *(uint64_t *)(ptr) = 0;
    if ((selector & 0xfffc) != 0) {
        sel = (selector & ~7) | ((selector & 4) << 14);
        ptr = (uint8_t *)s->dt_table + sel;
        *(uint32_t *)(ptr) = s->seg_desc_cache[seg_reg][0];
        *(uint32_t *)(ptr + 4) = s->seg_desc_cache[seg_reg][1];
    s->seg_desc_entries[seg_reg - R_CS] = sel;
/* NOTE: in the interpreter we only need the base value and flags for
   CS and SS. The selector is loaded at its real place (either real
static void cpu_x86_load_seg_cache(struct kqemu_state *s,
                                   int seg_reg, unsigned int selector,
                                   uint32_t base, unsigned int limit,
                                   uint32_t e1, uint32_t e2)
    struct kqemu_segment_cache *sc;
    monitor_log(s, "%08x: load_seg_cache seg_reg=%d sel=0x%04x e2=0x%08x\n",
                s->regs1.eip, seg_reg, selector, e2);
    sc = &s->cpu_state.segs[seg_reg];
    /* update CPU state if needed */
    if (s->cpu_state.cpl != 3) {
        s->regs1.cs_sel = selector | 3;
        set_seg_desc_cache(s, R_CS);
        set_descriptor_entry(s, R_CS, selector);
        s->regs1.ss_sel = selector | 3;
        set_seg_desc_cache(s, R_SS);
        set_descriptor_entry(s, R_SS, selector);
        set_seg_desc_cache(s, R_DS);
        set_cpu_seg_cache(s, R_DS, selector);
        set_seg_desc_cache(s, R_ES);
        set_cpu_seg_cache(s, R_ES, selector);
        s->regs1.ds_sel = selector;
        set_seg_desc_cache(s, R_DS);
        s->regs1.es_sel = selector;
        set_seg_desc_cache(s, R_ES);
        set_seg_desc_cache(s, R_FS);
        set_cpu_seg_cache(s, R_FS, selector);
        set_seg_desc_cache(s, R_GS);
        set_cpu_seg_cache(s, R_GS, selector);
        s->regs1.cs_sel = selector | 3;
        s->regs1.ss_sel = selector | 3;
        LOAD_SEG(ds, selector);
        LOAD_SEG(es, selector);
        s->regs1.ds_sel = selector;
        s->regs1.es_sel = selector;
        LOAD_SEG(fs, selector);
        LOAD_SEG(gs, selector);
void update_seg_desc_caches(struct kqemu_state *s)
    if (s->cpu_state.cpl != 3) {
        /* update the seg caches */
        set_seg_desc_cache(s, R_CS);
        set_descriptor_entry(s, R_CS, s->regs1.cs_sel);
        set_seg_desc_cache(s, R_SS);
        set_descriptor_entry(s, R_SS, s->regs1.ss_sel);
        set_seg_desc_cache(s, R_DS);
        set_seg_desc_cache(s, R_ES);
        set_seg_desc_cache(s, R_FS);
        set_seg_desc_cache(s, R_GS);
#define REG_PTR(reg) (&s->regs1.eax + (reg))

static inline unsigned long get_regb(struct kqemu_state *s, int reg)
    if (s->prefix & PREFIX_REX) {
        val = *(uint8_t *)REG_PTR(reg);
        val = *((uint8_t *)REG_PTR(reg & 3) + (reg >> 2));

static inline unsigned long get_reg(struct kqemu_state *s, int reg)
    return *(unsigned long *)REG_PTR(reg);

static inline void set_reg(struct kqemu_state *s, int reg, unsigned long val)
    *(unsigned long *)REG_PTR(reg) = val;

static inline void set_regl(struct kqemu_state *s, int reg, uint32_t val)
    *(unsigned long *)REG_PTR(reg) = val;

static inline void set_regw(struct kqemu_state *s, int reg, uint32_t val)
    *(uint16_t *)REG_PTR(reg) = val;

static inline void set_regb(struct kqemu_state *s, int reg, uint32_t val)
    if (s->prefix & PREFIX_REX) {
        *(uint8_t *)REG_PTR(reg) = val;
        *((uint8_t *)REG_PTR(reg & 3) + (reg >> 2)) = val;
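/* Added commentary (not from the original source): without a REX prefix,
   x86 encodes the byte registers as AL, CL, DL, BL, AH, CH, DH, BH, i.e.
   register numbers 4..7 address the high byte of EAX..EBX.  The expression
   `*((uint8_t *)REG_PTR(reg & 3) + (reg >> 2))` relies on this: `reg & 3`
   selects the dword register slot and `reg >> 2` (0 or 1) selects its low
   or high byte, assuming the little-endian register layout of regs1. */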
static inline unsigned long ldS(struct kqemu_state *s, int bsize,

static inline void stS(struct kqemu_state *s, int bsize, unsigned long addr,

static inline unsigned long get_regS(struct kqemu_state *s, int bsize,
        val = get_regb(s, reg);
        val = get_reg(s, reg);

static inline void set_regS(struct kqemu_state *s, int bsize,
                            int reg, unsigned long val)
        set_regb(s, reg, val);
    } else if (bsize == 1) {
        *(uint16_t *)REG_PTR(reg) = val;
    else if (bsize == 3) {
        *(unsigned long *)REG_PTR(reg) = val;
        *(unsigned long *)REG_PTR(reg) = (uint32_t)val;
static inline unsigned long stack_pop(struct kqemu_state *s)
    unsigned long addr, sp_mask, val;
    sp_mask = get_sp_mask(s->cpu_state.segs[R_SS].flags);
    addr = (s->regs1.esp & sp_mask) + s->cpu_state.segs[R_SS].base;
static inline void sp_add(struct kqemu_state *s, long addend)
        s->regs1.esp += addend;
    if (s->cpu_state.segs[R_SS].flags & DESC_B_MASK)
        s->regs1.esp = (uint32_t)(s->regs1.esp + addend);
        *(uint16_t *)&s->regs1.esp += addend;

static inline void stack_pop_update(struct kqemu_state *s)
static inline void stack_pushS(struct kqemu_state *s, unsigned long val,
    unsigned long addr, sp_mask, sp;
    sp_mask = get_sp_mask(s->cpu_state.segs[R_SS].flags);
        sp = (s->regs1.esp - 4) & sp_mask;
        addr = sp + s->cpu_state.segs[R_SS].base;
        sp = (s->regs1.esp - 2) & sp_mask;
        addr = sp + s->cpu_state.segs[R_SS].base;
    s->regs1.esp = sp | (s->regs1.esp & ~sp_mask);

static inline void stack_push(struct kqemu_state *s, unsigned long val)
    stack_pushS(s, val, s->dflag);
static inline int get_jcc_cond(unsigned long eflags, int b)
        return eflags & CC_O;
        return (eflags ^ CC_O) & CC_O;
        return eflags & CC_C;
        return (eflags ^ CC_C) & CC_C;
        return eflags & CC_Z;
        return (eflags ^ CC_Z) & CC_Z;
        return ((eflags >> 6) | eflags) & 1;
        return (((eflags >> 6) | eflags) & 1) ^ 1;
        return eflags & CC_S;
        return (eflags ^ CC_S) & CC_S;
        return eflags & CC_P;
        return (eflags ^ CC_P) & CC_P;
        return ((eflags >> 4) ^ eflags) & CC_S;
        return (((eflags >> 4) ^ eflags) ^ CC_S) & CC_S;
        return (((eflags >> 4) ^ eflags) | (eflags << 1)) & CC_S;
        return ((((eflags >> 4) ^ eflags) | (eflags << 1)) ^ CC_S) & CC_S;
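/* Added commentary (not from the original source): the cases above follow
   the hardware condition-code order (O, NO, B, AE, Z, NZ, BE, A, S, NS,
   P, NP, L, GE, LE, G).  For example "BE" is CF|ZF: ZF is bit 6 of EFLAGS
   and CF is bit 0, so `((eflags >> 6) | eflags) & 1` is non-zero when
   either flag is set; the signed conditions combine SF (bit 7) with OF
   (bit 11) through the `(eflags >> 4) ^ eflags` shift-and-xor trick. */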
static inline unsigned long compute_eflags(struct kqemu_state *s)
    return (s->comm_page.virt_eflags & EFLAGS_MASK) |
        (s->regs1.eflags & ~EFLAGS_MASK);
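/* Added commentary (not from the original source): the guest EFLAGS is kept
   in two places.  The bits covered by EFLAGS_MASK (apparently the
   virtualized system bits such as IF and IOPL, see get_eflags_if() and
   get_eflags_iopl() below) live in comm_page.virt_eflags, while the
   remaining bits are held in regs1.eflags; compute_eflags() merges the two
   views into the guest-visible value. */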
static inline void set_eflags(struct kqemu_state *s, unsigned long val)
    s->comm_page.virt_eflags = val & EFLAGS_MASK;
    s->regs1.eflags = compute_eflags_user(s, val);

static inline void load_eflags(struct kqemu_state *s,
                               unsigned long val, unsigned long update_mask)
    unsigned long org_eflags;
    update_mask |= 0xcff; /* DF + all condition codes */
    org_eflags = compute_eflags(s);
    val = (val & update_mask) | (org_eflags & ~update_mask);

static inline void set_reset_eflags(struct kqemu_state *s,
                                    unsigned long set_val,
                                    unsigned long reset_val)
    val = compute_eflags(s);
    val = (val | set_val) & ~reset_val;

static inline int get_eflags_iopl(struct kqemu_state *s)
    return (s->comm_page.virt_eflags >> IOPL_SHIFT) & 3;
/* return IF_MASK or 0 */
static inline int get_eflags_if(struct kqemu_state *s)
    return (s->comm_page.virt_eflags & IF_MASK);

/* return VM_MASK or 0 */
static inline int get_eflags_vm(struct kqemu_state *s)
    return 0; /* currently VM_MASK cannot be set */

/* return NT_MASK or 0 */
static inline int get_eflags_nt(struct kqemu_state *s)
    return s->regs1.eflags & NT_MASK;
static void cpu_x86_set_cpl(struct kqemu_state *s, int cpl)
    /* update GDT/LDT cache for cpl == 3 because GDT and LDT could
       have been modified by guest kernel code */
    update_gdt_ldt_cache(s);
    /* switch the address space */
    is_user = (cpl == 3);
    s->monitor_cr3 = s->pgds_cr3[is_user];
    asm volatile ("mov %0, %%cr3" : : "r" (s->monitor_cr3));
    s->cpu_state.cpl = cpl;
    /* just needed for AM bit */
    /* may be needed for TSD */
    update_seg_desc_caches(s);
    /* switch the GDT and the LDT */
    s->monitor_gdt.base = s->monitor_data_vaddr +
        offsetof(struct kqemu_state, dt_table) + 0x20000 * is_user;
    s->monitor_gdt.base = s->monitor_data_vaddr +
        offsetof(struct kqemu_state, dt_table) + 0x20000 * cpl;
    /* XXX: check op size for x86_64 */
    asm volatile ("lgdt %0" : "=m" (s->monitor_gdt));
    asm volatile ("lldt %0" : "=m" (s->monitor_ldt_sel));
/* load a segment descriptor */
static void load_seg_desc(struct kqemu_state *s,
                          int seg_reg, uint16_t selector)
    struct kqemu_cpu_state *env = &s->cpu_state;
    struct kqemu_segment_cache *dt;
    monitor_log(s, "load_seg_desc: reg=%d sel=0x%04x\n", seg_reg, selector);
    if (selector >= s->monitor_selector_base &&
        selector <= (s->monitor_selector_base + MONITOR_SEL_RANGE)) {
        monitor_panic(s, "Trying to load a reserved selector\n");
    if ((selector & 0xfffc) == 0) {
            && (!(env->segs[R_CS].flags & DESC_L_MASK) || env->cpl == 3)
            raise_exception_err(s, EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(s, seg_reg, selector, 0, 0, 0, 0);
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    ptr = dt->base + index;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    if (!(e2 & DESC_S_MASK))
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (seg_reg == R_SS) {
        /* must be writable segment */
        if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
            raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
        if (rpl != cpl || dpl != cpl)
            raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    } else if (seg_reg == R_CS) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
        if (e2 & DESC_C_MASK) {
                raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
                raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
        /* must be readable segment */
        if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
            raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
            /* if not conforming code, test rights */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(s, EXCP0C_STACK, selector & 0xfffc);
            raise_exception_err(s, EXCP0B_NOSEG, selector & 0xfffc);
    /* set the access bit if not already set */
    if (!(e2 & DESC_A_MASK)) {
        stl_kernel(ptr + 4, e2);
    /* reset the long mode bit if we are in legacy mode */
    if (seg_reg == R_CS && !(env->efer & MSR_EFER_LMA))
    cpu_x86_load_seg_cache(s, seg_reg, selector, get_seg_base(e1, e2),
                           get_seg_limit(e1, e2), e1, e2);
/* return non zero if error */
static inline int load_segment(struct kqemu_state *s,
                               uint32_t *e1_ptr, uint32_t *e2_ptr,
    struct kqemu_cpu_state *env = &s->cpu_state;
    struct kqemu_segment_cache *dt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
static inline void get_ss_esp_from_tss(struct kqemu_state *s,
                                       uint32_t *esp_ptr, int dpl)
    struct kqemu_cpu_state *env = &s->cpu_state;
    int type, index, shift;
    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
        cpu_abort(env, "invalid tss type");
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(s, EXCP0A_TSS, env->tr.selector & 0xfffc);
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
/* protected mode interrupt */
static void do_interrupt_protected(struct kqemu_state *s,
                                   int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
    struct kqemu_cpu_state *env = &s->cpu_state;
    struct kqemu_segment_cache *dt;
    unsigned long ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;

    if (!is_int && !is_hw) {
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(s, EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    case 5: /* task gate */
        raise_exception(s, KQEMU_RET_SOFTMMU);
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        raise_exception_err(s, EXCP0D_GPF, intno * 8 + 2);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(s, EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(s, EXCP0B_NOSEG, intno * 8 + 2);
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(s, EXCP0D_GPF, 0);
    if (load_segment(s, &e1, &e2, selector) != 0)
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(s, EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(s, &ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(s, EXCP0A_TSS, ss & 0xfffc);
            raise_exception_err(s, EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(s, &ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(s, EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            raise_exception_err(s, EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(s, EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(s, EXCP0A_TSS, ss & 0xfffc);
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (get_eflags_vm(s))
            raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        ss_e1 = ss_e2 = ss = 0; /* avoid warning */
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
    push_size <<= shift;

        if (get_eflags_vm(s)) {
            PUSHL(ssp, esp, sp_mask, get_seg_sel(s, R_GS));
            PUSHL(ssp, esp, sp_mask, get_seg_sel(s, R_FS));
            PUSHL(ssp, esp, sp_mask, get_seg_sel(s, R_DS));
            PUSHL(ssp, esp, sp_mask, get_seg_sel(s, R_ES));
            PUSHL(ssp, esp, sp_mask, get_seg_sel(s, R_SS));
            PUSHL(ssp, esp, sp_mask, ESP);
        PUSHL(ssp, esp, sp_mask, compute_eflags(s));
        PUSHL(ssp, esp, sp_mask, get_seg_sel(s, R_CS));
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        if (get_eflags_vm(s)) {
            PUSHW(ssp, esp, sp_mask, get_seg_sel(s, R_GS));
            PUSHW(ssp, esp, sp_mask, get_seg_sel(s, R_FS));
            PUSHW(ssp, esp, sp_mask, get_seg_sel(s, R_DS));
            PUSHW(ssp, esp, sp_mask, get_seg_sel(s, R_ES));
            PUSHW(ssp, esp, sp_mask, get_seg_sel(s, R_SS));
            PUSHW(ssp, esp, sp_mask, ESP);
        PUSHW(ssp, esp, sp_mask, compute_eflags(s));
        PUSHW(ssp, esp, sp_mask, get_seg_sel(s, R_CS));
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);

    cpu_x86_set_cpl(s, dpl);
    if (get_eflags_vm(s)) {
        cpu_x86_load_seg_cache(s, R_ES, 0, 0, 0, 0, 0);
        cpu_x86_load_seg_cache(s, R_DS, 0, 0, 0, 0, 0);
        cpu_x86_load_seg_cache(s, R_FS, 0, 0, 0, 0, 0);
        cpu_x86_load_seg_cache(s, R_GS, 0, 0, 0, 0, 0);
    ss = (ss & ~3) | dpl;
    cpu_x86_load_seg_cache(s, R_SS, ss,
                           ssp, get_seg_limit(ss_e1, ss_e2), ss_e1, ss_e2);
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);
    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(s, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        set_reset_eflags(s, 0, IF_MASK);
    set_reset_eflags(s, 0, VM_MASK | RF_MASK | TF_MASK | NT_MASK);
static inline unsigned long get_rsp_from_tss(struct kqemu_state *s, int level)
    struct kqemu_cpu_state *env = &s->cpu_state;
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(s, EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
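/* Added commentary (not from the original source): in the 64-bit TSS the
   RSP0..RSP2 fields start at offset 4 and the IST1..IST7 fields start at
   offset 36, each 8 bytes wide, which is why index = 8 * level + 4 works
   both for get_rsp_from_tss(s, dpl) and for the IST case below where the
   caller passes ist + 3. */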
/* 64 bit interrupt */
static void do_interrupt64(struct kqemu_state *s,
                           int intno, int is_int, int error_code,
                           unsigned long next_eip, int is_hw)
    struct kqemu_cpu_state *env = &s->cpu_state;
    struct kqemu_segment_cache *dt;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    unsigned long old_eip, esp, offset;

    if (!is_int && !is_hw) {
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(s, EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        raise_exception_err(s, EXCP0D_GPF, intno * 16 + 2);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(s, EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(s, EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((unsigned long)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(s, EXCP0D_GPF, 0);
    if (load_segment(s, &e1, &e2, selector) != 0)
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(s, EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
            esp = get_rsp_from_tss(s, ist + 3);
            esp = get_rsp_from_tss(s, dpl);
        esp &= ~0xfLL; /* align stack */
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
            esp = get_rsp_from_tss(s, ist + 3);
        esp &= ~0xfLL; /* align stack */
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */

    PUSHQ(esp, get_seg_sel(s, R_SS));
    PUSHQ(esp, compute_eflags(s));
    PUSHQ(esp, get_seg_sel(s, R_CS));
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);

    cpu_x86_set_cpl(s, dpl);
        cpu_x86_load_seg_cache(s, R_SS, ss, 0, 0, 0, 0);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(s, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        set_reset_eflags(s, 0, IF_MASK);
    set_reset_eflags(s, 0, VM_MASK | RF_MASK | TF_MASK | NT_MASK);
static void do_interrupt(struct kqemu_state *s,
                         int intno, int is_int, int error_code,
                         unsigned long next_eip, int is_hw)
    if (s->cpu_state.efer & MSR_EFER_LMA) {
        do_interrupt64(s, intno, is_int, error_code, next_eip, is_hw);
        do_interrupt_protected(s, intno, is_int, error_code, next_eip, is_hw);
static inline void validate_seg(struct kqemu_state *s, int seg_reg, int cpl)
    e2 = s->cpu_state.segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        cpu_x86_load_seg_cache(s, seg_reg, 0, 0, 0, 0, 0);
/* protected mode iret */
static inline void helper_ret_protected(struct kqemu_state *s,
                                        int shift, int is_iret, int addend)
    struct kqemu_cpu_state *env = &s->cpu_state;
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    unsigned long ssp, sp, new_eip, new_esp, sp_mask;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    /* XXX: ssp is zero in 64 bit ? */
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
        POPQ(sp, new_eflags);
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
            POPW(ssp, sp, sp_mask, new_eflags);
    monitor_log(s, "lret new %04x:" FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(s, EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(s, &e1, &e2, new_cs) != 0)
        raise_exception_err(s, EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(s, EXCP0D_GPF, new_cs & 0xfffc);
        raise_exception_err(s, EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
            raise_exception_err(s, EXCP0D_GPF, new_cs & 0xfffc);
            raise_exception_err(s, EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(s, EXCP0B_NOSEG, new_cs & 0xfffc);

    if (rpl == cpl && (!CODE64(s) ||
                       (CODE64(s) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(s, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        /* return to different privilege level */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
        if ((new_ss & 0xfffc) == 0) {
            /* NULL ss is allowed in long mode if cpl != 3 */
            if ((env->efer & MSR_EFER_LMA) && rpl != 3) {
                cpu_x86_set_cpl(s, rpl);
                cpu_x86_load_seg_cache(s, R_SS, new_ss,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK | 0x000f0000);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
                raise_exception_err(s, EXCP0D_GPF, 0);
            if ((new_ss & 3) != rpl)
                raise_exception_err(s, EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(s, &ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(s, EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(s, EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                raise_exception_err(s, EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(s, EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_set_cpl(s, rpl);
            cpu_x86_load_seg_cache(s, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
        cpu_x86_load_seg_cache(s, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        sp_mask = get_sp_mask(ss_e2);
        /* validate data segments */
        validate_seg(s, R_ES, cpl);
        validate_seg(s, R_DS, cpl);
        validate_seg(s, R_FS, cpl);
        validate_seg(s, R_GS, cpl);
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    /* NOTE: 'cpl' is the _old_ CPL */
    eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        eflags_mask |= IOPL_MASK;
    iopl = get_eflags_iopl(s);
        eflags_mask |= IF_MASK;
        eflags_mask &= 0xffff;
    load_eflags(s, new_eflags, eflags_mask);

    raise_exception(s, KQEMU_RET_SOFTMMU);
void helper_iret_protected(struct kqemu_state *s, int shift)
    /* specific case for TSS */
    if (get_eflags_nt(s)) {
        if (s->cpu_state.efer & MSR_EFER_LMA)
            raise_exception_err(s, EXCP0D_GPF, 0);
        raise_exception(s, KQEMU_RET_SOFTMMU);
    helper_ret_protected(s, shift, 1, 0);

void helper_lret_protected(struct kqemu_state *s, int shift, int addend)
    helper_ret_protected(s, shift, 0, addend);
void do_int(struct kqemu_state *s, int intno)
    unsigned long next_eip;
    if (s->cpu_state.user_only) {
        s->cpu_state.next_eip = next_eip;
        raise_exception(s, KQEMU_RET_INT + intno);
    do_interrupt(s, intno, 1, 0, next_eip, 0);
static void helper_syscall(struct kqemu_state *s)
    struct kqemu_cpu_state *env = &s->cpu_state;
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(s, EXCP06_ILLOP, 0);
    if (env->user_only) {
        raise_exception(s, KQEMU_RET_SYSCALL);
    selector = (env->star >> 32) & 0xffff;
    if (env->efer & MSR_EFER_LMA) {
        s->regs1.r11 = compute_eflags(s);
        cpu_x86_set_cpl(s, 0);
        cpu_x86_load_seg_cache(s, R_CS, selector & 0xfffc,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(s, R_SS, (selector + 8) & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        set_reset_eflags(s, 0, env->fmask);
        s->regs1.ecx = (uint32_t)(pc);
        cpu_x86_set_cpl(s, 0);
        cpu_x86_load_seg_cache(s, R_CS, selector & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(s, R_SS, (selector + 8) & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        set_reset_eflags(s, 0, IF_MASK | RF_MASK | VM_MASK);
        EIP = (uint32_t)env->star;
static void helper_sysret(struct kqemu_state *s)
    struct kqemu_cpu_state *env = &s->cpu_state;
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(s, EXCP06_ILLOP, 0);
    if (!(env->cr0 & CR0_PE_MASK) || env->cpl != 0) {
        raise_exception_err(s, EXCP0D_GPF, 0);
    selector = (env->star >> 48) & 0xffff;
    if (env->efer & MSR_EFER_LMA) {
        cpu_x86_set_cpl(s, 3);
        if (s->dflag == 2) {
            cpu_x86_load_seg_cache(s, R_CS, (selector + 16) | 3,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
            cpu_x86_load_seg_cache(s, R_CS, selector | 3,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            EIP = (uint32_t)s->regs1.ecx;
        cpu_x86_load_seg_cache(s, R_SS, selector + 8,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags(s, (uint32_t)(s->regs1.r11), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(s, 3);
        cpu_x86_load_seg_cache(s, R_CS, selector | 3,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        EIP = (uint32_t)s->regs1.ecx;
        cpu_x86_load_seg_cache(s, R_SS, selector + 8,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        set_reset_eflags(s, IF_MASK, 0);
static void helper_sysenter(struct kqemu_state *s)
    struct kqemu_cpu_state *env = &s->cpu_state;
        raise_exception(s, KQEMU_RET_SOFTMMU);
    if (env->sysenter_cs == 0) {
        raise_exception_err(s, EXCP0D_GPF, 0);
    set_reset_eflags(s, 0, VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(s, 0);
    cpu_x86_load_seg_cache(s, R_CS, env->sysenter_cs & 0xfffc,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(s, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
static void helper_sysexit(struct kqemu_state *s)
    struct kqemu_cpu_state *env = &s->cpu_state;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(s, EXCP0D_GPF, 0);
    cpu_x86_set_cpl(s, 3);
    cpu_x86_load_seg_cache(s, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(s, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
static inline void load_seg_cache_raw_dt(struct kqemu_segment_cache *sc,
                                         uint32_t e1, uint32_t e2)
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
void helper_lldt(struct kqemu_state *s, int selector)
    struct kqemu_cpu_state *env = &s->cpu_state;
    struct kqemu_segment_cache *dt;
    int index, entry_limit;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    index = selector & ~7;
    if (env->efer & MSR_EFER_LMA)
    if ((index + entry_limit) > dt->limit)
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    ptr = dt->base + index;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
        raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(s, EXCP0B_NOSEG, selector & 0xfffc);
    if (env->efer & MSR_EFER_LMA) {
        e3 = ldl_kernel(ptr + 8);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
        env->ldt.base |= (unsigned long)e3 << 32;
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    env->ldt.selector = selector;
static void helper_wrmsr(struct kqemu_state *s)
    struct kqemu_cpu_state *env = &s->cpu_state;
    val = ((uint32_t)s->regs1.eax) |
        ((uint64_t)((uint32_t)s->regs1.edx) << 32);
    switch((uint32_t)s->regs1.ecx) {
        env->segs[R_FS].base = val;
        wrmsrl(MSR_FSBASE, val);
        env->segs[R_GS].base = val;
        wrmsrl(MSR_GSBASE, val);
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        raise_exception(s, KQEMU_RET_SOFTMMU);
static void helper_rdmsr(struct kqemu_state *s)
    struct kqemu_cpu_state *env = &s->cpu_state;
    switch((uint32_t)s->regs1.ecx) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        val = env->segs[R_FS].base;
        val = env->segs[R_GS].base;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        raise_exception(s, KQEMU_RET_SOFTMMU);
    s->regs1.eax = (uint32_t)(val);
    s->regs1.edx = (uint32_t)(val >> 32);
static void helper_swapgs(struct kqemu_state *s)
    struct kqemu_cpu_state *env = &s->cpu_state;
    val = env->kernelgsbase;
    env->kernelgsbase = env->segs[R_GS].base;
    env->segs[R_GS].base = val;
    wrmsrl(MSR_GSBASE, val);
/* XXX: optimize by reloading just the needed fields ? */
static inline void reload_seg_cache2(struct kqemu_state *s, int seg_reg,
                                     unsigned int selector)
    struct kqemu_segment_cache *sc;
    uint32_t e1, e2, sel;
    sel = (selector & ~7) | ((selector & 4) << 14);
    ptr = (uint8_t *)s->dt_table + ((NB_DT_TABLES - 1) << 17) + sel;
    e1 = *(uint16_t *)(ptr + 2);
    e2 = *(uint32_t *)(ptr + 4);
    sc = &s->cpu_state.segs[seg_reg];
    /* only useful for SS and CS */
    if (seg_reg == R_CS || seg_reg == R_SS)
        sc->base = (e1 | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
        /* limit not needed */
    e1 = *(uint32_t *)(ptr);
    e2 = *(uint32_t *)(ptr + 4);
    sc = &s->cpu_state.segs[seg_reg];
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
static inline void reload_seg_cache3(struct kqemu_state *s, int seg_reg,
                                     unsigned int selector)
    struct kqemu_segment_cache *sc;
    unsigned int sel1, sel;
    sc = &s->cpu_state.segs[seg_reg];
    sel1 = selector | 3;
    if (sel1 == s->regs1.cs_sel || sel1 == s->regs1.ss_sel) {
        sel = (selector & ~7) | ((selector & 4) << 14);
        ptr = (uint8_t *)s->dt_table + sel;
        e1 = *(uint32_t *)(ptr);
        e2 = *(uint32_t *)(ptr + 4);
        e1 = s->seg_desc_cache[seg_reg][0];
        e2 = s->seg_desc_cache[seg_reg][1];
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
void update_seg_cache(struct kqemu_state *s)
    /* we must reload the segment caches to have all the necessary
       values. Another solution could be to reload them on demand */
    if (s->cpu_state.cpl != 3) {
        reload_seg_cache3(s, R_CS, s->regs1.cs_sel);
        reload_seg_cache3(s, R_SS, s->regs1.ss_sel);
        asm volatile ("mov %%ds, %0" : "=r" (sel));
        sel = s->regs1.ds_sel;
        reload_seg_cache3(s, R_DS, sel);
        asm volatile ("mov %%es, %0" : "=r" (sel));
        sel = s->regs1.es_sel;
        reload_seg_cache3(s, R_ES, sel);
        asm volatile ("mov %%fs, %0" : "=r" (sel));
        reload_seg_cache3(s, R_FS, sel);
        asm volatile ("mov %%gs, %0" : "=r" (sel));
        reload_seg_cache3(s, R_GS, sel);
#endif /* USE_SEG_GP */
        reload_seg_cache2(s, R_CS, s->regs1.cs_sel);
        reload_seg_cache2(s, R_SS, s->regs1.ss_sel);
        asm volatile ("mov %%ds, %0" : "=r" (sel));
        reload_seg_cache2(s, R_DS, sel);
        asm volatile ("mov %%es, %0" : "=r" (sel));
        reload_seg_cache2(s, R_ES, sel);
        reload_seg_cache2(s, R_DS, s->regs1.ds_sel);
        reload_seg_cache2(s, R_ES, s->regs1.es_sel);
        asm volatile ("mov %%fs, %0" : "=r" (sel));
        reload_seg_cache2(s, R_FS, sel);
        asm volatile ("mov %%gs, %0" : "=r" (sel));
        reload_seg_cache2(s, R_GS, sel);
    rdmsrl(MSR_FSBASE, s->cpu_state.segs[R_FS].base);
    rdmsrl(MSR_GSBASE, s->cpu_state.segs[R_GS].base);
    s->seg_cache_loaded = 1;
    s->insn_count = MAX_INSN_COUNT;
/* handle the exception in the monitor */
void raise_exception_interp(void *opaque)
    struct kqemu_state *s = opaque;
    int intno = s->arg0;
#ifdef PROFILE_INTERP2
#ifdef PROFILE_INTERP2
    if (!s->seg_cache_loaded)
        update_seg_cache(s);
    /* the exception handling counts as one instruction so that we can
       detect exception loops */
    /* XXX: it would be better to detect double or triple faults */
    if (unlikely(--s->insn_count <= 0))
        raise_exception(s, KQEMU_RET_SOFTMMU);
    do_interrupt(s, intno, 0, s->cpu_state.error_code, 0, 0);
    if (!get_eflags_if(s)) {
#ifdef PROFILE_INTERP2
    s->interp_interrupt_count++;
    s->interp_interrupt_cycles += (getclock() - ti);
    goto_user(s, s->regs);
#define MAX_INSN_LEN 15

static inline uint32_t ldub_code(struct kqemu_state *s)
    val = ldub_mem_fast(s, pc + s->cpu_state.segs[R_CS].base);

static inline uint32_t lduw_code(struct kqemu_state *s)
    val = lduw_mem_fast(s, pc + s->cpu_state.segs[R_CS].base);

static inline uint32_t ldl_code(struct kqemu_state *s)
    val = ldl_mem_fast(s, pc + s->cpu_state.segs[R_CS].base);

static inline uint64_t ldq_code(struct kqemu_state *s)
    val = ldl_mem_fast(s, pc + s->cpu_state.segs[R_CS].base);
    val |= (uint64_t)ldl_mem_fast(s, pc + s->cpu_state.segs[R_CS].base + 4) << 32;
static unsigned long __attribute__((regparm(2))) get_modrm(struct kqemu_state *s, int modrm)
    unsigned long disp, addr;
    int mod, rm, code, override;
    static const void *modrm_table32[0x88] = {
        [0x00] = &&modrm32_00,
        [0x01] = &&modrm32_01,
        [0x02] = &&modrm32_02,
        [0x03] = &&modrm32_03,
        [0x04] = &&modrm32_04,
        [0x05] = &&modrm32_05,
        [0x06] = &&modrm32_06,
        [0x07] = &&modrm32_07,
        [0x40] = &&modrm32_40,
        [0x41] = &&modrm32_41,
        [0x42] = &&modrm32_42,
        [0x43] = &&modrm32_43,
        [0x44] = &&modrm32_44,
        [0x45] = &&modrm32_45,
        [0x46] = &&modrm32_46,
        [0x47] = &&modrm32_47,
        [0x80] = &&modrm32_80,
        [0x81] = &&modrm32_81,
        [0x82] = &&modrm32_82,
        [0x83] = &&modrm32_83,
        [0x84] = &&modrm32_84,
        [0x85] = &&modrm32_85,
        [0x86] = &&modrm32_86,
        [0x87] = &&modrm32_87,

    if (likely(s->aflag)) {
        goto *modrm_table32[modrm & 0xc7];
    /* sib, most common case ? */
    code = ldub_code(s);
    addr = (int8_t)ldub_code(s);
    base = (code & 7) | REX_B(s);
    addr += get_reg(s, base);
    index = ((code >> 3) & 7) | REX_X(s);
    scale = (code >> 6);
    addr += get_reg(s, index) << scale;
    code = ldub_code(s);
    addr = (int32_t)ldl_code(s);
    base = 0; /* force DS override */
    addr = get_reg(s, base);
    index = ((code >> 3) & 7) | REX_X(s);
    scale = (code >> 6);
    addr += get_reg(s, index) << scale;
    code = ldub_code(s);
    addr = (int32_t)ldl_code(s);
    addr = (int32_t)ldl_code(s);
    base = 0; /* force DS override */
    addr += pc + s->rip_offset;
    base = (modrm & 7) | REX_B(s);
    addr = get_reg(s, base);
    addr = (int8_t)ldub_code(s);
    base = (modrm & 7) | REX_B(s);
    addr += get_reg(s, base);
    addr = (int32_t)ldl_code(s);
    base = (modrm & 7) | REX_B(s);
    addr += get_reg(s, base);
    if (unlikely(s->popl_esp_hack)) {
        addr += s->popl_esp_hack;
    mod = (modrm >> 6) & 3;
    code = ldub_code(s);
    scale = (code >> 6) & 3;
    index = ((code >> 3) & 7) | REX_X(s);
    if ((base & 7) == 5) {
        disp = (int32_t)ldl_code(s);
        if (CODE64(s) && !havesib) {
            disp += pc + s->rip_offset;
    disp = (int8_t)ldub_code(s);
    disp = (int32_t)ldl_code(s);
    /* for correct popl handling with esp */
    if (base == 4 && s->popl_esp_hack)
        addr += s->popl_esp_hack;
    addr += get_reg(s, base);
    /* XXX: index == 4 is always invalid */
    if (havesib && (index != 4 || scale != 0)) {
        addr += get_reg(s, index) << scale;
    override = s->override;
    if (override == R_FS || override == R_GS)
        addr += s->cpu_state.segs[override].base;
    addr = (uint32_t)addr;
    if (override != -2) {
        if (base == R_EBP || base == R_ESP)
        addr += s->cpu_state.segs[override].base;
    addr = (uint32_t)addr;

    mod = (modrm >> 6) & 3;
    disp = lduw_code(s);
    rm = 0; /* avoid SS override */
    disp = (int8_t)ldub_code(s);
    disp = lduw_code(s);
        addr = s->regs1.ebx + s->regs1.esi;
        addr = s->regs1.ebx + s->regs1.edi;
        addr = s->regs1.ebp + s->regs1.esi;
        addr = s->regs1.ebp + s->regs1.edi;
        addr = s->regs1.esi;
        addr = s->regs1.edi;
        addr = s->regs1.ebp;
        addr = s->regs1.ebx;
    override = s->override;
    if (override != -2) {
        if (rm == 2 || rm == 3 || rm == 6)
        addr += s->cpu_state.segs[override].base;
    monitor_log(s, "get_modrm: addr=%08lx\n", addr);
static inline int insn_const_size(unsigned int ot)

static inline int insn_get(struct kqemu_state *s, int ot)
#define EB_ADD (0 * 4)
#define EB_AND (4 * 4)
#define EB_SUB (5 * 4)
#define EB_INC (8 * 4)
#define EB_DEC (9 * 4)
#define EB_ROL (10 * 4)
#define EB_BT  (18 * 4)
#define EB_BSF (22 * 4)
#define UPDATE_CODE32()\
        flags_initval = 0x00ff0201;\
        code32 = (s->cpu_state.segs[R_CS].flags >> DESC_B_SHIFT) & 1;\
        flags_initval = code32 | (code32 << 8) | 0x00ff0000;\

#define UPDATE_CODE32()\
    code32 = (s->cpu_state.segs[R_CS].flags >> DESC_B_SHIFT) & 1;\
    flags_initval = code32 | (code32 << 8) | 0x00ff0000;\
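/* Added commentary (not from the original source): flags_initval is written
   with a single store into s->dflag in the interpreter main loop
   ("*(uint32_t *)&s->dflag = flags_initval").  The assumption is that dflag
   and the neighbouring byte-sized decode fields (aflag and related
   per-instruction state) are laid out consecutively in struct kqemu_state,
   so one store resets all of them; the low byte (code32) becomes the default
   operand-size index used by `goto *insn_table[s->dflag][b]`. */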
    "andl $0x8d5, %%ecx\n"\
    "andl $~0x8d5, %%eax\n"\
    "orl %%ecx, %%eax\n"\
    "andl $0x8d5, %%eax\n"\
    "andl $~0x8d5, %%ecx\n"\
    "orl %%eax, %%ecx\n"

#define SAVE_CC_LOGIC() SAVE_CC()

#define SAHF ".byte 0x9e"
#define LAHF ".byte 0x9f"
#define SAHF ".byte 0x9e"
#define LAHF ".byte 0x9f"

    "movb %%cl, %%ah\n"\
    "movb %%ah, %%cl\n"\
    "andl $~0x0800, %%ecx\n"\

#define SAVE_CC_LOGIC()\
    "movb %%ah, %%cl\n"\
    "andl $~0x0800, %%ecx\n"
#endif /* !__x86_64__ */
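/* Added commentary (not from the original source): 0x8d5 is the mask of the
   arithmetic status flags in EFLAGS (CF=0x001, PF=0x004, AF=0x010, ZF=0x040,
   SF=0x080, OF=0x800).  The x86_64 variants above merge exactly those bits
   between the saved host flags and the emulated eflags word, while the
   legacy i386 variants use LAHF/SAHF (emitted as raw opcode bytes) plus a
   separate fix-up for OF (0x800), which LAHF does not transfer. */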
/* return -1 if unsupported insn */
int insn_interp(struct kqemu_state *s)
    int modrm, mod, op, code32, reg, rm, iopl;
    unsigned long next_eip, addr, saved_pc, eflags;
    uint32_t flags_initval;
#define NB_INSN_TABLES 3
#define NB_INSN_TABLES 2
    static const void *insn_table[NB_INSN_TABLES][512] = {
#define INSN(x) &&insn_ ## x,
#define INSN_S(x) &&insn_ ## x ## w,
#include "insn_table.h"
#define INSN(x) &&insn_ ## x,
#define INSN_S(x) &&insn_ ## x ## l,
#include "insn_table.h"
#define INSN(x) &&insn_ ## x,
#define INSN_S(x) &&insn_ ## x ## q,
#include "insn_table.h"
#define LABEL(x) insn_ ## x: asm volatile(".globl insn_" #x " ; insn_" #x ":\n") ;
    saved_pc = pc; /* save register variable */
#ifdef PROFILE_INTERP2
    s->total_interp_count++;
    s->popl_esp_hack = 0;
    s->rip_offset = 0; /* for relative ip address */
    if (unlikely(get_eflags_if(s)))
    /* XXX: since we run with the IRQs disabled, it is better to
       stop executing after a few instructions */
    if (unlikely(--s->insn_count <= 0))
        raise_exception(s, KQEMU_RET_SOFTMMU);
#if defined(DEBUG_INTERP)
    monitor_log(s, "%05d: %04x:" FMT_lx " %04x:" FMT_lx " eax=" FMT_lx "\n",
                get_seg_sel(s, R_CS),
                get_seg_sel(s, R_SS),
                (long)s->regs1.eax);
    *(uint64_t *)&s->dflag = flags_initval;
    *(uint32_t *)&s->dflag = flags_initval;
    /* XXX: more precise test */
    if (unlikely((pc - (unsigned long)&_start) < MONITOR_MEM_SIZE))
        raise_exception(s, KQEMU_RET_SOFTMMU);
    b = ldub_mem_fast(s, pc + s->cpu_state.segs[R_CS].base);
    goto *insn_table[s->dflag][b];
    /* prefix processing */
        s->prefix |= PREFIX_REPZ;
        s->prefix |= PREFIX_REPNZ;
        s->prefix |= PREFIX_LOCK;
        rex_w = (b >> 3) & 1;
        s->rex_r = (b & 0x4) << 1;
        s->rex_x = (b & 0x2) << 2;
        s->rex_b = (b & 0x1) << 3;
        s->prefix |= PREFIX_REX;
    /* we suppose, as in the AMD spec, that it comes after the
    /* 0x66 is ignored if rex.w is set */

    /**************************/
    /* extended op code */
    b = ldub_code(s) | 0x100;
    /**************************/
#define ARITH_OP(op, eflags, val, val2)\
    asm volatile(op "\n"\

#define ARITH_OPC(op, eflags, val, val2)\
    asm volatile(LOAD_CC() \

#define LOGIC_OP(op, eflags, val, val2)\
    asm volatile(op "\n"\

#define ARITH_EXEC(eflags, op, ot, val, val2)\
        case 0: ARITH_OP("addb %b4, %b1", eflags, val, val2); break;\
        case 1: LOGIC_OP("orb %b4, %b1", eflags, val, val2); break;\
        case 2: ARITH_OPC("adcb %b4, %b1", eflags, val, val2); break;\
        case 3: ARITH_OPC("sbbb %b4, %b1", eflags, val, val2); break;\
        case 4: LOGIC_OP("andb %b4, %b1", eflags, val, val2); break;\
        case 5: ARITH_OP("subb %b4, %b1", eflags, val, val2); break;\
        case 6: LOGIC_OP("xorb %b4, %b1", eflags, val, val2); break;\
        default: ARITH_OP("cmpb %b4, %b1", eflags, val, val2); break;\
        case 0: ARITH_OP("addw %w4, %w1", eflags, val, val2); break;\
        case 1: LOGIC_OP("orw %w4, %w1", eflags, val, val2); break;\
        case 2: ARITH_OPC("adcw %w4, %w1", eflags, val, val2); break;\
        case 3: ARITH_OPC("sbbw %w4, %w1", eflags, val, val2); break;\
        case 4: LOGIC_OP("andw %w4, %w1", eflags, val, val2); break;\
        case 5: ARITH_OP("subw %w4, %w1", eflags, val, val2); break;\
        case 6: LOGIC_OP("xorw %w4, %w1", eflags, val, val2); break;\
        default: ARITH_OP("cmpw %w4, %w1", eflags, val, val2); break;\
        case 0: ARITH_OP("addl %k4, %k1", eflags, val, val2); break;\
        case 1: LOGIC_OP("orl %k4, %k1", eflags, val, val2); break;\
        case 2: ARITH_OPC("adcl %k4, %k1", eflags, val, val2); break;\
        case 3: ARITH_OPC("sbbl %k4, %k1", eflags, val, val2); break;\
        case 4: LOGIC_OP("andl %k4, %k1", eflags, val, val2); break;\
        case 5: ARITH_OP("subl %k4, %k1", eflags, val, val2); break;\
        case 6: LOGIC_OP("xorl %k4, %k1", eflags, val, val2); break;\
        default: ARITH_OP("cmpl %k4, %k1", eflags, val, val2); break;\
        case 0: ARITH_OP("addq %4, %1", eflags, val, val2); break;\
        case 1: LOGIC_OP("orq %4, %1", eflags, val, val2); break;\
        case 2: ARITH_OPC("adcq %4, %1", eflags, val, val2); break;\
        case 3: ARITH_OPC("sbbq %4, %1", eflags, val, val2); break;\
        case 4: LOGIC_OP("andq %4, %1", eflags, val, val2); break;\
        case 5: ARITH_OP("subq %4, %1", eflags, val, val2); break;\
        case 6: LOGIC_OP("xorq %4, %1", eflags, val, val2); break;\
        default: ARITH_OP("cmpq %4, %1", eflags, val, val2); break;\
#define ARITH_Ev_Gv(op, ot) \
    { int modrm, reg, mod; unsigned long val, val2, eflags;\
    modrm = ldub_code(s);\
    reg = ((modrm >> 3) & 7) | REX_R(s);\
    mod = (modrm >> 6);\
    val2 = get_regS(s, ot, reg);\
        addr = get_modrm(s, modrm);\
        val = ldS(s, ot, addr);\
        eflags = s->regs1.eflags;\
        ARITH_EXEC(eflags, op, ot, val, val2);\
        stS(s, ot, addr, val);\
        s->regs1.eflags = eflags;\
        rm = (modrm & 7) | REX_B(s);\
        val = get_regS(s, ot, rm);\
        ARITH_EXEC(s->regs1.eflags, op, ot, val, val2);\
        set_regS(s, ot, rm, val);\

#define ARITH_Gv_Ev(op, ot)\
    modrm = ldub_code(s);\
    mod = (modrm >> 6);\
    reg = ((modrm >> 3) & 7) | REX_R(s);\
        addr = get_modrm(s, modrm);\
        val2 = ldS(s, ot, addr);\
        rm = (modrm & 7) | REX_B(s);\
        val2 = get_regS(s, ot, rm);\
    val = get_regS(s, ot, reg);\
    ARITH_EXEC(s->regs1.eflags, op, ot, val, val2);\
    set_regS(s, ot, reg, val);\

#define ARITH_A_Iv(op, ot)\
        val2 = (int8_t)ldub_code(s);\
        val2 = (int16_t)lduw_code(s);\
        val2 = (int32_t)ldl_code(s);\
    val = s->regs1.eax;\
    ARITH_EXEC(s->regs1.eflags, op, ot, val, val2);\
    set_regS(s, ot, R_EAX, val);\
    LABEL(00) ARITH_Ev_Gv(0, OT_BYTE);
    LABEL(01w) ARITH_Ev_Gv(0, OT_WORD);
    LABEL(01l) ARITH_Ev_Gv(0, OT_LONG);
QO( LABEL(01q) ARITH_Ev_Gv(0, OT_QUAD); )
    LABEL(02) ARITH_Gv_Ev(0, OT_BYTE);
    LABEL(03w) ARITH_Gv_Ev(0, OT_WORD);
    LABEL(03l) ARITH_Gv_Ev(0, OT_LONG);
QO( LABEL(03q) ARITH_Gv_Ev(0, OT_QUAD); )
    LABEL(04) ARITH_A_Iv(0, OT_BYTE);
    LABEL(05w) ARITH_A_Iv(0, OT_WORD);
    LABEL(05l) ARITH_A_Iv(0, OT_LONG);
QO( LABEL(05q) ARITH_A_Iv(0, OT_QUAD); )

    LABEL(08) ARITH_Ev_Gv(1, OT_BYTE);
    LABEL(09w) ARITH_Ev_Gv(1, OT_WORD);
    LABEL(09l) ARITH_Ev_Gv(1, OT_LONG);
QO( LABEL(09q) ARITH_Ev_Gv(1, OT_QUAD); )
    LABEL(0a) ARITH_Gv_Ev(1, OT_BYTE);
    LABEL(0bw) ARITH_Gv_Ev(1, OT_WORD);
    LABEL(0bl) ARITH_Gv_Ev(1, OT_LONG);
QO( LABEL(0bq) ARITH_Gv_Ev(1, OT_QUAD); )
    LABEL(0c) ARITH_A_Iv(1, OT_BYTE);
    LABEL(0dw) ARITH_A_Iv(1, OT_WORD);
    LABEL(0dl) ARITH_A_Iv(1, OT_LONG);
QO( LABEL(0dq) ARITH_A_Iv(1, OT_QUAD); )

    LABEL(10) ARITH_Ev_Gv(2, OT_BYTE);
    LABEL(11w) ARITH_Ev_Gv(2, OT_WORD);
    LABEL(11l) ARITH_Ev_Gv(2, OT_LONG);
QO( LABEL(11q) ARITH_Ev_Gv(2, OT_QUAD); )
    LABEL(12) ARITH_Gv_Ev(2, OT_BYTE);
    LABEL(13w) ARITH_Gv_Ev(2, OT_WORD);
    LABEL(13l) ARITH_Gv_Ev(2, OT_LONG);
QO( LABEL(13q) ARITH_Gv_Ev(2, OT_QUAD); )
    LABEL(14) ARITH_A_Iv(2, OT_BYTE);
    LABEL(15w) ARITH_A_Iv(2, OT_WORD);
    LABEL(15l) ARITH_A_Iv(2, OT_LONG);
QO( LABEL(15q) ARITH_A_Iv(2, OT_QUAD); )

    LABEL(18) ARITH_Ev_Gv(3, OT_BYTE);
    LABEL(19w) ARITH_Ev_Gv(3, OT_WORD);
    LABEL(19l) ARITH_Ev_Gv(3, OT_LONG);
QO( LABEL(19q) ARITH_Ev_Gv(3, OT_QUAD); )
    LABEL(1a) ARITH_Gv_Ev(3, OT_BYTE);
    LABEL(1bw) ARITH_Gv_Ev(3, OT_WORD);
    LABEL(1bl) ARITH_Gv_Ev(3, OT_LONG);
QO( LABEL(1bq) ARITH_Gv_Ev(3, OT_QUAD); )
    LABEL(1c) ARITH_A_Iv(3, OT_BYTE);
    LABEL(1dw) ARITH_A_Iv(3, OT_WORD);
    LABEL(1dl) ARITH_A_Iv(3, OT_LONG);
QO( LABEL(1dq) ARITH_A_Iv(3, OT_QUAD); )
    LABEL(20) ARITH_Ev_Gv(4, OT_BYTE);
    LABEL(21w) ARITH_Ev_Gv(4, OT_WORD);
    LABEL(21l) ARITH_Ev_Gv(4, OT_LONG);
QO( LABEL(21q) ARITH_Ev_Gv(4, OT_QUAD); )
    LABEL(22) ARITH_Gv_Ev(4, OT_BYTE);
    LABEL(23w) ARITH_Gv_Ev(4, OT_WORD);
    LABEL(23l) ARITH_Gv_Ev(4, OT_LONG);
QO( LABEL(23q) ARITH_Gv_Ev(4, OT_QUAD); )
    LABEL(24) ARITH_A_Iv(4, OT_BYTE);
    LABEL(25w) ARITH_A_Iv(4, OT_WORD);
    LABEL(25l) ARITH_A_Iv(4, OT_LONG);
QO( LABEL(25q) ARITH_A_Iv(4, OT_QUAD); )

    LABEL(28) ARITH_Ev_Gv(5, OT_BYTE);
    LABEL(29w) ARITH_Ev_Gv(5, OT_WORD);
    LABEL(29l) ARITH_Ev_Gv(5, OT_LONG);
QO( LABEL(29q) ARITH_Ev_Gv(5, OT_QUAD); )
    LABEL(2a) ARITH_Gv_Ev(5, OT_BYTE);
    LABEL(2bw) ARITH_Gv_Ev(5, OT_WORD);
    LABEL(2bl) ARITH_Gv_Ev(5, OT_LONG);
QO( LABEL(2bq) ARITH_Gv_Ev(5, OT_QUAD); )
    LABEL(2c) ARITH_A_Iv(5, OT_BYTE);
    LABEL(2dw) ARITH_A_Iv(5, OT_WORD);
    LABEL(2dl) ARITH_A_Iv(5, OT_LONG);
QO( LABEL(2dq) ARITH_A_Iv(5, OT_QUAD); )

    LABEL(30) ARITH_Ev_Gv(6, OT_BYTE);
    LABEL(31w) ARITH_Ev_Gv(6, OT_WORD);
    LABEL(31l) ARITH_Ev_Gv(6, OT_LONG);
QO( LABEL(31q) ARITH_Ev_Gv(6, OT_QUAD); )
    LABEL(32) ARITH_Gv_Ev(6, OT_BYTE);
    LABEL(33w) ARITH_Gv_Ev(6, OT_WORD);
    LABEL(33l) ARITH_Gv_Ev(6, OT_LONG);
QO( LABEL(33q) ARITH_Gv_Ev(6, OT_QUAD); )
    LABEL(34) ARITH_A_Iv(6, OT_BYTE);
    LABEL(35w) ARITH_A_Iv(6, OT_WORD);
    LABEL(35l) ARITH_A_Iv(6, OT_LONG);
QO( LABEL(35q) ARITH_A_Iv(6, OT_QUAD); )

    LABEL(38) ARITH_Ev_Gv(7, OT_BYTE);
    LABEL(39w) ARITH_Ev_Gv(7, OT_WORD);
    LABEL(39l) ARITH_Ev_Gv(7, OT_LONG);
QO( LABEL(39q) ARITH_Ev_Gv(7, OT_QUAD); )
    LABEL(3a) ARITH_Gv_Ev(7, OT_BYTE);
    LABEL(3bw) ARITH_Gv_Ev(7, OT_WORD);
    LABEL(3bl) ARITH_Gv_Ev(7, OT_LONG);
QO( LABEL(3bq) ARITH_Gv_Ev(7, OT_QUAD); )
    LABEL(3c) ARITH_A_Iv(7, OT_BYTE);
    LABEL(3dw) ARITH_A_Iv(7, OT_WORD);
    LABEL(3dl) ARITH_A_Iv(7, OT_LONG);
QO( LABEL(3dq) ARITH_A_Iv(7, OT_QUAD); )
#define ARITH_GRP1(b, ot) \
    modrm = ldub_code(s);\
    mod = (modrm >> 6);\
    op = (modrm >> 3) & 7;\
    if (mod != 3) {\
        s->rip_offset = insn_const_size(ot);\
        addr = get_modrm(s, modrm);\
        val = ldS(s, ot, addr);\
        if (b != 0x83)\
            val2 = insn_get(s, ot);\
        else /* 0x83 takes a sign-extended 8-bit immediate */\
            val2 = (int8_t)ldub_code(s);\
        eflags = s->regs1.eflags;\
        ARITH_EXEC(eflags, op, ot, val, val2);\
        stS(s, ot, addr, val);\
        s->regs1.eflags = eflags;\
    } else {\
        rm = (modrm & 7) | REX_B(s);\
        val = get_regS(s, ot, rm);\
        if (b != 0x83)\
            val2 = insn_get(s, ot);\
        else\
            val2 = (int8_t)ldub_code(s);\
        ARITH_EXEC(s->regs1.eflags, op, ot, val, val2);\
        set_regS(s, ot, rm, val);\
    }
LABEL(80) /* GRP1 */
    ARITH_GRP1(0x80, OT_BYTE);
LABEL(81w) ARITH_GRP1(0x81, OT_WORD);
LABEL(81l) ARITH_GRP1(0x81, OT_LONG);
QO( LABEL(81q) ARITH_GRP1(0x81, OT_QUAD); )
LABEL(83w) ARITH_GRP1(0x83, OT_WORD);
LABEL(83l) ARITH_GRP1(0x83, OT_LONG);
QO( LABEL(83q) ARITH_GRP1(0x83, OT_QUAD); )
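
/* GRP1 immediate forms: opcode 0x80 is Eb,Ib (byte), 0x81 is Ev,Iz
   (word/long/quad immediate), and 0x83 is Ev,Ib with the 8-bit immediate
   sign-extended to the operand size.  The reg field of the modrm byte
   selects the ALU operation exactly as in the table above. */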
LABEL(84) /* test Ev, Gv */
    ot = s->dflag + OT_WORD;
    modrm = ldub_code(s);
    addr = get_modrm(s, modrm);
    val = ldS(s, ot, addr);
    rm = (modrm & 7) | REX_B(s);
    val = get_regS(s, ot, rm);
    reg = ((modrm >> 3) & 7) | REX_R(s);
    val2 = get_regS(s, ot, reg);
    exec_binary(&s->regs1.eflags,
LABEL(a8) /* test eAX, Iv */
    ot = s->dflag + OT_WORD;
    val2 = insn_get(s, ot);
    val = get_regS(s, ot, R_EAX);
    exec_binary(&s->regs1.eflags,
LABEL(40) /* inc Gv */
LABEL(48) /* dec Gv */
    ot = s->dflag + OT_WORD;
    val = get_regS(s, ot, reg);
    val = exec_binary(&s->regs1.eflags,
                      EB_INC + ((b >> 1) & 4) + ot,
    set_regS(s, ot, reg, val);
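
/* inc/dec Gv (opcodes 0x40-0x4f): EB_INC + ((b >> 1) & 4) selects the
   increment operation for 0x40-0x47 and the decrement operation for
   0x48-0x4f (for b = 0x48, (b >> 1) & 4 = 4).  Unlike add/sub, inc and dec
   preserve CF, which is presumably why they get their own exec_binary
   operation codes. */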
LABEL(f6) /* GRP3 */
    ot = s->dflag + OT_WORD;
    modrm = ldub_code(s);
    rm = (modrm & 7) | REX_B(s);
    op = (modrm >> 3) & 7;
    /* test Ev, Iv (op 0) */
    s->rip_offset = insn_const_size(ot);
    addr = get_modrm(s, modrm);
    val = ldS(s, ot, addr);
    val = get_regS(s, ot, rm);
    val2 = insn_get(s, ot);
    exec_binary(&s->regs1.eflags,
    /* not Ev (op 2) */
    addr = get_modrm(s, modrm);
    val = ldS(s, ot, addr);
    stS(s, ot, addr, val);
    val = get_regS(s, ot, rm);
    set_regS(s, ot, rm, val);
    /* neg Ev (op 3) */
    addr = get_modrm(s, modrm);
    val = ldS(s, ot, addr);
    eflags = s->regs1.eflags;
    val = exec_binary(&eflags, EB_SUB + ot,
    stS(s, ot, addr, val);
    s->regs1.eflags = eflags;
    val = get_regS(s, ot, rm);
    val = exec_binary(&s->regs1.eflags, EB_SUB + ot,
    set_regS(s, ot, rm, val);
    /* mul (operand fetch) */
    addr = get_modrm(s, modrm);
    val = ldS(s, ot, addr);
    val = get_regS(s, ot, rm);
2967 asm volatile(LOAD_CC()
2972 : "=c" (s
->regs1
.eflags
),
2974 : "0" (s
->regs1
.eflags
),
2980 asm volatile(LOAD_CC()
2986 : "=c" (s
->regs1
.eflags
),
2987 "=m" (s
->regs1
.eax
),
2989 : "0" (s
->regs1
.eflags
),
2995 asm volatile(LOAD_CC()
3001 : "=c" (s
->regs1
.eflags
),
3002 "=m" (s
->regs1
.eax
),
3004 : "0" (s
->regs1
.eflags
),
3011 asm volatile(LOAD_CC()
3017 : "=c" (s
->regs1
.eflags
),
3018 "=m" (s
->regs1
.eax
),
3020 : "0" (s
->regs1
.eflags
),
3030 addr
= get_modrm(s
, modrm
);
3031 val
= ldS(s
, ot
, addr
);
3033 val
= get_regS(s
, ot
, rm
);
3037 asm volatile(LOAD_CC()
3042 : "=c" (s
->regs1
.eflags
),
3044 : "0" (s
->regs1
.eflags
),
3050 asm volatile(LOAD_CC()
3056 : "=c" (s
->regs1
.eflags
),
3057 "=m" (s
->regs1
.eax
),
3059 : "0" (s
->regs1
.eflags
),
3065 asm volatile(LOAD_CC()
3071 : "=c" (s
->regs1
.eflags
),
3072 "=m" (s
->regs1
.eax
),
3074 : "0" (s
->regs1
.eflags
),
3081 asm volatile(LOAD_CC()
3087 : "=c" (s
->regs1
.eflags
),
3088 "=m" (s
->regs1
.eax
),
3090 : "0" (s
->regs1
.eflags
),
3100 addr
= get_modrm(s
, modrm
);
3101 val
= ldS(s
, ot
, addr
);
3103 val
= get_regS(s
, ot
, rm
);
3107 asm volatile("movw %0, %%ax\n"
3111 : "=m" (s
->regs1
.eax
)
3112 : "m" (s
->regs1
.eax
),
3117 asm volatile("movw %0, %%ax\n"
3123 : "=m" (s
->regs1
.eax
),
3125 : "m" (s
->regs1
.eax
),
3131 asm volatile("1: divl %4\n"
3133 : "=a" (s
->regs1
.eax
),
3135 : "0" (s
->regs1
.eax
),
3141 asm volatile("movq %0, %%rax\n"
3147 : "=m" (s
->regs1
.eax
),
3149 : "m" (s
->regs1
.eax
),
3159 addr
= get_modrm(s
, modrm
);
3160 val
= ldS(s
, ot
, addr
);
3162 val
= get_regS(s
, ot
, rm
);
3166 asm volatile("movw %0, %%ax\n"
3170 : "=m" (s
->regs1
.eax
)
3171 : "m" (s
->regs1
.eax
),
3176 asm volatile("movw %0, %%ax\n"
3182 : "=m" (s
->regs1
.eax
),
3184 : "m" (s
->regs1
.eax
),
3190 asm volatile("1: idivl %4\n"
3192 : "=a" (s
->regs1
.eax
),
3194 : "0" (s
->regs1
.eax
),
3200 asm volatile("movq %0, %%rax\n"
3206 : "=m" (s
->regs1
.eax
),
3208 : "m" (s
->regs1
.eax
),
3221 LABEL(69) /* imul Gv, Ev, I */
3223 ot
= s
->dflag
+ OT_WORD
;
3224 modrm
= ldub_code(s
);
3228 s
->rip_offset
= insn_const_size(ot
);
3231 addr
= get_modrm(s
, modrm
);
3233 val
= ldS(s
, ot
, addr
);
3235 rm
= (modrm
& 7) | REX_B(s
);
3236 val
= get_regS(s
, ot
, rm
);
3238 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3240 val2
= insn_get(s
, ot
);
3242 val2
= (int8_t)ldub_code(s
);
3244 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3246 LABEL(1af
) /* imul Gv, Ev */
3247 ot
= s
->dflag
+ OT_WORD
;
3248 modrm
= ldub_code(s
);
3251 addr
= get_modrm(s
, modrm
);
3252 val
= ldS(s
, ot
, addr
);
3254 rm
= (modrm
& 7) | REX_B(s
);
3255 val
= get_regS(s
, ot
, rm
);
3257 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3258 val2
= get_regS(s
, ot
, reg
);
3262 asm volatile(LOAD_CC()
3265 : "=c" (s
->regs1
.eflags
),
3267 : "0" (s
->regs1
.eflags
),
3273 asm volatile(LOAD_CC()
3276 : "=c" (s
->regs1
.eflags
),
3278 : "0" (s
->regs1
.eflags
),
3285 asm volatile(LOAD_CC()
3288 : "=c" (s
->regs1
.eflags
),
3290 : "0" (s
->regs1
.eflags
),
3297 set_regS(s
, ot
, reg
, val
);
3300 LABEL(fe
) /* GRP4 */
3301 LABEL(ff
) /* GRP5 */
3305 ot
= s
->dflag
+ OT_WORD
;
3307 modrm
= ldub_code(s
);
3309 rm
= (modrm
& 7) | REX_B(s
);
3310 op
= (modrm
>> 3) & 7;
3313 case 0: /* inc Ev */
3314 case 1: /* dec Ev */
3316 addr
= get_modrm(s
, modrm
);
3317 val
= ldS(s
, ot
, addr
);
3318 eflags
= s
->regs1
.eflags
;
3319 val
= exec_binary(&eflags
,
3320 EB_INC
+ (op
<< 2) + ot
,
3322 stS(s
, ot
, addr
, val
);
3323 s
->regs1
.eflags
= eflags
;
3325 val
= get_regS(s
, ot
, rm
);
3326 val
= exec_binary(&s
->regs1
.eflags
,
3327 EB_INC
+ (op
<< 2) + ot
,
3329 set_regS(s
, ot
, rm
, val
);
3332 case 2: /* call Ev */
3338 addr
= get_modrm(s
, modrm
);
3339 val
= ldS(s
, ot
, addr
);
3341 val
= get_regS(s
, ot
, rm
);
3346 stack_push(s
, next_eip
);
3349 case 4: /* jmp Ev */
3355 addr
= get_modrm(s
, modrm
);
3356 val
= ldS(s
, ot
, addr
);
3358 val
= get_regS(s
, ot
, rm
);
3364 case 3: /* lcall Ev */
3365 case 5: /* ljmp Ev */
3368 raise_exception(s
, KQEMU_RET_SOFTMMU
);
3370 case 6: /* push Ev */
3373 if (CODE64(s
) && s
->dflag
)
3376 addr
= get_modrm(s
, modrm
);
3377 val
= ldS(s
, ot
, addr
);
3379 val
= get_regS(s
, ot
, rm
);
3388 LABEL(50w
) /* push w */
3396 reg
= (b
& 7) | REX_B(s
);
3397 stack_pushS(s
, get_reg(s
, reg
), 0);
3400 LABEL(50l) /* push l */
3409 LABEL(50q
) /* push l */
3418 reg
= (b
& 7) | REX_B(s
);
3419 stack_pushS(s
, get_reg(s
, reg
), 1);
3430 reg
= (b
& 7) | REX_B(s
);
3431 if (likely(!CODE64(s
) && s
->dflag
== 1 &&
3432 (s
->cpu_state
.segs
[R_SS
].flags
& DESC_B_MASK
))) {
3433 addr
= s
->regs1
.esp
+ s
->cpu_state
.segs
[R_SS
].base
;
3435 /* NOTE: order is important for pop %sp */
3437 set_regl(s
, reg
, val
);
3440 /* NOTE: order is important for pop %sp */
3441 stack_pop_update(s
);
3444 set_reg(s
, reg
, val
);
3446 set_regw(s
, reg
, val
);
3449 set_regl(s
, reg
, val
);
3451 set_regw(s
, reg
, val
);
LABEL(68) /* push Iv */
    val = (int32_t)ldl_code(s);
LABEL(6a) /* push Iv */
    val = (int8_t)ldub_code(s);
LABEL(8f) /* pop Ev */
    if (CODE64(s) && s->dflag)
    ot = s->dflag + OT_WORD;
    modrm = ldub_code(s);
    /* NOTE: order is important for pop %sp */
    stack_pop_update(s);
    rm = (modrm & 7) | REX_B(s);
    set_regS(s, ot, rm, val);
    /* NOTE: order is important too for MMU exceptions */
    s->popl_esp_hack = 1 << ot;
    addr = get_modrm(s, modrm);
    s->popl_esp_hack = 0;
    stS(s, ot, addr, val);
    stack_pop_update(s);
LABEL(06) /* push es */
LABEL(0e) /* push cs */
LABEL(16) /* push ss */
LABEL(1e) /* push ds */
    val = get_seg_sel(s, reg);
LABEL(1a0) /* push fs */
LABEL(1a8) /* push gs */
LABEL(07) /* pop es */
LABEL(17) /* pop ss */
LABEL(1f) /* pop ds */
    load_seg_desc(s, reg, val & 0xffff);
    stack_pop_update(s);
LABEL(1a1) /* pop fs */
LABEL(1a9) /* pop gs */
LABEL(c9) /* leave */
    set_reg(s, R_ESP, s->regs1.ebp);
    } else if (s->cpu_state.segs[R_SS].flags & DESC_B_MASK) {
        set_regl(s, R_ESP, s->regs1.ebp);
        set_regw(s, R_ESP, s->regs1.ebp);
    if (CODE64(s) && s->dflag) {
        set_reg(s, R_EBP, val);
    } else if (s->dflag) {
        set_regl(s, R_EBP, val);
        set_regw(s, R_EBP, val);
    stack_pop_update(s);
/**************************/

#define MOV_Gv_Ev(ot)\
    modrm = ldub_code(s);\
    reg = ((modrm >> 3) & 7) | REX_R(s);\
    val = get_regS(s, ot, reg);\
    mod = (modrm >> 6);\
    if (mod == 3) {\
        rm = (modrm & 7) | REX_B(s);\
        set_regS(s, ot, rm, val);\
    } else {\
        addr = get_modrm(s, modrm);\
        stS(s, ot, addr, val);\
    }
LABEL(88) MOV_Gv_Ev(OT_BYTE);
LABEL(89w) MOV_Gv_Ev(OT_WORD);
LABEL(89l) MOV_Gv_Ev(OT_LONG);
QO( LABEL(89q) MOV_Gv_Ev(OT_QUAD); )
#define MOV_Ev_Iv(ot)\
    modrm = ldub_code(s);\
    mod = (modrm >> 6);\
    if (mod != 3) {\
        s->rip_offset = insn_const_size(ot);\
        addr = get_modrm(s, modrm);\
        val = insn_get(s, ot);\
        stS(s, ot, addr, val);\
    } else {\
        val = insn_get(s, ot);\
        rm = (modrm & 7) | REX_B(s);\
        set_regS(s, ot, rm, val);\
    }
LABEL(c6) MOV_Ev_Iv(OT_BYTE);
LABEL(c7w) MOV_Ev_Iv(OT_WORD);
LABEL(c7l) MOV_Ev_Iv(OT_LONG);
QO( LABEL(c7q) MOV_Ev_Iv(OT_QUAD); )
#define MOV_Ev_Gv(ot)\
    modrm = ldub_code(s);\
    reg = ((modrm >> 3) & 7) | REX_R(s);\
    mod = (modrm >> 6);\
    if (mod == 3) {\
        rm = (modrm & 7) | REX_B(s);\
        val = get_regS(s, ot, rm);\
    } else {\
        addr = get_modrm(s, modrm);\
        val = ldS(s, ot, addr);\
    }\
    set_regS(s, ot, reg, val);\
LABEL(8a) MOV_Ev_Gv(OT_BYTE);
LABEL(8bw) MOV_Ev_Gv(OT_WORD);
LABEL(8bl) MOV_Ev_Gv(OT_LONG);
QO( LABEL(8bq) MOV_Ev_Gv(OT_QUAD); )
LABEL(8e) /* mov seg, Gv */
    modrm = ldub_code(s);
    reg = (modrm >> 3) & 7;
    if (reg >= 6 || reg == R_CS)
    val = get_reg(s, modrm & 7) & 0xffff;
    addr = get_modrm(s, modrm);
    val = lduw(s, addr);
    load_seg_desc(s, reg, val);
LABEL(8c) /* mov Gv, seg */
    modrm = ldub_code(s);
    reg = (modrm >> 3) & 7;
    val = get_seg_sel(s, reg);
    ot = OT_WORD + s->dflag;
    rm = (modrm & 7) | REX_B(s);
    set_regS(s, ot, rm, val);
    addr = get_modrm(s, modrm);
3631 LABEL(b0
) /* mov R, Ib */
3640 reg
= (b
& 7) | REX_B(s
);
3641 set_regb(s
, reg
, val
);
3644 #if defined(__x86_64__)
3645 LABEL(b8q
) /* mov R, Iv */
3653 reg
= (b
& 7) | REX_B(s
);
3655 set_reg(s
, reg
, val
);
3659 LABEL(b8l
) /* mov R, Iv */
3667 reg
= (b
& 7) | REX_B(s
);
3669 set_regl(s
, reg
, val
);
3672 LABEL(b8w
) /* mov R, Iv */
3680 reg
= (b
& 7) | REX_B(s
);
3682 set_regw(s
, reg
, val
);
LABEL(91) /* xchg R, EAX */
    ot = s->dflag + OT_WORD;
    reg = (b & 7) | REX_B(s);
LABEL(87) /* xchg Ev, Gv */
    ot = s->dflag + OT_WORD;
    modrm = ldub_code(s);
    reg = ((modrm >> 3) & 7) | REX_R(s);
    mod = (modrm >> 6) & 3;
    if (mod == 3) {
        rm = (modrm & 7) | REX_B(s);
        val = get_regS(s, ot, reg);
        val2 = get_regS(s, ot, rm);
        set_regS(s, ot, rm, val);
        set_regS(s, ot, reg, val2);
    } else {
        /* XXX: lock for SMP */
        addr = get_modrm(s, modrm);
        val = get_regS(s, ot, reg);
        val2 = ldS(s, ot, addr);
        stS(s, ot, addr, val);
        set_regS(s, ot, reg, val2);
    }
#define MOVZS(sgn, ot, d_ot)\
{\
    /* d_ot is the size of destination */\
    /* ot is the size of source */\
    modrm = ldub_code(s);\
    reg = ((modrm >> 3) & 7) | REX_R(s);\
    mod = (modrm >> 6);\
    rm = (modrm & 7) | REX_B(s);\
    if (mod == 3) {\
        val = get_regS(s, ot, rm);\
        switch(ot | (sgn << 3)) {\
        case OT_BYTE: val = (uint8_t)val; break;\
        case OT_BYTE | 8: val = (int8_t)val; break;\
        case OT_WORD: val = (uint16_t)val; break;\
        case OT_WORD | 8: val = (int16_t)val; break;\
        QO( case OT_LONG | 8: val = (int32_t)val; break; )\
        }\
    } else {\
        addr = get_modrm(s, modrm);\
        switch(ot | (sgn << 3)) {\
        case OT_BYTE: val = ldub(s, addr); break;\
        case OT_BYTE | 8: val = (int8_t)ldub(s, addr); break;\
        case OT_WORD: val = (uint16_t)lduw(s, addr); break;\
        case OT_WORD | 8: val = (int16_t)lduw(s, addr); break;\
        QO( case OT_LONG | 8: val = (int32_t)ldl(s, addr); break; )\
        }\
    }\
    set_regS(s, d_ot, reg, val);\
}
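
/* MOVZS implements the movzx/movsx family: sgn selects zero vs. sign
   extension, ot is the source size and d_ot the destination size, so e.g.
   MOVZS(1, OT_BYTE, OT_LONG) is movsbl.  The switch key ot | (sgn << 3)
   simply packs size and signedness into one value. */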
LABEL(1b6w) MOVZS(0, OT_BYTE, OT_WORD);
LABEL(1b6l) MOVZS(0, OT_BYTE, OT_LONG);
QO( LABEL(1b6q) MOVZS(0, OT_BYTE, OT_QUAD); )

LABEL(1b7w) MOVZS(0, OT_WORD, OT_WORD);
LABEL(1b7l) MOVZS(0, OT_WORD, OT_LONG);
QO( LABEL(1b7q) MOVZS(0, OT_WORD, OT_QUAD); )

LABEL(1bew) MOVZS(1, OT_BYTE, OT_WORD);
LABEL(1bel) MOVZS(1, OT_BYTE, OT_LONG);
QO( LABEL(1beq) MOVZS(1, OT_BYTE, OT_QUAD); )

LABEL(1bfw) MOVZS(1, OT_WORD, OT_WORD);
LABEL(1bfl) MOVZS(1, OT_WORD, OT_LONG);
QO( LABEL(1bfq) MOVZS(1, OT_WORD, OT_QUAD); )

    MOVZS(1, OT_LONG, OT_WORD);
    MOVZS(1, OT_LONG, OT_LONG);
QO( LABEL(63q) MOVZS(1, OT_LONG, OT_QUAD); )
#define LEA(ot)\
    modrm = ldub_code(s);\
    mod = (modrm >> 6);\
    reg = ((modrm >> 3) & 7) | REX_R(s);\
    addr = get_modrm(s, modrm);\
    set_regS(s, ot, reg, addr);\

LABEL(8dw) LEA(OT_WORD);
LABEL(8dl) LEA(OT_LONG);
QO( LABEL(8dq) LEA(OT_QUAD); )
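
/* lea only evaluates the addressing mode: the effective address computed by
   get_modrm() is written straight into the destination register and no
   memory access is performed. */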
3822 LABEL(a0
) /* mov EAX, Ov */
3824 LABEL(a2
) /* mov Ov, EAX */
3829 ot
= s
->dflag
+ OT_WORD
;
3831 if (s
->aflag
== 2) {
3833 if (s
->override
== R_FS
|| s
->override
== R_GS
)
3834 addr
+= s
->cpu_state
.segs
[s
->override
].base
;
3842 addr
= lduw_code(s
);
3844 override
= s
->override
;
3847 addr
= (uint32_t)(addr
+ s
->cpu_state
.segs
[override
].base
);
3850 val
= ldS(s
, ot
, addr
);
3851 set_regS(s
, ot
, R_EAX
, val
);
3853 val
= get_regS(s
, ot
, R_EAX
);
3854 stS(s
, ot
, addr
, val
);
/************************/

LABEL(9c) /* pushf */
    iopl = get_eflags_iopl(s);
    if (get_eflags_vm(s) && iopl != 3)
        raise_exception_err(s, EXCP0D_GPF, 0);
    val = compute_eflags(s);
    val &= ~(VM_MASK | RF_MASK);
LABEL(9d) /* popf */
    iopl = get_eflags_iopl(s);
    if (get_eflags_vm(s) && iopl != 3)
        raise_exception_err(s, EXCP0D_GPF, 0);
    if (s->cpu_state.cpl == 0) {
        mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK;
    } else {
        if (s->cpu_state.cpl <= iopl) {
            mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK;
        } else {
            mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
        }
    }
    load_eflags(s, val, mask);
    stack_pop_update(s);
    s->regs1.eflags ^= CC_C;      /* cmc */
    s->regs1.eflags &= ~CC_C;     /* clc */
    s->regs1.eflags |= CC_C;      /* stc */
    s->regs1.eflags &= ~DF_MASK;  /* cld */
    s->regs1.eflags |= DF_MASK;   /* std */
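
/* popf write mask: at CPL 0 all of TF/AC/ID/NT/IF/IOPL may be changed, at
   CPL <= IOPL everything but IOPL, otherwise IF is protected as well.  The
   remaining eflags bits are presumably merged by load_eflags() according to
   this mask. */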
/************************/
/* bit operations */
LABEL(1ba) /* bt/bts/btr/btc Gv, im */
    ot = s->dflag + OT_WORD;
    modrm = ldub_code(s);
    op = (modrm >> 3) & 7;
    rm = (modrm & 7) | REX_B(s);
    addr = get_modrm(s, modrm);
    val2 = ldub_code(s);
    val = ldS(s, ot, addr);
    eflags = s->regs1.eflags;
    val = exec_binary(&eflags, EB_BT + (op << 2) + ot,
    stS(s, ot, addr, val);
    s->regs1.eflags = eflags;
    val2 = ldub_code(s);
    val = get_regS(s, ot, rm);
    val = exec_binary(&s->regs1.eflags, EB_BT + (op << 2) + ot,
    set_regS(s, ot, rm, val);
LABEL(1a3) /* bt Gv, Ev */
LABEL(1ab) /* bts */
LABEL(1b3) /* btr */
LABEL(1bb) /* btc */
    ot = s->dflag + OT_WORD;
    modrm = ldub_code(s);
    reg = ((modrm >> 3) & 7) | REX_R(s);
    rm = (modrm & 7) | REX_B(s);
    val2 = get_regS(s, ot, reg);
    addr = get_modrm(s, modrm);
    /* add the offset */
    addr += ((int16_t)val2 >> 4) << 1;
    addr += ((int32_t)val2 >> 5) << 2;
    addr += ((long)val2 >> 6) << 3;
    val = ldS(s, ot, addr);
    eflags = s->regs1.eflags;
    val = exec_binary(&eflags, EB_BT + (op << 2) + ot,
    stS(s, ot, addr, val);
    s->regs1.eflags = eflags;
    val = get_regS(s, ot, rm);
    val = exec_binary(&s->regs1.eflags, EB_BT + (op << 2) + ot,
    set_regS(s, ot, rm, val);
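
/* For bt/bts/btr/btc with a register bit index and a memory operand, the
   three "addr +=" lines are the 16/32/64-bit variants of the same idea: the
   effective address is advanced by (bit_index / operand_bits) operands, e.g.
   bit index 100 with 32-bit operands adds (100 >> 5) << 2 = 12 bytes, and
   exec_binary() presumably operates on the remaining low bits of the index
   within the loaded word. */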
LABEL(1bc) /* bsf */
LABEL(1bd) /* bsr */
    ot = s->dflag + OT_WORD;
    modrm = ldub_code(s);
    rm = (modrm & 7) | REX_B(s);
    reg = ((modrm >> 3) & 7) | REX_R(s);
    val = get_regS(s, ot, reg);
    addr = get_modrm(s, modrm);
    val2 = ldS(s, ot, addr);
    val2 = get_regS(s, ot, rm);
    val = exec_binary(&s->regs1.eflags, EB_BSF + (op << 2) + ot,
    set_regS(s, ot, reg, val);
4006 /************************/
4008 LABEL(c2
) /* ret im */
4011 addend
= (int16_t)lduw_code(s
);
4015 if (CODE64(s
) && s
->dflag
)
4017 sp_add(s
, addend
+ (2 << s
->dflag
));
4025 stack_pop_update(s
);
4028 LABEL(ca
) /* lret im */
4029 val
= (int16_t)lduw_code(s
);
4030 helper_lret_protected(s
, s
->dflag
, val
);
4032 LABEL(cb
) /* lret */
4033 helper_lret_protected(s
, s
->dflag
, 0);
4035 LABEL(cf
) /* iret */
4036 helper_iret_protected(s
, s
->dflag
);
4038 LABEL(e8
) /* call im */
4040 val
= (int32_t)ldl_code(s
);
4042 val
= (int16_t)lduw_code(s
);
4047 stack_push(s
, next_eip
);
4050 LABEL(e9
) /* jmp im */
4052 val
= (int32_t)ldl_code(s
);
4054 val
= (int16_t)lduw_code(s
);
4062 LABEL(eb
) /* jmp Jb */
4063 val
= (int8_t)ldub_code(s
);
#define JCC(ot, v)\
    if (ot == OT_BYTE)\
        val = (int8_t)ldub_code(s);\
    else if (ot == OT_WORD)\
        val = (int16_t)lduw_code(s);\
    else\
        val = (int32_t)ldl_code(s);\
    if (get_jcc_cond(s->regs1.eflags, v))\
LABEL(70) JCC(OT_BYTE, 0x0)
LABEL(71) JCC(OT_BYTE, 0x1)
LABEL(72) JCC(OT_BYTE, 0x2)
LABEL(73) JCC(OT_BYTE, 0x3)
LABEL(74) JCC(OT_BYTE, 0x4)
LABEL(75) JCC(OT_BYTE, 0x5)
LABEL(76) JCC(OT_BYTE, 0x6)
LABEL(77) JCC(OT_BYTE, 0x7)
LABEL(78) JCC(OT_BYTE, 0x8)
LABEL(79) JCC(OT_BYTE, 0x9)
LABEL(7a) JCC(OT_BYTE, 0xa)
LABEL(7b) JCC(OT_BYTE, 0xb)
LABEL(7c) JCC(OT_BYTE, 0xc)
LABEL(7d) JCC(OT_BYTE, 0xd)
LABEL(7e) JCC(OT_BYTE, 0xe)
LABEL(7f) JCC(OT_BYTE, 0xf)

LABEL(180w) JCC(OT_WORD, 0x0)
LABEL(181w) JCC(OT_WORD, 0x1)
LABEL(182w) JCC(OT_WORD, 0x2)
LABEL(183w) JCC(OT_WORD, 0x3)
LABEL(184w) JCC(OT_WORD, 0x4)
LABEL(185w) JCC(OT_WORD, 0x5)
LABEL(186w) JCC(OT_WORD, 0x6)
LABEL(187w) JCC(OT_WORD, 0x7)
LABEL(188w) JCC(OT_WORD, 0x8)
LABEL(189w) JCC(OT_WORD, 0x9)
LABEL(18aw) JCC(OT_WORD, 0xa)
LABEL(18bw) JCC(OT_WORD, 0xb)
LABEL(18cw) JCC(OT_WORD, 0xc)
LABEL(18dw) JCC(OT_WORD, 0xd)
LABEL(18ew) JCC(OT_WORD, 0xe)
LABEL(18fw) JCC(OT_WORD, 0xf)

QO(LABEL(180q)) LABEL(180l) JCC(OT_LONG, 0x0)
QO(LABEL(181q)) LABEL(181l) JCC(OT_LONG, 0x1)
QO(LABEL(182q)) LABEL(182l) JCC(OT_LONG, 0x2)
QO(LABEL(183q)) LABEL(183l) JCC(OT_LONG, 0x3)
QO(LABEL(184q)) LABEL(184l) JCC(OT_LONG, 0x4)
QO(LABEL(185q)) LABEL(185l) JCC(OT_LONG, 0x5)
QO(LABEL(186q)) LABEL(186l) JCC(OT_LONG, 0x6)
QO(LABEL(187q)) LABEL(187l) JCC(OT_LONG, 0x7)
QO(LABEL(188q)) LABEL(188l) JCC(OT_LONG, 0x8)
QO(LABEL(189q)) LABEL(189l) JCC(OT_LONG, 0x9)
QO(LABEL(18aq)) LABEL(18al) JCC(OT_LONG, 0xa)
QO(LABEL(18bq)) LABEL(18bl) JCC(OT_LONG, 0xb)
QO(LABEL(18cq)) LABEL(18cl) JCC(OT_LONG, 0xc)
QO(LABEL(18dq)) LABEL(18dl) JCC(OT_LONG, 0xd)
QO(LABEL(18eq)) LABEL(18el) JCC(OT_LONG, 0xe)
QO(LABEL(18fq)) LABEL(18fl) JCC(OT_LONG, 0xf)
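
/* The low four bits of the conditional-jump opcode select the condition
   (o, no, b, nb, z, nz, be, a, s, ns, p, np, l, ge, le, g);
   get_jcc_cond() evaluates that condition directly from the saved guest
   eflags. */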
LABEL(190) /* setcc Gv */
    modrm = ldub_code(s);
    val = (get_jcc_cond(s->regs1.eflags, b & 0xf) != 0);
    addr = get_modrm(s, modrm);
    stS(s, OT_BYTE, addr, val);
    rm = (modrm & 7) | REX_B(s);
    set_regS(s, OT_BYTE, rm, val);
LABEL(140) /* cmov Gv, Ev */
    ot = s->dflag + OT_WORD;
    modrm = ldub_code(s);
    reg = ((modrm >> 3) & 7) | REX_R(s);
    addr = get_modrm(s, modrm);
    val = ldS(s, ot, addr);
    rm = (modrm & 7) | REX_B(s);
    val = get_regS(s, ot, rm);
    if (get_jcc_cond(s->regs1.eflags, b & 0xf)) {
        set_regS(s, ot, reg, val);
LABEL(c4) /* les Gv */
LABEL(c5) /* lds Gv */
LABEL(1b2) /* lss Gv */
LABEL(1b4) /* lfs Gv */
LABEL(1b5) /* lgs Gv */
    modrm = ldub_code(s);
    reg = ((modrm >> 3) & 7);
    addr = get_modrm(s, modrm);
    val = lduw(s, addr);
    sel = lduw(s, addr);
    load_seg_desc(s, op, sel);
    set_regl(s, reg, val);
    set_regw(s, reg, val);
4228 /************************/
4236 ot
= s
->dflag
+ OT_WORD
;
4238 modrm
= ldub_code(s
);
4240 op
= (modrm
>> 3) & 7;
4243 addr
= get_modrm(s
, modrm
);
4245 val
= ldS(s
, ot
, addr
);
4246 val2
= ldub_code(s
);
4247 eflags
= s
->regs1
.eflags
;
4248 val
= exec_binary(&eflags
,
4249 EB_ROL
+ (op
<< 2) + ot
,
4251 stS(s
, ot
, addr
, val
);
4252 s
->regs1
.eflags
= eflags
;
4254 rm
= (modrm
& 7) | REX_B(s
);
4255 val
= get_regS(s
, ot
, rm
);
4256 val2
= ldub_code(s
);
4257 val
= exec_binary(&s
->regs1
.eflags
,
4258 EB_ROL
+ (op
<< 2) + ot
,
4260 set_regS(s
, ot
, rm
, val
);
4271 ot
= s
->dflag
+ OT_WORD
;
4273 modrm
= ldub_code(s
);
4275 op
= (modrm
>> 3) & 7;
4277 addr
= get_modrm(s
, modrm
);
4278 val
= ldS(s
, ot
, addr
);
4279 eflags
= s
->regs1
.eflags
;
4280 val
= exec_binary(&eflags
,
4281 EB_ROL
+ (op
<< 2) + ot
,
4283 stS(s
, ot
, addr
, val
);
4284 s
->regs1
.eflags
= eflags
;
4286 rm
= (modrm
& 7) | REX_B(s
);
4287 val
= get_regS(s
, ot
, rm
);
4288 val
= exec_binary(&s
->regs1
.eflags
,
4289 EB_ROL
+ (op
<< 2) + ot
,
4291 set_regS(s
, ot
, rm
, val
);
4297 val2
= s
->regs1
.ecx
;
#define SHIFTD1(op, eflags, val, val2, shift) \
    asm volatile( op "\n"\
                  "andl $0x8d5, %%eax\n"\
                  "andl $~0x8d5, %%ebx\n"\
                  "orl %%eax, %%ebx\n"\

#define SHIFTD1(op, eflags, val, val2, shift) \
    asm volatile( op "\n"\
                  "movb %%ah, %%bl\n"\
                  "andl $~0x0800, %%ebx\n"\

#define SHIFTD(eflags, op, val, val2, shift) \
    switch(op) {\
    case 1: SHIFTD1("shld %%cl, %w4, %w1", eflags, val, val2, shift); break;\
    case 2: SHIFTD1("shld %%cl, %k4, %k1", eflags, val, val2, shift); break;\
    QO(case 3: SHIFTD1("shld %%cl, %4, %1", eflags, val, val2, shift); break;)\
    case 5: SHIFTD1("shrd %%cl, %w4, %w1", eflags, val, val2, shift); break;\
    case 6: SHIFTD1("shrd %%cl, %k4, %k1", eflags, val, val2, shift); break;\
    QO(case 7: SHIFTD1("shrd %%cl, %4, %1", eflags, val, val2, shift); break;)\
    }
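
/* Reference for the double-width shifts driven by SHIFTD above: shld shifts
   the destination left and fills the vacated low bits from the top of the
   second operand, shrd is the mirror image.  A minimal C sketch of the
   32-bit behaviour (illustration only, not used by the interpreter; it
   ignores the flag updates handled by SHIFTD1): */
#if 0
static uint32_t shld32(uint32_t dst, uint32_t src, unsigned count)
{
    count &= 31;
    if (count == 0)
        return dst;
    /* high bits come from dst shifted left, low bits from the top of src */
    return (dst << count) | (src >> (32 - count));
}

static uint32_t shrd32(uint32_t dst, uint32_t src, unsigned count)
{
    count &= 31;
    if (count == 0)
        return dst;
    /* low bits come from dst shifted right, high bits from the bottom of src */
    return (dst >> count) | (src << (32 - count));
}
#endif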
4343 LABEL(1a4
) /* shld imm */
4346 LABEL(1ac
) /* shrd imm */
4351 ot
= s
->dflag
+ OT_WORD
;
4352 modrm
= ldub_code(s
);
4354 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4355 val2
= get_regS(s
, ot
, reg
);
4358 addr
= get_modrm(s
, modrm
);
4360 val
= ldS(s
, ot
, addr
);
4361 shift
= ldub_code(s
);
4366 eflags
= s
->regs1
.eflags
;
4368 SHIFTD(eflags
, (op
<< 2) + ot
, val
, val2
, shift
);
4370 stS(s
, ot
, addr
, val
);
4371 s
->regs1
.eflags
= eflags
;
4373 rm
= (modrm
& 7) | REX_B(s
);
4374 val
= get_regS(s
, ot
, rm
);
4375 shift
= ldub_code(s
);
4381 SHIFTD(eflags
, (op
<< 2) + ot
, val
, val2
, shift
);
4383 set_regS(s
, ot
, rm
, val
);
4388 LABEL(1a5
) /* shld cl */
4391 LABEL(1ad
) /* shrd cl */
4396 ot
= s
->dflag
+ OT_WORD
;
4397 modrm
= ldub_code(s
);
4399 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4400 val2
= get_regS(s
, s
->dflag
+ OT_WORD
, reg
);
4401 shift
= s
->regs1
.ecx
;
4407 addr
= get_modrm(s
, modrm
);
4408 val
= ldS(s
, ot
, addr
);
4409 eflags
= s
->regs1
.eflags
;
4411 SHIFTD(eflags
, (op
<< 2) + ot
, val
, val2
, shift
);
4413 stS(s
, ot
, addr
, val
);
4414 s
->regs1
.eflags
= eflags
;
4416 rm
= (modrm
& 7) | REX_B(s
);
4417 val
= get_regS(s
, ot
, rm
);
4419 SHIFTD(eflags
, (op
<< 2) + ot
, val
, val2
, shift
);
4421 set_regS(s
, ot
, rm
, val
);
4426 LABEL(cd
) /* int N */
4431 if (s
->cpu_state
.cpl
!= 0)
4432 raise_exception_err(s
, EXCP0D_GPF
, 0);
4433 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4436 modrm
= ldub_code(s
);
4438 op
= (modrm
>> 3) & 7;
4441 if (!(s
->cpu_state
.cr0
& CR0_PE_MASK
) || get_eflags_vm(s
))
4443 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4446 if (!(s
->cpu_state
.cr0
& CR0_PE_MASK
) || get_eflags_vm(s
))
4448 if (s
->cpu_state
.cpl
!= 0)
4449 raise_exception_err(s
, EXCP0D_GPF
, 0);
4452 rm
= (modrm
& 7) | REX_B(s
);
4453 val
= get_regS(s
, OT_WORD
, rm
) & 0xffff;
4455 addr
= get_modrm(s
, modrm
);
4456 val
= ldS(s
, OT_WORD
, addr
);
4458 helper_lldt(s
, val
);
4460 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4464 if (!(s
->cpu_state
.cr0
& CR0_PE_MASK
) || get_eflags_vm(s
))
4466 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4469 if (!(s
->cpu_state
.cr0
& CR0_PE_MASK
) || get_eflags_vm(s
))
4471 if (s
->cpu_state
.cpl
!= 0)
4472 raise_exception_err(s
, EXCP0D_GPF
, 0);
4473 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4477 if (!(s
->cpu_state
.cr0
& CR0_PE_MASK
) || get_eflags_vm(s
))
4479 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4486 modrm
= ldub_code(s
);
4488 op
= (modrm
>> 3) & 7;
4493 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4498 case 0: /* monitor */
4499 if (/* !(s->cpuid_ext_features & CPUID_EXT_MONITOR) || */
4500 s
->cpu_state
.cpl
!= 0)
4502 if ((uint32_t)s
->regs1
.ecx
!= 0)
4503 raise_exception_err(s
, EXCP0D_GPF
, 0);
4510 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4517 if (s
->cpu_state
.cpl
!= 0)
4518 raise_exception_err(s
, EXCP0D_GPF
, 0);
4519 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4521 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4523 if (s
->cpu_state
.cpl
!= 0)
4524 raise_exception_err(s
, EXCP0D_GPF
, 0);
4525 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4526 case 7: /* invlpg/swapgs */
4527 if (s
->cpu_state
.cpl
!= 0)
4528 raise_exception_err(s
, EXCP0D_GPF
, 0);
4531 if (CODE64(s
) && (modrm
& 7) == 0) {
4539 addr
= get_modrm(s
, modrm
);
4547 LABEL(108) /* invd */
4548 LABEL(109) /* wbinvd */
4549 if (s
->cpu_state
.cpl
!= 0)
4550 raise_exception_err(s
, EXCP0D_GPF
, 0);
4552 LABEL(121) /* mov reg, drN */
4553 LABEL(123) /* mov drN, reg */
4554 if (s
->cpu_state
.cpl
!= 0)
4555 raise_exception_err(s
, EXCP0D_GPF
, 0);
4556 modrm
= ldub_code(s
);
4557 if ((modrm
& 0xc0) != 0xc0)
4559 rm
= (modrm
& 7) | REX_B(s
);
4560 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4566 val
= get_reg(s
, rm
);
4568 val
= (uint32_t)val
;
4575 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4577 /* better than nothing: do nothing if no change */
4578 if (val
!= s
->cpu_state
.dr7
)
4579 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4587 val
= s
->cpu_state
.dr0
;
4590 val
= s
->cpu_state
.dr1
;
4593 val
= s
->cpu_state
.dr2
;
4596 val
= s
->cpu_state
.dr3
;
4599 val
= s
->cpu_state
.dr6
;
4602 val
= s
->cpu_state
.dr7
;
4607 set_regS(s
, ot
, rm
, val
);
4610 LABEL(106) /* clts */
4611 if (s
->cpu_state
.cpl
!= 0)
4612 raise_exception_err(s
, EXCP0D_GPF
, 0);
4613 do_update_cr0(s
, s
->cpu_state
.cr0
& ~CR0_TS_MASK
);
4617 modrm
= ldub_code(s
);
4619 op
= (modrm
>> 3) & 7;
4621 case 0: /* prefetchnta */
4622 case 1: /* prefetchnt0 */
4623 case 2: /* prefetchnt0 */
4624 case 3: /* prefetchnt0 */
4627 addr
= get_modrm(s
, modrm
);
4628 /* nothing more to do */
4630 default: /* nop (multi byte) */
4631 addr
= get_modrm(s
, modrm
);
4636 LABEL(119) /* nop (multi byte) */
4643 modrm
= ldub_code(s
);
4644 addr
= get_modrm(s
, modrm
);
4647 LABEL(120) /* mov reg, crN */
4648 LABEL(122) /* mov crN, reg */
4649 if (s
->cpu_state
.cpl
!= 0)
4650 raise_exception_err(s
, EXCP0D_GPF
, 0);
4651 modrm
= ldub_code(s
);
4652 if ((modrm
& 0xc0) != 0xc0)
4654 rm
= (modrm
& 7) | REX_B(s
);
4655 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4657 val
= get_reg(s
, rm
);
4660 do_update_cr0(s
, val
);
4663 do_update_cr3(s
, val
);
4666 do_update_cr4(s
, val
);
4670 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4677 set_reg(s
, rm
, s
->cpu_state
.cr0
);
4680 set_reg(s
, rm
, s
->cpu_state
.cr2
);
4683 set_reg(s
, rm
, s
->cpu_state
.cr3
);
4686 set_reg(s
, rm
, s
->cpu_state
.cr4
);
4689 raise_exception(s
, KQEMU_RET_SOFTMMU
);
LABEL(130) /* wrmsr */
    if (s->cpu_state.cpl != 0)
        raise_exception_err(s, EXCP0D_GPF, 0);
LABEL(132) /* rdmsr */
    if (s->cpu_state.cpl != 0)
        raise_exception_err(s, EXCP0D_GPF, 0);
    /* cli */
    iopl = get_eflags_iopl(s);
    if (likely(s->cpu_state.cpl <= iopl)) {
        set_reset_eflags(s, 0, IF_MASK);
    } else {
        raise_exception_err(s, EXCP0D_GPF, 0);
    }
    /* sti */
    iopl = get_eflags_iopl(s);
    if (likely(s->cpu_state.cpl <= iopl)) {
        set_reset_eflags(s, IF_MASK, 0);
    } else {
        raise_exception_err(s, EXCP0D_GPF, 0);
    }
    /* NOTE: irq should be disabled for the instruction after
       STI. As it would be too complicated to ensure this, we
       handle the "sti ; sysenter" case found in XP
       specifically. XXX: see why we cannot execute the
       next insn in every case. */
    val = lduw_mem_fast(s, pc + s->cpu_state.segs[R_CS].base);
    if (val == 0x350f) {
LABEL(131) /* rdtsc */
    if ((s->cpu_state.cr4 & CR4_TSD_MASK) &&
        s->cpu_state.cpl != 0) {
        raise_exception_err(s, EXCP0D_GPF, 0);
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    s->regs1.edx = high;
LABEL(105) /* syscall */
LABEL(107) /* sysret */
LABEL(134) /* sysenter */
LABEL(135) /* sysexit */
LABEL(9a) /* lcall im */
LABEL(ea) /* ljmp im */
LABEL(e4) /* in im */
LABEL(e6) /* out im */
LABEL(ec) /* in dx */
LABEL(ee) /* out dx */
LABEL(6c) /* insS */
LABEL(6e) /* outS */
    raise_exception(s, KQEMU_RET_SOFTMMU);
4786 LABEL(a4
) /* movs */
4789 unsigned long saddr
, daddr
, incr
, mask
;
4795 ot
= s
->dflag
+ OT_WORD
;
4803 if (s
->prefix
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4804 if ((s
->regs1
.ecx
& mask
) == 0)
4808 incr
= (1 - (2 * ((s
->regs1
.eflags
>> 10) & 1))) << ot
;
4810 if (s
->aflag
== 2) {
4811 saddr
= s
->regs1
.esi
;
4812 if (s
->override
== R_FS
|| s
->override
== R_GS
)
4813 saddr
+= s
->cpu_state
.segs
[s
->override
].base
;
4815 daddr
= s
->regs1
.edi
;
4817 s
->regs1
.esi
+= incr
;
4818 s
->regs1
.edi
+= incr
;
4822 saddr
= s
->regs1
.esi
& mask
;
4823 override
= s
->override
;
4826 saddr
= (uint32_t)(saddr
+ s
->cpu_state
.segs
[override
].base
);
4828 daddr
= s
->regs1
.edi
& mask
;
4829 daddr
= (uint32_t)(daddr
+ s
->cpu_state
.segs
[R_ES
].base
);
4831 val
= s
->regs1
.esi
+ incr
;
4832 s
->regs1
.esi
= (s
->regs1
.esi
& ~mask
) | (val
& mask
);
4833 val
= s
->regs1
.edi
+ incr
;
4834 s
->regs1
.edi
= (s
->regs1
.edi
& ~mask
) | (val
& mask
);
4836 val
= ldS(s
, ot
, saddr
);
4837 stS(s
, ot
, daddr
, val
);
4839 if (s
->prefix
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4840 val
= s
->regs1
.ecx
- 1;
4841 s
->regs1
.ecx
= (s
->regs1
.ecx
& ~mask
) | (val
& mask
);
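
/* movs: the step applied to esi/edi above is
   incr = (1 - 2*DF) << ot, i.e. plus one operand size when the direction
   flag (eflags bit 10) is clear and minus one operand size when it is set;
   rep counts are masked with the current address-size mask before being
   decremented. */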
LABEL(98) /* CWDE/CBW */
    if (s->dflag == 2) {
        s->regs1.eax = (int32_t)s->regs1.eax;
    } else if (s->dflag == 1) {
        s->regs1.eax = (uint32_t)((int16_t)s->regs1.eax);
    } else {
        s->regs1.eax = (s->regs1.eax & ~0xffff) |
            ((int8_t)s->regs1.eax & 0xffff);
    }
LABEL(99) /* cltd */
    if (s->dflag == 2) {
        s->regs1.edx = (int64_t)s->regs1.eax >> 63;
    } else if (s->dflag == 1) {
        s->regs1.edx = (uint32_t)((int32_t)s->regs1.eax >> 31);
    } else {
        s->regs1.edx = (s->regs1.edx & ~0xffff) |
            (((int16_t)s->regs1.eax >> 15) & 0xffff);
    }
LABEL(1c0) /* xadd */
    ot = s->dflag + OT_WORD;
    modrm = ldub_code(s);
    reg = ((modrm >> 3) & 7) | REX_R(s);
    val = get_regS(s, ot, reg);
    rm = (modrm & 7) | REX_B(s);
    val2 = get_regS(s, ot, rm);
    val = exec_binary(&s->regs1.eflags,
    set_regS(s, ot, rm, val);
    set_regS(s, ot, reg, val2);
    addr = get_modrm(s, modrm);
    val2 = ldS(s, ot, addr);
    eflags = s->regs1.eflags;
    val = exec_binary(&eflags,
    stS(s, ot, addr, val);
    set_regS(s, ot, reg, val2);
    s->regs1.eflags = eflags;
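
/* xadd exchanges and adds in one step: the destination (register or memory)
   receives old_destination + reg, and reg receives the old destination
   value, with the flags updated through exec_binary(). */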
4907 modrm
= ldub_code(s
);
4909 op
= (modrm
>> 3) & 7;
4911 case 0: /* fxsave */
4912 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
))
4914 addr
= get_modrm(s
, modrm
);
4915 if (unlikely((addr
- ((unsigned long)&_start
- 511)) <
4916 (MONITOR_MEM_SIZE
+ 511)))
4917 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4919 if (s
->dflag
== 2) {
4921 "rex64 ; fxsave (%0)\n"
4923 : : "r" (addr
) : "memory");
4930 : : "r" (addr
) : "memory");
4933 case 1: /* fxrstor */
4934 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
))
4936 addr
= get_modrm(s
, modrm
);
4937 if (unlikely((addr
- ((unsigned long)&_start
- 511)) <
4938 (MONITOR_MEM_SIZE
+ 511)))
4939 raise_exception(s
, KQEMU_RET_SOFTMMU
);
4941 if (s
->dflag
== 2) {
4943 "rex64 ; fxrstor (%0)\n"
4955 case 5: /* lfence */
4956 case 6: /* mfence */
4957 if ((modrm
& 0xc7) != 0xc0 || !(s
->cpuid_features
& CPUID_SSE
))
4960 case 7: /* sfence / clflush */
4961 if ((modrm
& 0xc7) == 0xc0) {
4963 if (!(s
->cpuid_features
& CPUID_SSE
))
4967 if (!(s
->cpuid_features
& CPUID_CLFLUSH
))
4969 addr
= get_modrm(s
, modrm
);
4973 raise_exception(s
, KQEMU_RET_SOFTMMU
);
LABEL(e3) /* jecxz */
    val = (int8_t)ldub_code(s);
    val2 = s->regs1.ecx;
    if (s->aflag == 0)
        val2 = (uint16_t)val2;
    else if (s->aflag == 1)
        val2 = (uint32_t)val2;

    raise_exception(s, KQEMU_RET_SOFTMMU);

    /* instruction modifying CS:EIP */
    if (get_eflags_if(s))
    n = getclock() - ti;
    s->tab_insn_count[opcode]++;
    s->tab_insn_cycles[opcode] += n;
    if (n < s->tab_insn_cycles_min[opcode])
        s->tab_insn_cycles_min[opcode] = n;
    if (n > s->tab_insn_cycles_max[opcode])
        s->tab_insn_cycles_max[opcode] = n;
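
/* Per-opcode profiling: n is the getclock() delta across the interpreted
   instruction; each opcode's execution count, total cycles and observed
   min/max cycles are accumulated in the tab_insn_* arrays. */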