/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif
/* target supports implicit self modifying code */
#define TARGET_HAS_SMC
/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#define TARGET_HAS_ICE 1
#ifdef TARGET_X86_64
#define ELF_MACHINE EM_X86_64
#else
#define ELF_MACHINE EM_386
#endif
#include "softfloat.h"

#if defined(__i386__) && !defined(CONFIG_SOFTMMU) && !defined(__APPLE__)
#define USE_CODE_COPY
#endif
/* segment descriptor fields */
#define DESC_G_MASK (1 << 23)
#define DESC_B_SHIFT 22
#define DESC_B_MASK (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK (1 << 20)
#define DESC_P_MASK (1 << 15)
#define DESC_DPL_SHIFT 13
#define DESC_S_MASK (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_A_MASK (1 << 8)

#define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK (1 << 10) /* code: conforming */
#define DESC_R_MASK (1 << 9)  /* code: readable */

#define DESC_E_MASK (1 << 10) /* data: expansion direction */
#define DESC_W_MASK (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)
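
/* Illustrative sketch, not part of the original header: how the DESC_*
   definitions above can be used to decode the raw 'flags' word of a
   descriptor.  The helper names are hypothetical; they assume the
   fixed-width types used elsewhere in this file. */
static inline unsigned int desc_get_dpl(uint32_t flags)
{
    return (flags >> DESC_DPL_SHIFT) & 3; /* DPL is a two bit field */
}

static inline int desc_is_code_segment(uint32_t flags)
{
    /* S = 1 selects a code/data descriptor, bit 11 then selects code */
    return (flags & DESC_S_MASK) && (flags & DESC_CS_MASK);
}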
#define IOPL_SHIFT 12

#define TF_MASK   0x00000100
#define IF_MASK   0x00000200
#define DF_MASK   0x00000400
#define IOPL_MASK 0x00003000
#define NT_MASK   0x00004000
#define RF_MASK   0x00010000
#define VM_MASK   0x00020000
#define AC_MASK   0x00040000
#define VIF_MASK  0x00080000
#define VIP_MASK  0x00100000
#define ID_MASK   0x00200000
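
/* Illustrative sketch, not part of the original header: extracting the IOPL
   field and testing the interrupt enable bit of an eflags value with the
   masks above (hypothetical helper names). */
static inline unsigned int eflags_get_iopl(uint32_t eflags)
{
    return (eflags & IOPL_MASK) >> IOPL_SHIFT;
}

static inline int eflags_if_set(uint32_t eflags)
{
    return (eflags & IF_MASK) != 0;
}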
/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the CPL, INHIBIT_IRQ and HALTED are not redundant. We avoid
   using the IOPL_MASK, TF_MASK and VM_MASK bit position to ease oring
   with eflags. */
#define HF_CPL_SHIFT 0
/* true if soft mmu is being used */
#define HF_SOFTMMU_SHIFT 2
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 segments */
#define HF_CS32_SHIFT 4
#define HF_SS32_SHIFT 5
/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT 6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT 7
#define HF_TF_SHIFT 8 /* must be same as eflags */
#define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT 10
#define HF_TS_SHIFT 11
#define HF_IOPL_SHIFT 12 /* must be same as eflags */
#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
#define HF_OSFXSR_SHIFT 16 /* CR4.OSFXSR */
#define HF_VM_SHIFT 17 /* must be same as eflags */
#define HF_HALTED_SHIFT 18 /* CPU halted */
#define HF_SMM_SHIFT 19 /* CPU in SMM mode */

#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK (1 << HF_PE_SHIFT)
#define HF_TF_MASK (1 << HF_TF_SHIFT)
#define HF_MP_MASK (1 << HF_MP_SHIFT)
#define HF_EM_MASK (1 << HF_EM_SHIFT)
#define HF_TS_MASK (1 << HF_TS_SHIFT)
#define HF_LMA_MASK (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
#define HF_HALTED_MASK (1 << HF_HALTED_SHIFT)
#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
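
/* Illustrative sketch, not part of the original header: reading and
   replacing the CPL stored in the hidden flags word (hypothetical helpers,
   see also cpu_x86_set_cpl() below). */
static inline unsigned int hflags_get_cpl(uint32_t hflags)
{
    return (hflags & HF_CPL_MASK) >> HF_CPL_SHIFT;
}

static inline uint32_t hflags_with_cpl(uint32_t hflags, unsigned int cpl)
{
    return (hflags & ~HF_CPL_MASK) | ((cpl << HF_CPL_SHIFT) & HF_CPL_MASK);
}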
#define CR0_PE_MASK (1 << 0)
#define CR0_MP_MASK (1 << 1)
#define CR0_EM_MASK (1 << 2)
#define CR0_TS_MASK (1 << 3)
#define CR0_ET_MASK (1 << 4)
#define CR0_NE_MASK (1 << 5)
#define CR0_WP_MASK (1 << 16)
#define CR0_AM_MASK (1 << 18)
#define CR0_PG_MASK (1 << 31)

#define CR4_VME_MASK (1 << 0)
#define CR4_PVI_MASK (1 << 1)
#define CR4_TSD_MASK (1 << 2)
#define CR4_DE_MASK (1 << 3)
#define CR4_PSE_MASK (1 << 4)
#define CR4_PAE_MASK (1 << 5)
#define CR4_PGE_MASK (1 << 7)
#define CR4_PCE_MASK (1 << 8)
#define CR4_OSFXSR_MASK (1 << 9)
#define CR4_OSXMMEXCPT_MASK (1 << 10)
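
/* Illustrative sketch, not part of the original header: typical tests on
   raw control register values using the CR0_ and CR4_ masks above
   (hypothetical helper names). */
static inline int cr0_paging_enabled(uint32_t cr0)
{
    /* paging also requires protected mode */
    return (cr0 & CR0_PG_MASK) && (cr0 & CR0_PE_MASK);
}

static inline int cr4_pae_enabled(uint32_t cr4)
{
    return (cr4 & CR4_PAE_MASK) != 0;
}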
#define PG_PRESENT_BIT 0
#define PG_USER_BIT 2
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT 6
#define PG_GLOBAL_BIT 8

#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
#define PG_RW_MASK (1 << PG_RW_BIT)
#define PG_USER_MASK (1 << PG_USER_BIT)
#define PG_PWT_MASK (1 << PG_PWT_BIT)
#define PG_PCD_MASK (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
#define PG_NX_MASK (1LL << PG_NX_BIT)

#define PG_ERROR_W_BIT 1

#define PG_ERROR_P_MASK 0x01
#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK 0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK 0x10
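
/* Illustrative sketch, not part of the original header: composing a page
   fault error code from the PG_ERROR_ masks above (hypothetical helper).
   For example, a user-mode write to a present but read-only page yields
   PG_ERROR_P_MASK | PG_ERROR_W_MASK | PG_ERROR_U_MASK = 0x07. */
static inline uint32_t make_pf_error_code(int present, int is_write,
                                          int is_user, int is_ifetch)
{
    uint32_t code = 0;
    if (present)
        code |= PG_ERROR_P_MASK;    /* protection fault on a present page */
    if (is_write)
        code |= PG_ERROR_W_MASK;    /* caused by a write access */
    if (is_user)
        code |= PG_ERROR_U_MASK;    /* caused while in user mode */
    if (is_ifetch)
        code |= PG_ERROR_I_D_MASK;  /* caused by an instruction fetch */
    return code;
}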
#define MSR_IA32_APICBASE 0x1b
#define MSR_IA32_APICBASE_BSP (1 << 8)
#define MSR_IA32_APICBASE_ENABLE (1 << 11)
#define MSR_IA32_APICBASE_BASE (0xfffff << 12)
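
/* Illustrative sketch, not part of the original header: decoding an
   IA32_APICBASE MSR value with the masks above (hypothetical helpers). */
static inline uint64_t apicbase_get_base(uint64_t val)
{
    return val & MSR_IA32_APICBASE_BASE;       /* APIC MMIO base address */
}

static inline int apicbase_is_bsp(uint64_t val)
{
    return (val & MSR_IA32_APICBASE_BSP) != 0; /* bootstrap processor flag */
}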
#define MSR_IA32_SYSENTER_CS  0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176

#define MSR_MCG_CAP    0x179
#define MSR_MCG_STATUS 0x17a
#define MSR_MCG_CTL    0x17b

#define MSR_PAT 0x277

#define MSR_EFER 0xc0000080

#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_FFXSR (1 << 14)
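
/* Illustrative sketch, not part of the original header: checking long mode
   state from a raw EFER value (hypothetical helper).  LME only enables long
   mode; LMA is set once paging is turned on and long mode is active. */
static inline int efer_long_mode_active(uint64_t efer)
{
    return (efer & MSR_EFER_LME) && (efer & MSR_EFER_LMA);
}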
#define MSR_STAR         0xc0000081
#define MSR_LSTAR        0xc0000082
#define MSR_CSTAR        0xc0000083
#define MSR_FMASK        0xc0000084
#define MSR_FSBASE       0xc0000100
#define MSR_GSBASE       0xc0000101
#define MSR_KERNELGSBASE 0xc0000102
/* cpuid_features bits */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
#define CPUID_PAT  (1 << 16)
#define CPUID_PSE36 (1 << 17)
#define CPUID_CLFLUSH (1 << 19)
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

#define CPUID_EXT_SSE3    (1 << 0)
#define CPUID_EXT_MONITOR (1 << 3)
#define CPUID_EXT_CX16    (1 << 13)

#define CPUID_EXT2_SYSCALL (1 << 11)
#define CPUID_EXT2_NX      (1 << 20)
#define CPUID_EXT2_FFXSR   (1 << 25)
#define CPUID_EXT2_LM      (1 << 29)
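
/* Illustrative sketch, not part of the original header: one possible way to
   assemble a CPUID feature word from the bits above.  The particular
   selection is an example only, not what any real CPU model advertises. */
static inline uint32_t example_cpuid_feature_word(void)
{
    return CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR |
           CPUID_PAE | CPUID_CX8 | CPUID_SEP | CPUID_PGE | CPUID_CMOV |
           CPUID_PAT | CPUID_MMX | CPUID_FXSR | CPUID_SSE | CPUID_SSE2;
}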
#define EXCP00_DIVZ  0
#define EXCP01_SSTP  1
#define EXCP03_INT3  3
#define EXCP04_INTO  4
#define EXCP05_BOUND 5
#define EXCP06_ILLOP 6
#define EXCP07_PREX  7
#define EXCP08_DBLE  8
#define EXCP09_XERR  9
#define EXCP0A_TSS   10
#define EXCP0B_NOSEG 11
#define EXCP0C_STACK 12
#define EXCP0D_GPF   13
#define EXCP0E_PAGE  14
#define EXCP10_COPR  16
#define EXCP11_ALGN  17
#define EXCP12_MCHK  18
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
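
/* Illustrative sketch, not part of the original header: the CC_OP_ values
   above describe which lazily-evaluated operation last set CC_SRC/CC_DST,
   so the condition codes can be recomputed on demand.  For CC_OP_ADDB
   (CC_DST = res, CC_SRC = src1) the carry flag, for instance, can be
   recovered as below (hypothetical helper). */
static inline int example_compute_c_addb(uint8_t cc_dst, uint8_t cc_src)
{
    /* an 8-bit add carried out iff the truncated result is below src1 */
    return cc_dst < cc_src;
}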
#define USE_X86LDOUBLE

#ifdef USE_X86LDOUBLE
typedef floatx80 CPU86_LDouble;
#else
typedef float64 CPU86_LDouble;
#endif
typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;
#ifdef WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
#define XMM_D(n) _d[1 - (n)]

#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)]
#else
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
#define XMM_D(n) _d[n]

#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#endif
#ifdef TARGET_X86_64
#define CPU_NB_REGS 16
#else
#define CPU_NB_REGS 8
#endif
typedef struct CPUX86State {
#if TARGET_LONG_BITS > HOST_LONG_BITS
    /* temporaries if we cannot store them in host registers */
    target_ulong t0, t1, t2;
#endif
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                            flags and DF are set to zero because they are
                            stored elsewhere */
    /* emulator internal eflags handling */
    int32_t df;      /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* hidden flags, see HF_xxx constants */

    SegmentCache segs[6]; /* selector values */
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */

    unsigned int fpstt; /* top of stack index */
    uint8_t fptags[8];  /* 0 = valid, 1 = empty */

#ifdef USE_X86LDOUBLE
        CPU86_LDouble d __attribute__((aligned(16)));
#endif

    /* emulator internal variables */
    float_status fp_status;

    float_status sse_status;
    XMMReg xmm_regs[CPU_NB_REGS];

    /* sysenter registers */
    uint32_t sysenter_cs;
    uint32_t sysenter_esp;
    uint32_t sysenter_eip;

    target_ulong kernelgsbase;

    /* temporary data for USE_CODE_COPY mode */
    int native_fp_regs; /* if true, the FPU state is in the native CPU regs */

    /* exception/interrupt handling */
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers */
    int interrupt_request;
    int user_mode_only; /* user mode only simulation */

    /* processor features (e.g. for CPUID insn) */
    uint32_t cpuid_level;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    uint32_t cpuid_features;
    uint32_t cpuid_ext_features;
    uint32_t cpuid_xlevel;
    uint32_t cpuid_model[12];
    uint32_t cpuid_ext2_features;
    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct APICState *apic_state;
} CPUX86State;
CPUX86State *cpu_x86_init(void);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);

/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base, unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;
    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;
    /* update the hidden flags */
    if (seg_reg == R_CS) {
        if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
            env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
            env->hflags &= ~(HF_ADDSEG_MASK);
        } else {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                new_hflags;
        }
    }
    new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
        >> (DESC_B_SHIFT - HF_SS32_SHIFT);
    if (env->hflags & HF_CS64_MASK) {
        /* zero base assumed for DS, ES and SS in long mode */
    } else if (!(env->cr[0] & CR0_PE_MASK) ||
               (env->eflags & VM_MASK) ||
               !(env->hflags & HF_CS32_MASK)) {
        /* XXX: try to avoid this test. The problem comes from the
           fact that in real mode or vm86 mode we only modify the
           'base' and 'selector' fields of the segment cache to go
           faster. A solution may be to force addseg to one in
           translated code. */
        new_hflags |= HF_ADDSEG_MASK;
    } else {
        new_hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
            HF_ADDSEG_SHIFT;
    }
    env->hflags = (env->hflags &
                   ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
}
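
/* Illustrative usage sketch, not part of the original header: loading a
   flat 4 GB 32-bit code segment into CS, roughly what a reset or loader
   path might do.  The selector 0x08 and the flag combination are examples
   only. */
static inline void example_load_flat_cs(CPUX86State *env)
{
    cpu_x86_load_seg_cache(env, R_CS, 0x08, 0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK |
                           DESC_A_MASK);
}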
/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}
/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f);
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper);

/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);
/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. Non-zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_x86_signal_handler(int host_signum, void *pinfo,
                           void *puc);
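
/* Illustrative usage sketch, not part of the original header: forwarding a
   host SIGSEGV to the virtual CPU from a sigaction(SA_SIGINFO) handler, as
   the comment above suggests.  Kept under "#if 0" because a header is not
   the place to define it; <signal.h> and <stdlib.h> are assumed. */
#if 0 /* example only */
#include <signal.h>
#include <stdlib.h>

static void host_segv_handler(int host_signum, siginfo_t *info, void *puc)
{
    if (cpu_x86_signal_handler(host_signum, info, puc))
        return;   /* the fault was raised and handled by the virtual CPU */
    abort();      /* otherwise it is a genuine host-side bug */
}
#endif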
void cpu_x86_set_a20(CPUX86State *env, int a20_state);

uint64_t cpu_get_tsc(CPUX86State *env);

void cpu_set_apic_base(CPUX86State *env, uint64_t val);
uint64_t cpu_get_apic_base(CPUX86State *env);
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val);
#ifndef NO_CPU_IO_DEFS
uint8_t cpu_get_apic_tpr(CPUX86State *env);
#endif
void cpu_smm_update(CPUX86State *env);
/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);

#define X86_DUMP_FPU  0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
static inline int cpu_get_time_fast(void)
{
    int low, high;

    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    return low;
}
#define TARGET_PAGE_BITS 12

#endif /* CPU_I386_H */