virtagent: Makefile fixups
[qemu/mdroth.git] / target-i386 / helper.c
blob26ea1e58e08fb580f52a1f0b6516673b11aea2d1
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
26 #include "cpu.h"
27 #include "exec-all.h"
28 #include "qemu-common.h"
29 #include "kvm.h"
30 #include "kvm_x86.h"
32 //#define DEBUG_MMU
/* NOTE: must be called outside the CPU execute loop */
/* Reset the CPU to its power-on/RESET architectural state: real mode,
   CS:IP = F000:FFF0, segment caches loaded with the reset descriptors,
   FPU/SSE control words at their documented defaults.  Only the state up
   to the 'breakpoints' member is cleared so that debug bookkeeping
   (breakpoint/watchpoint lists) survives the reset. */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* Zero everything up to (but not including) the breakpoint fields. */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    /* 0x60000010 = CD|NW|ET|reserved-high bits: paging and protection off. */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 with selector 0xf000 is the documented reset
       state; all other segments are flat 64 KB real-mode segments. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds the CPU signature (family/model/stepping) after reset. */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init: all tags set to "empty". */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    /* Reset debug registers and drop any CPU-owned break/watchpoints. */
    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

    env->mcg_status = 0;
}
/* Free an x86 CPU state previously allocated by cpu_x86_init(). */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
113 /***********************************************************/
114 /* x86 debug */
/* Printable names for the lazy condition-code operations.
   NOTE(review): entries must stay in the exact order of the CC_OP_*
   enumeration in cpu.h — cpu_dump_state() indexes this table directly
   with env->cc_op. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
/* Pretty-print one segment-cache entry (selector, base, limit, flags)
   followed by a decoded description of its descriptor type.  Uses the
   64-bit base format when the CPU is in 64-bit code mode.  Descriptor
   decoding is skipped in real mode or for non-present segments. */
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    /* No descriptor semantics to decode in real mode / for absent segments. */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* Code/data segment: print size class and access attributes. */
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* System segment: look up the architectural type name.  The table
           is indexed by [long-mode?][type], matching the SDM encodings. */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
/* Dump the full architectural CPU state to 'f' for the monitor/logging:
   general registers, flags, segment caches, descriptor tables, control
   and debug registers, and — depending on 'flags' (X86_DUMP_CCOP,
   X86_DUMP_FPU) — the lazy condition-code state and FPU/SSE registers.
   Register widths follow the current CPU mode (64- vs 32-bit). */
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* Pull the latest register state from KVM before printing. */
    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    /* Segment caches, then the system segments. */
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* Resolve the lazy condition-code op to a name (see cc_op_str). */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        /* Collapse the per-register tag bytes into the x87 FTW byte. */
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* Split the 80-bit extended double into mantissa/exponent words
               for display.  NOTE(review): type-puns via a union. */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit mode, 8 otherwise. */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
413 /***********************************************************/
414 /* x86 mmu */
415 /* XXX: add PGE support */
417 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
419 a20_state = (a20_state != 0);
420 if (a20_state != ((env->a20_mask >> 20) & 1)) {
421 #if defined(DEBUG_MMU)
422 printf("A20 update: a20=%d\n", a20_state);
423 #endif
424 /* if the cpu is currently executing code, we must unlink it and
425 all the potentially executing TB */
426 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
428 /* when a20 is changed, all the MMU mappings are invalid, so
429 we must flush everything */
430 tlb_flush(env, 1);
431 env->a20_mask = ~(1 << 20) | (a20_state << 20);
/* Install a new CR0 value.  Flushes the TLB when paging-relevant bits
   change, handles long-mode entry/exit when paging is toggled with
   EFER.LME set, and refreshes the cached PE/ADDSEG/MP/EM/TS hflags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* Any PG/WP/PE transition invalidates cached translations. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is hard-wired to 1 on modern CPUs. */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: MP/EM/TS are copied straight from CR0 (the shift
       aligns CR0.MP at bit 1 with HF_MP at its hflags position). */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
476 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
477 the PDPT */
478 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
480 env->cr[3] = new_cr3;
481 if (env->cr[0] & CR0_PG_MASK) {
482 #if defined(DEBUG_MMU)
483 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
484 #endif
485 tlb_flush(env, 0);
489 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
491 #if defined(DEBUG_MMU)
492 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
493 #endif
494 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
495 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
496 tlb_flush(env, 1);
498 /* SSE handling */
499 if (!(env->cpuid_features & CPUID_SSE))
500 new_cr4 &= ~CR4_OSFXSR_MASK;
501 if (new_cr4 & CR4_OSFXSR_MASK)
502 env->hflags |= HF_OSFXSR_MASK;
503 else
504 env->hflags &= ~HF_OSFXSR_MASK;
506 env->cr[4] = new_cr4;
509 #if defined(CONFIG_USER_ONLY)
511 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
512 int is_write, int mmu_idx, int is_softmmu)
514 /* user mode only emulation */
515 is_write &= 1;
516 env->cr[2] = addr;
517 env->error_code = (is_write << PG_ERROR_W_BIT);
518 env->error_code |= PG_ERROR_U_MASK;
519 env->exception_index = EXCP0E_PAGE;
520 return 1;
523 #else
525 /* XXX: This value should match the one returned by CPUID
526 * and in exec.c */
527 # if defined(TARGET_X86_64)
528 # define PHYS_ADDR_MASK 0xfffffff000LL
529 # else
530 # define PHYS_ADDR_MASK 0xffffff000LL
531 # endif
533 /* return value:
534 -1 = cannot handle fault
535 0 = nothing more to do
536 1 = generate PF fault
/* Walk the guest page tables for 'addr' and install the translation in
   the software TLB.
   is_write1: 0 = read, 1 = write, 2 = instruction fetch.
   return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* Fold the ifetch case (2) into "not a write" for permission checks. */
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* Paging disabled: identity map with full permissions. */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

        /* 'ptep' accumulates the combined access rights of every level,
           with the NX bit kept inverted so it can be ANDed like U/W. */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension (canonical form) */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* Legacy PAE: 4-entry PDPT, no rights bits at this level. */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* Un-invert NX: from here on ptep holds the real NX bit. */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* Supervisor writes honor RW only when CR0.WP is set. */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* Legacy 2-level 32-bit paging. */
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions: the SVM intercept
           records the faulting address in the VMCB instead. */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
/* Translate a guest virtual address to a physical address for the
   debugger: same page walk as cpu_x86_handle_mmu_fault() but without
   permission checks, A/D updates, or TLB insertion.
   Returns -1 when the address is not mapped. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension (canonical form) */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* Legacy PAE: 4-entry PDPT selected by bits 31:30. */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* Paging disabled: identity mapping. */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
/* Mirror debug register DR'index' into QEMU's breakpoint/watchpoint
   machinery according to its DR7 type field:
   0 = execution breakpoint, 1 = data write, 2 = I/O (unsupported),
   3 = data read/write.  On failure the slot pointer is cleared
   (cpu_breakpoint/cpu_watchpoint share a union in CPUX86State). */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
         /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
950 void hw_breakpoint_remove(CPUState *env, int index)
952 if (!env->cpu_breakpoint[index])
953 return;
954 switch (hw_breakpoint_type(env->dr[7], index)) {
955 case 0:
956 if (hw_breakpoint_enabled(env->dr[7], index))
957 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
958 break;
959 case 1:
960 case 3:
961 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
962 break;
963 case 2:
964 /* No support for I/O watchpoints yet */
965 break;
/* Scan DR0-DR3 for breakpoints/watchpoints that match the current state
   and build the corresponding DR6 status bits.  DR6 is written back when
   an enabled breakpoint hit or when 'force_dr6_update' is set.
   Returns non-zero iff an enabled breakpoint was hit (i.e. #DB should
   be raised). */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    /* Preserve DR6 except its four per-breakpoint status bits. */
    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        /* Type 0: execution break matches on EIP.  Odd types (1/3) are
           watchpoints, which match when QEMU flagged them as hit. */
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
991 static CPUDebugExcpHandler *prev_debug_excp_handler;
993 void raise_exception_env(int exception_index, CPUState *env);
/* Debug exception hook installed via cpu_set_debug_excp_handler().
   Converts CPU-owned (guest DR7) watchpoint/breakpoint hits into guest
   #DB exceptions; GDB-owned ones fall through to the previous handler. */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                /* Not an enabled guest watchpoint: just resume. */
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    /* Chain to the handler that was installed before ours (e.g. gdbstub). */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1021 /* This should come from sysemu.h - if we could include it here... */
1022 void qemu_system_reset_request(void);
/* Inject a machine-check event into bank 'bank' of CPU 'cenv'.
   Under KVM the injection is delegated to the kernel.  For TCG the MCA
   MSR state (MCi_STATUS/ADDR/MISC, MCG_STATUS) is updated directly:
   uncorrected errors (UC) raise #MC via CPU_INTERRUPT_MCE, corrected
   ones are only logged in the bank; an unrecoverable double fault
   triggers a system reset. */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;   /* MCG_CAP[7:0] = bank count */
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    if (kvm_enabled()) {
        kvm_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc, 0);
        return;
    }

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    /* Each bank occupies 4 MSR slots: CTL, STATUS, ADDR, MISC. */
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        /* A second #MC while one is pending, or MCE disabled in CR4,
           is architecturally a shutdown condition. */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* Corrected error: record it unless an uncorrected one is
           already latched in the bank. */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER;
}
1079 #endif /* !CONFIG_USER_ONLY */
1081 static void mce_init(CPUX86State *cenv)
1083 unsigned int bank, bank_num;
1085 if (((cenv->cpuid_version >> 8)&0xf) >= 6
1086 && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
1087 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
1088 cenv->mcg_ctl = ~(uint64_t)0;
1089 bank_num = MCE_BANKS_DEF;
1090 for (bank = 0; bank < bank_num; bank++)
1091 cenv->mce_banks[bank*4] = ~(uint64_t)0;
/* Read and decode the GDT/LDT descriptor named by 'selector' for the
   debugger, using debug memory access (no faults).  Fills in *base,
   *limit (expanded for 4K granularity) and *flags (raw high word).
   Returns 1 on success, 0 when the selector is outside the table or the
   descriptor words cannot be read. */
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    /* TI bit selects LDT vs GDT. */
    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    /* Reassemble the scattered base/limit fields of the descriptor. */
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
/* Allocate and initialise an x86 CPU of model 'cpu_model'.
   One-time global setup (translator tables, debug exception hook) runs
   on the first call only.  Returns NULL when the model is unknown. */
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        /* Chain our debug handler in front of any existing one. */
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}
1153 #if !defined(CONFIG_USER_ONLY)
/* Handle an INIT IPI: reset the CPU and its local APIC, but preserve a
   pending SIPI request (saved before cpu_reset clears interrupt_request)
   so the wake-up is not lost.  APs stay halted until SIPI. */
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}
/* Handle a SIPI: the APIC sets the start-up vector and un-halts the CPU. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
1167 #else
/* User-mode emulation has no INIT/SIPI handling: empty stubs. */
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
1174 #endif