target-i386/helper.c (qemu-kvm/fedora.git)
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "svm.h"
#include "qemu-common.h"

#include "qemu-kvm.h"

//#define DEBUG_MMU
static int cpu_x86_register (CPUX86State *env, const char *cpu_model);

static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
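    /* Index i in each table corresponds to CPUID bit i of the matching
       register: feature_name maps CPUID.1:EDX, ext_feature_name CPUID.1:ECX,
       ext2_feature_name CPUID.80000001h:EDX and ext3_feature_name
       CPUID.80000001h:ECX. */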
    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
extern const char *cpu_vendor_string;

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
#ifdef USE_KVM
    if (kvm_enabled())
        kvm_init_new_ap(env->cpu_index, env);
#endif
    return env;
}
typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
225 .name = "athlon",
226 .level = 2,
227 .vendor1 = 0x68747541, /* "Auth" */
228 .vendor2 = 0x69746e65, /* "enti" */
229 .vendor3 = 0x444d4163, /* "cAMD" */
230 .family = 6,
231 .model = 2,
232 .stepping = 3,
233 .features = PPRO_FEATURES | PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
234 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
235 .xlevel = 0x80000008,
236 /* XXX: put another string ? */
237 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
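/* Parse a "-cpu" model string of the form
 *   name[,+flag][,-flag][,prop=value]...
 * where prop is one of family, model, stepping, vendor or model_id
 * (for example, something like "qemu64,+sse3,-nx,family=6").
 * The result is written into *x86_cpu_def; -1 is returned on error. */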
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
    }
    env->cpuid_level = def->level;
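    /* CPUID leaf 1 EAX layout: stepping in bits 3:0, model in bits 7:4,
       family in bits 11:8 (the extended family/model fields are not
       encoded here). */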
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
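    /* Pack the 48-byte model name into cpuid_model[], four characters per
       32-bit word, as reported by CPUID leaves 0x80000002..0x80000004. */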
    {
        const char *model_id = def->model_id;
        int c, len, i;

        if (cpu_vendor_string != NULL)
            model_id = cpu_vendor_string;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
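    /* Reset vector: CS.base 0xffff0000 + EIP 0xfff0 places the first fetch
       at the architectural start address 0xfffffff0. */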
    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
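    /* CR0.MP, CR0.EM and CR0.TS occupy bits 1..3; shifting new_cr0 left by
       (HF_MP_SHIFT - 1) lines CR0.MP up with HF_MP_MASK so all three bits
       can be copied with one mask (this assumes HF_MP/HF_EM/HF_TS are
       consecutive hflags bits). */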
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

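        /* PAE walk: with long mode active this is the full 4-level walk
           (PML4E -> PDPE -> PDE -> PTE, nine index bits per level); without
           long mode only the 4-entry PDPT and the two lower levels are used.
           PG_PSE_MASK in the PDE selects a 2 MB page. */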
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
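        /* Legacy 32-bit paging: two-level walk (PDE -> PTE, ten index bits
           per level); a 4 MB page is used when CR4.PSE and the PDE PSE bit
           are both set. */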
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */