Merge branch 'master' of git://git.sv.gnu.org/qemu
[qemu-kvm/fedora.git] / target-i386 / helper.c
blob6dc011111248d2d96ab6019a430e20822e21e3e6
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "qemu-common.h"
30 #include "kvm.h"
32 #include "qemu-kvm.h"
34 //#define DEBUG_MMU
36 /* feature flags taken from "Intel Processor Identification and the CPUID
37 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
38 * about feature names, the Linux name is used. */
/* CPUID leaf 1 EDX feature names, one per bit (0..31); NULL marks a
   reserved bit with no Linux flag name.  Restored the array terminator
   that was truncated from this listing. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID leaf 1 ECX feature names, one per bit (0..31). */
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
/* CPUID leaf 0x80000001 EDX feature names, one per bit (0..31). */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
/* CPUID leaf 0x80000001 ECX feature names, one per bit (0..31). */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
64 static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
65 uint32_t *ext_features,
66 uint32_t *ext2_features,
67 uint32_t *ext3_features)
69 int i;
70 int found = 0;
72 for ( i = 0 ; i < 32 ; i++ )
73 if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
74 *features |= 1 << i;
75 found = 1;
77 for ( i = 0 ; i < 32 ; i++ )
78 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
79 *ext_features |= 1 << i;
80 found = 1;
82 for ( i = 0 ; i < 32 ; i++ )
83 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
84 *ext2_features |= 1 << i;
85 found = 1;
87 for ( i = 0 ; i < 32 ; i++ )
88 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
89 *ext3_features |= 1 << i;
90 found = 1;
92 if (!found) {
93 fprintf(stderr, "CPU feature %s not found\n", flagname);
/* One named "-cpu" model: everything needed to populate the CPUID
   state of a virtual CPU. */
typedef struct x86_def_t {
    const char *name;                   /* model name as given to -cpu */
    uint32_t level;                     /* max standard CPUID level (EAX=0 return) */
    uint32_t vendor1, vendor2, vendor3; /* vendor id, 4 chars per word, packed little-endian */
    int family;                         /* CPUID family (extended encoding handled at register time) */
    int model;                          /* CPUID model, 0..0xff */
    int stepping;                       /* CPUID stepping, 0..0xf */
    uint32_t features, ext_features, ext2_features, ext3_features; /* CPUID feature bitmaps */
    uint32_t xlevel;                    /* max extended CPUID level (EAX=0x80000000 return) */
    char model_id[48];                  /* brand string for CPUID 0x80000002..4 */
} x86_def_t;
/* CPUID feature sets for the classic CPU models, built up incrementally
   from the 486 baseline. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* PPro-style baseline shared by the qemu32/qemu64/athlon/... models. */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
120 static x86_def_t x86_defs[] = {
121 #ifdef TARGET_X86_64
123 .name = "qemu64",
124 .level = 2,
125 .vendor1 = CPUID_VENDOR_AMD_1,
126 .vendor2 = CPUID_VENDOR_AMD_2,
127 .vendor3 = CPUID_VENDOR_AMD_3,
128 .family = 6,
129 .model = 2,
130 .stepping = 3,
131 .features = PPRO_FEATURES |
132 /* these features are needed for Win64 and aren't fully implemented */
133 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
134 /* this feature is needed for Solaris and isn't fully implemented */
135 CPUID_PSE36,
136 .ext_features = CPUID_EXT_SSE3,
137 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
138 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
139 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
140 .ext3_features = CPUID_EXT3_SVM,
141 .xlevel = 0x8000000A,
142 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
145 .name = "phenom",
146 .level = 5,
147 .vendor1 = CPUID_VENDOR_AMD_1,
148 .vendor2 = CPUID_VENDOR_AMD_2,
149 .vendor3 = CPUID_VENDOR_AMD_3,
150 .family = 16,
151 .model = 2,
152 .stepping = 3,
153 /* Missing: CPUID_VME, CPUID_HT */
154 .features = PPRO_FEATURES |
155 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
156 CPUID_PSE36,
157 /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
158 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
159 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
160 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
161 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
162 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
163 CPUID_EXT2_FFXSR,
164 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
165 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
166 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
167 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
168 .ext3_features = CPUID_EXT3_SVM,
169 .xlevel = 0x8000001A,
170 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
173 .name = "core2duo",
174 .level = 10,
175 .family = 6,
176 .model = 15,
177 .stepping = 11,
178 /* The original CPU also implements these features:
179 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
180 CPUID_TM, CPUID_PBE */
181 .features = PPRO_FEATURES |
182 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
183 CPUID_PSE36,
184 /* The original CPU also implements these ext features:
185 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
186 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
187 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
188 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
189 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
190 .xlevel = 0x80000008,
191 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
193 #endif
195 .name = "qemu32",
196 .level = 2,
197 .family = 6,
198 .model = 3,
199 .stepping = 3,
200 .features = PPRO_FEATURES,
201 .ext_features = CPUID_EXT_SSE3,
202 .xlevel = 0,
203 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
206 .name = "coreduo",
207 .level = 10,
208 .family = 6,
209 .model = 14,
210 .stepping = 8,
211 /* The original CPU also implements these features:
212 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
213 CPUID_TM, CPUID_PBE */
214 .features = PPRO_FEATURES | CPUID_VME |
215 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
216 /* The original CPU also implements these ext features:
217 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
218 CPUID_EXT_PDCM */
219 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
220 .ext2_features = CPUID_EXT2_NX,
221 .xlevel = 0x80000008,
222 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
225 .name = "486",
226 .level = 0,
227 .family = 4,
228 .model = 0,
229 .stepping = 0,
230 .features = I486_FEATURES,
231 .xlevel = 0,
234 .name = "pentium",
235 .level = 1,
236 .family = 5,
237 .model = 4,
238 .stepping = 3,
239 .features = PENTIUM_FEATURES,
240 .xlevel = 0,
243 .name = "pentium2",
244 .level = 2,
245 .family = 6,
246 .model = 5,
247 .stepping = 2,
248 .features = PENTIUM2_FEATURES,
249 .xlevel = 0,
252 .name = "pentium3",
253 .level = 2,
254 .family = 6,
255 .model = 7,
256 .stepping = 3,
257 .features = PENTIUM3_FEATURES,
258 .xlevel = 0,
261 .name = "athlon",
262 .level = 2,
263 .vendor1 = 0x68747541, /* "Auth" */
264 .vendor2 = 0x69746e65, /* "enti" */
265 .vendor3 = 0x444d4163, /* "cAMD" */
266 .family = 6,
267 .model = 2,
268 .stepping = 3,
269 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
270 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
271 .xlevel = 0x80000008,
272 /* XXX: put another string ? */
273 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
276 .name = "n270",
277 /* original is on level 10 */
278 .level = 5,
279 .family = 6,
280 .model = 28,
281 .stepping = 2,
282 .features = PPRO_FEATURES |
283 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
284 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
285 * CPUID_HT | CPUID_TM | CPUID_PBE */
286 /* Some CPUs got no CPUID_SEP */
287 .ext_features = CPUID_EXT_MONITOR |
288 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
289 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
290 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
291 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
292 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
293 .xlevel = 0x8000000A,
294 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
298 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
300 unsigned int i;
301 x86_def_t *def;
303 char *s = strdup(cpu_model);
304 char *featurestr, *name = strtok(s, ",");
305 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
306 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
307 int family = -1, model = -1, stepping = -1;
309 def = NULL;
310 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
311 if (strcmp(name, x86_defs[i].name) == 0) {
312 def = &x86_defs[i];
313 break;
316 if (!def)
317 goto error;
318 memcpy(x86_cpu_def, def, sizeof(*def));
320 featurestr = strtok(NULL, ",");
322 while (featurestr) {
323 char *val;
324 if (featurestr[0] == '+') {
325 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
326 } else if (featurestr[0] == '-') {
327 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
328 } else if ((val = strchr(featurestr, '='))) {
329 *val = 0; val++;
330 if (!strcmp(featurestr, "family")) {
331 char *err;
332 family = strtol(val, &err, 10);
333 if (!*val || *err || family < 0) {
334 fprintf(stderr, "bad numerical value %s\n", val);
335 goto error;
337 x86_cpu_def->family = family;
338 } else if (!strcmp(featurestr, "model")) {
339 char *err;
340 model = strtol(val, &err, 10);
341 if (!*val || *err || model < 0 || model > 0xff) {
342 fprintf(stderr, "bad numerical value %s\n", val);
343 goto error;
345 x86_cpu_def->model = model;
346 } else if (!strcmp(featurestr, "stepping")) {
347 char *err;
348 stepping = strtol(val, &err, 10);
349 if (!*val || *err || stepping < 0 || stepping > 0xf) {
350 fprintf(stderr, "bad numerical value %s\n", val);
351 goto error;
353 x86_cpu_def->stepping = stepping;
354 } else if (!strcmp(featurestr, "vendor")) {
355 if (strlen(val) != 12) {
356 fprintf(stderr, "vendor string must be 12 chars long\n");
357 goto error;
359 x86_cpu_def->vendor1 = 0;
360 x86_cpu_def->vendor2 = 0;
361 x86_cpu_def->vendor3 = 0;
362 for(i = 0; i < 4; i++) {
363 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
364 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
365 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
367 } else if (!strcmp(featurestr, "model_id")) {
368 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
369 val);
370 } else {
371 fprintf(stderr, "unrecognized feature %s\n", featurestr);
372 goto error;
374 } else {
375 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
376 goto error;
378 featurestr = strtok(NULL, ",");
380 x86_cpu_def->features |= plus_features;
381 x86_cpu_def->ext_features |= plus_ext_features;
382 x86_cpu_def->ext2_features |= plus_ext2_features;
383 x86_cpu_def->ext3_features |= plus_ext3_features;
384 x86_cpu_def->features &= ~minus_features;
385 x86_cpu_def->ext_features &= ~minus_ext_features;
386 x86_cpu_def->ext2_features &= ~minus_ext2_features;
387 x86_cpu_def->ext3_features &= ~minus_ext3_features;
388 free(s);
389 return 0;
391 error:
392 free(s);
393 return -1;
396 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
398 unsigned int i;
400 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
401 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
404 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
406 x86_def_t def1, *def = &def1;
408 if (cpu_x86_find_by_name(def, cpu_model) < 0)
409 return -1;
410 if (def->vendor1) {
411 env->cpuid_vendor1 = def->vendor1;
412 env->cpuid_vendor2 = def->vendor2;
413 env->cpuid_vendor3 = def->vendor3;
414 } else {
415 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
416 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
417 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
419 env->cpuid_level = def->level;
420 if (def->family > 0x0f)
421 env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
422 else
423 env->cpuid_version = def->family << 8;
424 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
425 env->cpuid_version |= def->stepping;
426 env->cpuid_features = def->features;
427 env->pat = 0x0007040600070406ULL;
428 env->cpuid_ext_features = def->ext_features;
429 env->cpuid_ext2_features = def->ext2_features;
430 env->cpuid_xlevel = def->xlevel;
431 env->cpuid_ext3_features = def->ext3_features;
433 const char *model_id = def->model_id;
434 int c, len, i;
435 if (!model_id)
436 model_id = "";
437 len = strlen(model_id);
438 for(i = 0; i < 48; i++) {
439 if (i >= len)
440 c = '\0';
441 else
442 c = (uint8_t)model_id[i];
443 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
446 return 0;
449 /* NOTE: must be called outside the CPU execute loop */
450 void cpu_reset(CPUX86State *env)
452 int i;
454 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
455 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
456 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
459 memset(env, 0, offsetof(CPUX86State, breakpoints));
461 tlb_flush(env, 1);
463 env->old_exception = -1;
465 /* init to reset state */
467 #ifdef CONFIG_SOFTMMU
468 env->hflags |= HF_SOFTMMU_MASK;
469 #endif
470 env->hflags2 |= HF2_GIF_MASK;
472 cpu_x86_update_cr0(env, 0x60000010);
473 env->a20_mask = ~0x0;
474 env->smbase = 0x30000;
476 env->idt.limit = 0xffff;
477 env->gdt.limit = 0xffff;
478 env->ldt.limit = 0xffff;
479 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
480 env->tr.limit = 0xffff;
481 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
483 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
484 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
485 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
486 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
487 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
488 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
489 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
490 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
491 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
492 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
493 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
494 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
496 env->eip = 0xfff0;
497 env->regs[R_EDX] = env->cpuid_version;
499 env->eflags = 0x2;
501 /* FPU init */
502 for(i = 0;i < 8; i++)
503 env->fptags[i] = 1;
504 env->fpuc = 0x37f;
506 env->mxcsr = 0x1f80;
508 memset(env->dr, 0, sizeof(env->dr));
509 env->dr[6] = DR6_FIXED_1;
510 env->dr[7] = DR7_FIXED_1;
511 cpu_breakpoint_remove_all(env, BP_CPU);
512 cpu_watchpoint_remove_all(env, BP_CPU);
515 void cpu_x86_close(CPUX86State *env)
517 qemu_free(env);
520 /***********************************************************/
521 /* x86 debug */
/* Human-readable names for the lazy condition-code operations, indexed
 * by CC_OP_* value; used by cpu_dump_state().  Order must match the
 * CC_OP enum.  Restored the array terminator truncated from this
 * listing. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
578 static void
579 cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
580 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
581 const char *name, struct SegmentCache *sc)
583 #ifdef TARGET_X86_64
584 if (env->hflags & HF_CS64_MASK) {
585 cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
586 sc->selector, sc->base, sc->limit, sc->flags);
587 } else
588 #endif
590 cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
591 (uint32_t)sc->base, sc->limit, sc->flags);
594 if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
595 goto done;
597 cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
598 if (sc->flags & DESC_S_MASK) {
599 if (sc->flags & DESC_CS_MASK) {
600 cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
601 ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
602 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
603 (sc->flags & DESC_R_MASK) ? 'R' : '-');
604 } else {
605 cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
606 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
607 (sc->flags & DESC_W_MASK) ? 'W' : '-');
609 cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
610 } else {
611 static const char *sys_type_name[2][16] = {
612 { /* 32 bit mode */
613 "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
614 "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
615 "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
616 "CallGate32", "Reserved", "IntGate32", "TrapGate32"
618 { /* 64 bit mode */
619 "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
620 "Reserved", "Reserved", "Reserved", "Reserved",
621 "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
622 "Reserved", "IntGate64", "TrapGate64"
625 cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
626 [(sc->flags & DESC_TYPE_MASK)
627 >> DESC_TYPE_SHIFT]);
629 done:
630 cpu_fprintf(f, "\n");
633 void cpu_dump_state(CPUState *env, FILE *f,
634 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
635 int flags)
637 int eflags, i, nb;
638 char cc_op_name[32];
639 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
641 if (kvm_enabled())
642 kvm_arch_get_registers(env);
644 eflags = env->eflags;
645 #ifdef TARGET_X86_64
646 if (env->hflags & HF_CS64_MASK) {
647 cpu_fprintf(f,
648 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
649 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
650 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
651 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
652 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
653 env->regs[R_EAX],
654 env->regs[R_EBX],
655 env->regs[R_ECX],
656 env->regs[R_EDX],
657 env->regs[R_ESI],
658 env->regs[R_EDI],
659 env->regs[R_EBP],
660 env->regs[R_ESP],
661 env->regs[8],
662 env->regs[9],
663 env->regs[10],
664 env->regs[11],
665 env->regs[12],
666 env->regs[13],
667 env->regs[14],
668 env->regs[15],
669 env->eip, eflags,
670 eflags & DF_MASK ? 'D' : '-',
671 eflags & CC_O ? 'O' : '-',
672 eflags & CC_S ? 'S' : '-',
673 eflags & CC_Z ? 'Z' : '-',
674 eflags & CC_A ? 'A' : '-',
675 eflags & CC_P ? 'P' : '-',
676 eflags & CC_C ? 'C' : '-',
677 env->hflags & HF_CPL_MASK,
678 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
679 (int)(env->a20_mask >> 20) & 1,
680 (env->hflags >> HF_SMM_SHIFT) & 1,
681 env->halted);
682 } else
683 #endif
685 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
686 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
687 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
688 (uint32_t)env->regs[R_EAX],
689 (uint32_t)env->regs[R_EBX],
690 (uint32_t)env->regs[R_ECX],
691 (uint32_t)env->regs[R_EDX],
692 (uint32_t)env->regs[R_ESI],
693 (uint32_t)env->regs[R_EDI],
694 (uint32_t)env->regs[R_EBP],
695 (uint32_t)env->regs[R_ESP],
696 (uint32_t)env->eip, eflags,
697 eflags & DF_MASK ? 'D' : '-',
698 eflags & CC_O ? 'O' : '-',
699 eflags & CC_S ? 'S' : '-',
700 eflags & CC_Z ? 'Z' : '-',
701 eflags & CC_A ? 'A' : '-',
702 eflags & CC_P ? 'P' : '-',
703 eflags & CC_C ? 'C' : '-',
704 env->hflags & HF_CPL_MASK,
705 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
706 (int)(env->a20_mask >> 20) & 1,
707 (env->hflags >> HF_SMM_SHIFT) & 1,
708 env->halted);
711 for(i = 0; i < 6; i++) {
712 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
713 &env->segs[i]);
715 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
716 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
718 #ifdef TARGET_X86_64
719 if (env->hflags & HF_LMA_MASK) {
720 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
721 env->gdt.base, env->gdt.limit);
722 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
723 env->idt.base, env->idt.limit);
724 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
725 (uint32_t)env->cr[0],
726 env->cr[2],
727 env->cr[3],
728 (uint32_t)env->cr[4]);
729 for(i = 0; i < 4; i++)
730 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
731 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
732 env->dr[6], env->dr[7]);
733 } else
734 #endif
736 cpu_fprintf(f, "GDT= %08x %08x\n",
737 (uint32_t)env->gdt.base, env->gdt.limit);
738 cpu_fprintf(f, "IDT= %08x %08x\n",
739 (uint32_t)env->idt.base, env->idt.limit);
740 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
741 (uint32_t)env->cr[0],
742 (uint32_t)env->cr[2],
743 (uint32_t)env->cr[3],
744 (uint32_t)env->cr[4]);
745 for(i = 0; i < 4; i++)
746 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
747 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
749 if (flags & X86_DUMP_CCOP) {
750 if ((unsigned)env->cc_op < CC_OP_NB)
751 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
752 else
753 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
754 #ifdef TARGET_X86_64
755 if (env->hflags & HF_CS64_MASK) {
756 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
757 env->cc_src, env->cc_dst,
758 cc_op_name);
759 } else
760 #endif
762 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
763 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
764 cc_op_name);
767 if (flags & X86_DUMP_FPU) {
768 int fptag;
769 fptag = 0;
770 for(i = 0; i < 8; i++) {
771 fptag |= ((!env->fptags[i]) << i);
773 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
774 env->fpuc,
775 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
776 env->fpstt,
777 fptag,
778 env->mxcsr);
779 for(i=0;i<8;i++) {
780 #if defined(USE_X86LDOUBLE)
781 union {
782 long double d;
783 struct {
784 uint64_t lower;
785 uint16_t upper;
786 } l;
787 } tmp;
788 tmp.d = env->fpregs[i].d;
789 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
790 i, tmp.l.lower, tmp.l.upper);
791 #else
792 cpu_fprintf(f, "FPR%d=%016" PRIx64,
793 i, env->fpregs[i].mmx.q);
794 #endif
795 if ((i & 1) == 1)
796 cpu_fprintf(f, "\n");
797 else
798 cpu_fprintf(f, " ");
800 if (env->hflags & HF_CS64_MASK)
801 nb = 16;
802 else
803 nb = 8;
804 for(i=0;i<nb;i++) {
805 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
807 env->xmm_regs[i].XMM_L(3),
808 env->xmm_regs[i].XMM_L(2),
809 env->xmm_regs[i].XMM_L(1),
810 env->xmm_regs[i].XMM_L(0));
811 if ((i & 1) == 1)
812 cpu_fprintf(f, "\n");
813 else
814 cpu_fprintf(f, " ");
819 /***********************************************************/
820 /* x86 mmu */
821 /* XXX: add PGE support */
823 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
825 a20_state = (a20_state != 0);
826 if (a20_state != ((env->a20_mask >> 20) & 1)) {
827 #if defined(DEBUG_MMU)
828 printf("A20 update: a20=%d\n", a20_state);
829 #endif
830 /* if the cpu is currently executing code, we must unlink it and
831 all the potentially executing TB */
832 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
834 /* when a20 is changed, all the MMU mappings are invalid, so
835 we must flush everything */
836 tlb_flush(env, 1);
837 env->a20_mask = (~0x100000) | (a20_state << 20);
841 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
843 int pe_state;
845 #if defined(DEBUG_MMU)
846 printf("CR0 update: CR0=0x%08x\n", new_cr0);
847 #endif
848 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
849 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
850 tlb_flush(env, 1);
853 #ifdef TARGET_X86_64
854 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
855 (env->efer & MSR_EFER_LME)) {
856 /* enter in long mode */
857 /* XXX: generate an exception */
858 if (!(env->cr[4] & CR4_PAE_MASK))
859 return;
860 env->efer |= MSR_EFER_LMA;
861 env->hflags |= HF_LMA_MASK;
862 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
863 (env->efer & MSR_EFER_LMA)) {
864 /* exit long mode */
865 env->efer &= ~MSR_EFER_LMA;
866 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
867 env->eip &= 0xffffffff;
869 #endif
870 env->cr[0] = new_cr0 | CR0_ET_MASK;
872 /* update PE flag in hidden flags */
873 pe_state = (env->cr[0] & CR0_PE_MASK);
874 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
875 /* ensure that ADDSEG is always set in real mode */
876 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
877 /* update FPU flags */
878 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
879 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
882 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
883 the PDPT */
884 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
886 env->cr[3] = new_cr3;
887 if (env->cr[0] & CR0_PG_MASK) {
888 #if defined(DEBUG_MMU)
889 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
890 #endif
891 tlb_flush(env, 0);
895 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
897 #if defined(DEBUG_MMU)
898 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
899 #endif
900 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
901 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
902 tlb_flush(env, 1);
904 /* SSE handling */
905 if (!(env->cpuid_features & CPUID_SSE))
906 new_cr4 &= ~CR4_OSFXSR_MASK;
907 if (new_cr4 & CR4_OSFXSR_MASK)
908 env->hflags |= HF_OSFXSR_MASK;
909 else
910 env->hflags &= ~HF_OSFXSR_MASK;
912 env->cr[4] = new_cr4;
915 #if defined(CONFIG_USER_ONLY)
917 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
918 int is_write, int mmu_idx, int is_softmmu)
920 /* user mode only emulation */
921 is_write &= 1;
922 env->cr[2] = addr;
923 env->error_code = (is_write << PG_ERROR_W_BIT);
924 env->error_code |= PG_ERROR_U_MASK;
925 env->exception_index = EXCP0E_PAGE;
926 return 1;
929 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
931 return addr;
934 #else
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
/* Mask applied to page-table entries to extract the physical frame
   address during the page walk below (40-bit for x86_64, 36-bit for
   PAE i386, 32-bit under kqemu). */
#if defined(CONFIG_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
948 /* return value:
949 -1 = cannot handle fault
950 0 = nothing more to do
951 1 = generate PF fault
952 2 = soft MMU activation required for this block
954 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
955 int is_write1, int mmu_idx, int is_softmmu)
957 uint64_t ptep, pte;
958 target_ulong pde_addr, pte_addr;
959 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
960 target_phys_addr_t paddr;
961 uint32_t page_offset;
962 target_ulong vaddr, virt_addr;
964 is_user = mmu_idx == MMU_USER_IDX;
965 #if defined(DEBUG_MMU)
966 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
967 addr, is_write1, is_user, env->eip);
968 #endif
969 is_write = is_write1 & 1;
971 if (!(env->cr[0] & CR0_PG_MASK)) {
972 pte = addr;
973 virt_addr = addr & TARGET_PAGE_MASK;
974 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
975 page_size = 4096;
976 goto do_mapping;
979 if (env->cr[4] & CR4_PAE_MASK) {
980 uint64_t pde, pdpe;
981 target_ulong pdpe_addr;
983 #ifdef TARGET_X86_64
984 if (env->hflags & HF_LMA_MASK) {
985 uint64_t pml4e_addr, pml4e;
986 int32_t sext;
988 /* test virtual address sign extension */
989 sext = (int64_t)addr >> 47;
990 if (sext != 0 && sext != -1) {
991 env->error_code = 0;
992 env->exception_index = EXCP0D_GPF;
993 return 1;
996 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
997 env->a20_mask;
998 pml4e = ldq_phys(pml4e_addr);
999 if (!(pml4e & PG_PRESENT_MASK)) {
1000 error_code = 0;
1001 goto do_fault;
1003 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
1004 error_code = PG_ERROR_RSVD_MASK;
1005 goto do_fault;
1007 if (!(pml4e & PG_ACCESSED_MASK)) {
1008 pml4e |= PG_ACCESSED_MASK;
1009 stl_phys_notdirty(pml4e_addr, pml4e);
1011 ptep = pml4e ^ PG_NX_MASK;
1012 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
1013 env->a20_mask;
1014 pdpe = ldq_phys(pdpe_addr);
1015 if (!(pdpe & PG_PRESENT_MASK)) {
1016 error_code = 0;
1017 goto do_fault;
1019 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
1020 error_code = PG_ERROR_RSVD_MASK;
1021 goto do_fault;
1023 ptep &= pdpe ^ PG_NX_MASK;
1024 if (!(pdpe & PG_ACCESSED_MASK)) {
1025 pdpe |= PG_ACCESSED_MASK;
1026 stl_phys_notdirty(pdpe_addr, pdpe);
1028 } else
1029 #endif
1031 /* XXX: load them when cr3 is loaded ? */
1032 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1033 env->a20_mask;
1034 pdpe = ldq_phys(pdpe_addr);
1035 if (!(pdpe & PG_PRESENT_MASK)) {
1036 error_code = 0;
1037 goto do_fault;
1039 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
1042 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
1043 env->a20_mask;
1044 pde = ldq_phys(pde_addr);
1045 if (!(pde & PG_PRESENT_MASK)) {
1046 error_code = 0;
1047 goto do_fault;
1049 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1050 error_code = PG_ERROR_RSVD_MASK;
1051 goto do_fault;
1053 ptep &= pde ^ PG_NX_MASK;
1054 if (pde & PG_PSE_MASK) {
1055 /* 2 MB page */
1056 page_size = 2048 * 1024;
1057 ptep ^= PG_NX_MASK;
1058 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1059 goto do_fault_protect;
1060 if (is_user) {
1061 if (!(ptep & PG_USER_MASK))
1062 goto do_fault_protect;
1063 if (is_write && !(ptep & PG_RW_MASK))
1064 goto do_fault_protect;
1065 } else {
1066 if ((env->cr[0] & CR0_WP_MASK) &&
1067 is_write && !(ptep & PG_RW_MASK))
1068 goto do_fault_protect;
1070 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1071 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1072 pde |= PG_ACCESSED_MASK;
1073 if (is_dirty)
1074 pde |= PG_DIRTY_MASK;
1075 stl_phys_notdirty(pde_addr, pde);
1077 /* align to page_size */
1078 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1079 virt_addr = addr & ~(page_size - 1);
1080 } else {
1081 /* 4 KB page */
1082 if (!(pde & PG_ACCESSED_MASK)) {
1083 pde |= PG_ACCESSED_MASK;
1084 stl_phys_notdirty(pde_addr, pde);
1086 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1087 env->a20_mask;
1088 pte = ldq_phys(pte_addr);
1089 if (!(pte & PG_PRESENT_MASK)) {
1090 error_code = 0;
1091 goto do_fault;
1093 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1094 error_code = PG_ERROR_RSVD_MASK;
1095 goto do_fault;
1097 /* combine pde and pte nx, user and rw protections */
1098 ptep &= pte ^ PG_NX_MASK;
1099 ptep ^= PG_NX_MASK;
1100 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1101 goto do_fault_protect;
1102 if (is_user) {
1103 if (!(ptep & PG_USER_MASK))
1104 goto do_fault_protect;
1105 if (is_write && !(ptep & PG_RW_MASK))
1106 goto do_fault_protect;
1107 } else {
1108 if ((env->cr[0] & CR0_WP_MASK) &&
1109 is_write && !(ptep & PG_RW_MASK))
1110 goto do_fault_protect;
1112 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1113 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1114 pte |= PG_ACCESSED_MASK;
1115 if (is_dirty)
1116 pte |= PG_DIRTY_MASK;
1117 stl_phys_notdirty(pte_addr, pte);
1119 page_size = 4096;
1120 virt_addr = addr & ~0xfff;
1121 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1123 } else {
1124 uint32_t pde;
1126 /* page directory entry */
1127 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1128 env->a20_mask;
1129 pde = ldl_phys(pde_addr);
1130 if (!(pde & PG_PRESENT_MASK)) {
1131 error_code = 0;
1132 goto do_fault;
1134 /* if PSE bit is set, then we use a 4MB page */
1135 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1136 page_size = 4096 * 1024;
1137 if (is_user) {
1138 if (!(pde & PG_USER_MASK))
1139 goto do_fault_protect;
1140 if (is_write && !(pde & PG_RW_MASK))
1141 goto do_fault_protect;
1142 } else {
1143 if ((env->cr[0] & CR0_WP_MASK) &&
1144 is_write && !(pde & PG_RW_MASK))
1145 goto do_fault_protect;
1147 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1148 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1149 pde |= PG_ACCESSED_MASK;
1150 if (is_dirty)
1151 pde |= PG_DIRTY_MASK;
1152 stl_phys_notdirty(pde_addr, pde);
1155 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1156 ptep = pte;
1157 virt_addr = addr & ~(page_size - 1);
1158 } else {
1159 if (!(pde & PG_ACCESSED_MASK)) {
1160 pde |= PG_ACCESSED_MASK;
1161 stl_phys_notdirty(pde_addr, pde);
1164 /* page directory entry */
1165 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1166 env->a20_mask;
1167 pte = ldl_phys(pte_addr);
1168 if (!(pte & PG_PRESENT_MASK)) {
1169 error_code = 0;
1170 goto do_fault;
1172 /* combine pde and pte user and rw protections */
1173 ptep = pte & pde;
1174 if (is_user) {
1175 if (!(ptep & PG_USER_MASK))
1176 goto do_fault_protect;
1177 if (is_write && !(ptep & PG_RW_MASK))
1178 goto do_fault_protect;
1179 } else {
1180 if ((env->cr[0] & CR0_WP_MASK) &&
1181 is_write && !(ptep & PG_RW_MASK))
1182 goto do_fault_protect;
1184 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1185 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1186 pte |= PG_ACCESSED_MASK;
1187 if (is_dirty)
1188 pte |= PG_DIRTY_MASK;
1189 stl_phys_notdirty(pte_addr, pte);
1191 page_size = 4096;
1192 virt_addr = addr & ~0xfff;
1195 /* the page can be put in the TLB */
1196 prot = PAGE_READ;
1197 if (!(ptep & PG_NX_MASK))
1198 prot |= PAGE_EXEC;
1199 if (pte & PG_DIRTY_MASK) {
1200 /* only set write access if already dirty... otherwise wait
1201 for dirty access */
1202 if (is_user) {
1203 if (ptep & PG_RW_MASK)
1204 prot |= PAGE_WRITE;
1205 } else {
1206 if (!(env->cr[0] & CR0_WP_MASK) ||
1207 (ptep & PG_RW_MASK))
1208 prot |= PAGE_WRITE;
1211 do_mapping:
1212 pte = pte & env->a20_mask;
1214 /* Even if 4MB pages, we map only one 4KB page in the cache to
1215 avoid filling it too fast */
1216 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1217 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1218 vaddr = virt_addr + page_offset;
1220 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1221 return ret;
1222 do_fault_protect:
1223 error_code = PG_ERROR_P_MASK;
1224 do_fault:
1225 error_code |= (is_write << PG_ERROR_W_BIT);
1226 if (is_user)
1227 error_code |= PG_ERROR_U_MASK;
1228 if (is_write1 == 2 &&
1229 (env->efer & MSR_EFER_NXE) &&
1230 (env->cr[4] & CR4_PAE_MASK))
1231 error_code |= PG_ERROR_I_D_MASK;
1232 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1233 /* cr2 is not modified in case of exceptions */
1234 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1235 addr);
1236 } else {
1237 env->cr[2] = addr;
1239 env->error_code = error_code;
1240 env->exception_index = EXCP0E_PAGE;
1241 return 1;
/* Debug helper: translate virtual address 'addr' to a physical address
 * by walking the guest page tables WITHOUT side effects — no
 * accessed/dirty bits are set and no page fault is raised.
 * Returns the physical address, or -1 if the page is not present.
 * Handles all three paging modes: 64-bit 4-level, 32-bit PAE, and
 * legacy 2-level (including 4MB PSE pages and paging disabled). */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* 32-bit PAE: CR3 points at the 4-entry PDPT (32-byte
               aligned); bits 31:30 of the address select the entry */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        /* for the 2MB case this re-checks the present bit kept in pde's
           low bits; harmless since it was already verified above */
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: physical == virtual */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1338 void hw_breakpoint_insert(CPUState *env, int index)
1340 int type, err = 0;
1342 switch (hw_breakpoint_type(env->dr[7], index)) {
1343 case 0:
1344 if (hw_breakpoint_enabled(env->dr[7], index))
1345 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1346 &env->cpu_breakpoint[index]);
1347 break;
1348 case 1:
1349 type = BP_CPU | BP_MEM_WRITE;
1350 goto insert_wp;
1351 case 2:
1352 /* No support for I/O watchpoints yet */
1353 break;
1354 case 3:
1355 type = BP_CPU | BP_MEM_ACCESS;
1356 insert_wp:
1357 err = cpu_watchpoint_insert(env, env->dr[index],
1358 hw_breakpoint_len(env->dr[7], index),
1359 type, &env->cpu_watchpoint[index]);
1360 break;
1362 if (err)
1363 env->cpu_breakpoint[index] = NULL;
1366 void hw_breakpoint_remove(CPUState *env, int index)
1368 if (!env->cpu_breakpoint[index])
1369 return;
1370 switch (hw_breakpoint_type(env->dr[7], index)) {
1371 case 0:
1372 if (hw_breakpoint_enabled(env->dr[7], index))
1373 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1374 break;
1375 case 1:
1376 case 3:
1377 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1378 break;
1379 case 2:
1380 /* No support for I/O watchpoints yet */
1381 break;
1385 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1387 target_ulong dr6;
1388 int reg, type;
1389 int hit_enabled = 0;
1391 dr6 = env->dr[6] & ~0xf;
1392 for (reg = 0; reg < 4; reg++) {
1393 type = hw_breakpoint_type(env->dr[7], reg);
1394 if ((type == 0 && env->dr[reg] == env->eip) ||
1395 ((type & 1) && env->cpu_watchpoint[reg] &&
1396 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1397 dr6 |= 1 << reg;
1398 if (hw_breakpoint_enabled(env->dr[7], reg))
1399 hit_enabled = 1;
1402 if (hit_enabled || force_dr6_update)
1403 env->dr[6] = dr6;
1404 return hit_enabled;
1407 static CPUDebugExcpHandler *prev_debug_excp_handler;
1409 void raise_exception(int exception_index);
/* Debug exception hook (installed via cpu_set_debug_excp_handler() in
 * cpu_x86_init()).  Translates QEMU breakpoint/watchpoint hits that
 * originate from the guest's debug registers (flagged BP_CPU) into the
 * architectural #DB exception, then chains to the previously installed
 * handler. */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* A data watchpoint fired.  Only BP_CPU watchpoints (mirrors of
           DR0-DR3) are handled here; others are left to the chained
           handler. */
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                /* the matching slot is not enabled in DR7: restart the
                   interrupted instruction without raising #DB */
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* Instruction breakpoint: look for a breakpoint at the current
           EIP and raise #DB if it comes from the debug registers. */
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1436 #endif /* !CONFIG_USER_ONLY */
/* Execute the CPUID instruction on the HOST CPU for leaf 'function' /
 * sub-leaf 'count' and store the results.  Any of eax/ebx/ecx/edx may
 * be NULL if the caller does not need that register.  Only compiled in
 * for KVM-capable builds; otherwise the function is a no-op and the
 * output locations are left untouched. */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM) || defined(USE_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit: save/restore all GPRs around CPUID and store the results
       through %esi — presumably because %ebx may be reserved as the PIC
       register and cannot appear in the clobber list (NOTE(review):
       confirm). */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    /* copy out only the registers the caller asked for */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
/* Emulate the CPUID instruction for the guest: fill eax/ebx/ecx/edx
 * with the values for leaf 'index' (and sub-leaf 'count' where it
 * matters, e.g. leaf 4).  When KVM is enabled, some leaves are adjusted
 * to reflect or mask host capabilities. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached; out-of-range leaves fall back to
       the highest basic leaf, mimicking real CPU behaviour */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string and maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual CPU, and say goodbye to migration between
         * different vendors if you use compatibility mode. */
        if (kvm_enabled())
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* family/model/stepping and feature flags */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;

        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility; sub-leaf selected
           by 'count' */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* maximum extended leaf and vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended feature flags */
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            if (!kvm_nested)
                *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(CONFIG_KQEMU)
            *eax = 0x00003020;	/* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;	/* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(CONFIG_KQEMU)
            *eax = 0x00000020;	/* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM capabilities */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
1675 CPUX86State *cpu_x86_init(const char *cpu_model)
1677 CPUX86State *env;
1678 static int inited;
1680 env = qemu_mallocz(sizeof(CPUX86State));
1681 cpu_exec_init(env);
1682 env->cpu_model_str = cpu_model;
1684 /* init various static tables */
1685 if (!inited) {
1686 inited = 1;
1687 optimize_flags_init();
1688 #ifndef CONFIG_USER_ONLY
1689 prev_debug_excp_handler =
1690 cpu_set_debug_excp_handler(breakpoint_handler);
1691 #endif
1693 if (cpu_x86_register(env, cpu_model) < 0) {
1694 cpu_x86_close(env);
1695 return NULL;
1697 cpu_reset(env);
1698 #ifdef CONFIG_KQEMU
1699 kqemu_init(env);
1700 #endif
1702 return env;