/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */

#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"

//#define DEBUG_MMU

static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    for (i = 0; i < 32; i++)
        if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext_feature_name[i] && !strcmp(flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext2_feature_name[i] && !strcmp(flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext3_feature_name[i] && !strcmp(flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
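
/* The four bitmaps above correspond to CPUID leaves (descriptive note):
   feature_name indexes bits of leaf 1 EDX, ext_feature_name leaf 1 ECX,
   ext2_feature_name leaf 0x80000001 EDX and ext3_feature_name leaf
   0x80000001 ECX. For example, an illustrative "-cpu qemu64,+ssse3"
   command line sets bit 9 ("ssse3") of the plus_ext_features bitmap in
   cpu_x86_find_by_name() below. */
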
typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
           CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
           CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
           CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
           CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
           CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
           CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
        /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
         * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
        /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
         * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};
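
/* cpu_x86_find_by_name() parses "-cpu" model strings of the form
   "<name>[,<option>...]", e.g. the illustrative string
   "qemu64,+ssse3,-nx,family=15": '+' and '-' flags accumulate into the
   plus_/minus_ bitmaps and are applied to the chosen template only after
   all options have been parsed. */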
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
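                /* The CPUID vendor string is returned four bytes per
                   register in EBX, EDX, ECX order, least significant
                   byte first; e.g. "AuthenticAMD" packs as
                   vendor1 = "Auth", vendor2 = "enti", vendor3 = "cAMD"
                   (cf. the athlon entry above). */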
                for (i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}

void x86_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}

static int cpu_x86_register(CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
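    /* CPUID leaf 1 EAX layout (architectural): stepping in bits 3..0,
       model in bits 7..4, family in bits 11..8, extended model in bits
       19..16, extended family in bits 27..20; families above 0x0f must
       be encoded as family 0xf plus an extended-family offset, which is
       what the code below computes. */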
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
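    /* Pack the 48-character model string returned by CPUID leaves
       0x80000002..0x80000004, four characters per 32-bit word (consumed
       in cpu_x86_cpuid() below). */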
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for (i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
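
    /* Architected reset state: CS.base = 0xffff0000 together with
       EIP = 0xfff0 puts the first instruction fetch at the reset vector,
       physical address 0xfffffff0, just below 4 GB. */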
    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for (i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        for (i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, (uint32_t)env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n",
                    (uint32_t)env->dr[6], (uint32_t)env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }
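
    /* The emulated hardware page walk follows. Long mode uses four levels
       (PML4E -> PDPE -> PDE -> PTE), indexed by addr bits [47:39], [38:30],
       [29:21] and [20:12]; 32-bit PAE starts instead at the 4-entry PDPT
       (addr bits [31:30]); legacy paging uses two 10-bit levels
       (PDE -> PTE). Accessed and dirty bits are updated during the walk,
       as real hardware does. */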
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even with 4MB pages, we map only one 4KB page in the TLB to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
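
/* Unlike cpu_x86_handle_mmu_fault() above, this debug helper performs a
   read-only walk: it never sets accessed/dirty bits and reports unmapped
   addresses by returning -1 rather than raising a fault. */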
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;
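
    /* DR7 R/Wn breakpoint-type field (x86 debug architecture):
       0 = instruction execution, 1 = data write,
       2 = I/O access (not supported here), 3 = data read or write. */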
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception(int exception_index);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
#endif /* !CONFIG_USER_ONLY */
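
/* host_cpuid() queries the host CPU directly so that, under KVM, guests
   can be shown host-accurate values. In the 32-bit variant below, all
   registers are saved around CPUID and the results are stored through
   the vec pointer instead of declaring EBX as clobbered, presumably
   because EBX may be reserved as the PIC register on i386. */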
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}

void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch (index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual CPU, and say goodbye to migration between
         * different vendors if you use compatibility mode. */
        if (kvm_enabled())
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;

        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
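        /* Decoding of the constant leaf-4 values below (descriptive):
           EAX[4:0] = cache type (1 data, 2 instruction, 3 unified),
           EAX[7:5] = cache level; EBX packs (ways - 1), (partitions - 1)
           and (line size - 1); ECX = (number of sets) - 1. */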
        switch (count) {
        case 0: /* L1 dcache info */
            *eax = 0x0000121;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 1: /* L1 icache info */
            *eax = 0x0000122;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 2: /* L2 cache info */
            *eax = 0x0000143;
            *ebx = 0x3c0003f;
            *ecx = 0x0000fff;
            *edx = 0x0000001;
            break;
        default: /* end of info */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
#else
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            *eax = 0x00000020; /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    if (kvm_enabled())
        kvm_init_vcpu(env);
    return env;
}