/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"
//#define DEBUG_MMU

/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * about feature names, the Linux name is used. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};

static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features,
                                    uint32_t *kvm_features)
{
    int i;
    int found = 0;

    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (kvm_feature_name[i] && !strcmp (flagname, kvm_feature_name[i])) {
            *kvm_features |= 1 << i;
            found = 1;
        }

    if (!found) {
        fprintf(stderr, "CPU feature %s not found\n", flagname);
    }
}
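/* Illustrative usage sketch (not part of the build; the variable names here
 * are hypothetical).  A flag name is looked up in all five tables, so a
 * single call can only ever set a bit in the bitmap whose table actually
 * contains the name:
 *
 *     uint32_t f = 0, ext = 0, ext2 = 0, ext3 = 0, kvm = 0;
 *     add_flagname_to_bitmaps("sse2", &f, &ext, &ext2, &ext3, &kvm);
 *     // "sse2" is entry 26 of feature_name[], so f == (1 << 26)
 *     // and the other four bitmaps stay 0.
 */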
typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features, kvm_features;
    uint32_t xlevel;
    char model_id[48];
    int vendor_override;
} x86_def_t;
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 4,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
            /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
    {
        .name = "kvm64",
        .level = 5,
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        .family = 15,
        .model = 6,
        .stepping = 1,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .ext3_features = 0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
#endif
    {
        .name = "qemu32",
        .level = 4,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};
static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
                       uint32_t *ebx, uint32_t *ecx, uint32_t *edx);

static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}
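/* Worked example of the CPUID leaf 1 EAX decoding above (illustrative;
 * 0x000106a5 is a plausible host signature, not a value this code produces):
 *
 *     eax      = 0x000106a5
 *     stepping = eax & 0x0f                                    = 5
 *     model    = ((eax >> 4) & 0x0f) | ((eax & 0xf0000) >> 12) = 0x0a | 0x10 = 26
 *     family   = ((eax >> 8) & 0x0f) + ((eax >> 20) & 0xff)    = 6 + 0 = 6
 */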
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0, plus_kvm_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0, minus_kvm_features = 0;
    uint32_t numvalue;

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (kvm_enabled() && strcmp(name, "host") == 0) {
        cpu_x86_fill_host(x86_cpu_def);
    } else if (!def) {
        goto error;
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    plus_kvm_features = ~0; /* not supported bits will be filtered out later */

    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
        &plus_kvm_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features, &plus_kvm_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features, &minus_kvm_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = numvalue;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = numvalue;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = numvalue;
            } else if (!strcmp(featurestr, "level")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->level = numvalue;
            } else if (!strcmp(featurestr, "xlevel")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                if (numvalue < 0x80000000) {
                    numvalue += 0x80000000;
                }
                x86_cpu_def->xlevel = numvalue;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->kvm_features |= plus_kvm_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    x86_cpu_def->kvm_features &= ~minus_kvm_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
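/* Example cpu_model strings accepted by the parser above (illustrative):
 *
 *     "qemu64"                      - plain model lookup in x86_defs[]
 *     "qemu64,+sse3,-nx"            - set/clear individual feature flags
 *     "pentium3,family=6,model=8"   - override numeric CPUID fields
 *     "host"                        - copy the host CPUID (KVM only)
 *
 * Anything after the model name must be "+flag", "-flag" or "key=value";
 * otherwise the function prints an error and returns -1.
 */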
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_kvm_features = def->kvm_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
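/* Worked example of the cpuid_version packing above, using the "phenom"
 * definition (family 16, model 2, stepping 3):
 *
 *     family > 0x0f, so version = 0xf00 | ((16 - 0x0f) << 20) = 0x100f00
 *     version |= ((2 & 0xf) << 4) | ((2 >> 4) << 16)          -> 0x100f20
 *     version |= 3                                            -> 0x100f23
 *
 * i.e. base family 0xf plus extended family 1, which is how real AMD
 * family-16h parts report themselves in CPUID leaf 1 EAX.
 */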
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

    env->mcg_status = 0;
}
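/* Note on the reset state above: CS is loaded with selector 0xf000 and
 * base 0xffff0000, and EIP with 0xfff0, so the first instruction is
 * fetched from 0xffff0000 + 0xfff0 = 0xfffffff0, the architectural
 * reset vector, exactly as on real hardware.
 */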
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

/***********************************************************/
/* x86 debug */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                    [(sc->flags & DESC_TYPE_MASK)
                                     >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
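/* Illustrative output of the function above for a flat 32-bit code segment
 * (selector 0x0008, base 0, 4 GB limit, raw flags 0x00c09b00):
 *
 *     CS =0008 00000000 ffffffff 00c09b00 DPL=0 CS32 [-RA]
 *
 * DESC_B set and DESC_L clear give "CS32"; descriptor type 0xb decodes as
 * non-conforming ('-'), readable ('R'), accessed ('A').
 */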
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
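/* The last statement above relies on CR0.MP, CR0.EM and CR0.TS being three
 * consecutive CR0 bits (1, 2 and 3) and HF_MP_MASK/HF_EM_MASK/HF_TS_MASK
 * being three consecutive hflags bits: one shift by (HF_MP_SHIFT - 1)
 * moves all three CR0 bits into place at once, and the mask discards
 * everything else.
 */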
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
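/* Index extraction used by the long-mode walk above, for a 48-bit virtual
 * address (each table holds 512 eight-byte entries, hence the "& 0x1ff"
 * and "<< 3"):
 *
 *     PML4 index  = (addr >> 39) & 0x1ff     bits 47..39
 *     PDPT index  = (addr >> 30) & 0x1ff     bits 38..30
 *     PD   index  = (addr >> 21) & 0x1ff     bits 29..21
 *     PT   index  = (addr >> 12) & 0x1ff     bits 20..12
 *     page offset = addr & 0xfff             bits 11..0
 */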
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
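/* For reference (architectural DR7 layout assumed by the helpers above):
 * bits 0..7 hold the L/G enable pairs for the four breakpoints, and each
 * breakpoint i has a 2-bit R/W type field at bit 16 + 4*i (0 = execute,
 * 1 = write, 2 = I/O, 3 = read/write) with a 2-bit LEN field just above
 * it.  That is why type 0 is matched against EIP while types 1 and 3 are
 * backed by watchpoints.
 */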
static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception(int exception_index);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injecting MCE exception while a previous "
                    "one is still in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER;
}
#endif /* !CONFIG_USER_ONLY */
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank, bank_num;

    if (((cenv->cpuid_version >> 8)&0xf) >= 6
        && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        bank_num = MCE_BANKS_DEF;
        for (bank = 0; bank < bank_num; bank++)
            cenv->mce_banks[bank*4] = ~(uint64_t)0;
    }
}
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
                             uint32_t *ecx, uint32_t *edx)
{
    *ebx = env->cpuid_vendor1;
    *edx = env->cpuid_vendor2;
    *ecx = env->cpuid_vendor3;

    /* sysenter isn't supported on compatibility mode on AMD, syscall
     * isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual cpu vendor, but you can override
     * this if you want to use KVM's sysenter/syscall emulation
     * in compatibility mode and when doing cross vendor migration
     */
    if (kvm_enabled() && env->cpuid_vendor_override) {
        host_cpuid(0, 0, NULL, ebx, ecx, edx);
    }
}
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        get_cpuid_vendor(env, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        if (env->nr_cores * env->nr_threads > 1) {
            *ebx |= (env->nr_cores * env->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (env->nr_cores > 1) {
            *eax = (env->nr_cores - 1) << 26;
        } else {
            *eax = 0;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            *eax |= 0x0000121;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 1: /* L1 icache info */
            *eax |= 0x0000122;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 2: /* L2 cache info */
            *eax |= 0x0000143;
            if (env->nr_threads > 1) {
                *eax |= (env->nr_threads - 1) << 14;
            }
            *ebx = 0x3c0003f;
            *ecx = 0x0000fff;
            *edx = 0x0000001;
            break;
        default: /* end of info */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (env->nr_cores * env->nr_threads > 1) {
            uint32_t tebx, tecx, tedx;
            get_cpuid_vendor(env, &tebx, &tecx, &tedx);
            if (tebx != CPUID_VENDOR_INTEL_1 ||
                tedx != CPUID_VENDOR_INTEL_2 ||
                tecx != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in upstream QEMU */
            *ecx &= ~CPUID_EXT3_SVM;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;    /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (env->nr_cores * env->nr_threads > 1) {
            *ecx |= (env->nr_cores * env->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
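/* Worked example of the leaf 1 EBX packing above (illustrative): for
 * APIC id 1 and 2 cores x 2 threads,
 *
 *     ebx = (1 << 24) | (4 << 16) | (8 << 8) = 0x01040800
 *
 * i.e. initial APIC ID 1, 4 logical processors per package, and a
 * CLFLUSH line size of 8 quadwords (64 bytes).
 */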
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
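/* Worked example of the descriptor reassembly above (illustrative): for a
 * flat 4 GB data segment with raw descriptor words e1 = 0x0000ffff and
 * e2 = 0x00cf9300:
 *
 *     base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000) = 0
 *     limit = (e1 & 0xffff) | (e2 & 0x000f0000)                    = 0xfffff
 *     DESC_G is set, so limit becomes (0xfffff << 12) | 0xfff      = 0xffffffff
 */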
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif