[qemu/opensuse.git] / target-i386 / cpu.c
blob d4f2e65cd91ed7a2e8a24691c7d795230b50ebd8
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "kvm.h"
27 #include "qemu-option.h"
28 #include "qemu-config.h"
30 #include "qapi/qapi-visit-core.h"
31 #include "arch_init.h"
33 #include "hyperv.h"
35 #include "hw/hw.h"
36 #if defined(CONFIG_KVM)
37 #include <linux/kvm_para.h>
38 #endif
40 /* feature flags taken from "Intel Processor Identification and the CPUID
41 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
42 * between feature naming conventions, aliases may be added.
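/* Illustrative note: an entry such as "sse4.1|sse4_1" below means either
 * spelling is accepted by the feature parser, so "-cpu qemu64,+sse4.1" and
 * "-cpu qemu64,+sse4_1" set the same CPUID bit (see altcmp() further down). */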
44 static const char *feature_name[] = {
45 "fpu", "vme", "de", "pse",
46 "tsc", "msr", "pae", "mce",
47 "cx8", "apic", NULL, "sep",
48 "mtrr", "pge", "mca", "cmov",
49 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
50 NULL, "ds" /* Intel dts */, "acpi", "mmx",
51 "fxsr", "sse", "sse2", "ss",
52 "ht" /* Intel htt */, "tm", "ia64", "pbe",
54 static const char *ext_feature_name[] = {
55 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
56 "ds_cpl", "vmx", "smx", "est",
57 "tm2", "ssse3", "cid", NULL,
58 "fma", "cx16", "xtpr", "pdcm",
59 NULL, "pcid", "dca", "sse4.1|sse4_1",
60 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
61 "tsc-deadline", "aes", "xsave", "osxsave",
62 "avx", NULL, NULL, "hypervisor",
64 /* Feature names that are already defined in feature_name[] but are set in
65  * CPUID[8000_0001].EDX on AMD CPUs don't have their names in
66  * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
67  * if and only if the CPU vendor is AMD.
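/* See the CPUID_EXT2_AMD_ALIASES handling in cpu_x86_register() below. */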
69 static const char *ext2_feature_name[] = {
70 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
71 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
72 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
73 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
74 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
75 "nx|xd", NULL, "mmxext", NULL /* mmx */,
76 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
77 NULL, "lm|i64", "3dnowext", "3dnow",
79 static const char *ext3_feature_name[] = {
80 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
81 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
82 "3dnowprefetch", "osvw", "ibs", "xop",
83 "skinit", "wdt", NULL, NULL,
84 "fma4", NULL, "cvt16", "nodeid_msr",
85 NULL, NULL, NULL, NULL,
86 NULL, NULL, NULL, NULL,
87 NULL, NULL, NULL, NULL,
90 static const char *kvm_feature_name[] = {
91 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock", "kvm_asyncpf", NULL, "kvm_pv_eoi", NULL,
92 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
93 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
94 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
97 static const char *svm_feature_name[] = {
98 "npt", "lbrv", "svm_lock", "nrip_save",
99 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
100 NULL, NULL, "pause_filter", NULL,
101 "pfthreshold", NULL, NULL, NULL,
102 NULL, NULL, NULL, NULL,
103 NULL, NULL, NULL, NULL,
104 NULL, NULL, NULL, NULL,
105 NULL, NULL, NULL, NULL,
108 static const char *cpuid_7_0_ebx_feature_name[] = {
109 NULL, NULL, NULL, NULL, NULL, NULL, NULL, "smep",
110 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
111 NULL, NULL, NULL, NULL, "smap", NULL, NULL, NULL,
112 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
115 /* collects per-function cpuid data
117 typedef struct model_features_t {
118 uint32_t *guest_feat;
119 uint32_t *host_feat;
120 uint32_t check_feat;
121 const char **flag_names;
122 uint32_t cpuid;
123 } model_features_t;
125 int check_cpuid = 0;
126 int enforce_cpuid = 0;
128 #if defined(CONFIG_KVM)
129 static uint32_t kvm_default_features = (1 << KVM_FEATURE_CLOCKSOURCE) |
130 (1 << KVM_FEATURE_NOP_IO_DELAY) |
131 (1 << KVM_FEATURE_MMU_OP) |
132 (1 << KVM_FEATURE_CLOCKSOURCE2) |
133 (1 << KVM_FEATURE_ASYNC_PF) |
134 (1 << KVM_FEATURE_STEAL_TIME) |
135 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
136 static const uint32_t kvm_pv_eoi_features = (0x1 << KVM_FEATURE_PV_EOI);
137 #else
138 static uint32_t kvm_default_features = 0;
139 static const uint32_t kvm_pv_eoi_features = 0;
140 #endif
142 void enable_kvm_pv_eoi(void)
144 kvm_default_features |= kvm_pv_eoi_features;
147 void host_cpuid(uint32_t function, uint32_t count,
148 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
150 #if defined(CONFIG_KVM)
151 uint32_t vec[4];
153 #ifdef __x86_64__
154 asm volatile("cpuid"
155 : "=a"(vec[0]), "=b"(vec[1]),
156 "=c"(vec[2]), "=d"(vec[3])
157 : "0"(function), "c"(count) : "cc");
158 #else
159 asm volatile("pusha \n\t"
160 "cpuid \n\t"
161 "mov %%eax, 0(%2) \n\t"
162 "mov %%ebx, 4(%2) \n\t"
163 "mov %%ecx, 8(%2) \n\t"
164 "mov %%edx, 12(%2) \n\t"
165 "popa"
166 : : "a"(function), "c"(count), "S"(vec)
167 : "memory", "cc");
168 #endif
170 if (eax)
171 *eax = vec[0];
172 if (ebx)
173 *ebx = vec[1];
174 if (ecx)
175 *ecx = vec[2];
176 if (edx)
177 *edx = vec[3];
178 #endif
181 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
183 /* general substring compare of *[s1..e1) and *[s2..e2). sx is the start of
184  * a substring; ex, if not NULL, points to the first char after the substring,
185  * otherwise the string is assumed to be sized by a terminating nul.
186 * Return lexical ordering of *s1:*s2.
188 static int sstrcmp(const char *s1, const char *e1, const char *s2,
189 const char *e2)
191 for (;;) {
192 if (!*s1 || !*s2 || *s1 != *s2)
193 return (*s1 - *s2);
194 ++s1, ++s2;
195 if (s1 == e1 && s2 == e2)
196 return (0);
197 else if (s1 == e1)
198 return (*s2);
199 else if (s2 == e2)
200 return (*s1);
204 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
205 * '|' delimited (possibly empty) strings in which case search for a match
206 * within the alternatives proceeds left to right. Return 0 for success,
207 * non-zero otherwise.
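/* Illustrative example: with s = "sse4_1", altcmp(s, s + 6, "sse4.1|sse4_1")
 * fails against the first alternative, matches the second, and returns 0. */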
209 static int altcmp(const char *s, const char *e, const char *altstr)
211 const char *p, *q;
213 for (q = p = altstr; ; ) {
214 while (*p && *p != '|')
215 ++p;
216 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
217 return (0);
218 if (!*p)
219 return (1);
220 else
221 q = ++p;
225 /* search featureset for flag *[s..e), if found set corresponding bit in
226 * *pval and return true, otherwise return false
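/* For example, lookup_feature(&val, "aes", NULL, ext_feature_name) ORs
 * bit 25 (0x02000000, the CPUID.1:ECX AES bit) into val and returns true. */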
228 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
229 const char **featureset)
231 uint32_t mask;
232 const char **ppc;
233 bool found = false;
235 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
236 if (*ppc && !altcmp(s, e, *ppc)) {
237 *pval |= mask;
238 found = true;
241 return found;
244 static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
245 uint32_t *ext_features,
246 uint32_t *ext2_features,
247 uint32_t *ext3_features,
248 uint32_t *kvm_features,
249 uint32_t *svm_features,
250 uint32_t *cpuid_7_0_ebx_features)
252 if (!lookup_feature(features, flagname, NULL, feature_name) &&
253 !lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
254 !lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
255 !lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
256 !lookup_feature(kvm_features, flagname, NULL, kvm_feature_name) &&
257 !lookup_feature(svm_features, flagname, NULL, svm_feature_name) &&
258 !lookup_feature(cpuid_7_0_ebx_features, flagname, NULL,
259 cpuid_7_0_ebx_feature_name))
260 fprintf(stderr, "CPU feature %s not found\n", flagname);
263 typedef struct x86_def_t {
264 struct x86_def_t *next;
265 const char *name;
266 uint32_t level;
267 uint32_t vendor1, vendor2, vendor3;
268 int family;
269 int model;
270 int stepping;
271 int tsc_khz;
272 uint32_t features, ext_features, ext2_features, ext3_features;
273 uint32_t kvm_features, svm_features;
274 uint32_t xlevel;
275 char model_id[48];
276 int vendor_override;
277 /* Store the results of Centaur's CPUID instructions */
278 uint32_t ext4_features;
279 uint32_t xlevel2;
280 /* The feature bits on CPUID[EAX=7,ECX=0].EBX */
281 uint32_t cpuid_7_0_ebx_features;
282 } x86_def_t;
284 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
285 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
286 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
287 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
288 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
289 CPUID_PSE36 | CPUID_FXSR)
290 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
291 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
292 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
293 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
294 CPUID_PAE | CPUID_SEP | CPUID_APIC)
296 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
297 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
298 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
299 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
300 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
301 /* partly implemented:
302 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64)
303 CPUID_PSE36 (needed for Solaris) */
304 /* missing:
305 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
306 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | \
307 CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
308 CPUID_EXT_HYPERVISOR)
309 /* missing:
310 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
311 CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_XSAVE */
312 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
313 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
314 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
315 /* missing:
316 CPUID_EXT2_PDPE1GB */
317 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
318 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
319 #define TCG_SVM_FEATURES 0
320 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP)
322 /* maintains list of cpu model definitions
324 static x86_def_t *x86_defs = {NULL};
326 /* built-in cpu model definitions (deprecated)
328 static x86_def_t builtin_x86_defs[] = {
330 .name = "qemu64",
331 .level = 4,
332 .vendor1 = CPUID_VENDOR_AMD_1,
333 .vendor2 = CPUID_VENDOR_AMD_2,
334 .vendor3 = CPUID_VENDOR_AMD_3,
335 .family = 6,
336 .model = 2,
337 .stepping = 3,
338 .features = PPRO_FEATURES |
339 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
340 CPUID_PSE36,
341 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
342 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
343 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
344 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
345 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
346 .xlevel = 0x8000000A,
349 .name = "phenom",
350 .level = 5,
351 .vendor1 = CPUID_VENDOR_AMD_1,
352 .vendor2 = CPUID_VENDOR_AMD_2,
353 .vendor3 = CPUID_VENDOR_AMD_3,
354 .family = 16,
355 .model = 2,
356 .stepping = 3,
357 .features = PPRO_FEATURES |
358 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
359 CPUID_PSE36 | CPUID_VME | CPUID_HT,
360 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
361 CPUID_EXT_POPCNT,
362 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
363 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
364 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
365 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
366 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
367 CPUID_EXT3_CR8LEG,
368 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
369 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
370 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
371 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
372 .svm_features = CPUID_SVM_NPT | CPUID_SVM_LBRV,
373 .xlevel = 0x8000001A,
374 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
377 .name = "core2duo",
378 .level = 10,
379 .family = 6,
380 .model = 15,
381 .stepping = 11,
382 .features = PPRO_FEATURES |
383 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
384 CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
385 CPUID_HT | CPUID_TM | CPUID_PBE,
386 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
387 CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
388 CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
389 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
390 .ext3_features = CPUID_EXT3_LAHF_LM,
391 .xlevel = 0x80000008,
392 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
395 .name = "kvm64",
396 .level = 5,
397 .vendor1 = CPUID_VENDOR_INTEL_1,
398 .vendor2 = CPUID_VENDOR_INTEL_2,
399 .vendor3 = CPUID_VENDOR_INTEL_3,
400 .family = 15,
401 .model = 6,
402 .stepping = 1,
403 /* Missing: CPUID_VME, CPUID_HT */
404 .features = PPRO_FEATURES |
405 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
406 CPUID_PSE36,
407 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
408 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
409 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
410 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
411 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
412 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
413 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
414 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
415 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
416 .ext3_features = 0,
417 .xlevel = 0x80000008,
418 .model_id = "Common KVM processor"
421 .name = "qemu32",
422 .level = 4,
423 .family = 6,
424 .model = 3,
425 .stepping = 3,
426 .features = PPRO_FEATURES,
427 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
428 .xlevel = 0x80000004,
431 .name = "kvm32",
432 .level = 5,
433 .family = 15,
434 .model = 6,
435 .stepping = 1,
436 .features = PPRO_FEATURES |
437 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
438 .ext_features = CPUID_EXT_SSE3,
439 .ext2_features = PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
440 .ext3_features = 0,
441 .xlevel = 0x80000008,
442 .model_id = "Common 32-bit KVM processor"
445 .name = "coreduo",
446 .level = 10,
447 .family = 6,
448 .model = 14,
449 .stepping = 8,
450 .features = PPRO_FEATURES | CPUID_VME |
451 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
452 CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
453 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
454 CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
455 .ext2_features = CPUID_EXT2_NX,
456 .xlevel = 0x80000008,
457 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
460 .name = "486",
461 .level = 1,
462 .family = 4,
463 .model = 0,
464 .stepping = 0,
465 .features = I486_FEATURES,
466 .xlevel = 0,
469 .name = "pentium",
470 .level = 1,
471 .family = 5,
472 .model = 4,
473 .stepping = 3,
474 .features = PENTIUM_FEATURES,
475 .xlevel = 0,
478 .name = "pentium2",
479 .level = 2,
480 .family = 6,
481 .model = 5,
482 .stepping = 2,
483 .features = PENTIUM2_FEATURES,
484 .xlevel = 0,
487 .name = "pentium3",
488 .level = 2,
489 .family = 6,
490 .model = 7,
491 .stepping = 3,
492 .features = PENTIUM3_FEATURES,
493 .xlevel = 0,
496 .name = "athlon",
497 .level = 2,
498 .vendor1 = CPUID_VENDOR_AMD_1,
499 .vendor2 = CPUID_VENDOR_AMD_2,
500 .vendor3 = CPUID_VENDOR_AMD_3,
501 .family = 6,
502 .model = 2,
503 .stepping = 3,
504 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
505 CPUID_MCA,
506 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
507 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
508 .xlevel = 0x80000008,
511 .name = "n270",
512 /* original is on level 10 */
513 .level = 5,
514 .family = 6,
515 .model = 28,
516 .stepping = 2,
517 .features = PPRO_FEATURES |
518 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
519 CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
520         /* Some CPUs have no CPUID_SEP */
521 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
522 CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR,
523 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
524 CPUID_EXT2_NX,
525 .ext3_features = CPUID_EXT3_LAHF_LM,
526 .xlevel = 0x8000000A,
527 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
530 .name = "Conroe",
531 .level = 2,
532 .vendor1 = CPUID_VENDOR_INTEL_1,
533 .vendor2 = CPUID_VENDOR_INTEL_2,
534 .vendor3 = CPUID_VENDOR_INTEL_3,
535 .family = 6,
536 .model = 2,
537 .stepping = 3,
538 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
539 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
540 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
541 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
542 CPUID_DE | CPUID_FP87,
543 .ext_features = CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
544 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
545 .ext3_features = CPUID_EXT3_LAHF_LM,
546 .xlevel = 0x8000000A,
547 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
550 .name = "Penryn",
551 .level = 2,
552 .vendor1 = CPUID_VENDOR_INTEL_1,
553 .vendor2 = CPUID_VENDOR_INTEL_2,
554 .vendor3 = CPUID_VENDOR_INTEL_3,
555 .family = 6,
556 .model = 2,
557 .stepping = 3,
558 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
559 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
560 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
561 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
562 CPUID_DE | CPUID_FP87,
563 .ext_features = CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
564 CPUID_EXT_SSE3,
565 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
566 .ext3_features = CPUID_EXT3_LAHF_LM,
567 .xlevel = 0x8000000A,
568 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
571 .name = "Nehalem",
572 .level = 2,
573 .vendor1 = CPUID_VENDOR_INTEL_1,
574 .vendor2 = CPUID_VENDOR_INTEL_2,
575 .vendor3 = CPUID_VENDOR_INTEL_3,
576 .family = 6,
577 .model = 2,
578 .stepping = 3,
579 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
580 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
581 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
582 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
583 CPUID_DE | CPUID_FP87,
584 .ext_features = CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
585 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
586 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
587 .ext3_features = CPUID_EXT3_LAHF_LM,
588 .xlevel = 0x8000000A,
589 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
592 .name = "Westmere",
593 .level = 11,
594 .vendor1 = CPUID_VENDOR_INTEL_1,
595 .vendor2 = CPUID_VENDOR_INTEL_2,
596 .vendor3 = CPUID_VENDOR_INTEL_3,
597 .family = 6,
598 .model = 44,
599 .stepping = 1,
600 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
601 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
602 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
603 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
604 CPUID_DE | CPUID_FP87,
605 .ext_features = CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
606 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
607 CPUID_EXT_SSE3,
608 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
609 .ext3_features = CPUID_EXT3_LAHF_LM,
610 .xlevel = 0x8000000A,
611 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
614 .name = "SandyBridge",
615 .level = 0xd,
616 .vendor1 = CPUID_VENDOR_INTEL_1,
617 .vendor2 = CPUID_VENDOR_INTEL_2,
618 .vendor3 = CPUID_VENDOR_INTEL_3,
619 .family = 6,
620 .model = 42,
621 .stepping = 1,
622 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
623 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
624 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
625 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
626 CPUID_DE | CPUID_FP87,
627 .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
628 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
629 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
630 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
631 CPUID_EXT_SSE3,
632 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
633 CPUID_EXT2_SYSCALL,
634 .ext3_features = CPUID_EXT3_LAHF_LM,
635 .xlevel = 0x8000000A,
636 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
639 .name = "Opteron_G1",
640 .level = 5,
641 .vendor1 = CPUID_VENDOR_AMD_1,
642 .vendor2 = CPUID_VENDOR_AMD_2,
643 .vendor3 = CPUID_VENDOR_AMD_3,
644 .family = 15,
645 .model = 6,
646 .stepping = 1,
647 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
648 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
649 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
650 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
651 CPUID_DE | CPUID_FP87,
652 .ext_features = CPUID_EXT_SSE3,
653 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
654 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
655 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
656 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
657 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
658 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
659 .xlevel = 0x80000008,
660 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
663 .name = "Opteron_G2",
664 .level = 5,
665 .vendor1 = CPUID_VENDOR_AMD_1,
666 .vendor2 = CPUID_VENDOR_AMD_2,
667 .vendor3 = CPUID_VENDOR_AMD_3,
668 .family = 15,
669 .model = 6,
670 .stepping = 1,
671 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
672 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
673 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
674 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
675 CPUID_DE | CPUID_FP87,
676 .ext_features = CPUID_EXT_CX16 | CPUID_EXT_SSE3,
677 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
678 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
679 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
680 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
681 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
682 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
683 CPUID_EXT2_DE | CPUID_EXT2_FPU,
684 .ext3_features = CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
685 .xlevel = 0x80000008,
686 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
689 .name = "Opteron_G3",
690 .level = 5,
691 .vendor1 = CPUID_VENDOR_AMD_1,
692 .vendor2 = CPUID_VENDOR_AMD_2,
693 .vendor3 = CPUID_VENDOR_AMD_3,
694 .family = 15,
695 .model = 6,
696 .stepping = 1,
697 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
698 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
699 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
700 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
701 CPUID_DE | CPUID_FP87,
702 .ext_features = CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
703 CPUID_EXT_SSE3,
704 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
705 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
706 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
707 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
708 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
709 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
710 CPUID_EXT2_DE | CPUID_EXT2_FPU,
711 .ext3_features = CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
712 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
713 .xlevel = 0x80000008,
714 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
717 .name = "Opteron_G4",
718 .level = 0xd,
719 .vendor1 = CPUID_VENDOR_AMD_1,
720 .vendor2 = CPUID_VENDOR_AMD_2,
721 .vendor3 = CPUID_VENDOR_AMD_3,
722 .family = 21,
723 .model = 1,
724 .stepping = 2,
725 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
726 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
727 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
728 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
729 CPUID_DE | CPUID_FP87,
730 .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
731 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
732 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
733 CPUID_EXT_SSE3,
734 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
735 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
736 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
737 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
738 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
739 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
740 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
741 .ext3_features = CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
742 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
743 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
744 CPUID_EXT3_LAHF_LM,
745 .xlevel = 0x8000001A,
746 .model_id = "AMD Opteron 62xx class CPU",
750 static int cpu_x86_fill_model_id(char *str)
752 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
753 int i;
755 for (i = 0; i < 3; i++) {
756 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
757 memcpy(str + i * 16 + 0, &eax, 4);
758 memcpy(str + i * 16 + 4, &ebx, 4);
759 memcpy(str + i * 16 + 8, &ecx, 4);
760 memcpy(str + i * 16 + 12, &edx, 4);
762 return 0;
765 static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
767 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
769 x86_cpu_def->name = "host";
770 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
771 x86_cpu_def->level = eax;
772 x86_cpu_def->vendor1 = ebx;
773 x86_cpu_def->vendor2 = edx;
774 x86_cpu_def->vendor3 = ecx;
776 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
777 x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
778 x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
779 x86_cpu_def->stepping = eax & 0x0F;
780 x86_cpu_def->ext_features = ecx;
781 x86_cpu_def->features = edx;
783 if (kvm_enabled() && x86_cpu_def->level >= 7) {
784 x86_cpu_def->cpuid_7_0_ebx_features = kvm_arch_get_supported_cpuid(kvm_state, 0x7, 0, R_EBX);
785 } else {
786 x86_cpu_def->cpuid_7_0_ebx_features = 0;
789 host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
790 x86_cpu_def->xlevel = eax;
792 host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
793 x86_cpu_def->ext2_features = edx;
794 x86_cpu_def->ext3_features = ecx;
795 cpu_x86_fill_model_id(x86_cpu_def->model_id);
796 x86_cpu_def->vendor_override = 0;
798 /* Call Centaur's CPUID instruction. */
799 if (x86_cpu_def->vendor1 == CPUID_VENDOR_VIA_1 &&
800 x86_cpu_def->vendor2 == CPUID_VENDOR_VIA_2 &&
801 x86_cpu_def->vendor3 == CPUID_VENDOR_VIA_3) {
802 host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx);
803 if (eax >= 0xC0000001) {
804 /* Support VIA max extended level */
805 x86_cpu_def->xlevel2 = eax;
806 host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx);
807 x86_cpu_def->ext4_features = edx;
812 * Every SVM feature requires emulation support in KVM - so we can't just
813 * read the host features here. KVM might even support SVM features not
814 * available on the host hardware. Just set all bits and mask out the
815 * unsupported ones later.
817 x86_cpu_def->svm_features = -1;
819 return 0;
822 static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
824 int i;
826 for (i = 0; i < 32; ++i)
827 if (1 << i & mask) {
828 fprintf(stderr, "warning: host cpuid %04x_%04x lacks requested"
829 " flag '%s' [0x%08x]\n",
830 f->cpuid >> 16, f->cpuid & 0xffff,
831 f->flag_names[i] ? f->flag_names[i] : "[reserved]", mask);
832 break;
834 return 0;
837 /* best-effort attempt to inform the user that requested cpu flags aren't
838  * making their way into the guest. Note: ft[].check_feat ideally should be
839  * specified via a guest_def field to suppress reporting of extraneous flags.
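/* For illustration, "-cpu Westmere,+avx,check" on a host without AVX would
 * emit a line of the form:
 *   warning: host cpuid 0000_0001 lacks requested flag 'avx' [0x10000000]
 */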
841 static int check_features_against_host(x86_def_t *guest_def)
843 x86_def_t host_def;
844 uint32_t mask;
845 int rv, i;
846 struct model_features_t ft[] = {
847 {&guest_def->features, &host_def.features,
848 ~0, feature_name, 0x00000000},
849 {&guest_def->ext_features, &host_def.ext_features,
850 ~CPUID_EXT_HYPERVISOR, ext_feature_name, 0x00000001},
851 {&guest_def->ext2_features, &host_def.ext2_features,
852 ~PPRO_FEATURES, ext2_feature_name, 0x80000000},
853 {&guest_def->ext3_features, &host_def.ext3_features,
854 ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};
856 cpu_x86_fill_host(&host_def);
857 for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i)
858 for (mask = 1; mask; mask <<= 1)
859 if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
860 !(*ft[i].host_feat & mask)) {
861 unavailable_host_feature(&ft[i], mask);
862 rv = 1;
864 return rv;
867 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
868 const char *name, Error **errp)
870 X86CPU *cpu = X86_CPU(obj);
871 CPUX86State *env = &cpu->env;
872 int64_t value;
874 value = (env->cpuid_version >> 8) & 0xf;
875 if (value == 0xf) {
876 value += (env->cpuid_version >> 20) & 0xff;
878 visit_type_int(v, &value, name, errp);
881 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
882 const char *name, Error **errp)
884 X86CPU *cpu = X86_CPU(obj);
885 CPUX86State *env = &cpu->env;
886 const int64_t min = 0;
887 const int64_t max = 0xff + 0xf;
888 int64_t value;
890 visit_type_int(v, &value, name, errp);
891 if (error_is_set(errp)) {
892 return;
894 if (value < min || value > max) {
895 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
896 name ? name : "null", value, min, max);
897 return;
900 env->cpuid_version &= ~0xff00f00;
901 if (value > 0x0f) {
902 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
903 } else {
904 env->cpuid_version |= value << 8;
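    /* Worked example: family=21 (0x15) is stored as base family 0xf plus
     * extended family 6, i.e. cpuid_version gets 0xf00 | (6 << 20); the
     * getter above then reports 0xf + 0x6 = 21 again. */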
908 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
909 const char *name, Error **errp)
911 X86CPU *cpu = X86_CPU(obj);
912 CPUX86State *env = &cpu->env;
913 int64_t value;
915 value = (env->cpuid_version >> 4) & 0xf;
916 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
917 visit_type_int(v, &value, name, errp);
920 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
921 const char *name, Error **errp)
923 X86CPU *cpu = X86_CPU(obj);
924 CPUX86State *env = &cpu->env;
925 const int64_t min = 0;
926 const int64_t max = 0xff;
927 int64_t value;
929 visit_type_int(v, &value, name, errp);
930 if (error_is_set(errp)) {
931 return;
933 if (value < min || value > max) {
934 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
935 name ? name : "null", value, min, max);
936 return;
939 env->cpuid_version &= ~0xf00f0;
940 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
943 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
944 void *opaque, const char *name,
945 Error **errp)
947 X86CPU *cpu = X86_CPU(obj);
948 CPUX86State *env = &cpu->env;
949 int64_t value;
951 value = env->cpuid_version & 0xf;
952 visit_type_int(v, &value, name, errp);
955 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
956 void *opaque, const char *name,
957 Error **errp)
959 X86CPU *cpu = X86_CPU(obj);
960 CPUX86State *env = &cpu->env;
961 const int64_t min = 0;
962 const int64_t max = 0xf;
963 int64_t value;
965 visit_type_int(v, &value, name, errp);
966 if (error_is_set(errp)) {
967 return;
969 if (value < min || value > max) {
970 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
971 name ? name : "null", value, min, max);
972 return;
975 env->cpuid_version &= ~0xf;
976 env->cpuid_version |= value & 0xf;
979 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
980 const char *name, Error **errp)
982 X86CPU *cpu = X86_CPU(obj);
984 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
987 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
988 const char *name, Error **errp)
990 X86CPU *cpu = X86_CPU(obj);
992 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
995 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
996 const char *name, Error **errp)
998 X86CPU *cpu = X86_CPU(obj);
1000 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1003 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
1004 const char *name, Error **errp)
1006 X86CPU *cpu = X86_CPU(obj);
1008 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1011 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1013 X86CPU *cpu = X86_CPU(obj);
1014 CPUX86State *env = &cpu->env;
1015 char *value;
1016 int i;
1018 value = (char *)g_malloc(12 + 1);
1019 for (i = 0; i < 4; i++) {
1020 value[i ] = env->cpuid_vendor1 >> (8 * i);
1021 value[i + 4] = env->cpuid_vendor2 >> (8 * i);
1022 value[i + 8] = env->cpuid_vendor3 >> (8 * i);
1024 value[12] = '\0';
1025 return value;
1028 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1029 Error **errp)
1031 X86CPU *cpu = X86_CPU(obj);
1032 CPUX86State *env = &cpu->env;
1033 int i;
1035 if (strlen(value) != 12) {
1036 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1037 "vendor", value);
1038 return;
1041 env->cpuid_vendor1 = 0;
1042 env->cpuid_vendor2 = 0;
1043 env->cpuid_vendor3 = 0;
1044 for (i = 0; i < 4; i++) {
1045 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1046 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1047 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1049 env->cpuid_vendor_override = 1;
1052 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1054 X86CPU *cpu = X86_CPU(obj);
1055 CPUX86State *env = &cpu->env;
1056 char *value;
1057 int i;
1059 value = g_malloc(48 + 1);
1060 for (i = 0; i < 48; i++) {
1061 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1063 value[48] = '\0';
1064 return value;
1067 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1068 Error **errp)
1070 X86CPU *cpu = X86_CPU(obj);
1071 CPUX86State *env = &cpu->env;
1072 int c, len, i;
1074 if (model_id == NULL) {
1075 model_id = "";
1077 len = strlen(model_id);
1078 memset(env->cpuid_model, 0, 48);
1079 for (i = 0; i < 48; i++) {
1080 if (i >= len) {
1081 c = '\0';
1082 } else {
1083 c = (uint8_t)model_id[i];
1085 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1089 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1090 const char *name, Error **errp)
1092 X86CPU *cpu = X86_CPU(obj);
1093 int64_t value;
1095 value = cpu->env.tsc_khz * 1000;
1096 visit_type_int(v, &value, name, errp);
1099 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1100 const char *name, Error **errp)
1102 X86CPU *cpu = X86_CPU(obj);
1103 const int64_t min = 0;
1104 const int64_t max = INT64_MAX;
1105 int64_t value;
1107 visit_type_int(v, &value, name, errp);
1108 if (error_is_set(errp)) {
1109 return;
1111 if (value < min || value > max) {
1112 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1113 name ? name : "null", value, min, max);
1114 return;
1117 cpu->env.tsc_khz = value / 1000;
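/* Parse a -cpu model string of the form
 *   name[,+flag][,-flag][,prop=value][,check][,enforce]...
 * e.g. "Nehalem,+aes,-popcnt,family=6,check" (illustrative), and fill in
 * *x86_cpu_def accordingly. The accepted keys are the ones handled below. */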
1120 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
1122 unsigned int i;
1123 x86_def_t *def;
1125 char *s = g_strdup(cpu_model);
1126 char *featurestr, *name = strtok(s, ",");
1127     /* Features to be added */
1128 uint32_t plus_features = 0, plus_ext_features = 0;
1129 uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
1130 uint32_t plus_kvm_features = kvm_default_features, plus_svm_features = 0;
1131 uint32_t plus_7_0_ebx_features = 0;
1132 /* Features to be removed */
1133 uint32_t minus_features = 0, minus_ext_features = 0;
1134 uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
1135 uint32_t minus_kvm_features = 0, minus_svm_features = 0;
1136 uint32_t minus_7_0_ebx_features = 0;
1137 uint32_t numvalue;
1139 for (def = x86_defs; def; def = def->next)
1140 if (name && !strcmp(name, def->name))
1141 break;
1142 if (kvm_enabled() && name && strcmp(name, "host") == 0) {
1143 cpu_x86_fill_host(x86_cpu_def);
1144 } else if (!def) {
1145 goto error;
1146 } else {
1147 memcpy(x86_cpu_def, def, sizeof(*def));
1150 add_flagname_to_bitmaps("hypervisor", &plus_features,
1151 &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
1152 &plus_kvm_features, &plus_svm_features, &plus_7_0_ebx_features);
1154 featurestr = strtok(NULL, ",");
1156 while (featurestr) {
1157 char *val;
1158 if (featurestr[0] == '+') {
1159 add_flagname_to_bitmaps(featurestr + 1, &plus_features,
1160 &plus_ext_features, &plus_ext2_features,
1161 &plus_ext3_features, &plus_kvm_features,
1162 &plus_svm_features, &plus_7_0_ebx_features);
1163 } else if (featurestr[0] == '-') {
1164 add_flagname_to_bitmaps(featurestr + 1, &minus_features,
1165 &minus_ext_features, &minus_ext2_features,
1166 &minus_ext3_features, &minus_kvm_features,
1167 &minus_svm_features, &minus_7_0_ebx_features);
1168 } else if ((val = strchr(featurestr, '='))) {
1169 *val = 0; val++;
1170 if (!strcmp(featurestr, "family")) {
1171 char *err;
1172 numvalue = strtoul(val, &err, 0);
1173 if (!*val || *err || numvalue > 0xff + 0xf) {
1174 fprintf(stderr, "bad numerical value %s\n", val);
1175 goto error;
1177 x86_cpu_def->family = numvalue;
1178 } else if (!strcmp(featurestr, "model")) {
1179 char *err;
1180 numvalue = strtoul(val, &err, 0);
1181 if (!*val || *err || numvalue > 0xff) {
1182 fprintf(stderr, "bad numerical value %s\n", val);
1183 goto error;
1185 x86_cpu_def->model = numvalue;
1186 } else if (!strcmp(featurestr, "stepping")) {
1187 char *err;
1188 numvalue = strtoul(val, &err, 0);
1189 if (!*val || *err || numvalue > 0xf) {
1190 fprintf(stderr, "bad numerical value %s\n", val);
1191 goto error;
1193 x86_cpu_def->stepping = numvalue ;
1194 } else if (!strcmp(featurestr, "level")) {
1195 char *err;
1196 numvalue = strtoul(val, &err, 0);
1197 if (!*val || *err) {
1198 fprintf(stderr, "bad numerical value %s\n", val);
1199 goto error;
1201 x86_cpu_def->level = numvalue;
1202 } else if (!strcmp(featurestr, "xlevel")) {
1203 char *err;
1204 numvalue = strtoul(val, &err, 0);
1205 if (!*val || *err) {
1206 fprintf(stderr, "bad numerical value %s\n", val);
1207 goto error;
1209 if (numvalue < 0x80000000) {
1210 numvalue += 0x80000000;
1212 x86_cpu_def->xlevel = numvalue;
1213 } else if (!strcmp(featurestr, "vendor")) {
1214 if (strlen(val) != 12) {
1215 fprintf(stderr, "vendor string must be 12 chars long\n");
1216 goto error;
1218 x86_cpu_def->vendor1 = 0;
1219 x86_cpu_def->vendor2 = 0;
1220 x86_cpu_def->vendor3 = 0;
1221 for(i = 0; i < 4; i++) {
1222 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
1223 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
1224 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
1226 x86_cpu_def->vendor_override = 1;
1227 } else if (!strcmp(featurestr, "model_id")) {
1228 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
1229 val);
1230 } else if (!strcmp(featurestr, "tsc_freq")) {
1231 int64_t tsc_freq;
1232 char *err;
1234 tsc_freq = strtosz_suffix_unit(val, &err,
1235 STRTOSZ_DEFSUFFIX_B, 1000);
1236 if (tsc_freq < 0 || *err) {
1237 fprintf(stderr, "bad numerical value %s\n", val);
1238 goto error;
1240 x86_cpu_def->tsc_khz = tsc_freq / 1000;
1241 } else if (!strcmp(featurestr, "hv_spinlocks")) {
1242 char *err;
1243 numvalue = strtoul(val, &err, 0);
1244 if (!*val || *err) {
1245 fprintf(stderr, "bad numerical value %s\n", val);
1246 goto error;
1248 hyperv_set_spinlock_retries(numvalue);
1249 } else {
1250 fprintf(stderr, "unrecognized feature %s\n", featurestr);
1251 goto error;
1253 } else if (!strcmp(featurestr, "check")) {
1254 check_cpuid = 1;
1255 } else if (!strcmp(featurestr, "enforce")) {
1256 check_cpuid = enforce_cpuid = 1;
1257 } else if (!strcmp(featurestr, "hv_relaxed")) {
1258 hyperv_enable_relaxed_timing(true);
1259 } else if (!strcmp(featurestr, "hv_vapic")) {
1260 hyperv_enable_vapic_recommended(true);
1261 } else {
1262 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
1263 goto error;
1265 featurestr = strtok(NULL, ",");
1267 x86_cpu_def->features |= plus_features;
1268 x86_cpu_def->ext_features |= plus_ext_features;
1269 x86_cpu_def->ext2_features |= plus_ext2_features;
1270 x86_cpu_def->ext3_features |= plus_ext3_features;
1271 x86_cpu_def->kvm_features |= plus_kvm_features;
1272 x86_cpu_def->svm_features |= plus_svm_features;
1273 x86_cpu_def->cpuid_7_0_ebx_features |= plus_7_0_ebx_features;
1274 x86_cpu_def->features &= ~minus_features;
1275 x86_cpu_def->ext_features &= ~minus_ext_features;
1276 x86_cpu_def->ext2_features &= ~minus_ext2_features;
1277 x86_cpu_def->ext3_features &= ~minus_ext3_features;
1278 x86_cpu_def->kvm_features &= ~minus_kvm_features;
1279 x86_cpu_def->svm_features &= ~minus_svm_features;
1280 x86_cpu_def->cpuid_7_0_ebx_features &= ~minus_7_0_ebx_features;
1281 if (check_cpuid) {
1282 if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
1283 goto error;
1285 if (x86_cpu_def->cpuid_7_0_ebx_features && x86_cpu_def->level < 7) {
1286 x86_cpu_def->level = 7;
1288 g_free(s);
1289 return 0;
1291 error:
1292 g_free(s);
1293 return -1;
1296 /* generate into buf a composite string of all cpuid names in featureset
1297  * selected by fbits; indicate truncation with "..." if bufsize would overflow.
1298  * if flags is non-zero, suppress names undefined in featureset.
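/* e.g. listflags(buf, sizeof(buf), 0x3, feature_name, 1) writes "vme fpu"
 * into buf (illustrative; bits are emitted from bit 31 down to bit 0). */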
1300 static void listflags(char *buf, int bufsize, uint32_t fbits,
1301 const char **featureset, uint32_t flags)
1303 const char **p = &featureset[31];
1304 char *q, *b, bit;
1305 int nc;
1307 b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
1308 *buf = '\0';
1309 for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
1310 if (fbits & 1 << bit && (*p || !flags)) {
1311 if (*p)
1312 nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
1313 else
1314 nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
1315 if (bufsize <= nc) {
1316 if (b) {
1317 memcpy(b, "...", sizeof("..."));
1319 return;
1321 q += nc;
1322 bufsize -= nc;
1326 /* generate CPU information. */
1327 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1329 x86_def_t *def;
1330 char buf[256];
1332 for (def = x86_defs; def; def = def->next) {
1333 snprintf(buf, sizeof(buf), "%s", def->name);
1334 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1336 if (kvm_enabled()) {
1337 (*cpu_fprintf)(f, "x86 %16s\n", "[host]");
1339 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1340 listflags(buf, sizeof(buf), (uint32_t)~0, feature_name, 1);
1341 (*cpu_fprintf)(f, " %s\n", buf);
1342 listflags(buf, sizeof(buf), (uint32_t)~0, ext_feature_name, 1);
1343 (*cpu_fprintf)(f, " %s\n", buf);
1344 listflags(buf, sizeof(buf), (uint32_t)~0, ext2_feature_name, 1);
1345 (*cpu_fprintf)(f, " %s\n", buf);
1346 listflags(buf, sizeof(buf), (uint32_t)~0, ext3_feature_name, 1);
1347 (*cpu_fprintf)(f, " %s\n", buf);
1350 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1352 CpuDefinitionInfoList *cpu_list = NULL;
1353 x86_def_t *def;
1355 for (def = x86_defs; def; def = def->next) {
1356 CpuDefinitionInfoList *entry;
1357 CpuDefinitionInfo *info;
1359 info = g_malloc0(sizeof(*info));
1360 info->name = g_strdup(def->name);
1362 entry = g_malloc0(sizeof(*entry));
1363 entry->value = info;
1364 entry->next = cpu_list;
1365 cpu_list = entry;
1368 return cpu_list;
1371 int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
1373 CPUX86State *env = &cpu->env;
1374 x86_def_t def1, *def = &def1;
1375 Error *error = NULL;
1377 memset(def, 0, sizeof(*def));
1379 if (cpu_x86_find_by_name(def, cpu_model) < 0)
1380 return -1;
1381 if (def->vendor1) {
1382 env->cpuid_vendor1 = def->vendor1;
1383 env->cpuid_vendor2 = def->vendor2;
1384 env->cpuid_vendor3 = def->vendor3;
1385 } else {
1386 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
1387 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
1388 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
1390 env->cpuid_vendor_override = def->vendor_override;
1391 object_property_set_int(OBJECT(cpu), def->level, "level", &error);
1392 object_property_set_int(OBJECT(cpu), def->family, "family", &error);
1393 object_property_set_int(OBJECT(cpu), def->model, "model", &error);
1394 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", &error);
1395 env->cpuid_features = def->features;
1396 env->cpuid_ext_features = def->ext_features;
1397 env->cpuid_ext2_features = def->ext2_features;
1398 env->cpuid_ext3_features = def->ext3_features;
1399 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", &error);
1400 env->cpuid_kvm_features = def->kvm_features;
1401 env->cpuid_svm_features = def->svm_features;
1402 env->cpuid_ext4_features = def->ext4_features;
1403 env->cpuid_7_0_ebx_features = def->cpuid_7_0_ebx_features;
1404 env->cpuid_xlevel2 = def->xlevel2;
1405 object_property_set_int(OBJECT(cpu), (int64_t)def->tsc_khz * 1000,
1406 "tsc-frequency", &error);
1408 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
1409 * CPUID[1].EDX.
1411 if (env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
1412 env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
1413 env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
1414 env->cpuid_ext2_features &= ~CPUID_EXT2_AMD_ALIASES;
1415 env->cpuid_ext2_features |= (def->features & CPUID_EXT2_AMD_ALIASES);
1418 if (!kvm_enabled()) {
1419 env->cpuid_features &= TCG_FEATURES;
1420 env->cpuid_ext_features &= TCG_EXT_FEATURES;
1421 env->cpuid_ext2_features &= (TCG_EXT2_FEATURES
1422 #ifdef TARGET_X86_64
1423 | CPUID_EXT2_SYSCALL | CPUID_EXT2_LM
1424 #endif
1426 env->cpuid_ext3_features &= TCG_EXT3_FEATURES;
1427 env->cpuid_svm_features &= TCG_SVM_FEATURES;
1429 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
1430 if (error_is_set(&error)) {
1431 error_free(error);
1432 return -1;
1434 return 0;
1437 #if !defined(CONFIG_USER_ONLY)
1439 void cpu_clear_apic_feature(CPUX86State *env)
1441 env->cpuid_features &= ~CPUID_APIC;
1444 #endif /* !CONFIG_USER_ONLY */
1446 /* Initialize list of CPU models, filling some non-static fields if necessary
1448 void x86_cpudef_setup(void)
1450 int i, j;
1451 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
1453 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
1454 x86_def_t *def = &builtin_x86_defs[i];
1455 def->next = x86_defs;
1457 /* Look for specific "cpudef" models that */
1458 /* have the QEMU version in .model_id */
1459 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
1460 if (strcmp(model_with_versions[j], def->name) == 0) {
1461 pstrcpy(def->model_id, sizeof(def->model_id),
1462 "QEMU Virtual CPU version ");
1463 pstrcat(def->model_id, sizeof(def->model_id),
1464 qemu_get_version());
1465 break;
1469 x86_defs = def;
1473 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
1474 uint32_t *ecx, uint32_t *edx)
1476 *ebx = env->cpuid_vendor1;
1477 *edx = env->cpuid_vendor2;
1478 *ecx = env->cpuid_vendor3;
1480     /* sysenter isn't supported in compatibility mode on AMD, syscall
1481      * isn't supported in compatibility mode on Intel.
1482 * Normally we advertise the actual cpu vendor, but you can override
1483 * this if you want to use KVM's sysenter/syscall emulation
1484 * in compatibility mode and when doing cross vendor migration
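    /* Illustrative: "-cpu qemu64,vendor=GenuineIntel" sets vendor_override,
     * so the configured vendor string is reported even when running under
     * KVM on a host with a different vendor. */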
1486 if (kvm_enabled() && ! env->cpuid_vendor_override) {
1487 host_cpuid(0, 0, NULL, ebx, ecx, edx);
1491 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
1492 uint32_t *eax, uint32_t *ebx,
1493 uint32_t *ecx, uint32_t *edx)
1495 /* test if maximum index reached */
1496 if (index & 0x80000000) {
1497 if (index > env->cpuid_xlevel) {
1498 if (env->cpuid_xlevel2 > 0) {
1499 /* Handle the Centaur's CPUID instruction. */
1500 if (index > env->cpuid_xlevel2) {
1501 index = env->cpuid_xlevel2;
1502 } else if (index < 0xC0000000) {
1503 index = env->cpuid_xlevel;
1505 } else {
1506 index = env->cpuid_xlevel;
1509 } else {
1510 if (index > env->cpuid_level)
1511 index = env->cpuid_level;
1514 switch(index) {
1515 case 0:
1516 *eax = env->cpuid_level;
1517 get_cpuid_vendor(env, ebx, ecx, edx);
1518 break;
1519 case 1:
1520 *eax = env->cpuid_version;
1521 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1522 *ecx = env->cpuid_ext_features;
1523 *edx = env->cpuid_features;
1524 if (env->nr_cores * env->nr_threads > 1) {
1525 *ebx |= (env->nr_cores * env->nr_threads) << 16;
1526 *edx |= 1 << 28; /* HTT bit */
1528 break;
1529 case 2:
1530 /* cache info: needed for Pentium Pro compatibility */
1531 *eax = 1;
1532 *ebx = 0;
1533 *ecx = 0;
1534 *edx = 0x2c307d;
1535 break;
1536 case 4:
1537 /* cache info: needed for Core compatibility */
1538 if (env->nr_cores > 1) {
1539 *eax = (env->nr_cores - 1) << 26;
1540 } else {
1541 *eax = 0;
1543 switch (count) {
1544 case 0: /* L1 dcache info */
1545 *eax |= 0x0000121;
1546 *ebx = 0x1c0003f;
1547 *ecx = 0x000003f;
1548 *edx = 0x0000001;
1549 break;
1550 case 1: /* L1 icache info */
1551 *eax |= 0x0000122;
1552 *ebx = 0x1c0003f;
1553 *ecx = 0x000003f;
1554 *edx = 0x0000001;
1555 break;
1556 case 2: /* L2 cache info */
1557 *eax |= 0x0000143;
1558 if (env->nr_threads > 1) {
1559 *eax |= (env->nr_threads - 1) << 14;
1561 *ebx = 0x3c0003f;
1562 *ecx = 0x0000fff;
1563 *edx = 0x0000001;
1564 break;
1565 default: /* end of info */
1566 *eax = 0;
1567 *ebx = 0;
1568 *ecx = 0;
1569 *edx = 0;
1570 break;
1572 break;
1573 case 5:
1574 /* mwait info: needed for Core compatibility */
1575 *eax = 0; /* Smallest monitor-line size in bytes */
1576 *ebx = 0; /* Largest monitor-line size in bytes */
1577 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1578 *edx = 0;
1579 break;
1580 case 6:
1581 /* Thermal and Power Leaf */
1582 *eax = 0;
1583 *ebx = 0;
1584 *ecx = 0;
1585 *edx = 0;
1586 break;
1587 case 7:
1588 /* Structured Extended Feature Flags Enumeration Leaf */
1589 if (count == 0) {
1590 *eax = 0; /* Maximum ECX value for sub-leaves */
1591 *ebx = env->cpuid_7_0_ebx_features; /* Feature flags */
1592 *ecx = 0; /* Reserved */
1593 *edx = 0; /* Reserved */
1594 } else {
1595 *eax = 0;
1596 *ebx = 0;
1597 *ecx = 0;
1598 *edx = 0;
1600 break;
1601 case 9:
1602 /* Direct Cache Access Information Leaf */
1603 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1604 *ebx = 0;
1605 *ecx = 0;
1606 *edx = 0;
1607 break;
1608 case 0xA:
1609 /* Architectural Performance Monitoring Leaf */
1610 if (kvm_enabled()) {
1611 KVMState *s = env->kvm_state;
1613 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
1614 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
1615 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
1616 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
1617 } else {
1618 *eax = 0;
1619 *ebx = 0;
1620 *ecx = 0;
1621 *edx = 0;
1623 break;
1624 case 0xD:
1625 /* Processor Extended State */
1626 if (!(env->cpuid_ext_features & CPUID_EXT_XSAVE)) {
1627 *eax = 0;
1628 *ebx = 0;
1629 *ecx = 0;
1630 *edx = 0;
1631 break;
1633 if (kvm_enabled()) {
1634 KVMState *s = env->kvm_state;
1636 *eax = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EAX);
1637 *ebx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EBX);
1638 *ecx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_ECX);
1639 *edx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EDX);
1640 } else {
1641 *eax = 0;
1642 *ebx = 0;
1643 *ecx = 0;
1644 *edx = 0;
1646 break;
1647 case 0x80000000:
1648 *eax = env->cpuid_xlevel;
1649 *ebx = env->cpuid_vendor1;
1650 *edx = env->cpuid_vendor2;
1651 *ecx = env->cpuid_vendor3;
1652 break;
1653 case 0x80000001:
1654 *eax = env->cpuid_version;
1655 *ebx = 0;
1656 *ecx = env->cpuid_ext3_features;
1657 *edx = env->cpuid_ext2_features;
1659 /* The Linux kernel checks for the CMPLegacy bit and
1660 * discards multiple thread information if it is set.
1661          * So don't set it here for Intel to make Linux guests happy.
1663 if (env->nr_cores * env->nr_threads > 1) {
1664 uint32_t tebx, tecx, tedx;
1665 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
1666 if (tebx != CPUID_VENDOR_INTEL_1 ||
1667 tedx != CPUID_VENDOR_INTEL_2 ||
1668 tecx != CPUID_VENDOR_INTEL_3) {
1669 *ecx |= 1 << 1; /* CmpLegacy bit */
1672 break;
1673 case 0x80000002:
1674 case 0x80000003:
1675 case 0x80000004:
1676 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1677 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1678 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1679 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1680 break;
1681 case 0x80000005:
1682 /* cache info (L1 cache) */
1683 *eax = 0x01ff01ff;
1684 *ebx = 0x01ff01ff;
1685 *ecx = 0x40020140;
1686 *edx = 0x40020140;
1687 break;
1688 case 0x80000006:
1689 /* cache info (L2 cache) */
1690 *eax = 0;
1691 *ebx = 0x42004200;
1692 *ecx = 0x02008140;
1693 *edx = 0;
1694 break;
1695 case 0x80000008:
1696 /* virtual & phys address size in low 2 bytes. */
1697 /* XXX: This value must match the one used in the MMU code. */
1698 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1699 /* 64 bit processor */
1700 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1701 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1702 } else {
1703 if (env->cpuid_features & CPUID_PSE36)
1704 *eax = 0x00000024; /* 36 bits physical */
1705 else
1706 *eax = 0x00000020; /* 32 bits physical */
1708 *ebx = 0;
1709 *ecx = 0;
1710 *edx = 0;
1711 if (env->nr_cores * env->nr_threads > 1) {
1712 *ecx |= (env->nr_cores * env->nr_threads) - 1;
1714 break;
1715 case 0x8000000A:
1716 if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
1717 *eax = 0x00000001; /* SVM Revision */
1718 *ebx = 0x00000010; /* nr of ASIDs */
1719 *ecx = 0;
1720 *edx = env->cpuid_svm_features; /* optional features */
1721 } else {
1722 *eax = 0;
1723 *ebx = 0;
1724 *ecx = 0;
1725 *edx = 0;
1727 break;
1728 case 0xC0000000:
1729 *eax = env->cpuid_xlevel2;
1730 *ebx = 0;
1731 *ecx = 0;
1732 *edx = 0;
1733 break;
1734 case 0xC0000001:
1735 /* Support for VIA CPU's CPUID instruction */
1736 *eax = env->cpuid_version;
1737 *ebx = 0;
1738 *ecx = 0;
1739 *edx = env->cpuid_ext4_features;
1740 break;
1741 case 0xC0000002:
1742 case 0xC0000003:
1743 case 0xC0000004:
1744 /* Reserved for the future, and now filled with zero */
1745 *eax = 0;
1746 *ebx = 0;
1747 *ecx = 0;
1748 *edx = 0;
1749 break;
1750 default:
1751 /* reserved values: zero */
1752 *eax = 0;
1753 *ebx = 0;
1754 *ecx = 0;
1755 *edx = 0;
1756 break;
1760 /* CPUClass::reset() */
1761 static void x86_cpu_reset(CPUState *s)
1763 X86CPU *cpu = X86_CPU(s);
1764 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
1765 CPUX86State *env = &cpu->env;
1766 int i;
1768 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
1769 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
1770 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
1773 xcc->parent_reset(s);
1776 memset(env, 0, offsetof(CPUX86State, breakpoints));
1778 tlb_flush(env, 1);
1780 env->old_exception = -1;
1782 /* init to reset state */
1784 #ifdef CONFIG_SOFTMMU
1785 env->hflags |= HF_SOFTMMU_MASK;
1786 #endif
1787 env->hflags2 |= HF2_GIF_MASK;
1789 cpu_x86_update_cr0(env, 0x60000010);
1790 env->a20_mask = ~0x0;
1791 env->smbase = 0x30000;
1793 env->idt.limit = 0xffff;
1794 env->gdt.limit = 0xffff;
1795 env->ldt.limit = 0xffff;
1796 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
1797 env->tr.limit = 0xffff;
1798 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
1800 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
1801 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
1802 DESC_R_MASK | DESC_A_MASK);
1803 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
1804 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1805 DESC_A_MASK);
1806 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
1807 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1808 DESC_A_MASK);
1809 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
1810 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1811 DESC_A_MASK);
1812 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
1813 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1814 DESC_A_MASK);
1815 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
1816 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1817 DESC_A_MASK);
1819 env->eip = 0xfff0;
1820 env->regs[R_EDX] = env->cpuid_version;
1822 env->eflags = 0x2;
1824 /* FPU init */
1825 for (i = 0; i < 8; i++) {
1826 env->fptags[i] = 1;
1828 env->fpuc = 0x37f;
1830 env->mxcsr = 0x1f80;
1832 env->pat = 0x0007040600070406ULL;
1833 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
1835 memset(env->dr, 0, sizeof(env->dr));
1836 env->dr[6] = DR6_FIXED_1;
1837 env->dr[7] = DR7_FIXED_1;
1838 cpu_breakpoint_remove_all(env, BP_CPU);
1839 cpu_watchpoint_remove_all(env, BP_CPU);
1841 #if !defined(CONFIG_USER_ONLY)
1842 /* We hard-wire the BSP to the first CPU. */
1843 if (env->cpu_index == 0) {
1844 apic_designate_bsp(env->apic_state);
1847 env->halted = !cpu_is_bsp(cpu);
1848 #endif
1851 #ifndef CONFIG_USER_ONLY
1852 bool cpu_is_bsp(X86CPU *cpu)
1854 return cpu_get_apic_base(cpu->env.apic_state) & MSR_IA32_APICBASE_BSP;
1857 /* TODO: remove me, when reset over QOM tree is implemented */
1858 static void x86_cpu_machine_reset_cb(void *opaque)
1860 X86CPU *cpu = opaque;
1861 cpu_reset(CPU(cpu));
1863 #endif
1865 static void mce_init(X86CPU *cpu)
1867 CPUX86State *cenv = &cpu->env;
1868 unsigned int bank;
1870 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
1871 && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
1872 (CPUID_MCE | CPUID_MCA)) {
1873 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
1874 cenv->mcg_ctl = ~(uint64_t)0;
1875 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
1876 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
1881 void x86_cpu_realize(Object *obj, Error **errp)
1883 X86CPU *cpu = X86_CPU(obj);
1885 #ifndef CONFIG_USER_ONLY
1886 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
1887 #endif
1889 mce_init(cpu);
1890 qemu_init_vcpu(&cpu->env);
1891 cpu_reset(CPU(cpu));
1894 static void x86_cpu_initfn(Object *obj)
1896 X86CPU *cpu = X86_CPU(obj);
1897 CPUX86State *env = &cpu->env;
1898 static int inited;
1900 cpu_exec_init(env);
1902 object_property_add(obj, "family", "int",
1903 x86_cpuid_version_get_family,
1904 x86_cpuid_version_set_family, NULL, NULL, NULL);
1905 object_property_add(obj, "model", "int",
1906 x86_cpuid_version_get_model,
1907 x86_cpuid_version_set_model, NULL, NULL, NULL);
1908 object_property_add(obj, "stepping", "int",
1909 x86_cpuid_version_get_stepping,
1910 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
1911 object_property_add(obj, "level", "int",
1912 x86_cpuid_get_level,
1913 x86_cpuid_set_level, NULL, NULL, NULL);
1914 object_property_add(obj, "xlevel", "int",
1915 x86_cpuid_get_xlevel,
1916 x86_cpuid_set_xlevel, NULL, NULL, NULL);
1917 object_property_add_str(obj, "vendor",
1918 x86_cpuid_get_vendor,
1919 x86_cpuid_set_vendor, NULL);
1920 object_property_add_str(obj, "model-id",
1921 x86_cpuid_get_model_id,
1922 x86_cpuid_set_model_id, NULL);
1923 object_property_add(obj, "tsc-frequency", "int",
1924 x86_cpuid_get_tsc_freq,
1925 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
1927 env->cpuid_apic_id = env->cpu_index;
1929 /* init various static tables used in TCG mode */
1930 if (tcg_enabled() && !inited) {
1931 inited = 1;
1932 optimize_flags_init();
1933 #ifndef CONFIG_USER_ONLY
1934 cpu_set_debug_excp_handler(breakpoint_handler);
1935 #endif
1939 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
1941 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1942 CPUClass *cc = CPU_CLASS(oc);
1944 xcc->parent_reset = cc->reset;
1945 cc->reset = x86_cpu_reset;
1948 static const TypeInfo x86_cpu_type_info = {
1949 .name = TYPE_X86_CPU,
1950 .parent = TYPE_CPU,
1951 .instance_size = sizeof(X86CPU),
1952 .instance_init = x86_cpu_initfn,
1953 .abstract = false,
1954 .class_size = sizeof(X86CPUClass),
1955 .class_init = x86_cpu_common_class_init,
1958 static void x86_cpu_register_types(void)
1960 type_register_static(&x86_cpu_type_info);
1963 type_init(x86_cpu_register_types)