target-i386/cpu.c (qemu/pbrook.git)
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "kvm.h"
27 #include "qemu-option.h"
28 #include "qemu-config.h"
30 #include "qapi/qapi-visit-core.h"
31 #include "arch_init.h"
33 #include "hyperv.h"
35 #include "hw/hw.h"
36 #if defined(CONFIG_KVM)
37 #include <linux/kvm_para.h>
38 #endif
40 /* feature flags taken from "Intel Processor Identification and the CPUID
41 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
42 * between feature naming conventions, aliases may be added.
44 static const char *feature_name[] = {
45 "fpu", "vme", "de", "pse",
46 "tsc", "msr", "pae", "mce",
47 "cx8", "apic", NULL, "sep",
48 "mtrr", "pge", "mca", "cmov",
49 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
50 NULL, "ds" /* Intel dts */, "acpi", "mmx",
51 "fxsr", "sse", "sse2", "ss",
52 "ht" /* Intel htt */, "tm", "ia64", "pbe",
54 static const char *ext_feature_name[] = {
55 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
56 "ds_cpl", "vmx", "smx", "est",
57 "tm2", "ssse3", "cid", NULL,
58 "fma", "cx16", "xtpr", "pdcm",
59 NULL, "pcid", "dca", "sse4.1|sse4_1",
60 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
61 "tsc-deadline", "aes", "xsave", "osxsave",
62 "avx", NULL, NULL, "hypervisor",
64 /* Feature names that are already defined in feature_name[] but are set in
65 * CPUID[8000_0001].EDX on AMD CPUs don't have their names listed in
66 * ext2_feature_name[]. They are copied automatically into cpuid_ext2_features
67 * if and only if the CPU vendor is AMD.
69 static const char *ext2_feature_name[] = {
70 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
71 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
72 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
73 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
74 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
75 "nx|xd", NULL, "mmxext", NULL /* mmx */,
76 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
78 static const char *ext3_feature_name[] = {
79 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
80 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
81 "3dnowprefetch", "osvw", "ibs", "xop",
82 "skinit", "wdt", NULL, NULL,
83 "fma4", NULL, "cvt16", "nodeid_msr",
84 NULL, NULL, NULL, NULL,
85 NULL, NULL, NULL, NULL,
86 NULL, NULL, NULL, NULL,
89 static const char *kvm_feature_name[] = {
90 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock", "kvm_asyncpf", NULL, "kvm_pv_eoi", NULL,
91 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
92 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
93 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
96 static const char *svm_feature_name[] = {
97 "npt", "lbrv", "svm_lock", "nrip_save",
98 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
99 NULL, NULL, "pause_filter", NULL,
100 "pfthreshold", NULL, NULL, NULL,
101 NULL, NULL, NULL, NULL,
102 NULL, NULL, NULL, NULL,
103 NULL, NULL, NULL, NULL,
104 NULL, NULL, NULL, NULL,
107 static const char *cpuid_7_0_ebx_feature_name[] = {
108 NULL, NULL, NULL, NULL, NULL, NULL, NULL, "smep",
109 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
110 NULL, NULL, NULL, NULL, "smap", NULL, NULL, NULL,
111 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
114 /* collects per-function cpuid data
116 typedef struct model_features_t {
117 uint32_t *guest_feat;
118 uint32_t *host_feat;
119 uint32_t check_feat;
120 const char **flag_names;
121 uint32_t cpuid;
122 } model_features_t;
124 int check_cpuid = 0;
125 int enforce_cpuid = 0;
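/* Execute the CPUID instruction on the host for the given leaf (function) and
 * sub-leaf (count), storing the results into whichever of eax/ebx/ecx/edx are
 * non-NULL.  Only compiled in when CONFIG_KVM is set; otherwise the outputs
 * are left unmodified. */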
127 void host_cpuid(uint32_t function, uint32_t count,
128 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
130 #if defined(CONFIG_KVM)
131 uint32_t vec[4];
133 #ifdef __x86_64__
134 asm volatile("cpuid"
135 : "=a"(vec[0]), "=b"(vec[1]),
136 "=c"(vec[2]), "=d"(vec[3])
137 : "0"(function), "c"(count) : "cc");
138 #else
139 asm volatile("pusha \n\t"
140 "cpuid \n\t"
141 "mov %%eax, 0(%2) \n\t"
142 "mov %%ebx, 4(%2) \n\t"
143 "mov %%ecx, 8(%2) \n\t"
144 "mov %%edx, 12(%2) \n\t"
145 "popa"
146 : : "a"(function), "c"(count), "S"(vec)
147 : "memory", "cc");
148 #endif
150 if (eax)
151 *eax = vec[0];
152 if (ebx)
153 *ebx = vec[1];
154 if (ecx)
155 *ecx = vec[2];
156 if (edx)
157 *edx = vec[3];
158 #endif
161 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
163 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
164 * a substring. ex if !NULL points to the first char after a substring,
165 * otherwise the string is assumed to be sized by a terminating nul.
166 * Return lexical ordering of *s1:*s2.
168 static int sstrcmp(const char *s1, const char *e1, const char *s2,
169 const char *e2)
171 for (;;) {
172 if (!*s1 || !*s2 || *s1 != *s2)
173 return (*s1 - *s2);
174 ++s1, ++s2;
175 if (s1 == e1 && s2 == e2)
176 return (0);
177 else if (s1 == e1)
178 return (*s2);
179 else if (s2 == e2)
180 return (*s1);
184 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
185 * '|' delimited (possibly empty) strings in which case search for a match
186 * within the alternatives proceeds left to right. Return 0 for success,
187 * non-zero otherwise.
189 static int altcmp(const char *s, const char *e, const char *altstr)
191 const char *p, *q;
193 for (q = p = altstr; ; ) {
194 while (*p && *p != '|')
195 ++p;
196 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
197 return (0);
198 if (!*p)
199 return (1);
200 else
201 q = ++p;
205 /* search featureset for flag *[s..e), if found set corresponding bit in
206 * *pval and return true, otherwise return false
208 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
209 const char **featureset)
211 uint32_t mask;
212 const char **ppc;
213 bool found = false;
215 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
216 if (*ppc && !altcmp(s, e, *ppc)) {
217 *pval |= mask;
218 found = true;
221 return found;
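/* Set the bit for the feature flag named by flagname in whichever of the
 * supplied feature-word bitmaps defines it; warns on stderr if the name is
 * not found in any of the known feature tables. */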
224 static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
225 uint32_t *ext_features,
226 uint32_t *ext2_features,
227 uint32_t *ext3_features,
228 uint32_t *kvm_features,
229 uint32_t *svm_features,
230 uint32_t *cpuid_7_0_ebx_features)
232 if (!lookup_feature(features, flagname, NULL, feature_name) &&
233 !lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
234 !lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
235 !lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
236 !lookup_feature(kvm_features, flagname, NULL, kvm_feature_name) &&
237 !lookup_feature(svm_features, flagname, NULL, svm_feature_name) &&
238 !lookup_feature(cpuid_7_0_ebx_features, flagname, NULL,
239 cpuid_7_0_ebx_feature_name))
240 fprintf(stderr, "CPU feature %s not found\n", flagname);
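/* Definition of one CPU model: CPUID vendor/family/model/stepping plus the
 * feature-word bitmaps and the maximum standard/extended CPUID levels that
 * are exposed to the guest. */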
243 typedef struct x86_def_t {
244 struct x86_def_t *next;
245 const char *name;
246 uint32_t level;
247 uint32_t vendor1, vendor2, vendor3;
248 int family;
249 int model;
250 int stepping;
251 int tsc_khz;
252 uint32_t features, ext_features, ext2_features, ext3_features;
253 uint32_t kvm_features, svm_features;
254 uint32_t xlevel;
255 char model_id[48];
256 int vendor_override;
257 /* Store the results of Centaur's CPUID instructions */
258 uint32_t ext4_features;
259 uint32_t xlevel2;
260 /* The feature bits on CPUID[EAX=7,ECX=0].EBX */
261 uint32_t cpuid_7_0_ebx_features;
262 } x86_def_t;
264 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
265 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
266 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
267 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
268 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
269 CPUID_PSE36 | CPUID_FXSR)
270 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
271 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
272 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
273 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
274 CPUID_PAE | CPUID_SEP | CPUID_APIC)
276 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
277 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
278 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
279 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
280 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
281 /* partly implemented:
282 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64)
283 CPUID_PSE36 (needed for Solaris) */
284 /* missing:
285 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
286 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | \
287 CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
288 CPUID_EXT_HYPERVISOR)
289 /* missing:
290 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
291 CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_XSAVE */
292 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
293 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
294 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
295 /* missing:
296 CPUID_EXT2_PDPE1GB */
297 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
298 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
299 #define TCG_SVM_FEATURES 0
300 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP)
302 /* maintains list of cpu model definitions
304 static x86_def_t *x86_defs = {NULL};
306 /* built-in cpu model definitions (deprecated)
308 static x86_def_t builtin_x86_defs[] = {
310 .name = "qemu64",
311 .level = 4,
312 .vendor1 = CPUID_VENDOR_AMD_1,
313 .vendor2 = CPUID_VENDOR_AMD_2,
314 .vendor3 = CPUID_VENDOR_AMD_3,
315 .family = 6,
316 .model = 2,
317 .stepping = 3,
318 .features = PPRO_FEATURES |
319 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
320 CPUID_PSE36,
321 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
322 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
323 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
324 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
325 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
326 .xlevel = 0x8000000A,
329 .name = "phenom",
330 .level = 5,
331 .vendor1 = CPUID_VENDOR_AMD_1,
332 .vendor2 = CPUID_VENDOR_AMD_2,
333 .vendor3 = CPUID_VENDOR_AMD_3,
334 .family = 16,
335 .model = 2,
336 .stepping = 3,
337 .features = PPRO_FEATURES |
338 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
339 CPUID_PSE36 | CPUID_VME | CPUID_HT,
340 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
341 CPUID_EXT_POPCNT,
342 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
343 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
344 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
345 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
346 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
347 CPUID_EXT3_CR8LEG,
348 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
349 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
350 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
351 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
352 .svm_features = CPUID_SVM_NPT | CPUID_SVM_LBRV,
353 .xlevel = 0x8000001A,
354 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
357 .name = "core2duo",
358 .level = 10,
359 .family = 6,
360 .model = 15,
361 .stepping = 11,
362 .features = PPRO_FEATURES |
363 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
364 CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
365 CPUID_HT | CPUID_TM | CPUID_PBE,
366 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
367 CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
368 CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
369 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
370 .ext3_features = CPUID_EXT3_LAHF_LM,
371 .xlevel = 0x80000008,
372 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
375 .name = "kvm64",
376 .level = 5,
377 .vendor1 = CPUID_VENDOR_INTEL_1,
378 .vendor2 = CPUID_VENDOR_INTEL_2,
379 .vendor3 = CPUID_VENDOR_INTEL_3,
380 .family = 15,
381 .model = 6,
382 .stepping = 1,
383 /* Missing: CPUID_VME, CPUID_HT */
384 .features = PPRO_FEATURES |
385 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
386 CPUID_PSE36,
387 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
388 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
389 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
390 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
391 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
392 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
393 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
394 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
395 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
396 .ext3_features = 0,
397 .xlevel = 0x80000008,
398 .model_id = "Common KVM processor"
401 .name = "qemu32",
402 .level = 4,
403 .family = 6,
404 .model = 3,
405 .stepping = 3,
406 .features = PPRO_FEATURES,
407 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
408 .xlevel = 0x80000004,
411 .name = "kvm32",
412 .level = 5,
413 .family = 15,
414 .model = 6,
415 .stepping = 1,
416 .features = PPRO_FEATURES |
417 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
418 .ext_features = CPUID_EXT_SSE3,
419 .ext2_features = PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
420 .ext3_features = 0,
421 .xlevel = 0x80000008,
422 .model_id = "Common 32-bit KVM processor"
425 .name = "coreduo",
426 .level = 10,
427 .family = 6,
428 .model = 14,
429 .stepping = 8,
430 .features = PPRO_FEATURES | CPUID_VME |
431 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
432 CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
433 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
434 CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
435 .ext2_features = CPUID_EXT2_NX,
436 .xlevel = 0x80000008,
437 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
440 .name = "486",
441 .level = 1,
442 .family = 4,
443 .model = 0,
444 .stepping = 0,
445 .features = I486_FEATURES,
446 .xlevel = 0,
449 .name = "pentium",
450 .level = 1,
451 .family = 5,
452 .model = 4,
453 .stepping = 3,
454 .features = PENTIUM_FEATURES,
455 .xlevel = 0,
458 .name = "pentium2",
459 .level = 2,
460 .family = 6,
461 .model = 5,
462 .stepping = 2,
463 .features = PENTIUM2_FEATURES,
464 .xlevel = 0,
467 .name = "pentium3",
468 .level = 2,
469 .family = 6,
470 .model = 7,
471 .stepping = 3,
472 .features = PENTIUM3_FEATURES,
473 .xlevel = 0,
476 .name = "athlon",
477 .level = 2,
478 .vendor1 = CPUID_VENDOR_AMD_1,
479 .vendor2 = CPUID_VENDOR_AMD_2,
480 .vendor3 = CPUID_VENDOR_AMD_3,
481 .family = 6,
482 .model = 2,
483 .stepping = 3,
484 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
485 CPUID_MCA,
486 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
487 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
488 .xlevel = 0x80000008,
491 .name = "n270",
492 /* original is on level 10 */
493 .level = 5,
494 .family = 6,
495 .model = 28,
496 .stepping = 2,
497 .features = PPRO_FEATURES |
498 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
499 CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
500 /* Some CPUs have no CPUID_SEP */
501 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
502 CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR,
503 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
504 CPUID_EXT2_NX,
505 .ext3_features = CPUID_EXT3_LAHF_LM,
506 .xlevel = 0x8000000A,
507 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
510 .name = "Conroe",
511 .level = 2,
512 .vendor1 = CPUID_VENDOR_INTEL_1,
513 .vendor2 = CPUID_VENDOR_INTEL_2,
514 .vendor3 = CPUID_VENDOR_INTEL_3,
515 .family = 6,
516 .model = 2,
517 .stepping = 3,
518 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
519 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
520 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
521 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
522 CPUID_DE | CPUID_FP87,
523 .ext_features = CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
524 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
525 .ext3_features = CPUID_EXT3_LAHF_LM,
526 .xlevel = 0x8000000A,
527 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
530 .name = "Penryn",
531 .level = 2,
532 .vendor1 = CPUID_VENDOR_INTEL_1,
533 .vendor2 = CPUID_VENDOR_INTEL_2,
534 .vendor3 = CPUID_VENDOR_INTEL_3,
535 .family = 6,
536 .model = 2,
537 .stepping = 3,
538 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
539 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
540 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
541 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
542 CPUID_DE | CPUID_FP87,
543 .ext_features = CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
544 CPUID_EXT_SSE3,
545 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
546 .ext3_features = CPUID_EXT3_LAHF_LM,
547 .xlevel = 0x8000000A,
548 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
551 .name = "Nehalem",
552 .level = 2,
553 .vendor1 = CPUID_VENDOR_INTEL_1,
554 .vendor2 = CPUID_VENDOR_INTEL_2,
555 .vendor3 = CPUID_VENDOR_INTEL_3,
556 .family = 6,
557 .model = 2,
558 .stepping = 3,
559 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
560 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
561 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
562 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
563 CPUID_DE | CPUID_FP87,
564 .ext_features = CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
565 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
566 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
567 .ext3_features = CPUID_EXT3_LAHF_LM,
568 .xlevel = 0x8000000A,
569 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
572 .name = "Westmere",
573 .level = 11,
574 .vendor1 = CPUID_VENDOR_INTEL_1,
575 .vendor2 = CPUID_VENDOR_INTEL_2,
576 .vendor3 = CPUID_VENDOR_INTEL_3,
577 .family = 6,
578 .model = 44,
579 .stepping = 1,
580 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
581 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
582 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
583 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
584 CPUID_DE | CPUID_FP87,
585 .ext_features = CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
586 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
587 CPUID_EXT_SSE3,
588 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
589 .ext3_features = CPUID_EXT3_LAHF_LM,
590 .xlevel = 0x8000000A,
591 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
594 .name = "SandyBridge",
595 .level = 0xd,
596 .vendor1 = CPUID_VENDOR_INTEL_1,
597 .vendor2 = CPUID_VENDOR_INTEL_2,
598 .vendor3 = CPUID_VENDOR_INTEL_3,
599 .family = 6,
600 .model = 42,
601 .stepping = 1,
602 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
603 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
604 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
605 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
606 CPUID_DE | CPUID_FP87,
607 .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
608 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
609 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
610 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
611 CPUID_EXT_SSE3,
612 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
613 CPUID_EXT2_SYSCALL,
614 .ext3_features = CPUID_EXT3_LAHF_LM,
615 .xlevel = 0x8000000A,
616 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
619 .name = "Opteron_G1",
620 .level = 5,
621 .vendor1 = CPUID_VENDOR_AMD_1,
622 .vendor2 = CPUID_VENDOR_AMD_2,
623 .vendor3 = CPUID_VENDOR_AMD_3,
624 .family = 15,
625 .model = 6,
626 .stepping = 1,
627 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
628 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
629 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
630 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
631 CPUID_DE | CPUID_FP87,
632 .ext_features = CPUID_EXT_SSE3,
633 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
634 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
635 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
636 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
637 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
638 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
639 .xlevel = 0x80000008,
640 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
643 .name = "Opteron_G2",
644 .level = 5,
645 .vendor1 = CPUID_VENDOR_AMD_1,
646 .vendor2 = CPUID_VENDOR_AMD_2,
647 .vendor3 = CPUID_VENDOR_AMD_3,
648 .family = 15,
649 .model = 6,
650 .stepping = 1,
651 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
652 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
653 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
654 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
655 CPUID_DE | CPUID_FP87,
656 .ext_features = CPUID_EXT_CX16 | CPUID_EXT_SSE3,
657 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
658 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
659 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
660 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
661 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
662 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
663 CPUID_EXT2_DE | CPUID_EXT2_FPU,
664 .ext3_features = CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
665 .xlevel = 0x80000008,
666 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
669 .name = "Opteron_G3",
670 .level = 5,
671 .vendor1 = CPUID_VENDOR_AMD_1,
672 .vendor2 = CPUID_VENDOR_AMD_2,
673 .vendor3 = CPUID_VENDOR_AMD_3,
674 .family = 15,
675 .model = 6,
676 .stepping = 1,
677 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
678 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
679 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
680 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
681 CPUID_DE | CPUID_FP87,
682 .ext_features = CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
683 CPUID_EXT_SSE3,
684 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
685 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
686 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
687 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
688 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
689 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
690 CPUID_EXT2_DE | CPUID_EXT2_FPU,
691 .ext3_features = CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
692 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
693 .xlevel = 0x80000008,
694 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
697 .name = "Opteron_G4",
698 .level = 0xd,
699 .vendor1 = CPUID_VENDOR_AMD_1,
700 .vendor2 = CPUID_VENDOR_AMD_2,
701 .vendor3 = CPUID_VENDOR_AMD_3,
702 .family = 21,
703 .model = 1,
704 .stepping = 2,
705 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
706 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
707 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
708 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
709 CPUID_DE | CPUID_FP87,
710 .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
711 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
712 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
713 CPUID_EXT_SSE3,
714 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
715 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
716 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
717 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
718 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
719 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
720 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
721 .ext3_features = CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
722 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
723 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
724 CPUID_EXT3_LAHF_LM,
725 .xlevel = 0x8000001A,
726 .model_id = "AMD Opteron 62xx class CPU",
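/* Read the host's 48-byte CPUID brand string (leaves 0x80000002..0x80000004)
 * into str. */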
730 static int cpu_x86_fill_model_id(char *str)
732 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
733 int i;
735 for (i = 0; i < 3; i++) {
736 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
737 memcpy(str + i * 16 + 0, &eax, 4);
738 memcpy(str + i * 16 + 4, &ebx, 4);
739 memcpy(str + i * 16 + 8, &ecx, 4);
740 memcpy(str + i * 16 + 12, &edx, 4);
742 return 0;
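/* Fill *x86_cpu_def with a description of the host CPU by querying the host
 * CPUID leaves (and KVM for the CPUID[7,0].EBX feature bits); this backs the
 * "host" CPU model. */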
745 static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
747 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
749 x86_cpu_def->name = "host";
750 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
751 x86_cpu_def->level = eax;
752 x86_cpu_def->vendor1 = ebx;
753 x86_cpu_def->vendor2 = edx;
754 x86_cpu_def->vendor3 = ecx;
756 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
757 x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
758 x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
759 x86_cpu_def->stepping = eax & 0x0F;
760 x86_cpu_def->ext_features = ecx;
761 x86_cpu_def->features = edx;
763 if (kvm_enabled() && x86_cpu_def->level >= 7) {
764 x86_cpu_def->cpuid_7_0_ebx_features = kvm_arch_get_supported_cpuid(kvm_state, 0x7, 0, R_EBX);
765 } else {
766 x86_cpu_def->cpuid_7_0_ebx_features = 0;
769 host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
770 x86_cpu_def->xlevel = eax;
772 host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
773 x86_cpu_def->ext2_features = edx;
774 x86_cpu_def->ext3_features = ecx;
775 cpu_x86_fill_model_id(x86_cpu_def->model_id);
776 x86_cpu_def->vendor_override = 0;
778 /* Call Centaur's CPUID instruction. */
779 if (x86_cpu_def->vendor1 == CPUID_VENDOR_VIA_1 &&
780 x86_cpu_def->vendor2 == CPUID_VENDOR_VIA_2 &&
781 x86_cpu_def->vendor3 == CPUID_VENDOR_VIA_3) {
782 host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx);
783 if (eax >= 0xC0000001) {
784 /* Support VIA max extended level */
785 x86_cpu_def->xlevel2 = eax;
786 host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx);
787 x86_cpu_def->ext4_features = edx;
792 * Every SVM feature requires emulation support in KVM - so we can't just
793 * read the host features here. KVM might even support SVM features not
794 * available on the host hardware. Just set all bits and mask out the
795 * unsupported ones later.
797 x86_cpu_def->svm_features = -1;
799 return 0;
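/* Warn on stderr that the host CPUID leaf described by f lacks the requested
 * feature bit given in mask. */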
802 static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
804 int i;
806 for (i = 0; i < 32; ++i)
807 if (1 << i & mask) {
808 fprintf(stderr, "warning: host cpuid %04x_%04x lacks requested"
809 " flag '%s' [0x%08x]\n",
810 f->cpuid >> 16, f->cpuid & 0xffff,
811 f->flag_names[i] ? f->flag_names[i] : "[reserved]", mask);
812 break;
814 return 0;
817 /* Best-effort attempt to inform the user that requested cpu flags aren't
818 * making their way into the guest. Note: ft[].check_feat ideally should be
819 * specified via a guest_def field to suppress reporting of extraneous flags.
821 static int check_features_against_host(x86_def_t *guest_def)
823 x86_def_t host_def;
824 uint32_t mask;
825 int rv, i;
826 struct model_features_t ft[] = {
827 {&guest_def->features, &host_def.features,
828 ~0, feature_name, 0x00000000},
829 {&guest_def->ext_features, &host_def.ext_features,
830 ~CPUID_EXT_HYPERVISOR, ext_feature_name, 0x00000001},
831 {&guest_def->ext2_features, &host_def.ext2_features,
832 ~PPRO_FEATURES, ext2_feature_name, 0x80000000},
833 {&guest_def->ext3_features, &host_def.ext3_features,
834 ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};
836 cpu_x86_fill_host(&host_def);
837 for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i)
838 for (mask = 1; mask; mask <<= 1)
839 if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
840 !(*ft[i].host_feat & mask)) {
841 unavailable_host_feature(&ft[i], mask);
842 rv = 1;
844 return rv;
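/* QOM property accessors for the CPUID version fields: "family" is encoded in
 * CPUID[1].EAX bits 11:8 plus the extended-family field (bits 27:20) when the
 * base family is 0xf, "model" combines bits 7:4 with the extended-model field
 * (bits 19:16), and "stepping" is bits 3:0. */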
847 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
848 const char *name, Error **errp)
850 X86CPU *cpu = X86_CPU(obj);
851 CPUX86State *env = &cpu->env;
852 int64_t value;
854 value = (env->cpuid_version >> 8) & 0xf;
855 if (value == 0xf) {
856 value += (env->cpuid_version >> 20) & 0xff;
858 visit_type_int(v, &value, name, errp);
861 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
862 const char *name, Error **errp)
864 X86CPU *cpu = X86_CPU(obj);
865 CPUX86State *env = &cpu->env;
866 const int64_t min = 0;
867 const int64_t max = 0xff + 0xf;
868 int64_t value;
870 visit_type_int(v, &value, name, errp);
871 if (error_is_set(errp)) {
872 return;
874 if (value < min || value > max) {
875 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
876 name ? name : "null", value, min, max);
877 return;
880 env->cpuid_version &= ~0xff00f00;
881 if (value > 0x0f) {
882 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
883 } else {
884 env->cpuid_version |= value << 8;
888 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
889 const char *name, Error **errp)
891 X86CPU *cpu = X86_CPU(obj);
892 CPUX86State *env = &cpu->env;
893 int64_t value;
895 value = (env->cpuid_version >> 4) & 0xf;
896 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
897 visit_type_int(v, &value, name, errp);
900 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
901 const char *name, Error **errp)
903 X86CPU *cpu = X86_CPU(obj);
904 CPUX86State *env = &cpu->env;
905 const int64_t min = 0;
906 const int64_t max = 0xff;
907 int64_t value;
909 visit_type_int(v, &value, name, errp);
910 if (error_is_set(errp)) {
911 return;
913 if (value < min || value > max) {
914 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
915 name ? name : "null", value, min, max);
916 return;
919 env->cpuid_version &= ~0xf00f0;
920 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
923 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
924 void *opaque, const char *name,
925 Error **errp)
927 X86CPU *cpu = X86_CPU(obj);
928 CPUX86State *env = &cpu->env;
929 int64_t value;
931 value = env->cpuid_version & 0xf;
932 visit_type_int(v, &value, name, errp);
935 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
936 void *opaque, const char *name,
937 Error **errp)
939 X86CPU *cpu = X86_CPU(obj);
940 CPUX86State *env = &cpu->env;
941 const int64_t min = 0;
942 const int64_t max = 0xf;
943 int64_t value;
945 visit_type_int(v, &value, name, errp);
946 if (error_is_set(errp)) {
947 return;
949 if (value < min || value > max) {
950 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
951 name ? name : "null", value, min, max);
952 return;
955 env->cpuid_version &= ~0xf;
956 env->cpuid_version |= value & 0xf;
959 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
960 const char *name, Error **errp)
962 X86CPU *cpu = X86_CPU(obj);
964 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
967 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
968 const char *name, Error **errp)
970 X86CPU *cpu = X86_CPU(obj);
972 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
975 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
976 const char *name, Error **errp)
978 X86CPU *cpu = X86_CPU(obj);
980 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
983 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
984 const char *name, Error **errp)
986 X86CPU *cpu = X86_CPU(obj);
988 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
991 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
993 X86CPU *cpu = X86_CPU(obj);
994 CPUX86State *env = &cpu->env;
995 char *value;
996 int i;
998 value = (char *)g_malloc(12 + 1);
999 for (i = 0; i < 4; i++) {
1000 value[i ] = env->cpuid_vendor1 >> (8 * i);
1001 value[i + 4] = env->cpuid_vendor2 >> (8 * i);
1002 value[i + 8] = env->cpuid_vendor3 >> (8 * i);
1004 value[12] = '\0';
1005 return value;
1008 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1009 Error **errp)
1011 X86CPU *cpu = X86_CPU(obj);
1012 CPUX86State *env = &cpu->env;
1013 int i;
1015 if (strlen(value) != 12) {
1016 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1017 "vendor", value);
1018 return;
1021 env->cpuid_vendor1 = 0;
1022 env->cpuid_vendor2 = 0;
1023 env->cpuid_vendor3 = 0;
1024 for (i = 0; i < 4; i++) {
1025 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1026 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1027 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1029 env->cpuid_vendor_override = 1;
1032 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1034 X86CPU *cpu = X86_CPU(obj);
1035 CPUX86State *env = &cpu->env;
1036 char *value;
1037 int i;
1039 value = g_malloc(48 + 1);
1040 for (i = 0; i < 48; i++) {
1041 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1043 value[48] = '\0';
1044 return value;
1047 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1048 Error **errp)
1050 X86CPU *cpu = X86_CPU(obj);
1051 CPUX86State *env = &cpu->env;
1052 int c, len, i;
1054 if (model_id == NULL) {
1055 model_id = "";
1057 len = strlen(model_id);
1058 memset(env->cpuid_model, 0, 48);
1059 for (i = 0; i < 48; i++) {
1060 if (i >= len) {
1061 c = '\0';
1062 } else {
1063 c = (uint8_t)model_id[i];
1065 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1069 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1070 const char *name, Error **errp)
1072 X86CPU *cpu = X86_CPU(obj);
1073 int64_t value;
1075 value = cpu->env.tsc_khz * 1000;
1076 visit_type_int(v, &value, name, errp);
1079 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1080 const char *name, Error **errp)
1082 X86CPU *cpu = X86_CPU(obj);
1083 const int64_t min = 0;
1084 const int64_t max = INT64_MAX;
1085 int64_t value;
1087 visit_type_int(v, &value, name, errp);
1088 if (error_is_set(errp)) {
1089 return;
1091 if (value < min || value > max) {
1092 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1093 name ? name : "null", value, min, max);
1094 return;
1097 cpu->env.tsc_khz = value / 1000;
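/* Resolve a "-cpu" model string of the form "name[,features...]": look the
 * name up in x86_defs (or build a host definition for "host" under KVM), then
 * apply the +feature/-feature/key=value modifiers.  Returns 0 on success, -1
 * on an unknown model or a malformed feature string. */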
1100 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
1102 unsigned int i;
1103 x86_def_t *def;
1105 char *s = g_strdup(cpu_model);
1106 char *featurestr, *name = strtok(s, ",");
1107 /* Features to be added */
1108 uint32_t plus_features = 0, plus_ext_features = 0;
1109 uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
1110 uint32_t plus_kvm_features = 0, plus_svm_features = 0;
1111 uint32_t plus_7_0_ebx_features = 0;
1112 /* Features to be removed */
1113 uint32_t minus_features = 0, minus_ext_features = 0;
1114 uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
1115 uint32_t minus_kvm_features = 0, minus_svm_features = 0;
1116 uint32_t minus_7_0_ebx_features = 0;
1117 uint32_t numvalue;
1119 for (def = x86_defs; def; def = def->next)
1120 if (name && !strcmp(name, def->name))
1121 break;
1122 if (kvm_enabled() && name && strcmp(name, "host") == 0) {
1123 cpu_x86_fill_host(x86_cpu_def);
1124 } else if (!def) {
1125 goto error;
1126 } else {
1127 memcpy(x86_cpu_def, def, sizeof(*def));
1130 #if defined(CONFIG_KVM)
1131 plus_kvm_features = (1 << KVM_FEATURE_CLOCKSOURCE) |
1132 (1 << KVM_FEATURE_NOP_IO_DELAY) |
1133 (1 << KVM_FEATURE_MMU_OP) |
1134 (1 << KVM_FEATURE_CLOCKSOURCE2) |
1135 (1 << KVM_FEATURE_ASYNC_PF) |
1136 (1 << KVM_FEATURE_STEAL_TIME) |
1137 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
1138 #else
1139 plus_kvm_features = 0;
1140 #endif
1142 add_flagname_to_bitmaps("hypervisor", &plus_features,
1143 &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
1144 &plus_kvm_features, &plus_svm_features, &plus_7_0_ebx_features);
1146 featurestr = strtok(NULL, ",");
1148 while (featurestr) {
1149 char *val;
1150 if (featurestr[0] == '+') {
1151 add_flagname_to_bitmaps(featurestr + 1, &plus_features,
1152 &plus_ext_features, &plus_ext2_features,
1153 &plus_ext3_features, &plus_kvm_features,
1154 &plus_svm_features, &plus_7_0_ebx_features);
1155 } else if (featurestr[0] == '-') {
1156 add_flagname_to_bitmaps(featurestr + 1, &minus_features,
1157 &minus_ext_features, &minus_ext2_features,
1158 &minus_ext3_features, &minus_kvm_features,
1159 &minus_svm_features, &minus_7_0_ebx_features);
1160 } else if ((val = strchr(featurestr, '='))) {
1161 *val = 0; val++;
1162 if (!strcmp(featurestr, "family")) {
1163 char *err;
1164 numvalue = strtoul(val, &err, 0);
1165 if (!*val || *err || numvalue > 0xff + 0xf) {
1166 fprintf(stderr, "bad numerical value %s\n", val);
1167 goto error;
1169 x86_cpu_def->family = numvalue;
1170 } else if (!strcmp(featurestr, "model")) {
1171 char *err;
1172 numvalue = strtoul(val, &err, 0);
1173 if (!*val || *err || numvalue > 0xff) {
1174 fprintf(stderr, "bad numerical value %s\n", val);
1175 goto error;
1177 x86_cpu_def->model = numvalue;
1178 } else if (!strcmp(featurestr, "stepping")) {
1179 char *err;
1180 numvalue = strtoul(val, &err, 0);
1181 if (!*val || *err || numvalue > 0xf) {
1182 fprintf(stderr, "bad numerical value %s\n", val);
1183 goto error;
1185 x86_cpu_def->stepping = numvalue ;
1186 } else if (!strcmp(featurestr, "level")) {
1187 char *err;
1188 numvalue = strtoul(val, &err, 0);
1189 if (!*val || *err) {
1190 fprintf(stderr, "bad numerical value %s\n", val);
1191 goto error;
1193 x86_cpu_def->level = numvalue;
1194 } else if (!strcmp(featurestr, "xlevel")) {
1195 char *err;
1196 numvalue = strtoul(val, &err, 0);
1197 if (!*val || *err) {
1198 fprintf(stderr, "bad numerical value %s\n", val);
1199 goto error;
1201 if (numvalue < 0x80000000) {
1202 numvalue += 0x80000000;
1204 x86_cpu_def->xlevel = numvalue;
1205 } else if (!strcmp(featurestr, "vendor")) {
1206 if (strlen(val) != 12) {
1207 fprintf(stderr, "vendor string must be 12 chars long\n");
1208 goto error;
1210 x86_cpu_def->vendor1 = 0;
1211 x86_cpu_def->vendor2 = 0;
1212 x86_cpu_def->vendor3 = 0;
1213 for(i = 0; i < 4; i++) {
1214 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
1215 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
1216 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
1218 x86_cpu_def->vendor_override = 1;
1219 } else if (!strcmp(featurestr, "model_id")) {
1220 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
1221 val);
1222 } else if (!strcmp(featurestr, "tsc_freq")) {
1223 int64_t tsc_freq;
1224 char *err;
1226 tsc_freq = strtosz_suffix_unit(val, &err,
1227 STRTOSZ_DEFSUFFIX_B, 1000);
1228 if (tsc_freq < 0 || *err) {
1229 fprintf(stderr, "bad numerical value %s\n", val);
1230 goto error;
1232 x86_cpu_def->tsc_khz = tsc_freq / 1000;
1233 } else if (!strcmp(featurestr, "hv_spinlocks")) {
1234 char *err;
1235 numvalue = strtoul(val, &err, 0);
1236 if (!*val || *err) {
1237 fprintf(stderr, "bad numerical value %s\n", val);
1238 goto error;
1240 hyperv_set_spinlock_retries(numvalue);
1241 } else {
1242 fprintf(stderr, "unrecognized feature %s\n", featurestr);
1243 goto error;
1245 } else if (!strcmp(featurestr, "check")) {
1246 check_cpuid = 1;
1247 } else if (!strcmp(featurestr, "enforce")) {
1248 check_cpuid = enforce_cpuid = 1;
1249 } else if (!strcmp(featurestr, "hv_relaxed")) {
1250 hyperv_enable_relaxed_timing(true);
1251 } else if (!strcmp(featurestr, "hv_vapic")) {
1252 hyperv_enable_vapic_recommended(true);
1253 } else {
1254 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
1255 goto error;
1257 featurestr = strtok(NULL, ",");
1259 x86_cpu_def->features |= plus_features;
1260 x86_cpu_def->ext_features |= plus_ext_features;
1261 x86_cpu_def->ext2_features |= plus_ext2_features;
1262 x86_cpu_def->ext3_features |= plus_ext3_features;
1263 x86_cpu_def->kvm_features |= plus_kvm_features;
1264 x86_cpu_def->svm_features |= plus_svm_features;
1265 x86_cpu_def->cpuid_7_0_ebx_features |= plus_7_0_ebx_features;
1266 x86_cpu_def->features &= ~minus_features;
1267 x86_cpu_def->ext_features &= ~minus_ext_features;
1268 x86_cpu_def->ext2_features &= ~minus_ext2_features;
1269 x86_cpu_def->ext3_features &= ~minus_ext3_features;
1270 x86_cpu_def->kvm_features &= ~minus_kvm_features;
1271 x86_cpu_def->svm_features &= ~minus_svm_features;
1272 x86_cpu_def->cpuid_7_0_ebx_features &= ~minus_7_0_ebx_features;
1273 if (check_cpuid) {
1274 if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
1275 goto error;
1277 if (x86_cpu_def->cpuid_7_0_ebx_features && x86_cpu_def->level < 7) {
1278 x86_cpu_def->level = 7;
1280 g_free(s);
1281 return 0;
1283 error:
1284 g_free(s);
1285 return -1;
1288 /* Generate a composite string into buf of all cpuid names in featureset
1289 * selected by fbits. Truncation at bufsize is indicated by "..." in the event
1290 * of overflow. If flags is non-zero, names undefined in featureset are suppressed.
1292 static void listflags(char *buf, int bufsize, uint32_t fbits,
1293 const char **featureset, uint32_t flags)
1295 const char **p = &featureset[31];
1296 char *q, *b, bit;
1297 int nc;
1299 b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
1300 *buf = '\0';
1301 for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
1302 if (fbits & 1 << bit && (*p || !flags)) {
1303 if (*p)
1304 nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
1305 else
1306 nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
1307 if (bufsize <= nc) {
1308 if (b) {
1309 memcpy(b, "...", sizeof("..."));
1311 return;
1313 q += nc;
1314 bufsize -= nc;
1318 /* generate CPU information. */
1319 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1321 x86_def_t *def;
1322 char buf[256];
1324 for (def = x86_defs; def; def = def->next) {
1325 snprintf(buf, sizeof(buf), "%s", def->name);
1326 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1328 if (kvm_enabled()) {
1329 (*cpu_fprintf)(f, "x86 %16s\n", "[host]");
1331 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1332 listflags(buf, sizeof(buf), (uint32_t)~0, feature_name, 1);
1333 (*cpu_fprintf)(f, " %s\n", buf);
1334 listflags(buf, sizeof(buf), (uint32_t)~0, ext_feature_name, 1);
1335 (*cpu_fprintf)(f, " %s\n", buf);
1336 listflags(buf, sizeof(buf), (uint32_t)~0, ext2_feature_name, 1);
1337 (*cpu_fprintf)(f, " %s\n", buf);
1338 listflags(buf, sizeof(buf), (uint32_t)~0, ext3_feature_name, 1);
1339 (*cpu_fprintf)(f, " %s\n", buf);
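/* Build the list of CPU model names (used to answer the query-cpu-definitions
 * QMP command). */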
1342 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1344 CpuDefinitionInfoList *cpu_list = NULL;
1345 x86_def_t *def;
1347 for (def = x86_defs; def; def = def->next) {
1348 CpuDefinitionInfoList *entry;
1349 CpuDefinitionInfo *info;
1351 info = g_malloc0(sizeof(*info));
1352 info->name = g_strdup(def->name);
1354 entry = g_malloc0(sizeof(*entry));
1355 entry->value = info;
1356 entry->next = cpu_list;
1357 cpu_list = entry;
1360 return cpu_list;
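/* Apply the CPU model named by cpu_model to cpu: resolve the definition, copy
 * its vendor and feature words into the CPU state, set the version, level and
 * model-id properties, and (when running on TCG) mask the features down to
 * what TCG implements.  Returns 0 on success, -1 on error. */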
1363 int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
1365 CPUX86State *env = &cpu->env;
1366 x86_def_t def1, *def = &def1;
1367 Error *error = NULL;
1369 memset(def, 0, sizeof(*def));
1371 if (cpu_x86_find_by_name(def, cpu_model) < 0)
1372 return -1;
1373 if (def->vendor1) {
1374 env->cpuid_vendor1 = def->vendor1;
1375 env->cpuid_vendor2 = def->vendor2;
1376 env->cpuid_vendor3 = def->vendor3;
1377 } else {
1378 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
1379 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
1380 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
1382 env->cpuid_vendor_override = def->vendor_override;
1383 object_property_set_int(OBJECT(cpu), def->level, "level", &error);
1384 object_property_set_int(OBJECT(cpu), def->family, "family", &error);
1385 object_property_set_int(OBJECT(cpu), def->model, "model", &error);
1386 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", &error);
1387 env->cpuid_features = def->features;
1388 env->cpuid_ext_features = def->ext_features;
1389 env->cpuid_ext2_features = def->ext2_features;
1390 env->cpuid_ext3_features = def->ext3_features;
1391 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", &error);
1392 env->cpuid_kvm_features = def->kvm_features;
1393 env->cpuid_svm_features = def->svm_features;
1394 env->cpuid_ext4_features = def->ext4_features;
1395 env->cpuid_7_0_ebx_features = def->cpuid_7_0_ebx_features;
1396 env->cpuid_xlevel2 = def->xlevel2;
1397 object_property_set_int(OBJECT(cpu), (int64_t)def->tsc_khz * 1000,
1398 "tsc-frequency", &error);
1400 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
1401 * CPUID[1].EDX.
1403 if (env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
1404 env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
1405 env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
1406 env->cpuid_ext2_features &= ~CPUID_EXT2_AMD_ALIASES;
1407 env->cpuid_ext2_features |= (def->features & CPUID_EXT2_AMD_ALIASES);
1410 if (!kvm_enabled()) {
1411 env->cpuid_features &= TCG_FEATURES;
1412 env->cpuid_ext_features &= TCG_EXT_FEATURES;
1413 env->cpuid_ext2_features &= (TCG_EXT2_FEATURES
1414 #ifdef TARGET_X86_64
1415 | CPUID_EXT2_SYSCALL | CPUID_EXT2_LM
1416 #endif
1418 env->cpuid_ext3_features &= TCG_EXT3_FEATURES;
1419 env->cpuid_svm_features &= TCG_SVM_FEATURES;
1421 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
1422 if (error_is_set(&error)) {
1423 error_free(error);
1424 return -1;
1426 return 0;
1429 #if !defined(CONFIG_USER_ONLY)
1431 void cpu_clear_apic_feature(CPUX86State *env)
1433 env->cpuid_features &= ~CPUID_APIC;
1436 #endif /* !CONFIG_USER_ONLY */
1438 /* Initialize list of CPU models, filling some non-static fields if necessary
1440 void x86_cpudef_setup(void)
1442 int i, j;
1443 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
1445 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
1446 x86_def_t *def = &builtin_x86_defs[i];
1447 def->next = x86_defs;
1449 /* Look for specific "cpudef" models that */
1450 /* have the QEMU version in .model_id */
1451 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
1452 if (strcmp(model_with_versions[j], def->name) == 0) {
1453 pstrcpy(def->model_id, sizeof(def->model_id),
1454 "QEMU Virtual CPU version ");
1455 pstrcat(def->model_id, sizeof(def->model_id),
1456 qemu_get_version());
1457 break;
1461 x86_defs = def;
1465 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
1466 uint32_t *ecx, uint32_t *edx)
1468 *ebx = env->cpuid_vendor1;
1469 *edx = env->cpuid_vendor2;
1470 *ecx = env->cpuid_vendor3;
1472 /* sysenter isn't supported in compatibility mode on AMD, syscall
1473 * isn't supported in compatibility mode on Intel.
1474 * Normally we advertise the actual cpu vendor, but you can override
1475 * this if you want to use KVM's sysenter/syscall emulation
1476 * in compatibility mode and when doing cross vendor migration
1478 if (kvm_enabled() && ! env->cpuid_vendor_override) {
1479 host_cpuid(0, 0, NULL, ebx, ecx, edx);
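/* Emulate the CPUID instruction for the guest: clamp the requested leaf to the
 * advertised standard, extended or Centaur maximum and fill eax/ebx/ecx/edx
 * for that leaf/sub-leaf. */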
1483 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
1484 uint32_t *eax, uint32_t *ebx,
1485 uint32_t *ecx, uint32_t *edx)
1487 /* test if maximum index reached */
1488 if (index & 0x80000000) {
1489 if (index > env->cpuid_xlevel) {
1490 if (env->cpuid_xlevel2 > 0) {
1491 /* Handle the Centaur's CPUID instruction. */
1492 if (index > env->cpuid_xlevel2) {
1493 index = env->cpuid_xlevel2;
1494 } else if (index < 0xC0000000) {
1495 index = env->cpuid_xlevel;
1497 } else {
1498 index = env->cpuid_xlevel;
1501 } else {
1502 if (index > env->cpuid_level)
1503 index = env->cpuid_level;
1506 switch(index) {
1507 case 0:
1508 *eax = env->cpuid_level;
1509 get_cpuid_vendor(env, ebx, ecx, edx);
1510 break;
1511 case 1:
1512 *eax = env->cpuid_version;
1513 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1514 *ecx = env->cpuid_ext_features;
1515 *edx = env->cpuid_features;
1516 if (env->nr_cores * env->nr_threads > 1) {
1517 *ebx |= (env->nr_cores * env->nr_threads) << 16;
1518 *edx |= 1 << 28; /* HTT bit */
1520 break;
1521 case 2:
1522 /* cache info: needed for Pentium Pro compatibility */
1523 *eax = 1;
1524 *ebx = 0;
1525 *ecx = 0;
1526 *edx = 0x2c307d;
1527 break;
1528 case 4:
1529 /* cache info: needed for Core compatibility */
1530 if (env->nr_cores > 1) {
1531 *eax = (env->nr_cores - 1) << 26;
1532 } else {
1533 *eax = 0;
1535 switch (count) {
1536 case 0: /* L1 dcache info */
1537 *eax |= 0x0000121;
1538 *ebx = 0x1c0003f;
1539 *ecx = 0x000003f;
1540 *edx = 0x0000001;
1541 break;
1542 case 1: /* L1 icache info */
1543 *eax |= 0x0000122;
1544 *ebx = 0x1c0003f;
1545 *ecx = 0x000003f;
1546 *edx = 0x0000001;
1547 break;
1548 case 2: /* L2 cache info */
1549 *eax |= 0x0000143;
1550 if (env->nr_threads > 1) {
1551 *eax |= (env->nr_threads - 1) << 14;
1553 *ebx = 0x3c0003f;
1554 *ecx = 0x0000fff;
1555 *edx = 0x0000001;
1556 break;
1557 default: /* end of info */
1558 *eax = 0;
1559 *ebx = 0;
1560 *ecx = 0;
1561 *edx = 0;
1562 break;
1564 break;
1565 case 5:
1566 /* mwait info: needed for Core compatibility */
1567 *eax = 0; /* Smallest monitor-line size in bytes */
1568 *ebx = 0; /* Largest monitor-line size in bytes */
1569 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1570 *edx = 0;
1571 break;
1572 case 6:
1573 /* Thermal and Power Leaf */
1574 *eax = 0;
1575 *ebx = 0;
1576 *ecx = 0;
1577 *edx = 0;
1578 break;
1579 case 7:
1580 /* Structured Extended Feature Flags Enumeration Leaf */
1581 if (count == 0) {
1582 *eax = 0; /* Maximum ECX value for sub-leaves */
1583 *ebx = env->cpuid_7_0_ebx_features; /* Feature flags */
1584 *ecx = 0; /* Reserved */
1585 *edx = 0; /* Reserved */
1586 } else {
1587 *eax = 0;
1588 *ebx = 0;
1589 *ecx = 0;
1590 *edx = 0;
1592 break;
1593 case 9:
1594 /* Direct Cache Access Information Leaf */
1595 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1596 *ebx = 0;
1597 *ecx = 0;
1598 *edx = 0;
1599 break;
1600 case 0xA:
1601 /* Architectural Performance Monitoring Leaf */
1602 if (kvm_enabled()) {
1603 KVMState *s = env->kvm_state;
1605 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
1606 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
1607 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
1608 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
1609 } else {
1610 *eax = 0;
1611 *ebx = 0;
1612 *ecx = 0;
1613 *edx = 0;
1615 break;
1616 case 0xD:
1617 /* Processor Extended State */
1618 if (!(env->cpuid_ext_features & CPUID_EXT_XSAVE)) {
1619 *eax = 0;
1620 *ebx = 0;
1621 *ecx = 0;
1622 *edx = 0;
1623 break;
1625 if (kvm_enabled()) {
1626 KVMState *s = env->kvm_state;
1628 *eax = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EAX);
1629 *ebx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EBX);
1630 *ecx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_ECX);
1631 *edx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EDX);
1632 } else {
1633 *eax = 0;
1634 *ebx = 0;
1635 *ecx = 0;
1636 *edx = 0;
1638 break;
1639 case 0x80000000:
1640 *eax = env->cpuid_xlevel;
1641 *ebx = env->cpuid_vendor1;
1642 *edx = env->cpuid_vendor2;
1643 *ecx = env->cpuid_vendor3;
1644 break;
1645 case 0x80000001:
1646 *eax = env->cpuid_version;
1647 *ebx = 0;
1648 *ecx = env->cpuid_ext3_features;
1649 *edx = env->cpuid_ext2_features;
1651 /* The Linux kernel checks for the CMPLegacy bit and
1652 * discards multiple thread information if it is set.
1653 * So don't set it here for Intel, to keep Linux guests happy.
1655 if (env->nr_cores * env->nr_threads > 1) {
1656 uint32_t tebx, tecx, tedx;
1657 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
1658 if (tebx != CPUID_VENDOR_INTEL_1 ||
1659 tedx != CPUID_VENDOR_INTEL_2 ||
1660 tecx != CPUID_VENDOR_INTEL_3) {
1661 *ecx |= 1 << 1; /* CmpLegacy bit */
1664 break;
1665 case 0x80000002:
1666 case 0x80000003:
1667 case 0x80000004:
1668 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1669 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1670 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1671 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1672 break;
1673 case 0x80000005:
1674 /* cache info (L1 cache) */
1675 *eax = 0x01ff01ff;
1676 *ebx = 0x01ff01ff;
1677 *ecx = 0x40020140;
1678 *edx = 0x40020140;
1679 break;
1680 case 0x80000006:
1681 /* cache info (L2 cache) */
1682 *eax = 0;
1683 *ebx = 0x42004200;
1684 *ecx = 0x02008140;
1685 *edx = 0;
1686 break;
1687 case 0x80000008:
1688 /* virtual & phys address size in low 2 bytes. */
1689 /* XXX: This value must match the one used in the MMU code. */
1690 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1691 /* 64 bit processor */
1692 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1693 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1694 } else {
1695 if (env->cpuid_features & CPUID_PSE36)
1696 *eax = 0x00000024; /* 36 bits physical */
1697 else
1698 *eax = 0x00000020; /* 32 bits physical */
1700 *ebx = 0;
1701 *ecx = 0;
1702 *edx = 0;
1703 if (env->nr_cores * env->nr_threads > 1) {
1704 *ecx |= (env->nr_cores * env->nr_threads) - 1;
1706 break;
1707 case 0x8000000A:
1708 if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
1709 *eax = 0x00000001; /* SVM Revision */
1710 *ebx = 0x00000010; /* nr of ASIDs */
1711 *ecx = 0;
1712 *edx = env->cpuid_svm_features; /* optional features */
1713 } else {
1714 *eax = 0;
1715 *ebx = 0;
1716 *ecx = 0;
1717 *edx = 0;
1719 break;
1720 case 0xC0000000:
1721 *eax = env->cpuid_xlevel2;
1722 *ebx = 0;
1723 *ecx = 0;
1724 *edx = 0;
1725 break;
1726 case 0xC0000001:
1727 /* Support for VIA CPU's CPUID instruction */
1728 *eax = env->cpuid_version;
1729 *ebx = 0;
1730 *ecx = 0;
1731 *edx = env->cpuid_ext4_features;
1732 break;
1733 case 0xC0000002:
1734 case 0xC0000003:
1735 case 0xC0000004:
1736 /* Reserved for future use; currently filled with zero */
1737 *eax = 0;
1738 *ebx = 0;
1739 *ecx = 0;
1740 *edx = 0;
1741 break;
1742 default:
1743 /* reserved values: zero */
1744 *eax = 0;
1745 *ebx = 0;
1746 *ecx = 0;
1747 *edx = 0;
1748 break;
1752 /* CPUClass::reset() */
1753 static void x86_cpu_reset(CPUState *s)
1755 X86CPU *cpu = X86_CPU(s);
1756 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
1757 CPUX86State *env = &cpu->env;
1758 int i;
1760 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
1761 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
1762 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
1765 xcc->parent_reset(s);
1768 memset(env, 0, offsetof(CPUX86State, breakpoints));
1770 tlb_flush(env, 1);
1772 env->old_exception = -1;
1774 /* init to reset state */
1776 #ifdef CONFIG_SOFTMMU
1777 env->hflags |= HF_SOFTMMU_MASK;
1778 #endif
1779 env->hflags2 |= HF2_GIF_MASK;
1781 cpu_x86_update_cr0(env, 0x60000010);
1782 env->a20_mask = ~0x0;
1783 env->smbase = 0x30000;
1785 env->idt.limit = 0xffff;
1786 env->gdt.limit = 0xffff;
1787 env->ldt.limit = 0xffff;
1788 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
1789 env->tr.limit = 0xffff;
1790 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
1792 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
1793 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
1794 DESC_R_MASK | DESC_A_MASK);
1795 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
1796 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1797 DESC_A_MASK);
1798 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
1799 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1800 DESC_A_MASK);
1801 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
1802 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1803 DESC_A_MASK);
1804 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
1805 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1806 DESC_A_MASK);
1807 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
1808 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1809 DESC_A_MASK);
1811 env->eip = 0xfff0;
1812 env->regs[R_EDX] = env->cpuid_version;
1814 env->eflags = 0x2;
1816 /* FPU init */
1817 for (i = 0; i < 8; i++) {
1818 env->fptags[i] = 1;
1820 env->fpuc = 0x37f;
1822 env->mxcsr = 0x1f80;
1824 env->pat = 0x0007040600070406ULL;
1825 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
1827 memset(env->dr, 0, sizeof(env->dr));
1828 env->dr[6] = DR6_FIXED_1;
1829 env->dr[7] = DR7_FIXED_1;
1830 cpu_breakpoint_remove_all(env, BP_CPU);
1831 cpu_watchpoint_remove_all(env, BP_CPU);
1833 #if !defined(CONFIG_USER_ONLY)
1834 /* We hard-wire the BSP to the first CPU. */
1835 if (env->cpu_index == 0) {
1836 apic_designate_bsp(env->apic_state);
1839 env->halted = !cpu_is_bsp(cpu);
1840 #endif
1843 #ifndef CONFIG_USER_ONLY
1844 bool cpu_is_bsp(X86CPU *cpu)
1846 return cpu_get_apic_base(cpu->env.apic_state) & MSR_IA32_APICBASE_BSP;
1849 /* TODO: remove me, when reset over QOM tree is implemented */
1850 static void x86_cpu_machine_reset_cb(void *opaque)
1852 X86CPU *cpu = opaque;
1853 cpu_reset(CPU(cpu));
1855 #endif
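/* Advertise machine-check support (MCG_CAP and the per-bank control masks)
 * for models with family >= 6 that set both CPUID_MCE and CPUID_MCA. */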
1857 static void mce_init(X86CPU *cpu)
1859 CPUX86State *cenv = &cpu->env;
1860 unsigned int bank;
1862 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
1863 && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
1864 (CPUID_MCE | CPUID_MCA)) {
1865 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
1866 cenv->mcg_ctl = ~(uint64_t)0;
1867 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
1868 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
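/* Realize the CPU object: register the machine-reset callback, set up MCE
 * state, create the vcpu and perform the initial reset. */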
1873 void x86_cpu_realize(Object *obj, Error **errp)
1875 X86CPU *cpu = X86_CPU(obj);
1877 #ifndef CONFIG_USER_ONLY
1878 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
1879 #endif
1881 mce_init(cpu);
1882 qemu_init_vcpu(&cpu->env);
1883 cpu_reset(CPU(cpu));
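/* Instance init: register the per-CPU QOM properties (family, model, stepping,
 * level, xlevel, vendor, model-id, tsc-frequency), derive the initial APIC ID
 * from the CPU index, and perform one-time TCG setup. */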
1886 static void x86_cpu_initfn(Object *obj)
1888 X86CPU *cpu = X86_CPU(obj);
1889 CPUX86State *env = &cpu->env;
1890 static int inited;
1892 cpu_exec_init(env);
1894 object_property_add(obj, "family", "int",
1895 x86_cpuid_version_get_family,
1896 x86_cpuid_version_set_family, NULL, NULL, NULL);
1897 object_property_add(obj, "model", "int",
1898 x86_cpuid_version_get_model,
1899 x86_cpuid_version_set_model, NULL, NULL, NULL);
1900 object_property_add(obj, "stepping", "int",
1901 x86_cpuid_version_get_stepping,
1902 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
1903 object_property_add(obj, "level", "int",
1904 x86_cpuid_get_level,
1905 x86_cpuid_set_level, NULL, NULL, NULL);
1906 object_property_add(obj, "xlevel", "int",
1907 x86_cpuid_get_xlevel,
1908 x86_cpuid_set_xlevel, NULL, NULL, NULL);
1909 object_property_add_str(obj, "vendor",
1910 x86_cpuid_get_vendor,
1911 x86_cpuid_set_vendor, NULL);
1912 object_property_add_str(obj, "model-id",
1913 x86_cpuid_get_model_id,
1914 x86_cpuid_set_model_id, NULL);
1915 object_property_add(obj, "tsc-frequency", "int",
1916 x86_cpuid_get_tsc_freq,
1917 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
1919 env->cpuid_apic_id = env->cpu_index;
1921 /* init various static tables used in TCG mode */
1922 if (tcg_enabled() && !inited) {
1923 inited = 1;
1924 optimize_flags_init();
1925 #ifndef CONFIG_USER_ONLY
1926 cpu_set_debug_excp_handler(breakpoint_handler);
1927 #endif
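/* Class init: save the parent reset handler and install x86_cpu_reset in its
 * place. */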
1931 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
1933 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1934 CPUClass *cc = CPU_CLASS(oc);
1936 xcc->parent_reset = cc->reset;
1937 cc->reset = x86_cpu_reset;
1940 static const TypeInfo x86_cpu_type_info = {
1941 .name = TYPE_X86_CPU,
1942 .parent = TYPE_CPU,
1943 .instance_size = sizeof(X86CPU),
1944 .instance_init = x86_cpu_initfn,
1945 .abstract = false,
1946 .class_size = sizeof(X86CPUClass),
1947 .class_init = x86_cpu_common_class_init,
1950 static void x86_cpu_register_types(void)
1952 type_register_static(&x86_cpu_type_info);
1955 type_init(x86_cpu_register_types)