/*
 * Portions of this file taken from the Linux kernel,
 * Copyright 1991-2009 Linus Torvalds and contributors
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
23 struct cpu_dev
*cpu_devs
[X86_VENDOR_NUM
] = { };
/* CPUID functions returning a single datum */
29 /* Probe for the CPUID instruction */
30 static int have_cpuid_p(void)
32 return cpu_has_eflag(X86_EFLAGS_ID
);
35 static struct cpu_dev amd_cpu_dev
= {
37 .c_ident
= {"AuthenticAMD"}
40 static struct cpu_dev intel_cpu_dev
= {
42 .c_ident
= {"GenuineIntel"}
45 static struct cpu_dev cyrix_cpu_dev
= {
47 .c_ident
= {"CyrixInstead"}
50 static struct cpu_dev umc_cpu_dev
= {
52 .c_ident
= {"UMC UMC UMC"}
56 static struct cpu_dev nexgen_cpu_dev
= {
58 .c_ident
= {"NexGenDriven"}
61 static struct cpu_dev centaur_cpu_dev
= {
62 .c_vendor
= "Centaur",
63 .c_ident
= {"CentaurHauls"}
66 static struct cpu_dev rise_cpu_dev
= {
68 .c_ident
= {"RiseRiseRise"}
71 static struct cpu_dev transmeta_cpu_dev
= {
72 .c_vendor
= "Transmeta",
73 .c_ident
= {"GenuineTMx86", "TransmetaCPU"}
76 void init_cpu_devs(void)
78 cpu_devs
[X86_VENDOR_INTEL
] = &intel_cpu_dev
;
79 cpu_devs
[X86_VENDOR_CYRIX
] = &cyrix_cpu_dev
;
80 cpu_devs
[X86_VENDOR_AMD
] = &amd_cpu_dev
;
81 cpu_devs
[X86_VENDOR_UMC
] = &umc_cpu_dev
;
82 cpu_devs
[X86_VENDOR_NEXGEN
] = &nexgen_cpu_dev
;
83 cpu_devs
[X86_VENDOR_CENTAUR
] = ¢aur_cpu_dev
;
84 cpu_devs
[X86_VENDOR_RISE
] = &rise_cpu_dev
;
85 cpu_devs
[X86_VENDOR_TRANSMETA
] = &transmeta_cpu_dev
;
88 void get_cpu_vendor(struct cpuinfo_x86
*c
)
90 char *v
= c
->x86_vendor_id
;
93 for (i
= 0; i
< X86_VENDOR_NUM
; i
++) {
95 if (!strcmp(v
, cpu_devs
[i
]->c_ident
[0]) ||
96 (cpu_devs
[i
]->c_ident
[1] &&
97 !strcmp(v
, cpu_devs
[i
]->c_ident
[1]))) {
104 c
->x86_vendor
= X86_VENDOR_UNKNOWN
;
107 int get_model_name(struct cpuinfo_x86
*c
)
112 if (cpuid_eax(0x80000000) < 0x80000004)
115 v
= (unsigned int *)c
->x86_model_id
;
116 cpuid(0x80000002, &v
[0], &v
[1], &v
[2], &v
[3]);
117 cpuid(0x80000003, &v
[4], &v
[5], &v
[6], &v
[7]);
118 cpuid(0x80000004, &v
[8], &v
[9], &v
[10], &v
[11]);
119 c
->x86_model_id
[48] = 0;
121 /* Intel chips right-justify this string for some dumb reason;
122 undo that brain damage */
123 p
= q
= &c
->x86_model_id
[0];
129 while (q
<= &c
->x86_model_id
[48])
130 *q
++ = '\0'; /* Zero-pad the rest */
136 void detect_cache(uint32_t xlvl
, struct cpuinfo_x86
*c
)
138 uint32_t eax
, ebx
, ecx
, edx
, l2size
;
139 /* Detecting L1 cache */
140 if (xlvl
>= 0x80000005) {
141 cpuid(0x80000005, &eax
, &ebx
, &ecx
, &edx
);
142 c
->x86_l1_data_cache_size
= ecx
>> 24;
143 c
->x86_l1_instruction_cache_size
= edx
>> 24;
146 /* Detecting L2 cache */
147 c
->x86_l2_cache_size
= 0;
149 if (xlvl
< 0x80000006) /* Some chips just has a large L1. */
152 cpuid(0x80000006, &eax
, &ebx
, &ecx
, &edx
);
155 /* Vendor based fixes */
156 switch (c
->x86_vendor
) {
157 case X86_VENDOR_INTEL
:
159 * Intel PIII Tualatin. This comes in two flavours.
160 * One has 256kb of cache, the other 512. We have no way
161 * to determine which, so we use a boottime override
162 * for the 512kb model, and assume 256 otherwise.
164 if ((c
->x86
== 6) && (c
->x86_model
== 11) && (l2size
== 0))
168 /* AMD errata T13 (order #21922) */
170 if (c
->x86_model
== 3 && c
->x86_mask
== 0) /* Duron Rev A0 */
172 if (c
->x86_model
== 4 && (c
->x86_mask
== 0 || c
->x86_mask
== 1)) /* Tbird rev A1/A2 */
177 c
->x86_l2_cache_size
= l2size
;
180 void generic_identify(struct cpuinfo_x86
*c
)
183 uint32_t eax
, ebx
, ecx
, edx
;
185 /* Get vendor name */
187 (uint32_t *) & c
->cpuid_level
,
188 (uint32_t *) & c
->x86_vendor_id
[0],
189 (uint32_t *) & c
->x86_vendor_id
[8],
190 (uint32_t *) & c
->x86_vendor_id
[4]);
194 /* Intel-defined flags: level 0x00000001 */
195 if (c
->cpuid_level
>= 0x00000001) {
196 uint32_t capability
, excap
;
197 cpuid(0x00000001, &tfms
, &ebx
, &excap
, &capability
);
198 c
->x86_capability
[0] = capability
;
199 c
->x86_capability
[4] = excap
;
200 c
->x86
= (tfms
>> 8) & 15;
201 c
->x86_model
= (tfms
>> 4) & 15;
203 c
->x86
+= (tfms
>> 20) & 0xff;
205 c
->x86_model
+= ((tfms
>> 16) & 0xF) << 4;
206 c
->x86_mask
= tfms
& 15;
207 if (cpu_has(c
, X86_FEATURE_CLFLSH
))
208 c
->x86_clflush_size
= ((ebx
>> 8) & 0xff) * 8;
210 /* Have CPUID level 0 only - unheard of */
214 /* AMD-defined flags: level 0x80000001 */
215 xlvl
= cpuid_eax(0x80000000);
216 if ((xlvl
& 0xffff0000) == 0x80000000) {
217 if (xlvl
>= 0x80000001) {
218 c
->x86_capability
[1] = cpuid_edx(0x80000001);
219 c
->x86_capability
[6] = cpuid_ecx(0x80000001);
221 if (xlvl
>= 0x80000004)
222 get_model_name(c
); /* Default name */
225 /* Detecting the number of cores */
226 switch (c
->x86_vendor
) {
228 if (xlvl
>= 0x80000008) {
229 c
->x86_num_cores
= (cpuid_ecx(0x80000008) & 0xff) + 1;
230 if (c
->x86_num_cores
& (c
->x86_num_cores
- 1))
231 c
->x86_num_cores
= 1;
234 case X86_VENDOR_INTEL
:
235 if (c
->cpuid_level
>= 0x00000004) {
236 cpuid(0x4, &eax
, &ebx
, &ecx
, &edx
);
237 c
->x86_num_cores
= ((eax
& 0xfc000000) >> 26) + 1;
241 c
->x86_num_cores
= 1;
245 detect_cache(xlvl
, c
);
/*
 * Checksum an MP configuration block: byte-sum len bytes starting at mp
 * and return the low 8 bits.  A valid MP floating pointer structure
 * sums to zero, so callers treat a 0 result as "checksum OK".
 *
 * NOTE(review): the body was missing from this copy; restored from
 * upstream.
 */
static int mpf_checksum(unsigned char *mp, int len)
{
    int sum = 0;

    while (len--)
        sum += *mp++;

    return sum & 0xFF;
}
262 static int smp_scan_config(unsigned long base
, unsigned long length
)
264 unsigned long *bp
= (unsigned long *)base
;
265 struct intel_mp_floating
*mpf
;
267 // printf("Scan SMP from %p for %ld bytes.\n", bp,length);
268 if (sizeof(*mpf
) != 16) {
269 printf("Error: MPF size\n");
274 mpf
= (struct intel_mp_floating
*)bp
;
275 if ((*bp
== SMP_MAGIC_IDENT
) &&
276 (mpf
->mpf_length
== 1) &&
277 !mpf_checksum((unsigned char *)bp
, 16) &&
278 ((mpf
->mpf_specification
== 1)
279 || (mpf
->mpf_specification
== 4))) {
/*
 * Look for an MP configuration table in the conventional locations.
 * Returns 1 if an MP floating pointer structure is found (machine is
 * SMP-capable), 0 otherwise.
 *
 * NOTE(review): the return statements were missing from this copy;
 * restored from upstream.
 */
int find_smp_config(void)
{
//      unsigned int address;

    /*
     * FIXME: Linux assumes you have 640K of base ram..
     * this continues the error...
     *
     * 1) Scan the bottom 1K for a signature
     * 2) Scan the top 1K of base RAM
     * 3) Scan the 64K of bios
     */
    if (smp_scan_config(0x0, 0x400) ||
        smp_scan_config(639 * 0x400, 0x400) ||
        smp_scan_config(0xF0000, 0x10000))
        return 1;
    /*
     * If it is an SMP machine we should know now, unless the
     * configuration is in an EISA/MCA bus machine with an
     * extended bios data area.
     *
     * there is a real-mode segmented pointer pointing to the
     * 4K EBDA area at 0x40E, calculate and scan it here.
     *
     * NOTE! There are Linux loaders that will corrupt the EBDA
     * area, and as such this kind of SMP config may be less
     * trustworthy, simply because the SMP table may have been
     * stomped on during early boot. These loaders are buggy and
     * so are we.
     *
     * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
     */
//      address = get_bios_ebda();
//      if (address)
//              smp_scan_config(address, 0x400);
    return 0;
}
327 void set_cpu_flags(struct cpuinfo_x86
*c
, s_cpu
* cpu
)
329 cpu
->flags
.fpu
= cpu_has(c
, X86_FEATURE_FPU
);
330 cpu
->flags
.vme
= cpu_has(c
, X86_FEATURE_VME
);
331 cpu
->flags
.de
= cpu_has(c
, X86_FEATURE_DE
);
332 cpu
->flags
.pse
= cpu_has(c
, X86_FEATURE_PSE
);
333 cpu
->flags
.tsc
= cpu_has(c
, X86_FEATURE_TSC
);
334 cpu
->flags
.msr
= cpu_has(c
, X86_FEATURE_MSR
);
335 cpu
->flags
.pae
= cpu_has(c
, X86_FEATURE_PAE
);
336 cpu
->flags
.mce
= cpu_has(c
, X86_FEATURE_MCE
);
337 cpu
->flags
.cx8
= cpu_has(c
, X86_FEATURE_CX8
);
338 cpu
->flags
.apic
= cpu_has(c
, X86_FEATURE_APIC
);
339 cpu
->flags
.sep
= cpu_has(c
, X86_FEATURE_SEP
);
340 cpu
->flags
.mtrr
= cpu_has(c
, X86_FEATURE_MTRR
);
341 cpu
->flags
.pge
= cpu_has(c
, X86_FEATURE_PGE
);
342 cpu
->flags
.mca
= cpu_has(c
, X86_FEATURE_MCA
);
343 cpu
->flags
.cmov
= cpu_has(c
, X86_FEATURE_CMOV
);
344 cpu
->flags
.pat
= cpu_has(c
, X86_FEATURE_PAT
);
345 cpu
->flags
.pse_36
= cpu_has(c
, X86_FEATURE_PSE36
);
346 cpu
->flags
.psn
= cpu_has(c
, X86_FEATURE_PN
);
347 cpu
->flags
.clflsh
= cpu_has(c
, X86_FEATURE_CLFLSH
);
348 cpu
->flags
.dts
= cpu_has(c
, X86_FEATURE_DTES
);
349 cpu
->flags
.acpi
= cpu_has(c
, X86_FEATURE_ACPI
);
350 cpu
->flags
.pbe
= cpu_has(c
, X86_FEATURE_PBE
);
351 cpu
->flags
.mmx
= cpu_has(c
, X86_FEATURE_MMX
);
352 cpu
->flags
.fxsr
= cpu_has(c
, X86_FEATURE_FXSR
);
353 cpu
->flags
.sse
= cpu_has(c
, X86_FEATURE_XMM
);
354 cpu
->flags
.sse2
= cpu_has(c
, X86_FEATURE_XMM2
);
355 cpu
->flags
.ss
= cpu_has(c
, X86_FEATURE_SELFSNOOP
);
356 cpu
->flags
.htt
= cpu_has(c
, X86_FEATURE_HT
);
357 cpu
->flags
.acc
= cpu_has(c
, X86_FEATURE_ACC
);
358 cpu
->flags
.syscall
= cpu_has(c
, X86_FEATURE_SYSCALL
);
359 cpu
->flags
.mp
= cpu_has(c
, X86_FEATURE_MP
);
360 cpu
->flags
.nx
= cpu_has(c
, X86_FEATURE_NX
);
361 cpu
->flags
.mmxext
= cpu_has(c
, X86_FEATURE_MMXEXT
);
362 cpu
->flags
.fxsr_opt
= cpu_has(c
, X86_FEATURE_FXSR_OPT
);
363 cpu
->flags
.gbpages
= cpu_has(c
, X86_FEATURE_GBPAGES
);
364 cpu
->flags
.rdtscp
= cpu_has(c
, X86_FEATURE_RDTSCP
);
365 cpu
->flags
.lm
= cpu_has(c
, X86_FEATURE_LM
);
366 cpu
->flags
.nowext
= cpu_has(c
, X86_FEATURE_3DNOWEXT
);
367 cpu
->flags
.now
= cpu_has(c
, X86_FEATURE_3DNOW
);
368 cpu
->flags
.smp
= find_smp_config();
369 cpu
->flags
.pni
= cpu_has(c
, X86_FEATURE_XMM3
);
370 cpu
->flags
.pclmulqd
= cpu_has(c
, X86_FEATURE_PCLMULQDQ
);
371 cpu
->flags
.dtes64
= cpu_has(c
, X86_FEATURE_DTES64
);
372 cpu
->flags
.vmx
= cpu_has(c
, X86_FEATURE_VMX
);
373 cpu
->flags
.smx
= cpu_has(c
, X86_FEATURE_SMX
);
374 cpu
->flags
.est
= cpu_has(c
, X86_FEATURE_EST
);
375 cpu
->flags
.tm2
= cpu_has(c
, X86_FEATURE_TM2
);
376 cpu
->flags
.sse3
= cpu_has(c
, X86_FEATURE_SSE3
);
377 cpu
->flags
.cid
= cpu_has(c
, X86_FEATURE_CID
);
378 cpu
->flags
.fma
= cpu_has(c
, X86_FEATURE_FMA
);
379 cpu
->flags
.cx16
= cpu_has(c
, X86_FEATURE_CX16
);
380 cpu
->flags
.xtpr
= cpu_has(c
, X86_FEATURE_XTPR
);
381 cpu
->flags
.pdcm
= cpu_has(c
, X86_FEATURE_PDCM
);
382 cpu
->flags
.dca
= cpu_has(c
, X86_FEATURE_DCA
);
383 cpu
->flags
.xmm4_1
= cpu_has(c
, X86_FEATURE_XMM4_1
);
384 cpu
->flags
.xmm4_2
= cpu_has(c
, X86_FEATURE_XMM4_2
);
385 cpu
->flags
.x2apic
= cpu_has(c
, X86_FEATURE_X2APIC
);
386 cpu
->flags
.movbe
= cpu_has(c
, X86_FEATURE_MOVBE
);
387 cpu
->flags
.popcnt
= cpu_has(c
, X86_FEATURE_POPCNT
);
388 cpu
->flags
.aes
= cpu_has(c
, X86_FEATURE_AES
);
389 cpu
->flags
.xsave
= cpu_has(c
, X86_FEATURE_XSAVE
);
390 cpu
->flags
.osxsave
= cpu_has(c
, X86_FEATURE_OSXSAVE
);
391 cpu
->flags
.avx
= cpu_has(c
, X86_FEATURE_AVX
);
392 cpu
->flags
.hypervisor
= cpu_has(c
, X86_FEATURE_HYPERVISOR
);
393 cpu
->flags
.ace2
= cpu_has(c
, X86_FEATURE_ACE2
);
394 cpu
->flags
.ace2_en
= cpu_has(c
, X86_FEATURE_ACE2_EN
);
395 cpu
->flags
.phe
= cpu_has(c
, X86_FEATURE_PHE
);
396 cpu
->flags
.phe_en
= cpu_has(c
, X86_FEATURE_PHE_EN
);
397 cpu
->flags
.pmm
= cpu_has(c
, X86_FEATURE_PMM
);
398 cpu
->flags
.pmm_en
= cpu_has(c
, X86_FEATURE_PMM_EN
);
399 cpu
->flags
.extapic
= cpu_has(c
, X86_FEATURE_EXTAPIC
);
400 cpu
->flags
.cr8_legacy
= cpu_has(c
, X86_FEATURE_CR8_LEGACY
);
401 cpu
->flags
.abm
= cpu_has(c
, X86_FEATURE_ABM
);
402 cpu
->flags
.sse4a
= cpu_has(c
, X86_FEATURE_SSE4A
);
403 cpu
->flags
.misalignsse
= cpu_has(c
, X86_FEATURE_MISALIGNSSE
);
404 cpu
->flags
.nowprefetch
= cpu_has(c
, X86_FEATURE_3DNOWPREFETCH
);
405 cpu
->flags
.osvw
= cpu_has(c
, X86_FEATURE_OSVW
);
406 cpu
->flags
.ibs
= cpu_has(c
, X86_FEATURE_IBS
);
407 cpu
->flags
.sse5
= cpu_has(c
, X86_FEATURE_SSE5
);
408 cpu
->flags
.skinit
= cpu_has(c
, X86_FEATURE_SKINIT
);
409 cpu
->flags
.wdt
= cpu_has(c
, X86_FEATURE_WDT
);
410 cpu
->flags
.ida
= cpu_has(c
, X86_FEATURE_IDA
);
411 cpu
->flags
.arat
= cpu_has(c
, X86_FEATURE_ARAT
);
412 cpu
->flags
.tpr_shadow
= cpu_has(c
, X86_FEATURE_TPR_SHADOW
);
413 cpu
->flags
.vnmi
= cpu_has(c
, X86_FEATURE_VNMI
);
414 cpu
->flags
.flexpriority
= cpu_has(c
, X86_FEATURE_FLEXPRIORITY
);
415 cpu
->flags
.ept
= cpu_has(c
, X86_FEATURE_EPT
);
416 cpu
->flags
.vpid
= cpu_has(c
, X86_FEATURE_VPID
);
417 cpu
->flags
.svm
= cpu_has(c
, X86_FEATURE_SVM
);
420 void set_generic_info(struct cpuinfo_x86
*c
, s_cpu
* cpu
)
422 cpu
->family
= c
->x86
;
423 cpu
->vendor_id
= c
->x86_vendor
;
424 cpu
->model_id
= c
->x86_model
;
425 cpu
->stepping
= c
->x86_mask
;
426 strncpy(cpu
->vendor
, cpu_devs
[c
->x86_vendor
]->c_vendor
,
427 sizeof(cpu
->vendor
));
428 strncpy(cpu
->model
, c
->x86_model_id
, sizeof(cpu
->model
));
429 cpu
->num_cores
= c
->x86_num_cores
;
430 cpu
->l1_data_cache_size
= c
->x86_l1_data_cache_size
;
431 cpu
->l1_instruction_cache_size
= c
->x86_l1_instruction_cache_size
;
432 cpu
->l2_cache_size
= c
->x86_l2_cache_size
;
435 void detect_cpu(s_cpu
* cpu
)
437 struct cpuinfo_x86 c
;
438 c
.x86_clflush_size
= 32;
439 c
.x86_l1_data_cache_size
= 0;
440 c
.x86_l1_instruction_cache_size
= 0;
441 c
.x86_l2_cache_size
= 0;
442 c
.x86_vendor
= X86_VENDOR_UNKNOWN
;
443 c
.cpuid_level
= -1; /* CPUID not detected */
444 c
.x86_model
= c
.x86_mask
= 0; /* So far unknown... */
446 memset(&c
.x86_capability
, 0, sizeof(c
.x86_capability
));
447 memset(&c
.x86_vendor_id
, 0, sizeof(c
.x86_vendor_id
));
448 memset(&c
.x86_model_id
, 0, sizeof(c
.x86_model_id
));
453 generic_identify(&c
);
454 set_generic_info(&c
, cpu
);
455 set_cpu_flags(&c
, cpu
);