 * Portions of this file taken from the Linux kernel,
 * Copyright 1991-2009 Linus Torvalds and contributors
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
const char *cpu_flags_names[] = {
    CPU_FLAGS(STRUCT_MEMBER_NAMES)
};

size_t cpu_flags_offset[] = {
    CPU_FLAGS(STRUCTURE_MEMBER_OFFSETS)
};

size_t cpu_flags_count = sizeof cpu_flags_names / sizeof *cpu_flags_names;
struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = { };
bool get_cpu_flag_value_from_name(s_cpu * cpu, const char *flag_name)
{
    size_t i;
    bool cpu_flag_present = false, *flag_value = &cpu_flag_present;

    for (i = 0; i < cpu_flags_count; i++) {
        if (strcmp(cpu_flags_names[i], flag_name) == 0) {
            /* Point flag_value at the matching member inside cpu->flags */
            flag_value = (bool *)((char *)&cpu->flags + cpu_flags_offset[i]);
        }
    }

    return *flag_value;
}
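/*
 * Editorial note: cpu_flags_names[] and cpu_flags_offset[] are both expanded
 * from the same CPU_FLAGS() x-macro above, so index i of one table always
 * corresponds to index i of the other; the cast in the loop simply points at
 * the matching bool member of cpu->flags.  A minimal usage sketch (assuming
 * the flag is spelled exactly like the structure member, e.g. "pae"):
 *
 *     s_cpu cpu;
 *     detect_cpu(&cpu);
 *     if (get_cpu_flag_value_from_name(&cpu, "pae"))
 *         printf("PAE supported\n");
 */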
/*
 * CPUID functions returning a single datum
 */

/* Probe for the CPUID instruction */
static int have_cpuid_p(void)
{
    return cpu_has_eflag(X86_EFLAGS_ID);
}
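/*
 * Editorial note: cpu_has_eflag() is assumed here to test whether the given
 * EFLAGS bit can be toggled by software.  On i486 and later CPUs, bit 21
 * (the ID flag) is writable if and only if the CPUID instruction is
 * implemented, so probing that single bit is enough to detect CPUID support.
 */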
static struct cpu_dev amd_cpu_dev = {
    .c_ident = {"AuthenticAMD"}
};

static struct cpu_dev intel_cpu_dev = {
    .c_ident = {"GenuineIntel"}
};

static struct cpu_dev cyrix_cpu_dev = {
    .c_ident = {"CyrixInstead"}
};

static struct cpu_dev umc_cpu_dev = {
    .c_ident = {"UMC UMC UMC"}
};

static struct cpu_dev nexgen_cpu_dev = {
    .c_ident = {"NexGenDriven"}
};

static struct cpu_dev centaur_cpu_dev = {
    .c_vendor = "Centaur",
    .c_ident = {"CentaurHauls"}
};

static struct cpu_dev rise_cpu_dev = {
    .c_ident = {"RiseRiseRise"}
};

static struct cpu_dev transmeta_cpu_dev = {
    .c_vendor = "Transmeta",
    .c_ident = {"GenuineTMx86", "TransmetaCPU"}
};

static struct cpu_dev nsc_cpu_dev = {
    .c_vendor = "National Semiconductor",
    .c_ident = {"Geode by NSC"}
};

static struct cpu_dev unknown_cpu_dev = {
    .c_vendor = "Unknown Vendor",
    .c_ident = {"Unknown CPU"}
};
/*
 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info about the CPU
 */
void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
    unsigned char ccr2, ccr3;

    /* we test for DEVID by checking whether CCR3 is writable */
    ccr3 = getCx86(CX86_CCR3);
    setCx86(CX86_CCR3, ccr3 ^ 0x80);
    getCx86(0xc0);		/* dummy to change bus */

    if (getCx86(CX86_CCR3) == ccr3) {	/* no DEVID regs. */
        ccr2 = getCx86(CX86_CCR2);
        setCx86(CX86_CCR2, ccr2 ^ 0x04);
        getCx86(0xc0);		/* dummy */

        if (getCx86(CX86_CCR2) == ccr2)	/* old Cx486SLC/DLC */
            *dir0 = 0xfd;
        else {			/* Cx486S A step */
            setCx86(CX86_CCR2, ccr2);
            *dir0 = 0xfe;
        }
    } else {
        setCx86(CX86_CCR3, ccr3);	/* restore CCR3 */

        /* read DIR0 and DIR1 CPU registers */
        *dir0 = getCx86(CX86_DIR0);
        *dir1 = getCx86(CX86_DIR1);
    }
}
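/*
 * Editorial note: getCx86()/setCx86() are assumed to be the usual Cyrix
 * configuration-register accessors (write the register index to I/O port
 * 0x22, then read or write the data through port 0x23).  The dummy
 * getCx86(0xc0) reads above only exist to force a fresh index/data cycle
 * between the writes and the read-back comparisons.
 */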
void init_cpu_devs(void)
{
    cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
    cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
    cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
    cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev;
    cpu_devs[X86_VENDOR_NEXGEN] = &nexgen_cpu_dev;
    cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
    cpu_devs[X86_VENDOR_RISE] = &rise_cpu_dev;
    cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
    cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
    cpu_devs[X86_VENDOR_UNKNOWN] = &unknown_cpu_dev;
}
void get_cpu_vendor(struct cpuinfo_x86 *c)
{
    char *v = c->x86_vendor_id;
    int i;

    for (i = 0; i < X86_VENDOR_NUM - 1; i++) {
        if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
            (cpu_devs[i]->c_ident[1] &&
             !strcmp(v, cpu_devs[i]->c_ident[1]))) {
            c->x86_vendor = i;
            return;
        }
    }

    c->x86_vendor = X86_VENDOR_UNKNOWN;
}
int get_model_name(struct cpuinfo_x86 *c)
{
    unsigned int *v;
    char *p, *q;

    if (cpuid_eax(0x80000000) < 0x80000004)
        return 0;

    v = (unsigned int *)c->x86_model_id;
    cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
    cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
    cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
    c->x86_model_id[48] = 0;

    /* Intel chips right-justify this string for some dumb reason;
       undo that brain damage */
    p = q = &c->x86_model_id[0];
    while (*p == ' ')
        p++;			/* skip the leading padding */
    if (p != q) {
        while (*p)
            *q++ = *p++;	/* shift the name back to the left */
        while (q <= &c->x86_model_id[48])
            *q++ = '\0';	/* Zero-pad the rest */
    }

    return 1;
}
void detect_cache(uint32_t xlvl, struct cpuinfo_x86 *c)
{
    uint32_t eax, ebx, ecx, edx, l2size;

    /* Detecting L1 cache */
    if (xlvl >= 0x80000005) {
        cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
        c->x86_l1_data_cache_size = ecx >> 24;
        c->x86_l1_instruction_cache_size = edx >> 24;
    }

    /* Detecting L2 cache */
    c->x86_l2_cache_size = 0;

    if (xlvl < 0x80000006)	/* Some chips just have a large L1. */
        return;

    cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
    l2size = ecx >> 16;		/* L2 size is reported in KB */

    /* Vendor based fixes */
    switch (c->x86_vendor) {
    case X86_VENDOR_INTEL:
        /*
         * Intel PIII Tualatin. This comes in two flavours.
         * One has 256kb of cache, the other 512. We have no way
         * to determine which, so we use a boottime override
         * for the 512kb model, and assume 256 otherwise.
         */
        if ((c->x86 == 6) && (c->x86_model == 11) && (l2size == 0))
            l2size = 256;
        break;
    case X86_VENDOR_AMD:
        /* AMD errata T13 (order #21922) */
        if (c->x86 == 6) {
            if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
                l2size = 64;
            if (c->x86_model == 4 && (c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
                l2size = 256;
        }
        break;
    }

    c->x86_l2_cache_size = l2size;
}
void detect_cyrix(struct cpuinfo_x86 *c)
{
    unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
    char *buf = c->x86_model_id;
    char Cx86_cb[] = "?.5x Core/Bus Clock";
    const char cyrix_model_mult1[] = "12??43";
    const char cyrix_model_mult2[] = "12233445";
    const char *p = NULL;
    unsigned char tmp;

    do_cyrix_devid(&dir0, &dir1);

    dir0_msn = dir0 >> 4;	/* identifies CPU "family" */
    dir0_lsn = dir0 & 0xf;	/* model or clock multiplier */

    c->x86_model = (dir1 >> 4) + 1;
    c->x86_mask = dir1 & 0xf;

    switch (dir0_msn) {
    case 0:			/* Cx486SLC/DLC/SRx/DRx */
        p = Cx486_name[dir0_lsn & 7];
        break;

    case 1:			/* Cx486S/DX/DX2/DX4 */
        p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5] : Cx486S_name[dir0_lsn & 3];
        break;

    case 2:			/* 5x86 */
        Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
        /* ... */
        break;

    case 3:			/* 6x86/6x86L */
        Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
        if (dir1 > 0x21) {	/* 686L */
            /* ... */
        }
        break;

    case 4:			/* MediaGX/GXm */
        c->x86_l1_data_cache_size = 16;	/* Yep 16K integrated cache, that's it */
        if (c->cpuid_level != 2) {	/* Media GX */
            Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
            /* ... */
        }
        break;

    case 5:			/* 6x86MX/M II */
        if (dir1 > 7)
            dir0_msn++;		/* M II */
        else
            c->coma_bug = 1;	/* 6x86MX, it has the bug. */
        tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
        Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
        if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
            (c->x86_model)++;
        break;

    case 0xf:			/* Cyrix 486 without DEVID registers */
        switch (dir0_lsn) {
        case 0xd:		/* either a 486SLC or DLC w/o DEVID */
            p = Cx486_name[(c->hard_math) ? 1 : 0];
            break;

        case 0xe:		/* a 486S A step */
            /* ... */
            break;
        }
        break;
    }

    /* If the processor is unknown, we keep the model name we got
     * from the generic call */
    strcpy(buf, Cx86_model[dir0_msn & 7]);
    if (p)
        strcat(buf, p);
}
void generic_identify(struct cpuinfo_x86 *c)
{
    uint32_t tfms, xlvl;
    uint32_t eax, ebx, ecx, edx;

    /* Get vendor name */
    cpuid(0x00000000,
          (uint32_t *) & c->cpuid_level,
          (uint32_t *) & c->x86_vendor_id[0],
          (uint32_t *) & c->x86_vendor_id[8],
          (uint32_t *) & c->x86_vendor_id[4]);

    get_cpu_vendor(c);

    /* Intel-defined flags: level 0x00000001 */
    if (c->cpuid_level >= 0x00000001) {
        uint32_t capability, excap;
        cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
        c->x86_capability[0] = capability;
        c->x86_capability[4] = excap;
        c->x86 = (tfms >> 8) & 15;
        c->x86_model = (tfms >> 4) & 15;
        if (c->x86 == 0xf)
            c->x86 += (tfms >> 20) & 0xff;
        if (c->x86 >= 0x6)
            c->x86_model += ((tfms >> 16) & 0xF) << 4;
        c->x86_mask = tfms & 15;
        if (cpu_has(c, X86_FEATURE_CLFLSH))
            c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
    } else {
        /* Have CPUID level 0 only - unheard of */
        c->x86 = 4;
    }

    /* AMD-defined flags: level 0x80000001 */
    xlvl = cpuid_eax(0x80000000);
    if ((xlvl & 0xffff0000) == 0x80000000) {
        if (xlvl >= 0x80000001) {
            c->x86_capability[1] = cpuid_edx(0x80000001);
            c->x86_capability[6] = cpuid_ecx(0x80000001);
        }
        if (xlvl >= 0x80000004)
            get_model_name(c);	/* Default name */
    }

    /* Specific detection code */
    switch (c->x86_vendor) {
    case X86_VENDOR_CYRIX:
    case X86_VENDOR_NSC: detect_cyrix(c); break;
    }

    /* Detecting the number of cores */
    switch (c->x86_vendor) {
    case X86_VENDOR_AMD:
        if (xlvl >= 0x80000008) {
            c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
            if (c->x86_num_cores & (c->x86_num_cores - 1))
                c->x86_num_cores = 1;
        }
        break;
    case X86_VENDOR_INTEL:
        if (c->cpuid_level >= 0x00000004) {
            cpuid(0x4, &eax, &ebx, &ecx, &edx);
            c->x86_num_cores = ((eax & 0xfc000000) >> 26) + 1;
        }
        break;
    default:
        c->x86_num_cores = 1;
        break;
    }

    detect_cache(xlvl, c);
}
/*
 * Checksum an MP configuration block.
 */
static int mpf_checksum(unsigned char *mp, int len)
{
    int sum = 0;

    while (len--)
        sum += *mp++;

    return sum & 0xFF;
}

static int smp_scan_config(unsigned long base, unsigned long length)
{
    unsigned long *bp = (unsigned long *)base;
    struct intel_mp_floating *mpf;

//      printf("Scan SMP from %p for %ld bytes.\n", bp, length);
    if (sizeof(*mpf) != 16) {
        printf("Error: MPF size\n");
        return 0;
    }

    while (length > 0) {
        mpf = (struct intel_mp_floating *)bp;
        if ((*bp == SMP_MAGIC_IDENT) &&
            (mpf->mpf_length == 1) &&
            !mpf_checksum((unsigned char *)bp, 16) &&
            ((mpf->mpf_specification == 1)
             || (mpf->mpf_specification == 4))) {
            return 1;		/* valid MP floating pointer found */
        }
        bp += 4;
        length -= 16;
    }

    return 0;
}
int find_smp_config(void)
{
//      unsigned int address;

    /*
     * FIXME: Linux assumes you have 640K of base ram..
     * this continues the error...
     *
     * 1) Scan the bottom 1K for a signature
     * 2) Scan the top 1K of base RAM
     * 3) Scan the 64K of bios
     */
    if (smp_scan_config(0x0, 0x400) ||
        smp_scan_config(639 * 0x400, 0x400) ||
        smp_scan_config(0xF0000, 0x10000))
        return 1;

    /*
     * If it is an SMP machine we should know now, unless the
     * configuration is in an EISA/MCA bus machine with an
     * extended bios data area.
     *
     * there is a real-mode segmented pointer pointing to the
     * 4K EBDA area at 0x40E, calculate and scan it here.
     *
     * NOTE! There are Linux loaders that will corrupt the EBDA
     * area, and as such this kind of SMP config may be less
     * trustworthy, simply because the SMP table may have been
     * stomped on during early boot. These loaders are buggy and
     * should be fixed.
     *
     * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
     */

//      address = get_bios_ebda();
//      if (address)
//              smp_scan_config(address, 0x400);

    return 0;
}
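/*
 * Editorial note: the EBDA scan above is left commented out in this file.
 * A get_bios_ebda() helper would typically read the 16-bit real-mode segment
 * stored at physical address 0x40E and shift it left by 4 to obtain the EBDA
 * base, roughly:
 *
 *     address = (*(volatile uint16_t *)0x40E) << 4;
 */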
void set_cpu_flags(struct cpuinfo_x86 *c, s_cpu * cpu)
{
    cpu->flags.fpu = cpu_has(c, X86_FEATURE_FPU);
    cpu->flags.vme = cpu_has(c, X86_FEATURE_VME);
    cpu->flags.de = cpu_has(c, X86_FEATURE_DE);
    cpu->flags.pse = cpu_has(c, X86_FEATURE_PSE);
    cpu->flags.tsc = cpu_has(c, X86_FEATURE_TSC);
    cpu->flags.msr = cpu_has(c, X86_FEATURE_MSR);
    cpu->flags.pae = cpu_has(c, X86_FEATURE_PAE);
    cpu->flags.mce = cpu_has(c, X86_FEATURE_MCE);
    cpu->flags.cx8 = cpu_has(c, X86_FEATURE_CX8);
    cpu->flags.apic = cpu_has(c, X86_FEATURE_APIC);
    cpu->flags.sep = cpu_has(c, X86_FEATURE_SEP);
    cpu->flags.mtrr = cpu_has(c, X86_FEATURE_MTRR);
    cpu->flags.pge = cpu_has(c, X86_FEATURE_PGE);
    cpu->flags.mca = cpu_has(c, X86_FEATURE_MCA);
    cpu->flags.cmov = cpu_has(c, X86_FEATURE_CMOV);
    cpu->flags.pat = cpu_has(c, X86_FEATURE_PAT);
    cpu->flags.pse_36 = cpu_has(c, X86_FEATURE_PSE36);
    cpu->flags.psn = cpu_has(c, X86_FEATURE_PN);
    cpu->flags.clflsh = cpu_has(c, X86_FEATURE_CLFLSH);
    cpu->flags.dts = cpu_has(c, X86_FEATURE_DTES);
    cpu->flags.acpi = cpu_has(c, X86_FEATURE_ACPI);
    cpu->flags.pbe = cpu_has(c, X86_FEATURE_PBE);
    cpu->flags.mmx = cpu_has(c, X86_FEATURE_MMX);
    cpu->flags.fxsr = cpu_has(c, X86_FEATURE_FXSR);
    cpu->flags.sse = cpu_has(c, X86_FEATURE_XMM);
    cpu->flags.sse2 = cpu_has(c, X86_FEATURE_XMM2);
    cpu->flags.ss = cpu_has(c, X86_FEATURE_SELFSNOOP);
    cpu->flags.htt = cpu_has(c, X86_FEATURE_HT);
    cpu->flags.acc = cpu_has(c, X86_FEATURE_ACC);
    cpu->flags.syscall = cpu_has(c, X86_FEATURE_SYSCALL);
    cpu->flags.mp = cpu_has(c, X86_FEATURE_MP);
    cpu->flags.nx = cpu_has(c, X86_FEATURE_NX);
    cpu->flags.mmxext = cpu_has(c, X86_FEATURE_MMXEXT);
    cpu->flags.fxsr_opt = cpu_has(c, X86_FEATURE_FXSR_OPT);
    cpu->flags.gbpages = cpu_has(c, X86_FEATURE_GBPAGES);
    cpu->flags.rdtscp = cpu_has(c, X86_FEATURE_RDTSCP);
    cpu->flags.lm = cpu_has(c, X86_FEATURE_LM);
    cpu->flags.nowext = cpu_has(c, X86_FEATURE_3DNOWEXT);
    cpu->flags.now = cpu_has(c, X86_FEATURE_3DNOW);
    cpu->flags.smp = find_smp_config();
    cpu->flags.pni = cpu_has(c, X86_FEATURE_XMM3);
    cpu->flags.pclmulqd = cpu_has(c, X86_FEATURE_PCLMULQDQ);
    cpu->flags.dtes64 = cpu_has(c, X86_FEATURE_DTES64);
    cpu->flags.vmx = cpu_has(c, X86_FEATURE_VMX);
    cpu->flags.smx = cpu_has(c, X86_FEATURE_SMX);
    cpu->flags.est = cpu_has(c, X86_FEATURE_EST);
    cpu->flags.tm2 = cpu_has(c, X86_FEATURE_TM2);
    cpu->flags.sse3 = cpu_has(c, X86_FEATURE_SSE3);
    cpu->flags.cid = cpu_has(c, X86_FEATURE_CID);
    cpu->flags.fma = cpu_has(c, X86_FEATURE_FMA);
    cpu->flags.cx16 = cpu_has(c, X86_FEATURE_CX16);
    cpu->flags.xtpr = cpu_has(c, X86_FEATURE_XTPR);
    cpu->flags.pdcm = cpu_has(c, X86_FEATURE_PDCM);
    cpu->flags.dca = cpu_has(c, X86_FEATURE_DCA);
    cpu->flags.xmm4_1 = cpu_has(c, X86_FEATURE_XMM4_1);
    cpu->flags.xmm4_2 = cpu_has(c, X86_FEATURE_XMM4_2);
    cpu->flags.x2apic = cpu_has(c, X86_FEATURE_X2APIC);
    cpu->flags.movbe = cpu_has(c, X86_FEATURE_MOVBE);
    cpu->flags.popcnt = cpu_has(c, X86_FEATURE_POPCNT);
    cpu->flags.aes = cpu_has(c, X86_FEATURE_AES);
    cpu->flags.xsave = cpu_has(c, X86_FEATURE_XSAVE);
    cpu->flags.osxsave = cpu_has(c, X86_FEATURE_OSXSAVE);
    cpu->flags.avx = cpu_has(c, X86_FEATURE_AVX);
    cpu->flags.hypervisor = cpu_has(c, X86_FEATURE_HYPERVISOR);
    cpu->flags.ace2 = cpu_has(c, X86_FEATURE_ACE2);
    cpu->flags.ace2_en = cpu_has(c, X86_FEATURE_ACE2_EN);
    cpu->flags.phe = cpu_has(c, X86_FEATURE_PHE);
    cpu->flags.phe_en = cpu_has(c, X86_FEATURE_PHE_EN);
    cpu->flags.pmm = cpu_has(c, X86_FEATURE_PMM);
    cpu->flags.pmm_en = cpu_has(c, X86_FEATURE_PMM_EN);
    cpu->flags.extapic = cpu_has(c, X86_FEATURE_EXTAPIC);
    cpu->flags.cr8_legacy = cpu_has(c, X86_FEATURE_CR8_LEGACY);
    cpu->flags.abm = cpu_has(c, X86_FEATURE_ABM);
    cpu->flags.sse4a = cpu_has(c, X86_FEATURE_SSE4A);
    cpu->flags.misalignsse = cpu_has(c, X86_FEATURE_MISALIGNSSE);
    cpu->flags.nowprefetch = cpu_has(c, X86_FEATURE_3DNOWPREFETCH);
    cpu->flags.osvw = cpu_has(c, X86_FEATURE_OSVW);
    cpu->flags.ibs = cpu_has(c, X86_FEATURE_IBS);
    cpu->flags.sse5 = cpu_has(c, X86_FEATURE_SSE5);
    cpu->flags.skinit = cpu_has(c, X86_FEATURE_SKINIT);
    cpu->flags.wdt = cpu_has(c, X86_FEATURE_WDT);
    cpu->flags.ida = cpu_has(c, X86_FEATURE_IDA);
    cpu->flags.arat = cpu_has(c, X86_FEATURE_ARAT);
    cpu->flags.tpr_shadow = cpu_has(c, X86_FEATURE_TPR_SHADOW);
    cpu->flags.vnmi = cpu_has(c, X86_FEATURE_VNMI);
    cpu->flags.flexpriority = cpu_has(c, X86_FEATURE_FLEXPRIORITY);
    cpu->flags.ept = cpu_has(c, X86_FEATURE_EPT);
    cpu->flags.vpid = cpu_has(c, X86_FEATURE_VPID);
    cpu->flags.svm = cpu_has(c, X86_FEATURE_SVM);
}
void set_generic_info(struct cpuinfo_x86 *c, s_cpu * cpu)
{
    cpu->family = c->x86;
    cpu->vendor_id = c->x86_vendor;
    cpu->model_id = c->x86_model;
    cpu->stepping = c->x86_mask;
    strlcpy(cpu->vendor, cpu_devs[c->x86_vendor]->c_vendor,
            sizeof(cpu->vendor));
    strlcpy(cpu->model, c->x86_model_id, sizeof(cpu->model));
    cpu->num_cores = c->x86_num_cores;
    cpu->l1_data_cache_size = c->x86_l1_data_cache_size;
    cpu->l1_instruction_cache_size = c->x86_l1_instruction_cache_size;
    cpu->l2_cache_size = c->x86_l2_cache_size;
}
void detect_cpu(s_cpu * cpu)
{
    struct cpuinfo_x86 c;

    memset(&c, 0, sizeof(c));
    c.x86_clflush_size = 32;
    c.x86_vendor = X86_VENDOR_UNKNOWN;
    c.cpuid_level = -1;		/* CPUID not detected */

    memset(&cpu->flags, 0, sizeof(s_cpu_flags));

    init_cpu_devs();

    if (have_cpuid_p())
        generic_identify(&c);

    set_generic_info(&c, cpu);
    set_cpu_flags(&c, cpu);
}
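/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * allocates an s_cpu, runs detect_cpu() on it, and then reads the decoded
 * fields and feature flags filled in above, e.g.:
 *
 *     s_cpu cpu;
 *
 *     detect_cpu(&cpu);
 *     printf("%s %s, %d core(s), L2 cache %d KB\n",
 *            cpu.vendor, cpu.model, cpu.num_cores, cpu.l2_cache_size);
 *     if (cpu.flags.lm)
 *         printf("long mode (64-bit) capable\n");
 */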