/*
 * syslinux 3.50~pre5 -- com32/modules/cpuid.c
 * (imported from the syslinux-debian/hramrach.git tree,
 *  blob 00f810e87d4599b976b72eae674383f281795d3c)
 */
/* ----------------------------------------------------------------------- *
 *
 *   Copyright 2006 Erwan Velu - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 53 Temple Place Ste 330,
 *   Boston MA 02111-1307, USA; either version 2 of the License, or
 *   (at your option) any later version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */
#include <stdio.h>
#include <string.h>
#include "cpuid.h"
17 struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
/*
 * CPUID helpers returning a single register.
 *
 * Execute CPUID with EAX = op and return the resulting EAX.
 * EBX/ECX/EDX are declared as clobbers because CPUID overwrites them.
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int result;

	__asm__("cpuid"
		: "=a" (result)
		: "0" (op)
		: "bx", "cx", "dx");
	return result;
}
/*
 * Execute CPUID with EAX = op and return the resulting ECX.
 * EAX is also captured (CPUID always rewrites it); EBX/EDX are clobbered.
 */
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int discard_eax, result_ecx;

	__asm__("cpuid"
		: "=a" (discard_eax), "=c" (result_ecx)
		: "0" (op)
		: "bx", "dx" );
	return result_ecx;
}
/*
 * Execute CPUID with EAX = op and return the resulting EDX.
 * EAX is also captured (CPUID always rewrites it); EBX/ECX are clobbered.
 */
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int discard_eax, result_edx;

	__asm__("cpuid"
		: "=a" (discard_eax), "=d" (result_edx)
		: "0" (op)
		: "bx", "cx");
	return result_edx;
}
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Read EFLAGS twice: f2 receives the original value, then a copy
	 * with `flag` XOR-toggled is pushed back into EFLAGS and f1
	 * receives the re-read value.  The original EFLAGS is restored
	 * by the final popfl.  If the bit differs between f1 and f2, the
	 * CPU allows software to change it.  Used to probe the EFLAGS ID
	 * bit, whose writability indicates CPUID support.
	 * NOTE(review): pushfl/popl are 32-bit-only mnemonics; this will
	 * not assemble for x86-64 -- presumably com32 is i386-only.
	 */
	asm("pushfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "movl %0,%1\n\t"
	    "xorl %2,%0\n\t"
	    "pushl %0\n\t"
	    "popfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "popfl\n\t"
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}
75 /* Probe for the CPUID instruction */
76 static int have_cpuid_p(void)
78 return flag_is_changeable_p(X86_EFLAGS_ID);
/*
 * Per-vendor descriptors.  c_vendor is the human-readable vendor name;
 * c_ident lists the 12-byte CPUID vendor string(s) that identify the
 * vendor (Transmeta has used two distinct strings).
 */
static struct cpu_dev amd_cpu_dev = {
	.c_vendor = "AMD",
	.c_ident = { "AuthenticAMD" }
};

static struct cpu_dev intel_cpu_dev = {
	.c_vendor = "Intel",
	.c_ident = { "GenuineIntel" }
};

static struct cpu_dev cyrix_cpu_dev = {
	.c_vendor = "Cyrix",
	.c_ident = { "CyrixInstead" }
};

static struct cpu_dev umc_cpu_dev = {
	.c_vendor = "UMC",
	.c_ident = { "UMC UMC UMC" }
};

static struct cpu_dev nexgen_cpu_dev = {
	.c_vendor = "Nexgen",
	.c_ident = { "NexGenDriven" }
};

static struct cpu_dev centaur_cpu_dev = {
	.c_vendor = "Centaur",
	.c_ident = { "CentaurHauls" }
};

static struct cpu_dev rise_cpu_dev = {
	.c_vendor = "Rise",
	.c_ident = { "RiseRiseRise" }
};

static struct cpu_dev transmeta_cpu_dev = {
	.c_vendor = "Transmeta",
	.c_ident = { "GenuineTMx86", "TransmetaCPU" }
};
122 void init_cpu_devs(void)
124 cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
125 cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
126 cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
127 cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev;
128 cpu_devs[X86_VENDOR_NEXGEN] = &nexgen_cpu_dev;
129 cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
130 cpu_devs[X86_VENDOR_RISE] = &rise_cpu_dev;
131 cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
134 void get_cpu_vendor(struct cpuinfo_x86 *c)
136 char *v = c->x86_vendor_id;
137 int i;
138 init_cpu_devs();
139 for (i = 0; i < X86_VENDOR_NUM; i++) {
140 if (cpu_devs[i]) {
141 if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
142 (cpu_devs[i]->c_ident[1] &&
143 !strcmp(v,cpu_devs[i]->c_ident[1]))) {
144 c->x86_vendor = i;
145 return;
150 c->x86_vendor = X86_VENDOR_UNKNOWN;
/*
 * Fetch the processor brand string via extended CPUID leaves
 * 0x80000002..0x80000004 into c->x86_model_id (48 bytes + NUL),
 * left-justifying it in place.  Returns 1 on success, 0 when the CPU
 * does not implement leaf 0x80000004.
 */
int get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	/* Brand string requires extended leaves up to 0x80000004. */
	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	/*
	 * Each leaf returns 16 bytes of the string in its four output
	 * registers, 48 bytes total.
	 * NOTE(review): casting the char array to unsigned int* assumes
	 * suitable alignment and relies on type-punning -- TODO confirm
	 * x86_model_id is at least 49 bytes and aligned in cpuid.h.
	 */
	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
		p++;
	if ( p != q ) {
		/* Shift the string left over the leading spaces... */
		while ( *p )
			*q++ = *p++;
		/* ...then zero everything up to and including index 48. */
		while ( q <= &c->x86_model_id[48] )
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}
/*
 * Identify the CPU via CPUID: vendor string, family/model/stepping,
 * capability bit words, and (when available) the brand string.
 * Caller must have verified CPUID support (have_cpuid_p()).
 */
void generic_identify(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	int junk;
	/* Get vendor name */
	/* Offsets 0,8,4 interleave the three registers so the 12-byte
	   vendor string (e.g. "GenuineIntel") comes out contiguous --
	   presumably the cpuid() macro outputs EBX,ECX,EDX in that
	   argument order; verify against cpuid.h. */
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);
	/* Intel-defined flags: level 0x00000001 */
	if ( c->cpuid_level >= 0x00000001 ) {
		u32 capability, excap;
		cpuid(0x00000001, &tfms, &junk, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
		/* EAX: stepping[3:0], model[7:4], family[11:8],
		   plus extended model/family fields for family 0xf. */
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf) {
			c->x86 += (tfms >> 20) & 0xff;
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		}
		c->x86_mask = tfms & 15;
		/* Bit 19 = CLFLUSH present; EBX[15:8] holds the CLFLUSH
		   line size in 8-byte units. */
		if (capability & (1<<19))
			c->x86_cache_alignment = ((junk >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	/* High word 0x8000 means extended leaves are implemented. */
	if ( (xlvl & 0xffff0000) == 0x80000000 ) {
		if ( xlvl >= 0x80000001 ) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if ( xlvl >= 0x80000004 )
			get_model_name(c); /* Default name */
	}
}
/*
 * Checksum an MP configuration block: byte-wise sum modulo 256.
 * A valid MP floating pointer structure sums to zero.
 */
static int mpf_checksum(unsigned char *mp, int len)
{
	int total = 0;
	int i;

	for (i = 0; i < len; i++)
		total += mp[i];

	return total & 0xFF;
}
239 static int smp_scan_config (unsigned long base, unsigned long length)
241 unsigned long *bp = base;
242 struct intel_mp_floating *mpf;
244 // printf("Scan SMP from %p for %ld bytes.\n", bp,length);
245 if (sizeof(*mpf) != 16) {
246 printf("Error: MPF size\n");
247 return 0;
250 while (length > 0) {
251 mpf = (struct intel_mp_floating *)bp;
252 if ((*bp == SMP_MAGIC_IDENT) &&
253 (mpf->mpf_length == 1) &&
254 !mpf_checksum((unsigned char *)bp, 16) &&
255 ((mpf->mpf_specification == 1)
256 || (mpf->mpf_specification == 4)) ) {
257 return 1;
259 bp += 4;
260 length -= 16;
262 return 0;
/*
 * Look for an MP (SMP) configuration table in the locations mandated
 * by the MP specification.  Returns 1 when a valid MP floating
 * pointer structure is found, 0 otherwise.
 */
int find_smp_config (void)
{
	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400))
		return 1;
	if (smp_scan_config(639 * 0x400, 0x400))
		return 1;
	if (smp_scan_config(0xF0000, 0x10000))
		return 1;

	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA/MCA bus machine with an
	 * extended bios data area.
	 *
	 * There is a real-mode segmented pointer pointing to the 4K
	 * EBDA area at 0x40E; the MP1.4 spec says to scan only its
	 * first 1K.  That scan is disabled here (loaders are known to
	 * stomp on the EBDA, making any table found there untrustworthy):
	 *
	 *   address = get_bios_ebda();
	 *   if (address)
	 *       smp_scan_config(address, 0x400);
	 */
	return 0;
}
/*
 * Translate the raw x86_capability bit words gathered by
 * generic_identify() into the named boolean feature flags of the
 * exported s_cpu structure; cpu_has() tests one X86_FEATURE_* bit.
 */
void set_cpu_flags(struct cpuinfo_x86 *c, s_cpu *cpu) {
	cpu->flags.fpu=cpu_has(c, X86_FEATURE_FPU);
	cpu->flags.vme=cpu_has(c, X86_FEATURE_VME);
	cpu->flags.de=cpu_has(c, X86_FEATURE_DE);
	cpu->flags.pse=cpu_has(c, X86_FEATURE_PSE);
	cpu->flags.tsc=cpu_has(c, X86_FEATURE_TSC);
	cpu->flags.msr=cpu_has(c, X86_FEATURE_MSR);
	cpu->flags.pae=cpu_has(c, X86_FEATURE_PAE);
	cpu->flags.mce=cpu_has(c, X86_FEATURE_MCE);
	cpu->flags.cx8=cpu_has(c, X86_FEATURE_CX8);
	cpu->flags.apic=cpu_has(c, X86_FEATURE_APIC);
	cpu->flags.sep=cpu_has(c, X86_FEATURE_SEP);
	cpu->flags.mtrr=cpu_has(c, X86_FEATURE_MTRR);
	cpu->flags.pge=cpu_has(c, X86_FEATURE_PGE);
	cpu->flags.mca=cpu_has(c, X86_FEATURE_MCA);
	cpu->flags.cmov=cpu_has(c, X86_FEATURE_CMOV);
	cpu->flags.pat=cpu_has(c, X86_FEATURE_PAT);
	cpu->flags.pse_36=cpu_has(c, X86_FEATURE_PSE36);
	cpu->flags.psn=cpu_has(c, X86_FEATURE_PN);
	cpu->flags.clflsh=cpu_has(c, X86_FEATURE_CLFLSH);
	cpu->flags.dts=cpu_has(c, X86_FEATURE_DTES);
	cpu->flags.acpi=cpu_has(c, X86_FEATURE_ACPI);
	cpu->flags.mmx=cpu_has(c, X86_FEATURE_MMX);
	cpu->flags.fxsr=cpu_has(c, X86_FEATURE_FXSR);
	/* The s_cpu field names differ from the feature-macro names for
	   SSE/SSE2/self-snoop/hyper-threading below. */
	cpu->flags.sse=cpu_has(c, X86_FEATURE_XMM);
	cpu->flags.sse2=cpu_has(c, X86_FEATURE_XMM2);
	cpu->flags.ss=cpu_has(c, X86_FEATURE_SELFSNOOP);
	cpu->flags.htt=cpu_has(c, X86_FEATURE_HT);
	cpu->flags.acc=cpu_has(c, X86_FEATURE_ACC);
	cpu->flags.syscall=cpu_has(c, X86_FEATURE_SYSCALL);
	cpu->flags.mp=cpu_has(c, X86_FEATURE_MP);
	cpu->flags.nx=cpu_has(c, X86_FEATURE_NX);
	cpu->flags.mmxext=cpu_has(c, X86_FEATURE_MMXEXT);
	cpu->flags.lm=cpu_has(c, X86_FEATURE_LM);
	cpu->flags.nowext=cpu_has(c, X86_FEATURE_3DNOWEXT);
	cpu->flags.now=cpu_has(c, X86_FEATURE_3DNOW);
	/* SMP is detected by scanning memory for an MP table, not CPUID. */
	cpu->flags.smp = find_smp_config();
}
344 void set_generic_info(struct cpuinfo_x86 *c,s_cpu *cpu) {
345 cpu->family=c->x86;
346 cpu->vendor_id=c->x86_vendor;
347 cpu->model_id=c->x86_model;
348 cpu->stepping=c->x86_mask;
349 strncpy(cpu->vendor,cpu_devs[c->x86_vendor]->c_vendor,CPU_VENDOR_SIZE);
350 strncpy(cpu->model,c->x86_model_id,CPU_MODEL_SIZE);
353 void detect_cpu(s_cpu *cpu)
355 struct cpuinfo_x86 c;
356 c.x86_cache_alignment = 32;
357 c.x86_cache_size = -1;
358 c.x86_vendor = X86_VENDOR_UNKNOWN;
359 c.cpuid_level = -1; /* CPUID not detected */
360 c.x86_model = c.x86_mask = 0; /* So far unknown... */
361 c.x86_vendor_id[0] = '\0'; /* Unset */
362 c.x86_model_id[0] = '\0'; /* Unset */
363 memset(&c.x86_vendor_id,'\0',CPU_VENDOR_SIZE);
365 if (!have_cpuid_p())
366 return;
368 generic_identify(&c);
369 set_generic_info(&c,cpu);
370 set_cpu_flags(&c,cpu);