2 < /* CpuArch.c -- CPU specific code
3 < 2010-10-26: Igor Pavlov : Public domain */
7 < #ifdef MY_CPU_X86_OR_AMD64
9 < #if (defined(_MSC_VER) && !defined(MY_CPU_AMD64)) || defined(__GNUC__)
13 < #if defined(USE_ASM) && !defined(MY_CPU_AMD64)
14 < static UInt32 CheckFlag(UInt32 flag)
20 < __asm xor EAX, flag;
28 < __asm and flag, EAX;
30 < __asm__ __volatile__ (
33 < "movl %%EAX,%%EDX\n\t"
39 < "xorl %%EDX,%%EAX\n\t"
42 < "andl %%EAX, %0\n\t":
43 < "=c" (flag) : "c" (flag));
47 < #define CHECK_CPUID_IS_SUPPORTED if (CheckFlag(1 << 18) == 0 || CheckFlag(1 << 21) == 0) return False;
49 < #define CHECK_CPUID_IS_SUPPORTED
52 < static void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d)
58 < UInt32 a2, b2, c2, d2;
62 < __asm mov EAX, function;
76 < __asm__ __volatile__ (
89 < __cpuid(CPUInfo, function);
98 < Bool x86cpuid_CheckAndRead(Cx86cpuid *p)
100 < CHECK_CPUID_IS_SUPPORTED
101 < MyCPUID(0, &p->maxFunc, &p->vendor[0], &p->vendor[2], &p->vendor[1]);
102 < MyCPUID(1, &p->ver, &p->b, &p->c, &p->d);
106 < static UInt32 kVendors[][3] =
108 < { 0x756E6547, 0x49656E69, 0x6C65746E},
109 < { 0x68747541, 0x69746E65, 0x444D4163},
110 < { 0x746E6543, 0x48727561, 0x736C7561}
113 < int x86cpuid_GetFirm(const Cx86cpuid *p)
116 < for (i = 0; i < sizeof(kVendors) / sizeof(kVendors[i]); i++)
118 < const UInt32 *v = kVendors[i];
119 < if (v[0] == p->vendor[0] &&
120 < v[1] == p->vendor[1] &&
121 < v[2] == p->vendor[2])
127 < Bool CPU_Is_InOrder()
131 < UInt32 family, model;
132 < if (!x86cpuid_CheckAndRead(&p))
134 < family = x86cpuid_GetFamily(&p);
135 < model = x86cpuid_GetModel(&p);
136 < firm = x86cpuid_GetFirm(&p);
139 < case CPU_FIRM_INTEL: return (family < 6 || (family == 6 && model == 0x100C));
140 < case CPU_FIRM_AMD: return (family < 5 || (family == 5 && (model < 6 || model == 0xA)));
141 < case CPU_FIRM_VIA: return (family < 6 || (family == 6 && model < 0xF));
146 < #if !defined(MY_CPU_AMD64) && defined(_WIN32)
147 < static Bool CPU_Sys_Is_SSE_Supported()
150 < vi.dwOSVersionInfoSize = sizeof(vi);
151 < if (!GetVersionEx(&vi))
153 < return (vi.dwMajorVersion >= 5);
155 < #define CHECK_SYS_SSE_SUPPORT if (!CPU_Sys_Is_SSE_Supported()) return False;
157 < #define CHECK_SYS_SSE_SUPPORT
160 < Bool CPU_Is_Aes_Supported()
163 < CHECK_SYS_SSE_SUPPORT
164 < if (!x86cpuid_CheckAndRead(&p))
166 < return (p.c >> 25) & 1;
171 > /* CpuArch.c -- CPU specific code
172 > 2010-10-26: Igor Pavlov : Public domain */
174 > #include "CpuArch.h"
176 > #ifdef MY_CPU_X86_OR_AMD64
178 > #if (defined(_MSC_VER) && !defined(MY_CPU_AMD64)) || defined(__GNUC__)
182 > #if defined(USE_ASM) && !defined(MY_CPU_AMD64)
183 > static UInt32 CheckFlag(UInt32 flag)
188 > __asm mov EDX, EAX;
189 > __asm xor EAX, flag;
194 > __asm xor EAX, EDX;
197 > __asm and flag, EAX;
199 > __asm__ __volatile__ (
202 > "movl %%EAX,%%EDX\n\t"
203 > "xorl %0,%%EAX\n\t"
208 > "xorl %%EDX,%%EAX\n\t"
211 > "andl %%EAX, %0\n\t":
212 > "=c" (flag) : "c" (flag):
217 > #define CHECK_CPUID_IS_SUPPORTED if (CheckFlag(1 << 18) == 0 || CheckFlag(1 << 21) == 0) return False;
219 > #define CHECK_CPUID_IS_SUPPORTED
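CheckFlag above toggles the requested EFLAGS bit, writes the register back, re-reads it, and returns non-zero only if the bit actually changed. Bit 18 is AC (settable from the 486 on) and bit 21 is ID, whose toggleability is the architectural test for CPUID availability, so CHECK_CPUID_IS_SUPPORTED makes a probing function bail out when either toggle fails. A minimal sketch of the intended call pattern; the wrapper name is hypothetical and, because CheckFlag is static, it could only live inside CpuArch.c itself:

    /* Hypothetical wrapper: returns True only when EFLAGS bits 18 (AC)
       and 21 (ID) can both be toggled, i.e. CPUID may be executed. */
    static Bool CpuidIsSupported(void)
    {
      CHECK_CPUID_IS_SUPPORTED  /* expands to an early "return False;" on failure */
      return True;
    }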
222 > static void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d)
228 > UInt32 a2, b2, c2, d2;
229 > __asm xor EBX, EBX;
230 > __asm xor ECX, ECX;
231 > __asm xor EDX, EDX;
232 > __asm mov EAX, function;
246 > #if defined(MY_CPU_AMD64)
248 > __asm__ __volatile__ (
249 > "mov %%rbx, %%rdi\n"
251 > "xchg %%rdi, %%rbx\n"
256 > : "0" (function)) ;
260 > __asm__ __volatile__ (
261 > "mov %%ebx, %%edi\n"
263 > "xchg %%edi, %%ebx\n"
268 > : "0" (function)) ;
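The mov/xchg pair bracketing cpuid in both branches above exists because, when GCC emits position-independent code, EBX (RBX on x86-64) holds the GOT pointer and must not be clobbered by inline asm; the sequence parks it in EDI/RDI, runs cpuid, then swaps the registers back so the "=D" output carries the EBX result. A stand-alone illustration of the same idea for a 32-bit PIC build, not copied from the file:

    #include <stdio.h>

    /* Runs CPUID while keeping EBX intact: EBX is saved in EDI, cpuid
       executes, and xchg restores EBX while leaving its output in EDI. */
    static void cpuid_pic(unsigned leaf, unsigned r[4])
    {
      __asm__ __volatile__ (
        "mov %%ebx, %%edi\n\t"
        "cpuid\n\t"
        "xchg %%edi, %%ebx\n\t"
        : "=a" (r[0]), "=D" (r[1]), "=c" (r[2]), "=d" (r[3])
        : "0" (leaf));
    }

    int main(void)
    {
      unsigned r[4];
      cpuid_pic(0, r);
      printf("max basic CPUID leaf = %u\n", r[0]);
      return 0;
    }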
277 > __cpuid(CPUInfo, function);
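When USE_ASM is not in effect (notably MSVC targeting AMD64), the intrinsic from <intrin.h> does the work instead; it fills its array with EAX, EBX, ECX, EDX in that order, so the elided neighboring lines only need to copy the four slots into the out-parameters. A sketch of that mapping, under the assumption that the surrounding code does nothing more than the copy:

    #include <intrin.h>

    typedef unsigned int UInt32;

    /* Sketch: map the intrinsic's output array onto the a/b/c/d
       out-parameters used by MyCPUID. */
    static void MyCPUID_intrin(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d)
    {
      int CPUInfo[4];
      __cpuid(CPUInfo, (int)function);
      *a = (UInt32)CPUInfo[0];  /* EAX */
      *b = (UInt32)CPUInfo[1];  /* EBX */
      *c = (UInt32)CPUInfo[2];  /* ECX */
      *d = (UInt32)CPUInfo[3];  /* EDX */
    }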
286 > Bool x86cpuid_CheckAndRead(Cx86cpuid *p)
288 > CHECK_CPUID_IS_SUPPORTED
289 > MyCPUID(0, &p->maxFunc, &p->vendor[0], &p->vendor[2], &p->vendor[1]);
290 > MyCPUID(1, &p->ver, &p->b, &p->c, &p->d);
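Leaf 0 returns the highest supported function number and the vendor string, and leaf 1 returns the version dword (stepping, model, family) plus the two feature-flag words that end up in p->c (ECX) and p->d (EDX). A minimal caller, assuming CpuArch.c and CpuArch.h from this source tree are part of the build:

    #include <stdio.h>
    #include "CpuArch.h"

    int main(void)
    {
      Cx86cpuid p;
      if (!x86cpuid_CheckAndRead(&p))
      {
        printf("CPUID not available\n");
        return 1;
      }
      printf("max leaf = %u  ver = 0x%08x  ecx = 0x%08x  edx = 0x%08x\n",
          (unsigned)p.maxFunc, (unsigned)p.ver, (unsigned)p.c, (unsigned)p.d);
      return 0;
    }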
294 > static UInt32 kVendors[][3] =
296 > { 0x756E6547, 0x49656E69, 0x6C65746E},
297 > { 0x68747541, 0x69746E65, 0x444D4163},
298 > { 0x746E6543, 0x48727561, 0x736C7561}
301 > int x86cpuid_GetFirm(const Cx86cpuid *p)
304 > for (i = 0; i < sizeof(kVendors) / sizeof(kVendors[i]); i++)
306 > const UInt32 *v = kVendors[i];
307 > if (v[0] == p->vendor[0] &&
308 > v[1] == p->vendor[1] &&
309 > v[2] == p->vendor[2])
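The kVendors rows are the leaf-0 vendor strings stored as raw dwords: "Genu"/"ineI"/"ntel" (GenuineIntel), "Auth"/"enti"/"cAMD" (AuthenticAMD) and "Cent"/"aurH"/"auls" (CentaurHauls). x86cpuid_CheckAndRead deliberately writes EBX, EDX, ECX into vendor[0], vendor[1], vendor[2], which is exactly the order of the string, so a plain three-dword compare suffices, and the matching row index is evidently what the CPU_FIRM_* switch below consumes. An illustrative dump of the same data (little-endian x86 is a given here):

    #include <stdio.h>
    #include <string.h>
    #include "CpuArch.h"

    /* Illustrative only: print the 12-character vendor string that
       x86cpuid_CheckAndRead stored as three dwords in EBX/EDX/ECX order. */
    static void PrintVendor(const Cx86cpuid *p)
    {
      char s[13];
      memcpy(s, p->vendor, 12);
      s[12] = '\0';
      printf("vendor = %s  firm = %d\n", s, x86cpuid_GetFirm(p));
    }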
315 > Bool CPU_Is_InOrder()
319 > UInt32 family, model;
320 > if (!x86cpuid_CheckAndRead(&p))
322 > family = x86cpuid_GetFamily(&p);
323 > model = x86cpuid_GetModel(&p);
324 > firm = x86cpuid_GetFirm(&p);
327 > case CPU_FIRM_INTEL: return (family < 6 || (family == 6 && model == 0x100C));
328 > case CPU_FIRM_AMD: return (family < 5 || (family == 5 && (model < 6 || model == 0xA)));
329 > case CPU_FIRM_VIA: return (family < 6 || (family == 6 && model < 0xF));
334 > #if !defined(MY_CPU_AMD64) && defined(_WIN32)
335 > static Bool CPU_Sys_Is_SSE_Supported()
338 > vi.dwOSVersionInfoSize = sizeof(vi);
339 > if (!GetVersionEx(&vi))
341 > return (vi.dwMajorVersion >= 5);
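The version gate accepts major version 5 and above, i.e. Windows 2000/XP onward, the NT line this code trusts to preserve SSE (XMM) register state across context switches; on x64 or non-Windows builds the check is compiled out entirely, since SSE2 is architecturally guaranteed on x64. A sketch of the surrounding call, assuming the elided lines merely declare an OSVERSIONINFO on the stack:

    #include <windows.h>

    /* Illustrative stand-in for CPU_Sys_Is_SSE_Supported (not copied
       verbatim from the file): ask the OS version and require NT 5.x+. */
    static BOOL OsSupportsSse(void)
    {
      OSVERSIONINFO vi;
      vi.dwOSVersionInfoSize = sizeof(vi);
      if (!GetVersionEx(&vi))
        return FALSE;
      return (vi.dwMajorVersion >= 5);
    }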
343 > #define CHECK_SYS_SSE_SUPPORT if (!CPU_Sys_Is_SSE_Supported()) return False;
345 > #define CHECK_SYS_SSE_SUPPORT
348 > Bool CPU_Is_Aes_Supported()
351 > CHECK_SYS_SSE_SUPPORT
352 > if (!x86cpuid_CheckAndRead(&p))
354 > return (p.c >> 25) & 1;
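Bit 25 of the leaf-1 ECX word is the AES-NI feature flag, so once the CPUID data is read the test is a single bit extract; on 32-bit Windows the OS-level SSE gate runs first via CHECK_SYS_SSE_SUPPORT. A small self-test for the two public probes, assuming CpuArch.c and CpuArch.h are compiled into the program:

    #include <stdio.h>
    #include "CpuArch.h"

    int main(void)
    {
      printf("in-order CPU : %d\n", (int)CPU_Is_InOrder());
      printf("AES-NI       : %d\n", (int)CPU_Is_Aes_Supported());
      return 0;
    }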