/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007 rPath, Inc. - All Rights Reserved
 *
 *   This file is part of the Linux kernel, and is made available under
 *   the terms of the GNU General Public License version 2.
 *
 * ----------------------------------------------------------------------- */

/*
 * arch/i386/boot/cpucheck.c
 *
 * Check for obligatory CPU features and abort if the features are not
 * present.  This code should be compilable as 16-, 32- or 64-bit
 * code, so be very careful with types and inline assembly.
 *
 * This code should not contain any messages; that requires an
 * additional wrapper.
 *
 * As written, this code is not safe for inclusion into the kernel
 * proper (after FPU initialization, in particular).
 */
#ifdef _SETUP
# include "boot.h"
# include "bitops.h"
#endif
#include <linux/types.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>

struct cpu_features {
	int level;		/* Family, or 64 for x86-64 */
	int model;
	u32 flags[NCAPINTS];
};

static struct cpu_features cpu;
static u32 cpu_vendor[3];
static u32 err_flags[NCAPINTS];

static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;

static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	REQUIRED_MASK2,
	REQUIRED_MASK3,
	REQUIRED_MASK4,
	REQUIRED_MASK5,
	REQUIRED_MASK6,
	REQUIRED_MASK7,
};

#define A32(a,b,c,d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
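
/*
 * A32() packs four ASCII characters into a little-endian u32, which is
 * the layout CPUID leaf 0 uses to return the vendor string in EBX, EDX
 * and ECX ("AuthenticAMD" -> "Auth", "enti", "cAMD").  For example,
 * A32('A','u','t','h') == 0x68747541.
 */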

static int is_amd(void)
{
	return cpu_vendor[0] == A32('A','u','t','h') &&
	       cpu_vendor[1] == A32('e','n','t','i') &&
	       cpu_vendor[2] == A32('c','A','M','D');
}

static int is_centaur(void)
{
	return cpu_vendor[0] == A32('C','e','n','t') &&
	       cpu_vendor[1] == A32('a','u','r','H') &&
	       cpu_vendor[2] == A32('a','u','l','s');
}

static int is_transmeta(void)
{
	return cpu_vendor[0] == A32('G','e','n','u') &&
	       cpu_vendor[1] == A32('i','n','e','T') &&
	       cpu_vendor[2] == A32('M','x','8','6');
}
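
/*
 * Probe for an x87 FPU the pre-CPUID way: make sure the FPU is not
 * disabled in CR0 (EM/TS clear), execute FNINIT, and check that the
 * status word reads back as 0 and the control word as FNINIT's 0x037f
 * (only the bits in 0x103f are compared).  fcw/fsw start as all-ones
 * so the test fails if the stores never happen.
 */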
static int has_fpu(void)
{
	u16 fcw = -1, fsw = -1;
	u32 cr0;

	asm("movl %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("movl %0,%%cr0" : : "r" (cr0));
	}

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
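
/*
 * Test whether a given EFLAGS bit can be toggled.  This is the classic
 * pre-CPUID feature probe: the AC bit (bit 18) is writable from the
 * 486 on, and the ID bit (bit 21) is writable iff CPUID is supported.
 */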
static int has_eflag(u32 mask)
{
	u32 f0, f1;

	asm("pushfl ; "
	    "pushfl ; "
	    "popl %0 ; "
	    "movl %0,%1 ; "
	    "xorl %2,%1 ; "
	    "pushl %1 ; "
	    "popfl ; "
	    "pushfl ; "
	    "popl %1 ; "
	    "popfl"
	    : "=&r" (f0), "=&r" (f1)
	    : "ri" (mask));

	return !!((f0^f1) & mask);
}
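
/*
 * Fill in cpu.flags.  The feature words follow the kernel's cpufeature
 * numbering: word 0 is CPUID.1:EDX, word 4 is CPUID.1:ECX, word 1 is
 * CPUID.0x80000001:EDX and word 6 is CPUID.0x80000001:ECX.
 */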
static void get_flags(void)
{
	u32 max_intel_level, max_amd_level;
	u32 tfms;

	if (has_fpu())
		set_bit(X86_FEATURE_FPU, cpu.flags);

	if (has_eflag(X86_EFLAGS_ID)) {
		asm("cpuid"
		    : "=a" (max_intel_level),
		      "=b" (cpu_vendor[0]),
		      "=d" (cpu_vendor[1]),
		      "=c" (cpu_vendor[2])
		    : "a" (0));

		if (max_intel_level >= 0x00000001 &&
		    max_intel_level <= 0x0000ffff) {
			asm("cpuid"
			    : "=a" (tfms),
			      "=c" (cpu.flags[4]),
			      "=d" (cpu.flags[0])
			    : "a" (0x00000001)
			    : "ebx");
			cpu.level = (tfms >> 8) & 15;
			cpu.model = (tfms >> 4) & 15;
			if (cpu.level >= 6)
				cpu.model += ((tfms >> 16) & 0xf) << 4;
		}

149 asm("cpuid"
150 : "=a" (max_amd_level)
151 : "a" (0x80000000)
152 : "ebx", "ecx", "edx");
154 if (max_amd_level >= 0x80000001 &&
155 max_amd_level <= 0x8000ffff) {
156 u32 eax = 0x80000001;
157 asm("cpuid"
158 : "+a" (eax),
159 "=c" (cpu.flags[6]),
160 "=d" (cpu.flags[1])
161 : : "ebx");

/* Returns a bitmask of which words we have error bits in */
static int check_flags(void)
{
	u32 err;
	int i;

	err = 0;
	for (i = 0; i < NCAPINTS; i++) {
		err_flags[i] = req_flags[i] & ~cpu.flags[i];
		if (err_flags[i])
			err |= 1 << i;
	}

	return err;
}

/*
 * Returns -1 on error.
 *
 * *cpu_level is set to the current CPU level; *req_level to the required
 * level.  x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the flags error array if there are flags missing.
 */
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;

	memset(&cpu.flags, 0, sizeof cpu.flags);
	cpu.level = 3;

	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;

	get_flags();
	err = check_flags();

	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;

	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD and we're only missing SSE+SSE2, try to
		   turn them on */

		u32 ecx = MSR_K7_HWCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
217 eax &= ~(1 << 15);
218 asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
220 get_flags(); /* Make sure it really did something */
221 err = check_flags();
222 } else if (err == 0x01 &&
223 !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
224 is_centaur() && cpu.model >= 6) {
225 /* If this is a VIA C3, we might have to enable CX8
226 explicitly */
228 u32 ecx = MSR_VIA_FCR;
229 u32 eax, edx;
231 asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
232 eax |= (1<<1)|(1<<7);
233 asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
235 set_bit(X86_FEATURE_CX8, cpu.flags);
236 err = check_flags();
	} else if (err == 0x01 && is_transmeta()) {
		/* Transmeta might have masked feature bits in word 0 */

		u32 ecx = 0x80860004;
		u32 eax, edx;
		u32 level = 1;
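
		/* MSR 0x80860004 masks the CPUID feature bits reported
		   in word 0.  Lift the mask (write all-ones), re-read
		   CPUID.1, then restore the original mask. */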
244 asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
245 asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
246 asm("cpuid"
247 : "+a" (level), "=d" (cpu.flags[0])
248 : : "ecx", "ebx");
249 asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
251 err = check_flags();

	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;

	return (cpu.level < req_level || err) ? -1 : 0;
}
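
/*
 * Usage sketch (not part of this file): a wrapper such as the boot
 * code's validate_cpu() might consume check_cpu() roughly like this.
 * The die() helper and the exact failure handling shown here are
 * illustrative only; the real wrapper also prints which features are
 * missing.
 *
 *	static void validate_cpu(void)
 *	{
 *		int cpu_level, req_level;
 *		u32 *err_flags;
 *
 *		check_cpu(&cpu_level, &req_level, &err_flags);
 *
 *		if (cpu_level < req_level)
 *			die();		// CPU family too old
 *		if (err_flags)
 *			die();		// required feature flags missing
 *	}
 */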