arch/x86/boot/cpucheck.c
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007 rPath, Inc. - All Rights Reserved
 *
 *   This file is part of the Linux kernel, and is made available under
 *   the terms of the GNU General Public License version 2.
 *
 * ----------------------------------------------------------------------- */

/*
 * Check for obligatory CPU features and abort if the features are not
 * present.  This code should be compilable as 16-, 32- or 64-bit
 * code, so be very careful with types and inline assembly.
 *
 * This code should not contain any messages; that requires an
 * additional wrapper.
 *
 * As written, this code is not safe for inclusion into the kernel
 * proper (after FPU initialization, in particular).
 */

#ifdef _SETUP
# include "boot.h"
# include "bitops.h"
#endif
#include <linux/types.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>

struct cpu_features {
	int level;		/* Family, or 64 for x86-64 */
	int model;
	u32 flags[NCAPINTS];
};

static struct cpu_features cpu;
static u32 cpu_vendor[3];
static u32 err_flags[NCAPINTS];

static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;

static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	REQUIRED_MASK2,
	REQUIRED_MASK3,
	REQUIRED_MASK4,
	REQUIRED_MASK5,
	REQUIRED_MASK6,
	REQUIRED_MASK7,
};

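/*
 * A32() packs four ASCII characters into a little-endian u32, the same
 * layout CPUID uses to return the vendor string; cpu_vendor[] holds the
 * EBX, EDX, ECX words in that order, so e.g. "AuthenticAMD" compares as
 * "Auth", "enti", "cAMD".
 */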
#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))

static int is_amd(void)
{
	return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
	       cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
	       cpu_vendor[2] == A32('c', 'A', 'M', 'D');
}

static int is_centaur(void)
{
	return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
	       cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
	       cpu_vendor[2] == A32('a', 'u', 'l', 's');
}

static int is_transmeta(void)
{
	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
	       cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
	       cpu_vendor[2] == A32('M', 'x', '8', '6');
}

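/*
 * Probe for an FPU the pre-CPUID way: clear CR0.EM and CR0.TS so FPU
 * instructions won't trap, execute fninit, and check that the status
 * word reads back 0 and the control word has its documented reset
 * value (0x037f, of which the 0x103f bits are checked).
 */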
static int has_fpu(void)
{
	u16 fcw = -1, fsw = -1;
	u32 cr0;

	asm("movl %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("movl %0,%%cr0" : : "r" (cr0));
	}

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}

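/*
 * Test whether an EFLAGS bit can be toggled; a writable bit means the
 * corresponding feature exists.  Used with X86_EFLAGS_ID to detect
 * CPUID support and with X86_EFLAGS_AC to tell a 386 from a 486.
 */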
static int has_eflag(u32 mask)
{
	u32 f0, f1;

	asm("pushfl ; "
	    "pushfl ; "
	    "popl %0 ; "
	    "movl %0,%1 ; "
	    "xorl %2,%1 ; "
	    "pushl %1 ; "
	    "popfl ; "
	    "pushfl ; "
	    "popl %1 ; "
	    "popfl"
	    : "=&r" (f0), "=&r" (f1)
	    : "ri" (mask));

	return !!((f0^f1) & mask);
}

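/*
 * Fill in cpu.flags from CPUID: leaf 0x00000001 supplies capability
 * words 0 (EDX) and 4 (ECX), leaf 0x80000001 supplies words 1 (EDX)
 * and 6 (ECX), matching the word layout in <asm/cpufeature.h>.
 */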
static void get_flags(void)
{
	u32 max_intel_level, max_amd_level;
	u32 tfms;

	if (has_fpu())
		set_bit(X86_FEATURE_FPU, cpu.flags);

	if (has_eflag(X86_EFLAGS_ID)) {
		asm("cpuid"
		    : "=a" (max_intel_level),
		      "=b" (cpu_vendor[0]),
		      "=d" (cpu_vendor[1]),
		      "=c" (cpu_vendor[2])
		    : "a" (0));

		if (max_intel_level >= 0x00000001 &&
		    max_intel_level <= 0x0000ffff) {
			asm("cpuid"
			    : "=a" (tfms),
			      "=c" (cpu.flags[4]),
			      "=d" (cpu.flags[0])
			    : "a" (0x00000001)
			    : "ebx");
			cpu.level = (tfms >> 8) & 15;
			cpu.model = (tfms >> 4) & 15;
			if (cpu.level >= 6)
				cpu.model += ((tfms >> 16) & 0xf) << 4;
		}

147 asm("cpuid"
148 : "=a" (max_amd_level)
149 : "a" (0x80000000)
150 : "ebx", "ecx", "edx");
152 if (max_amd_level >= 0x80000001 &&
153 max_amd_level <= 0x8000ffff) {
154 u32 eax = 0x80000001;
155 asm("cpuid"
156 : "+a" (eax),
157 "=c" (cpu.flags[6]),
158 "=d" (cpu.flags[1])
159 : : "ebx");
/* Returns a bitmask of which words we have error bits in */
static int check_flags(void)
{
	u32 err;
	int i;

	err = 0;
	for (i = 0; i < NCAPINTS; i++) {
		err_flags[i] = req_flags[i] & ~cpu.flags[i];
		if (err_flags[i])
			err |= 1 << i;
	}

	return err;
}

/*
 * Returns -1 on error.
 *
 * *cpu_level is set to the current CPU level; *req_level to the required
 * level.  x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the flags error array if there are flags missing.
 */
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;

	memset(&cpu.flags, 0, sizeof cpu.flags);
	cpu.level = 3;

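	/* Assume a 386 until proven otherwise; the AC flag is only
	   writable on a 486 or later */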
	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;

	get_flags();
	err = check_flags();

	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;

	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD and we're only missing SSE+SSE2, try to
		   turn them on */

		u32 ecx = MSR_K7_HWCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
215 eax &= ~(1 << 15);
216 asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
218 get_flags(); /* Make sure it really did something */
219 err = check_flags();
220 } else if (err == 0x01 &&
221 !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
222 is_centaur() && cpu.model >= 6) {
223 /* If this is a VIA C3, we might have to enable CX8
224 explicitly */
		u32 ecx = MSR_VIA_FCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
230 eax |= (1<<1)|(1<<7);
231 asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
233 set_bit(X86_FEATURE_CX8, cpu.flags);
234 err = check_flags();
235 } else if (err == 0x01 && is_transmeta()) {
236 /* Transmeta might have masked feature bits in word 0 */
		u32 ecx = 0x80860004;
		u32 eax, edx;
		u32 level = 1;

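		/* Save the Transmeta feature-mask MSR, unmask every
		   feature bit, re-read the leaf 1 flags, then restore
		   the original mask */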
242 asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
243 asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
244 asm("cpuid"
245 : "+a" (level), "=d" (cpu.flags[0])
246 : : "ecx", "ebx");
247 asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
249 err = check_flags();
	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;

	return (cpu.level < req_level || err) ? -1 : 0;
}
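
/*
 * For illustration, a minimal sketch of a caller (not part of this
 * file).  In the boot wrapper the equivalent logic lives in
 * validate_cpu() in arch/x86/boot/cpu.c, which also prints the
 * missing-feature messages; show_missing_features() below is a
 * hypothetical helper, and the message text is made up:
 *
 *	u32 *err_flags;
 *	int cpu_level, req_level;
 *
 *	if (check_cpu(&cpu_level, &req_level, &err_flags)) {
 *		if (cpu_level < req_level)
 *			printf("CPU: family %d detected, %d required\n",
 *			       cpu_level, req_level);
 *		else if (err_flags)
 *			show_missing_features(err_flags);
 *		return -1;
 *	}
 *	return 0;
 */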