1 #include <linux/init.h>
2 #include <linux/bitops.h>
5 #include <asm/processor.h>
10 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
11 * misexecution of code under Linux. Owners of such processors should
12 * contact AMD for precise details and a CPU swap.
14 * See http://www.multimania.com/poulot/k6bug.html
15 * http://www.amd.com/K6/k6docs/revgd.html
17 * The following test is erm.. interesting. AMD neglected to up
18 * the chip setting when fixing the bug but they also tweaked some
19 * performance at the same time..
22 extern void vide(void);
/* Tiny aligned `ret` stub defined in inline asm; presumably the indirect-call
 * target used by the K6 erratum timing test described in the header comment
 * above — the timing loop itself is not visible in this extraction (TODO
 * confirm against the full file). */
23 __asm__(".align 4\nvide: ret");
/*
 * init_amd() - model/stepping-specific setup and errata workarounds for
 * AMD CPUs (K5/K6/K7/K8 era), applied during CPU identification.
 *
 * NOTE(review): this chunk is a garbled extraction — original file line
 * numbers are fused into the text, and many interior lines are missing
 * (the switch on c->x86, declarations of r/l/h/flags/d, the K6 timing
 * loop, and several closing braces). Comments below describe only what
 * is visible; the block as shown will not compile without the missing
 * lines restored from the full file.
 */
25 static void __init
init_amd(struct cpuinfo_x86
*c
)
/* System RAM in megabytes — used below to size K6 write allocation. */
28 int mbytes
= num_physpages
>> (20-PAGE_SHIFT
);
32 unsigned long long value
;
34 /* Disable TLB flush filter by setting HWCR.FFDIS on K8
35 * bit 6 of msr C001_0015
37 * Errata 63 for SH-B3 steppings
38 * Errata 122 for all steppings (F+ have it disabled by default)
/* Read-modify-write of HWCR; the modify step (setting the FFDIS bit
 * referenced in the comment above) is not visible in this extraction. */
41 rdmsrl(MSR_K7_HWCR
, value
);
43 wrmsrl(MSR_K7_HWCR
, value
);
48 * FIXME: We should handle the K5 here. Set up the write
49 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
53 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
54 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
55 clear_bit(0*32+31, c
->x86_capability
);
/* NOTE(review): `r` is not declared in the visible lines — declaration
 * presumably lost in extraction. */
57 r
= get_model_name(c
);
63 * General Systems BIOSen alias the cpu frequency registers
64 * of the Elan at 0x000df000. Unfortunately, one of the Linux
65 * drivers subsequently pokes it, and changes the CPU speed.
66 * Workaround : Remove the unneeded alias.
68 #define CBAR (0xfffc) /* Configuration Base Address (32-bit) */
69 #define CBAR_ENB (0x80000000)
70 #define CBAR_KEY (0X000000CB)
/* Elan SC400/SC410 workaround: if the CBAR alias is enabled, disable it
 * by writing the key with the enable bit clear. */
71 if (c
->x86_model
==9 || c
->x86_model
== 10) {
72 if (inl (CBAR
) & CBAR_ENB
)
73 outl (0 | CBAR_KEY
, CBAR
);
/* K6 family handling begins here (enclosing switch on c->x86 not visible). */
77 if( c
->x86_model
< 6 )
79 /* Based on AMD doc 20734R - June 2000 */
80 if ( c
->x86_model
== 0 ) {
/* Model 0: APIC capability bit is bogus, PGE is present but unreported. */
81 clear_bit(X86_FEATURE_APIC
, c
->x86_capability
);
82 set_bit(X86_FEATURE_PGE
, c
->x86_capability
);
/* K6 model 6 stepping 1: run the "vide" timing test (loop body missing
 * from this extraction) to detect the pre-B9730xxxx hardware bug. */
87 if ( c
->x86_model
== 6 && c
->x86_mask
== 1 ) {
88 const int K6_BUG_LOOP
= 1000000;
93 printk(KERN_INFO
"AMD K6 stepping B detected - ");
96 * It looks like AMD fixed the 2.6.2 bug and improved indirect
97 * calls at the same time.
108 /* Knock these two lines out if it debugs out ok */
109 printk(KERN_INFO
"AMD K6 stepping B detected - ");
/* NOTE(review): `d` (timing delta) is computed in lines missing here. */
111 if (d
> 20*K6_BUG_LOOP
)
112 printk("system stability may be impaired when more than 32 MB are used.\n");
114 printk("probably OK (after B9730xxxx).\n");
115 printk(KERN_INFO
"Please see http://membres.lycos.fr/poulot/k6bug.html\n");
118 /* K6 with old style WHCR */
119 if (c
->x86_model
< 8 ||
120 (c
->x86_model
== 8 && c
->x86_mask
< 8)) {
121 /* We can only write allocate on the low 508Mb */
/* NOTE(review): `l`, `h`, `flags` and the mbytes clamp are declared/set
 * in lines missing from this extraction. */
125 rdmsr(MSR_K6_WHCR
, l
, h
);
126 if ((l
&0x0000FFFF)==0) {
/* Enable write allocation: bit 0 = enable, limit in 4MB units. */
128 l
=(1<<0)|((mbytes
/4)<<1);
129 local_irq_save(flags
);
131 wrmsr(MSR_K6_WHCR
, l
, h
);
132 local_irq_restore(flags
);
133 printk(KERN_INFO
"Enabling old style K6 write allocation for %d Mb\n",
/* K6-2 (model 8, stepping >7), K6-III (9) and K6-2+/III+ (13): new WHCR
 * layout — limit in bits 31:22, enable at bit 16. */
139 if ((c
->x86_model
== 8 && c
->x86_mask
>7) ||
140 c
->x86_model
== 9 || c
->x86_model
== 13) {
141 /* The more serious chips .. */
146 rdmsr(MSR_K6_WHCR
, l
, h
);
147 if ((l
&0xFFFF0000)==0) {
149 l
=((mbytes
>>2)<<22)|(1<<16);
150 local_irq_save(flags
);
152 wrmsr(MSR_K6_WHCR
, l
, h
);
153 local_irq_restore(flags
);
154 printk(KERN_INFO
"Enabling new style K6 write allocation for %d Mb\n",
158 /* Set MTRR capability flag if appropriate */
159 if (c
->x86_model
== 13 || c
->x86_model
== 9 ||
160 (c
->x86_model
== 8 && c
->x86_mask
>= 8))
161 set_bit(X86_FEATURE_K6_MTRR
, c
->x86_capability
);
165 if (c
->x86_model
== 10) {
166 /* AMD Geode LX is model 10 */
167 /* placeholder for any needed mods */
/* Athlon/Duron (family 6) branch of the enclosing switch (switch header
 * and earlier cases are missing from this extraction). */
171 case 6: /* An Athlon/Duron */
173 /* Bit 15 of Athlon specific MSR 15, needs to be 0
174 * to enable SSE on Palomino/Morgan/Barton CPU's.
175 * If the BIOS didn't enable it already, enable it here.
177 if (c
->x86_model
>= 6 && c
->x86_model
<= 10) {
178 if (!cpu_has(c
, X86_FEATURE_XMM
)) {
179 printk(KERN_INFO
"Enabling disabled K7/SSE Support.\n");
/* NOTE(review): the `l &= ~(1<<15)` modify step between this read and
 * the write below is missing from this extraction. */
180 rdmsr(MSR_K7_HWCR
, l
, h
);
182 wrmsr(MSR_K7_HWCR
, l
, h
);
183 set_bit(X86_FEATURE_XMM
, c
->x86_capability
);
187 /* It's been determined by AMD that Athlons since model 8 stepping 1
188 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
189 * As per AMD technical note 27212 0.2
191 if ((c
->x86_model
== 8 && c
->x86_mask
>=1) || (c
->x86_model
> 8)) {
192 rdmsr(MSR_K7_CLK_CTL
, l
, h
);
193 if ((l
& 0xfff00000) != 0x20000000) {
194 printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l
,
195 ((l
& 0x000fffff)|0x20000000));
/* Keep low 20 bits, force top nibble pattern to 0x200xxxxx. */
196 wrmsr(MSR_K7_CLK_CTL
, (l
& 0x000fffff)|0x20000000, h
);
/* Feature flags for later families (case labels for these set_bit calls
 * are missing from this extraction — presumably family 15/K8 etc.). */
204 set_bit(X86_FEATURE_K8
, c
->x86_capability
);
207 set_bit(X86_FEATURE_K7
, c
->x86_capability
);
/* All visible AMD parts get the FXSAVE leak workaround flag. */
211 set_bit(X86_FEATURE_FXSAVE_LEAK
, c
->x86_capability
);
213 display_cacheinfo(c
);
/* CPUID leaf 0x80000008 ECX[7:0] = number of cores - 1. */
215 if (cpuid_eax(0x80000000) >= 0x80000008) {
216 c
->x86_max_cores
= (cpuid_ecx(0x80000008) & 0xff) + 1;
/* CPUID leaf 0x80000007 EDX = advanced power management features;
 * bit 8 indicates an invariant (constant-rate) TSC. */
219 if (cpuid_eax(0x80000000) >= 0x80000007) {
220 c
->x86_power
= cpuid_edx(0x80000007);
221 if (c
->x86_power
& (1<<8))
222 set_bit(X86_FEATURE_CONSTANT_TSC
, c
->x86_capability
);
227 * On a AMD multi core setup the lower bits of the APIC id
228 * distinguish the cores.
230 if (c
->x86_max_cores
> 1) {
231 int cpu
= smp_processor_id();
/* Core-id width from CPUID 0x80000008 ECX[15:12]; if zero, the while
 * loop below (its increment line is missing here) derives it from
 * x86_max_cores. */
232 unsigned bits
= (cpuid_ecx(0x80000008) >> 12) & 0xf;
235 while ((1 << bits
) < c
->x86_max_cores
)
/* Split APIC id: low `bits` = core id, remainder = physical package id. */
238 c
->cpu_core_id
= c
->phys_proc_id
& ((1<<bits
)-1);
239 c
->phys_proc_id
>>= bits
;
240 printk(KERN_INFO
"CPU %d(%d) -> Core %d\n",
241 cpu
, c
->x86_max_cores
, c
->cpu_core_id
);
/* Extended cache descriptors available: report 3 cache leaves. */
245 if (cpuid_eax(0x80000000) >= 0x80000006)
246 num_cache_leaves
= 3;
/*
 * amd_size_cache() - adjust the reported L2 cache size for AMD parts with
 * known sizing errata. NOTE(review): the adjustment applied inside each
 * branch and the final `return size;` are missing from this extraction;
 * only the model/stepping tests for Duron rev A0 and Thunderbird rev
 * A1/A2 are visible.
 */
249 static unsigned int amd_size_cache(struct cpuinfo_x86
* c
, unsigned int size
)
251 /* AMD errata T13 (order #21922) */
253 if (c
->x86_model
== 3 && c
->x86_mask
== 0) /* Duron Rev A0 */
255 if (c
->x86_model
== 4 &&
256 (c
->x86_mask
==0 || c
->x86_mask
==1)) /* Tbird rev A1/A2 */
/*
 * cpu_dev descriptor for AMD: registers the "AuthenticAMD" vendor string
 * and the AMD-specific identify/cache-sizing callbacks with the generic
 * x86 CPU code. NOTE(review): several initializers (c_vendor, the rest of
 * the c_models table, c_init) are missing from this extraction.
 */
262 static struct cpu_dev amd_cpu_dev __initdata
= {
264 .c_ident
= { "AuthenticAMD" },
266 { .vendor
= X86_VENDOR_AMD
, .family
= 4, .model_names
=
278 .c_identify
= generic_identify
,
279 .c_size_cache
= amd_size_cache
,
/*
 * amd_init_cpu() - hook the AMD cpu_dev into the vendor dispatch table so
 * the generic identification path can find it. NOTE(review): the function
 * braces and `return 0;` are missing from this extraction.
 */
282 int __init
amd_init_cpu(void)
284 cpu_devs
[X86_VENDOR_AMD
] = &amd_cpu_dev
;
288 //early_arch_initcall(amd_init_cpu);
/*
 * amd_exit_cpu() - unhook the AMD cpu_dev from the vendor dispatch table;
 * registered as a late initcall below. NOTE(review): the function braces
 * and `return 0;` are missing from this extraction.
 */
290 static int __init
amd_exit_cpu(void)
292 cpu_devs
[X86_VENDOR_AMD
] = NULL
;
296 late_initcall(amd_exit_cpu
);