#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/apic.h>

#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 *	http://www.amd.com/K6/k6docs/revgd.html
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");

static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc)	/* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}

static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
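
		/*
		 * Time K6_BUG_LOOP indirect calls through vide(). AMD sped
		 * up indirect calls when fixing the bug, so an unusually
		 * large cycle count here indicates a buggy part.
		 */
		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;
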
		if (d > 20*K6_BUG_LOOP)
			printk("system stability may be impaired when more than 32 MB are used.\n");
		else
			printk("probably OK (after B9730xxxx).\n");
		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}
	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
}

static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* calling is from identify_secondary_cpu() ? */
	if (c->cpu_index == boot_cpu_id)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		goto valid_k7;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		goto valid_k7;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has_mp)
			goto valid_k7;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	if (!test_taint(TAINT_UNSAFE_SMP))
		add_taint(TAINT_UNSAFE_SMP);

valid_k7:
	;
#endif
}

static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;	/* clear bit 15 to enable SSE */
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
static int __cpuinit nearby_node(int apicid)
{
	int i, node;
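
	/*
	 * E.g. for apicid 5 this scans APIC IDs 4, 3, 2, 1, 0 and then
	 * 6, 7, ..., MAX_LOCAL_APIC - 1, returning the first online node.
	 */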
	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}

	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the
 * cores. Assumes number of cores is a power of two.
 */
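/*
 * For example, with x86_coreid_bits == 1 a CPU with initial APIC ID 5
 * gets cpu_core_id = 5 & 1 = 1 and phys_proc_id = 5 >> 1 = 2.
 */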
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
#endif
}

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = hard_smp_processor_id();

	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */

		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}

	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;
	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned long long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;	/* HWCR.FFDIS */
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
	/* On C+ stepping K8 rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	}

	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6: /* An Athlon/Duron */
		init_amd_k7(c);
		break;
	}

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	/* Enable Performance counter for K7 and later */
	if (c->x86 > 6 && c->x86 <= 0x11)
		set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);

	if (!c->x86_model_id[0]) {
		switch (c->x86) {
		case 0xf:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}

	display_cacheinfo(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}
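
	/*
	 * Select the number of cache leaves: family 0xf and later with an
	 * L3 (nonzero associativity in CPUID 0x80000006 EDX bits 15:12)
	 * have four, everything else three (L1d, L1i, L2).
	 */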
	if (c->extended_cpuid_level >= 0x80000006) {
		if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

	if (c->x86 >= 0xf && c->x86 <= 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* do this for boot cpu */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
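		/*
		 * E.g. a TSEG base of 0xcff00000 inside the mapped range
		 * forces the large kernel page covering it to be remapped
		 * as 4 KB pages, so the SMM region does not share a large
		 * page with normal kernel memory.
		 */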
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if ((tseg>>PMD_SHIFT) <
				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
				((tseg>>PMD_SHIFT) <
				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
					     unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);