linux-rt-nao.git / arch/x86/kernel/cpu/intel_cacheinfo.c
blob: 483eda96e102062b23f3e29820d911a9c7d6ab59
/*
 *	Routines to identify caches on Intel CPU.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>	: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* All the cache descriptor types we care about (no TLB entries) */
static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, 6144 },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, 2048 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, 1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, 2048 },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, 8192 },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, 2048 },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x00, 0, 0 }
};

enum _cache_type {
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
};

#if defined(CONFIG_PCI) && defined(CONFIG_SYSFS)
static struct pci_device_id k8_nb_id[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
	{}
};
#endif

unsigned short num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 8;
		unsigned assoc : 8;
		unsigned size_in_kb : 8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned size_in_kb : 16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned res : 2;
		unsigned size_encoded : 14;
	};
	unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,
	[0xc] = 64,
	[0xf] = 0xffff	/* fully associative - no way to show this currently */
};

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };

static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = l3.assoc;
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	if (leaf == 3)
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
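
/*
 * Only the L3 supports cache index disable: leaf indices below 3 are
 * skipped, and the caller additionally restricts this to family 0x10+.
 */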
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	if (index < 3)
		return;
	this_leaf->can_disable = 1;
}
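
/*
 * Fill one _cpuid4_info_regs entry: use the emulated leaf on AMD, the
 * native deterministic cache parameters leaf (cpuid 4) otherwise.  The
 * total size in bytes is sets * line size * physical partitions * ways;
 * each of these fields is reported by CPUID as "value - 1".
 */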
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
			  (ebx.split.coherency_line_size + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity + 1);
	return 0;
}
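
/*
 * Count the cpuid(4) sub-leaves on the boot CPU: iterate until a leaf
 * reports CACHE_TYPE_NULL, which terminates the list.
 */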
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax cache_eax;
	int i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
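
/*
 * Detect the cache sizes for this CPU.  Prefer the deterministic
 * cpuid(4) leaves when they exist; otherwise fall back to the legacy
 * cpuid(2) descriptor bytes (on P4, cpuid(2) is still used, but only
 * for the trace cache).  Also derives the L2/L3 sharing ID used for
 * cpu_llc_id.  Returns the L2 size in KB.
 */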
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0; i < n; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0; j < 3; j++) {
				if (regs[j] & (1 << 31))
					regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for (j = 1; j < 16; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
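/*
 * Build shared_cpu_map for one cache leaf: CPUs share the cache when
 * their APIC IDs agree above the low get_count_order(num_threads_sharing)
 * bits.  Sibling leaves that already exist are updated symmetrically.
 */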
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}
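
/*
 * Allocate the per-cpu _cpuid4_info array and fill it on the target CPU
 * via smp_call_function_single(), since cpuid must execute there.
 */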
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
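
/*
 * cpuid(4) encodes line size, partitions, ways and sets as "value - 1",
 * so most of the attributes below add 1 back before being displayed.
 */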
#define show_one_plus(file_name, object, val) \
static ssize_t show_##file_name \
	(struct _cpuid4_info *this_leaf, char *buf) \
{ \
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)
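
/*
 * Walk the PCI devices and return the (node + 1)-th device matching
 * k8_nb_id[] (the K8/Fam10h northbridge); nodes are assumed to appear
 * in PCI enumeration order.  Without CONFIG_PCI this always fails.
 */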
#ifdef CONFIG_PCI
static struct pci_dev *get_k8_northbridge(int node)
{
	struct pci_dev *dev = NULL;
	int i;

	for (i = 0; i <= node; i++) {
		do {
			dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
			if (!dev)
				break;
		} while (!pci_match_id(&k8_nb_id[0], dev));
		if (!dev)
			break;
	}
	return dev;
}
#else
static struct pci_dev *get_k8_northbridge(int node)
{
	return NULL;
}
#endif
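
/*
 * Dump the two L3 cache index disable slots from the node's northbridge
 * config space (offsets 0x1BC and 0x1C0): bit 31 reports reads disabled,
 * bit 30 reports allocation of new entries disabled.
 */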
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
{
	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
	int node = cpu_to_node(cpumask_first(mask));
	struct pci_dev *dev = NULL;
	ssize_t ret = 0;
	int i;

	if (!this_leaf->can_disable)
		return sprintf(buf, "Feature not enabled\n");

	dev = get_k8_northbridge(node);
	if (!dev) {
		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
		return -EINVAL;
	}

	for (i = 0; i < 2; i++) {
		unsigned int reg;

		pci_read_config_dword(dev, 0x1BC + i * 4, &reg);

		ret += sprintf(buf, "%sEntry: %d\n", buf, i);
		ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
			buf,
			reg & 0x80000000 ? "Disabled" : "Allowed",
			reg & 0x40000000 ? "Disabled" : "Allowed");
		ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
			buf, (reg & 0x30000) >> 16, reg & 0xfff);
	}
	return ret;
}
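
/*
 * Expected input: "<index> <val>" in hex, where index selects one of the
 * two disable slots.  Bits 30 and 31 are forced on; the register is first
 * written with bit 30 clear, the caches are flushed with wbinvd(), and
 * the final value is then written.
 */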
static ssize_t
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
		    size_t count)
{
	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
	int node = cpu_to_node(cpumask_first(mask));
	struct pci_dev *dev = NULL;
	unsigned int ret, index, val;

	if (!this_leaf->can_disable)
		return 0;

	if (strlen(buf) > 15)
		return -EINVAL;

	ret = sscanf(buf, "%x %x", &index, &val);
	if (ret != 2)
		return -EINVAL;
	if (index > 1)
		return -EINVAL;

	val |= 0xc0000000;
	dev = get_k8_northbridge(node);
	if (!dev) {
		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
		return -EINVAL;
	}

	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
	wbinvd();
	pci_write_config_dword(dev, 0x1BC + index * 4, val);

	return 1;
}

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	&cache_disable.attr,
	NULL
};

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(index_kobject, cpu) = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}
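
/*
 * CPU hotplug callback: add or remove the cache sysfs hierarchy as CPUs
 * come online or die.
 */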
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif