/*
 * Routines to identify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/k8.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* All the cache descriptor types we care about (no TLB entries) */

static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },		/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },		/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, 6144 },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, 2048 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, 4096 },		/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, 2048 },		/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, 4096 },		/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, 8192 },		/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, 2048 },		/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};

enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
};

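/* Number of cpuid(4) cache leaves; initialized once from the boot CPU. */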
unsigned short			num_cache_leaves;

/*
 * AMD doesn't have CPUID4. Emulate it here to report the same
 * information to the user. This makes some assumptions about the machine:
 * L2 not shared, no SMT etc., which is currently true on AMD CPUs.
 *
 * In theory the TLBs could be reported as fake type (they are in "dummy").
 * Maybe later.
 */

union l1_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 8;
		unsigned assoc : 8;
		unsigned size_in_kb : 8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned size_in_kb : 16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned res : 2;
		unsigned size_encoded : 14;
	};
	unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };

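/*
 * Build a cpuid(4)-style leaf for AMD from the legacy cache leaves
 * 0x80000005 (L1) and 0x80000006 (L2/L3), using the lookup tables
 * above to translate associativity, level and type.
 */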
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = l3.assoc;
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	if (leaf == 3)
		eax->split.num_threads_sharing =
			current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

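/*
 * Cache index disable is only exposed for L3 leaves (index >= 3) and
 * only on families that support it: family 0x11 is excluded entirely,
 * and family 0x10 models below 8 are excluded per erratum #382.
 */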
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	if (index < 3)
		return;

	if (boot_cpu_data.x86 == 0x11)
		return;

	/* see erratum #382 */
	if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
		return;

	this_leaf->can_disable = 1;
}

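/*
 * Fill *this_leaf for cache leaf @index, either from the real cpuid(4)
 * instruction or from the AMD emulation above, and compute the total
 * size as sets * line size * physical partitions * ways.
 */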
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}

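/* Walk cpuid(4) subleaves until a CACHE_TYPE_NULL leaf terminates the list. */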
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}

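/*
 * Determine L1/L2/L3 (and trace cache) sizes, preferring the
 * deterministic cpuid(4) leaves and falling back to the cpuid(2)
 * descriptor table. Under CONFIG_X86_HT this also records the
 * last-level-cache ID used for topology. Returns the L2 size in KB.
 */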
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++) {
				if (regs[j] & (1 << 31))
					regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
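/*
 * Record which CPUs share cache leaf @index with @cpu: siblings are the
 * online CPUs whose APIC ID matches once the low num_threads_sharing
 * bits are shifted out.
 */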
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif

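/*
 * Drop this CPU from all sibling shared_cpu_maps and free its
 * _cpuid4_info array.
 */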
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
}

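/*
 * Runs on the target CPU via smp_call_function_single() so the cpuid(4)
 * lookups describe that CPU's own caches.
 */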
static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

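/* Allocate the per-CPU _cpuid4_info array and populate it on @cpu. */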
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int			retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

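/*
 * L3 cache index disable interface: show_cache_disable() reads back the
 * per-index disable word from the node's K8 northbridge miscellaneous
 * device at PCI config offset 0x1BC + 4 * index.
 */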
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int reg = 0;

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!dev)
		return -EINVAL;

	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
	return sprintf(buf, "%x\n", reg);
}

#define SHOW_CACHE_DISABLE(index)					\
static ssize_t								\
show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return show_cache_disable(this_leaf, buf, index);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

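/*
 * Write a new index disable word. The scrub rate bits at config offset
 * 0x58 are cleared first; the value is then written with bit 30 masked
 * off, caches are flushed with wbinvd(), and the full value is written.
 */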
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
	const char *buf, size_t count, unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned long val = 0;
	unsigned int scrubber = 0;

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!dev)
		return -EINVAL;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	val |= 0xc0000000;

	pci_read_config_dword(dev, 0x58, &scrubber);
	scrubber &= ~0x1f000000;
	pci_write_config_dword(dev, 0x58, scrubber);

	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
	wbinvd();
	pci_write_config_dword(dev, 0x1BC + index * 4, val);
	return count;
}

#define STORE_CACHE_DISABLE(index)					\
static ssize_t								\
store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
			    const char *buf, size_t count)		\
{									\
	return store_cache_disable(this_leaf, buf, count, index);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
					show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
					show_cache_disable_1, store_cache_disable_1);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	&cache_disable_0.attr,
	&cache_disable_1.attr,
	NULL
};

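/*
 * Generic sysfs ->show()/->store() entry points: route the request to
 * the matching _cache_attr handler for this cpu/index.
 */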
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

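/*
 * The kobjects registered below appear under each CPU's sysfs directory
 * (conventionally /sys/devices/system/cpu/cpuX) as cache/indexY/, one
 * directory per cache leaf with the files listed in default_attrs.
 */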
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			}
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

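/*
 * CPU hotplug callback: add or remove the cache sysfs entries as CPUs
 * come online or go offline.
 */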
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

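/*
 * Register the cache interface for all CPUs that are already online and
 * install the hotplug notifier to cover the rest.
 */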
static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif