/*
 *      Routines to identify caches on Intel CPU.
 *
 *      Changes:
 *      Venkatesh Pallipadi     : Adding cache identification through cpuid(4)
 *      Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *      Andi Kleen / Andreas Herrmann   : CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/smp.h>

#define LVL_1_INST      1
#define LVL_1_DATA      2
#define LVL_2           3
#define LVL_3           4
#define LVL_TRACE       5
struct _cache_table
{
        unsigned char descriptor;
        char cache_type;
        short size;
};

/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
        { 0x06, LVL_1_INST, 8 },        /* 4-way set assoc, 32 byte line size */
        { 0x08, LVL_1_INST, 16 },       /* 4-way set assoc, 32 byte line size */
        { 0x0a, LVL_1_DATA, 8 },        /* 2-way set assoc, 32 byte line size */
        { 0x0c, LVL_1_DATA, 16 },       /* 4-way set assoc, 32 byte line size */
        { 0x22, LVL_3, 512 },           /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x23, LVL_3, 1024 },          /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x25, LVL_3, 2048 },          /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x29, LVL_3, 4096 },          /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x2c, LVL_1_DATA, 32 },       /* 8-way set assoc, 64 byte line size */
        { 0x30, LVL_1_INST, 32 },       /* 8-way set assoc, 64 byte line size */
        { 0x39, LVL_2, 128 },           /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3a, LVL_2, 192 },           /* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3b, LVL_2, 128 },           /* 2-way set assoc, sectored cache, 64 byte line size */
        { 0x3c, LVL_2, 256 },           /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3d, LVL_2, 384 },           /* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3e, LVL_2, 512 },           /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3f, LVL_2, 256 },           /* 2-way set assoc, 64 byte line size */
        { 0x41, LVL_2, 128 },           /* 4-way set assoc, 32 byte line size */
        { 0x42, LVL_2, 256 },           /* 4-way set assoc, 32 byte line size */
        { 0x43, LVL_2, 512 },           /* 4-way set assoc, 32 byte line size */
        { 0x44, LVL_2, 1024 },          /* 4-way set assoc, 32 byte line size */
        { 0x45, LVL_2, 2048 },          /* 4-way set assoc, 32 byte line size */
        { 0x46, LVL_3, 4096 },          /* 4-way set assoc, 64 byte line size */
        { 0x47, LVL_3, 8192 },          /* 8-way set assoc, 64 byte line size */
        { 0x49, LVL_3, 4096 },          /* 16-way set assoc, 64 byte line size */
        { 0x4a, LVL_3, 6144 },          /* 12-way set assoc, 64 byte line size */
        { 0x4b, LVL_3, 8192 },          /* 16-way set assoc, 64 byte line size */
        { 0x4c, LVL_3, 12288 },         /* 12-way set assoc, 64 byte line size */
        { 0x4d, LVL_3, 16384 },         /* 16-way set assoc, 64 byte line size */
        { 0x60, LVL_1_DATA, 16 },       /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x66, LVL_1_DATA, 8 },        /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x67, LVL_1_DATA, 16 },       /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x68, LVL_1_DATA, 32 },       /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x70, LVL_TRACE, 12 },        /* 8-way set assoc */
        { 0x71, LVL_TRACE, 16 },        /* 8-way set assoc */
        { 0x72, LVL_TRACE, 32 },        /* 8-way set assoc */
        { 0x73, LVL_TRACE, 64 },        /* 8-way set assoc */
        { 0x78, LVL_2, 1024 },          /* 4-way set assoc, 64 byte line size */
        { 0x79, LVL_2, 128 },           /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7a, LVL_2, 256 },           /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7b, LVL_2, 512 },           /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7c, LVL_2, 1024 },          /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7d, LVL_2, 2048 },          /* 8-way set assoc, 64 byte line size */
        { 0x7f, LVL_2, 512 },           /* 2-way set assoc, 64 byte line size */
        { 0x82, LVL_2, 256 },           /* 8-way set assoc, 32 byte line size */
        { 0x83, LVL_2, 512 },           /* 8-way set assoc, 32 byte line size */
        { 0x84, LVL_2, 1024 },          /* 8-way set assoc, 32 byte line size */
        { 0x85, LVL_2, 2048 },          /* 8-way set assoc, 32 byte line size */
        { 0x86, LVL_2, 512 },           /* 4-way set assoc, 64 byte line size */
        { 0x87, LVL_2, 1024 },          /* 8-way set assoc, 64 byte line size */
        { 0x00, 0, 0}
};

enum _cache_type
{
        CACHE_TYPE_NULL = 0,
        CACHE_TYPE_DATA = 1,
        CACHE_TYPE_INST = 2,
        CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
        struct {
                enum _cache_type        type:5;
                unsigned int            level:3;
                unsigned int            is_self_initializing:1;
                unsigned int            is_fully_associative:1;
                unsigned int            reserved:4;
                unsigned int            num_threads_sharing:12;
                unsigned int            num_cores_on_die:6;
        } split;
        u32 full;
};

union _cpuid4_leaf_ebx {
        struct {
                unsigned int            coherency_line_size:12;
                unsigned int            physical_line_partition:10;
                unsigned int            ways_of_associativity:10;
        } split;
        u32 full;
};

union _cpuid4_leaf_ecx {
        struct {
                unsigned int            number_of_sets:32;
        } split;
        u32 full;
};

struct _cpuid4_info {
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
        cpumask_t shared_cpu_map;
};
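
/* number of CPUID-4 style cache leaves, detected once on the boot CPU */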
unsigned short                  num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc., which are currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
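
/*
 * The unions below mirror the register layout of AMD's legacy cache leaves:
 * CPUID 0x80000005 returns the L1 data cache in ECX and the L1 instruction
 * cache in EDX; CPUID 0x80000006 returns L2 in ECX and L3 in EDX.
 */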
union l1_cache {
        struct {
                unsigned line_size : 8;
                unsigned lines_per_tag : 8;
                unsigned assoc : 8;
                unsigned size_in_kb : 8;
        };
        unsigned val;
};

union l2_cache {
        struct {
                unsigned line_size : 8;
                unsigned lines_per_tag : 4;
                unsigned assoc : 4;
                unsigned size_in_kb : 16;
        };
        unsigned val;
};

union l3_cache {
        struct {
                unsigned line_size : 8;
                unsigned lines_per_tag : 4;
                unsigned assoc : 4;
                unsigned res : 2;
                unsigned size_encoded : 14;
        };
        unsigned val;
};
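
/*
 * assocs[] translates the 4-bit associativity code from CPUID 0x80000006
 * into a number of ways; 0xf means fully associative. The indices follow
 * AMD's documented encoding.
 */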
static unsigned short assocs[] __cpuinitdata = {
        [1] = 1, [2] = 2, [4] = 4, [6] = 8,
        [8] = 16, [0xa] = 32, [0xb] = 48,
        [0xc] = 64,
        [0xf] = 0xffff // ??
};

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };

static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
                       union _cpuid4_leaf_ebx *ebx,
                       union _cpuid4_leaf_ecx *ecx)
{
        unsigned dummy;
        unsigned line_size, lines_per_tag, assoc, size_in_kb;
        union l1_cache l1i, l1d;
        union l2_cache l2;
        union l3_cache l3;
        union l1_cache *l1 = &l1d;

        eax->full = 0;
        ebx->full = 0;
        ecx->full = 0;

        cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
        cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

        switch (leaf) {
        case 1:
                l1 = &l1i;
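                /* fall through: leaves 0 (L1D) and 1 (L1I) share the l1_cache layout */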
        case 0:
                if (!l1->val)
                        return;
                assoc = l1->assoc;
                line_size = l1->line_size;
                lines_per_tag = l1->lines_per_tag;
                size_in_kb = l1->size_in_kb;
                break;
        case 2:
                if (!l2.val)
                        return;
                assoc = l2.assoc;
                line_size = l2.line_size;
                lines_per_tag = l2.lines_per_tag;
                /* cpu_data has errata corrections for K7 applied */
                size_in_kb = current_cpu_data.x86_cache_size;
                break;
        case 3:
                if (!l3.val)
                        return;
                assoc = l3.assoc;
                line_size = l3.line_size;
                lines_per_tag = l3.lines_per_tag;
                size_in_kb = l3.size_encoded * 512;
                break;
        default:
                return;
        }

        eax->split.is_self_initializing = 1;
        eax->split.type = types[leaf];
        eax->split.level = levels[leaf];
        if (leaf == 3)
                eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
        else
                eax->split.num_threads_sharing = 0;
        eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

        if (assoc == 0xf)
                eax->split.is_fully_associative = 1;
        ebx->split.coherency_line_size = line_size - 1;
        ebx->split.ways_of_associativity = assocs[assoc] - 1;
        ebx->split.physical_line_partition = lines_per_tag - 1;
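        /*
         * CPUID leaf 4 encodes the set count as (sets - 1): derive it from
         * size / (line size * ways) and subtract one to match.
         */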
        ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
                (ebx->split.ways_of_associativity + 1) - 1;
}

static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
        union _cpuid4_leaf_eax  eax;
        union _cpuid4_leaf_ebx  ebx;
        union _cpuid4_leaf_ecx  ecx;
        unsigned                edx;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                amd_cpuid4(index, &eax, &ebx, &ecx);
        else
                cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
        if (eax.split.type == CACHE_TYPE_NULL)
                return -EIO; /* better error ? */

        this_leaf->eax = eax;
        this_leaf->ebx = ebx;
        this_leaf->ecx = ecx;
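        /*
         * Each field is stored as (value - 1) in leaf 4, so the total size is
         * the product of (field + 1) over sets, line size, partitions and ways.
         */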
        this_leaf->size = (ecx.split.number_of_sets + 1) *
                (ebx.split.coherency_line_size + 1) *
                (ebx.split.physical_line_partition + 1) *
                (ebx.split.ways_of_associativity + 1);
        return 0;
}

static int __cpuinit find_num_cache_leaves(void)
{
        unsigned int            eax, ebx, ecx, edx;
        union _cpuid4_leaf_eax  cache_eax;
        int                     i = -1;

        do {
                ++i;
                /* Do cpuid(4) loop to find out num_cache_leaves */
                cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
                cache_eax.full = eax;
        } while (cache_eax.split.type != CACHE_TYPE_NULL);
        return i;
}

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
        unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
        unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
        unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
        unsigned int cpu = c->cpu_index;
#endif

        if (c->cpuid_level > 3) {
                static int is_initialized;

                if (is_initialized == 0) {
                        /* Init num_cache_leaves from boot CPU */
                        num_cache_leaves = find_num_cache_leaves();
                        is_initialized++;
                }

                /*
                 * Whenever possible use cpuid(4), the deterministic cache
                 * parameters leaf, to find the cache details.
                 */
                for (i = 0; i < num_cache_leaves; i++) {
                        struct _cpuid4_info this_leaf;

                        int retval;

                        retval = cpuid4_cache_lookup(i, &this_leaf);
                        if (retval >= 0) {
                                switch(this_leaf.eax.split.level) {
                                case 1:
                                        if (this_leaf.eax.split.type ==
                                                        CACHE_TYPE_DATA)
                                                new_l1d = this_leaf.size/1024;
                                        else if (this_leaf.eax.split.type ==
                                                        CACHE_TYPE_INST)
                                                new_l1i = this_leaf.size/1024;
                                        break;
                                case 2:
                                        new_l2 = this_leaf.size/1024;
                                        num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                        index_msb = get_count_order(num_threads_sharing);
                                        l2_id = c->apicid >> index_msb;
                                        break;
                                case 3:
                                        new_l3 = this_leaf.size/1024;
                                        num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                        index_msb = get_count_order(num_threads_sharing);
                                        l3_id = c->apicid >> index_msb;
                                        break;
                                default:
                                        break;
                                }
                        }
                }
        }
        /*
         * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
         * trace cache
         */
        if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
                /* supports eax=2 call */
                int j, n;
                unsigned int regs[4];
                unsigned char *dp = (unsigned char *)regs;
                int only_trace = 0;

                if (num_cache_leaves != 0 && c->x86 == 15)
                        only_trace = 1;

                /* Number of times to iterate */
                n = cpuid_eax(2) & 0xFF;

                for ( i = 0 ; i < n ; i++ ) {
                        cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                        /* If bit 31 is set, this is an unknown format */
                        for ( j = 0 ; j < 3 ; j++ ) {
                                if (regs[j] & (1 << 31)) regs[j] = 0;
                        }

                        /* Byte 0 is level count, not a descriptor */
                        for ( j = 1 ; j < 16 ; j++ ) {
                                unsigned char des = dp[j];
                                unsigned char k = 0;

                                /* look up this descriptor in the table */
                                while (cache_table[k].descriptor != 0)
                                {
                                        if (cache_table[k].descriptor == des) {
                                                if (only_trace && cache_table[k].cache_type != LVL_TRACE)
                                                        break;
                                                switch (cache_table[k].cache_type) {
                                                case LVL_1_INST:
                                                        l1i += cache_table[k].size;
                                                        break;
                                                case LVL_1_DATA:
                                                        l1d += cache_table[k].size;
                                                        break;
                                                case LVL_2:
                                                        l2 += cache_table[k].size;
                                                        break;
                                                case LVL_3:
                                                        l3 += cache_table[k].size;
                                                        break;
                                                case LVL_TRACE:
                                                        trace += cache_table[k].size;
                                                        break;
                                                }

                                                break;
                                        }

                                        k++;
                                }
                        }
                }
        }

        if (new_l1d)
                l1d = new_l1d;

        if (new_l1i)
                l1i = new_l1i;

        if (new_l2) {
                l2 = new_l2;
#ifdef CONFIG_X86_HT
                per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
        }

        if (new_l3) {
                l3 = new_l3;
#ifdef CONFIG_X86_HT
                per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
        }

        if (trace)
                printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
        else if ( l1i )
                printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);

        if (l1d)
                printk(", L1 D cache: %dK\n", l1d);
        else
                printk("\n");

        if (l2)
                printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

        if (l3)
                printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

        return l2;
}

/* pointer to _cpuid4_info array (for each cache leaf) */
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#define CPUID4_INFO_IDX(x,y)    (&((cpuid4_info[x])[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
        struct _cpuid4_info     *this_leaf, *sibling_leaf;
        unsigned long num_threads_sharing;
        int index_msb, i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        this_leaf = CPUID4_INFO_IDX(cpu, index);
        num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

        if (num_threads_sharing == 1)
                cpu_set(cpu, this_leaf->shared_cpu_map);
        else {
                index_msb = get_count_order(num_threads_sharing);
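
                /*
                 * CPUs whose APIC IDs agree above the low index_msb bits
                 * share this cache; mark each of them in the shared map.
                 */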
                for_each_online_cpu(i) {
                        if (cpu_data(i).apicid >> index_msb ==
                            c->apicid >> index_msb) {
                                cpu_set(i, this_leaf->shared_cpu_map);
                                if (i != cpu && cpuid4_info[i]) {
                                        sibling_leaf = CPUID4_INFO_IDX(i, index);
                                        cpu_set(cpu, sibling_leaf->shared_cpu_map);
                                }
                        }
                }
        }
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
        struct _cpuid4_info     *this_leaf, *sibling_leaf;
        int sibling;

        this_leaf = CPUID4_INFO_IDX(cpu, index);
        for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
                sibling_leaf = CPUID4_INFO_IDX(sibling, index);
                cpu_clear(cpu, sibling_leaf->shared_cpu_map);
        }
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
        int i;

        for (i = 0; i < num_cache_leaves; i++)
                cache_remove_shared_cpu_map(cpu, i);

        kfree(cpuid4_info[cpu]);
        cpuid4_info[cpu] = NULL;
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
        struct _cpuid4_info     *this_leaf;
        unsigned long           j;
        int                     retval;
        cpumask_t               oldmask;

        if (num_cache_leaves == 0)
                return -ENOENT;

        cpuid4_info[cpu] = kzalloc(
            sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
        if (cpuid4_info[cpu] == NULL)
                return -ENOMEM;
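
        /*
         * cpuid must execute on the CPU being queried, so temporarily bind
         * this task to it while reading the cache leaves.
         */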
        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
        if (retval)
                goto out;

        /* Do cpuid and store the results */
        for (j = 0; j < num_cache_leaves; j++) {
                this_leaf = CPUID4_INFO_IDX(cpu, j);
                retval = cpuid4_cache_lookup(j, this_leaf);
                if (unlikely(retval < 0)) {
                        int i;

                        for (i = 0; i < j; i++)
                                cache_remove_shared_cpu_map(cpu, i);
                        break;
                }
                cache_shared_cpu_map_setup(cpu, j);
        }
        set_cpus_allowed(current, oldmask);

out:
        if (retval) {
                kfree(cpuid4_info[cpu]);
                cpuid4_info[cpu] = NULL;
        }

        return retval;
}

#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static struct kobject * cache_kobject[NR_CPUS];

struct _index_kobject {
        struct kobject kobj;
        unsigned int cpu;
        unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x,y)    (&((index_kobject[x])[y]))
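
/*
 * CPUID leaf 4 reports most fields biased by -1; the 'val' argument lets
 * each show routine add the bias back before printing (level uses 0 since
 * it is reported directly).
 */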
#define show_one_plus(file_name, object, val)                           \
static ssize_t show_##file_name                                         \
                        (struct _cpuid4_info *this_leaf, char *buf)     \
{                                                                       \
        return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
        return sprintf (buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
{
        char mask_str[NR_CPUS];
        cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
        return sprintf(buf, "%s\n", mask_str);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
        switch(this_leaf->eax.split.type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
                break;
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
                break;
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
                break;
        default:
                return sprintf(buf, "Unknown\n");
                break;
        }
}

struct _cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct _cpuid4_info *, char *);
        ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
        __ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);

static struct attribute * default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &physical_line_partition.attr,
        &ways_of_associativity.attr,
        &number_of_sets.attr,
        &size.attr,
        &shared_cpu_map.attr,
        NULL
};

#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)

static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
{
        struct _cache_attr *fattr = to_attr(attr);
        struct _index_kobject *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ?
                fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
                        buf) :
                0;
        return ret;
}

static ssize_t store(struct kobject * kobj, struct attribute * attr,
                     const char * buf, size_t count)
{
        return 0;
}

static struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cache = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
        .sysfs_ops      = &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
        kfree(cache_kobject[cpu]);
        kfree(index_kobject[cpu]);
        cache_kobject[cpu] = NULL;
        index_kobject[cpu] = NULL;
        free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
        int err;

        if (num_cache_leaves == 0)
                return -ENOENT;

        err = detect_cache_attributes(cpu);
        if (err)
                return err;

        /* Allocate all required memory */
        cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
        if (unlikely(cache_kobject[cpu] == NULL))
                goto err_out;

        index_kobject[cpu] = kzalloc(
            sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
        if (unlikely(index_kobject[cpu] == NULL))
                goto err_out;

        return 0;

err_out:
        cpuid4_cache_sysfs_exit(cpu);
        return -ENOMEM;
}
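
/* CPUs that currently have the cache sysfs interface registered */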
static cpumask_t cache_dev_map = CPU_MASK_NONE;

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i, j;
        struct _index_kobject *this_object;
        int retval;

        retval = cpuid4_cache_sysfs_init(cpu);
        if (unlikely(retval < 0))
                return retval;

        retval = kobject_init_and_add(cache_kobject[cpu], &ktype_percpu_entry,
                                      &sys_dev->kobj, "%s", "cache");
        if (retval < 0) {
                cpuid4_cache_sysfs_exit(cpu);
                return retval;
        }

        for (i = 0; i < num_cache_leaves; i++) {
                this_object = INDEX_KOBJECT_PTR(cpu,i);
                this_object->cpu = cpu;
                this_object->index = i;
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &ktype_cache, cache_kobject[cpu],
                                              "index%1lu", i);
                if (unlikely(retval)) {
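                        /* unwind the index kobjects registered so far */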
                        for (j = 0; j < i; j++) {
                                kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
                        }
                        kobject_put(cache_kobject[cpu]);
                        cpuid4_cache_sysfs_exit(cpu);
                        break;
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
        if (!retval)
                cpu_set(cpu, cache_dev_map);

        kobject_uevent(cache_kobject[cpu], KOBJ_ADD);
        return retval;
}

static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i;

        if (cpuid4_info[cpu] == NULL)
                return;
        if (!cpu_isset(cpu, cache_dev_map))
                return;
        cpu_clear(cpu, cache_dev_map);

        for (i = 0; i < num_cache_leaves; i++)
                kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
        kobject_put(cache_kobject[cpu]);
        cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;

        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cache_remove_dev(sys_dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
        .notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
        int i;

        if (num_cache_leaves == 0)
                return 0;

        for_each_online_cpu(i) {
                int err;
                struct sys_device *sys_dev = get_cpu_sysdev(i);

                err = cache_add_dev(sys_dev);
                if (err)
                        return err;
        }
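        /* CPUs that come online later are picked up by the hotplug notifier */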
        register_hotcpu_notifier(&cacheinfo_cpu_notifier);
        return 0;
}

device_initcall(cache_sysfs_init);

#endif