 *	Routines to identify caches on Intel CPU.
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
	unsigned char descriptor;

/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __cpuinitdata =
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
	CACHE_TYPE_UNIFIED = 3

union _cpuid4_leaf_eax {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;

union _cpuid4_leaf_ebx {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;

union _cpuid4_leaf_ecx {
		unsigned int		number_of_sets:32;
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	cpumask_t shared_cpu_map;
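
/* Number of cpuid(4) cache leaves; found once on the boot CPU and reused for every CPU. */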
unsigned short			num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT, etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
	unsigned line_size : 8;
	unsigned lines_per_tag : 8;
	unsigned size_in_kb : 8;

	unsigned line_size : 8;
	unsigned lines_per_tag : 4;
	unsigned size_in_kb : 16;

	unsigned line_size : 8;
	unsigned lines_per_tag : 4;
	unsigned size_encoded : 14;
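
/* Map the associativity encoding used by AMD's CPUID 0x80000005/0x80000006 leaves to an actual way count. */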
static const unsigned short assocs[] = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,

static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };
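
/* Build cpuid(4)-style eax/ebx/ecx values for cache leaf 'leaf' from AMD's
   0x80000005/0x80000006 data, so the common code below stays vendor independent. */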
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		       union _cpuid4_leaf_ebx *ebx,
		       union _cpuid4_leaf_ecx *ecx)

	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l1_cache *l1 = &l1d;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;

		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;

		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
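		/* AMD reports the L3 size in 512 KB units (the size_encoded field of
		   CPUID 0x80000006 EDX), hence the scaling below. */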
		size_in_kb = l3.size_encoded * 512;
	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
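	/* cpuid(4) encodes each of these quantities as "value - 1"; number_of_sets
	   follows from size = sets * line_size * ways and is stored the same way. */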
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)

	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		amd_cpuid4(index, &eax, &ebx, &ecx);
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */
	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
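	/* Total size in bytes: every factor below is reported by cpuid(4) as value - 1. */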
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
static int __cpuinit find_num_cache_leaves(void)

	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;

		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)

	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;

	unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;

			retval = cpuid4_cache_lookup(i, &this_leaf);

			switch(this_leaf.eax.split.level) {
				if (this_leaf.eax.split.type ==
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type ==
					new_l1i = this_leaf.size/1024;
				new_l2 = this_leaf.size/1024;
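				/* Tag the L2 with an ID derived from the APIC ID: CPUs whose
				   APIC IDs match after dropping the thread-sharing bits share
				   this cache. The L3 below is handled the same way. */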
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->apicid >> index_msb;

				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->apicid >> index_msb;
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		unsigned char *dp = (unsigned char *)regs;

		if (num_cache_leaves != 0 && c->x86 == 15)

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for ( i = 0 ; i < n ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for ( j = 0 ; j < 3 ; j++ ) {
				if ( regs[j] < 0 ) regs[j] = 0;
			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)

					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)

						switch (cache_table[k].cache_type) {
							l1i += cache_table[k].size;
							l1d += cache_table[k].size;
							l2 += cache_table[k].size;
							l3 += cache_table[k].size;
							trace += cache_table[k].size;
		cpu_llc_id[cpu] = l2_id;

		cpu_llc_id[cpu] = l3_id;

		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);

		printk(", L1 D cache: %dK\n", l1d);

		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
/* pointer to _cpuid4_info array (for each cache leaf) */
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#define CPUID4_INFO_IDX(x,y) (&((cpuid4_info[x])[y]))

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)

	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	struct cpuinfo_x86 *c = cpu_data;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);

		index_msb = get_count_order(num_threads_sharing);
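
		/* Two CPUs share this cache iff their APIC IDs agree once the low
		   'index_msb' bits (which distinguish the sharing threads) are dropped. */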
		for_each_online_cpu(i) {
			if (c[i].apicid >> index_msb ==
			    c[cpu].apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && cpuid4_info[i]) {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);
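
/* Remove 'cpu' from the shared_cpu_map of every sibling that shares cache leaf 'index'. */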
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)

	struct _cpuid4_info	*this_leaf, *sibling_leaf;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);

static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
static void free_cache_attributes(unsigned int cpu)

	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;

static int __cpuinit detect_cache_attributes(unsigned int cpu)

	struct _cpuid4_info	*this_leaf;

	if (num_cache_leaves == 0)

	cpuid4_info[cpu] = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (cpuid4_info[cpu] == NULL)
	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
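	/* cpuid must execute on the CPU being probed, so bind this task to it for
	   the lookups below; the saved mask is restored afterwards. */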
	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0))
		cache_shared_cpu_map_setup(cpu, j);
	set_cpus_allowed(current, oldmask);

	free_cache_attributes(cpu);
#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static struct kobject * cache_kobject[NR_CPUS];

struct _index_kobject {
	unsigned short index;

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x,y) (&((index_kobject[x])[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
	return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
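
/* cpuid(4) encodes most of these fields as "value - 1"; passing val = 1 undoes
   that for sysfs, while level is shown as-is with val = 0. */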
show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)

	return sprintf (buf, "%luK\n", this_leaf->size / 1024);
static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)

	char mask_str[NR_CPUS];
	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
	return sprintf(buf, "%s\n", mask_str);

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
	switch(this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");

	return sprintf(buf, "Unknown\n");
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);

static struct attribute * default_attrs[] = {
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)

static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)

	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),

static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)

static struct sysfs_ops sysfs_ops = {

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
static void cpuid4_cache_sysfs_exit(unsigned int cpu)

	kfree(cache_kobject[cpu]);
	kfree(index_kobject[cpu]);
	cache_kobject[cpu] = NULL;
	index_kobject[cpu] = NULL;
	free_cache_attributes(cpu);

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)

	if (num_cache_leaves == 0)

	detect_cache_attributes(cpu);
	if (cpuid4_info[cpu] == NULL)

	/* Allocate all required memory */
	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(cache_kobject[cpu] == NULL))

	index_kobject[cpu] = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(index_kobject[cpu] == NULL))

	cpuid4_cache_sysfs_exit(cpu);
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)

	unsigned int cpu = sys_dev->id;
	struct _index_kobject *this_object;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))

	cache_kobject[cpu]->parent = &sys_dev->kobj;
	kobject_set_name(cache_kobject[cpu], "%s", "cache");
	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
	retval = kobject_register(cache_kobject[cpu]);
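
	/* Register one "indexN" kobject per cache leaf under cpuX/cache; on failure,
	   unregister the ones already added and tear the hierarchy back down. */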
	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu,i);
		this_object->cpu = cpu;
		this_object->index = i;
		this_object->kobj.parent = cache_kobject[cpu];
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &ktype_cache;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
					&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
			kobject_unregister(cache_kobject[cpu]);
			cpuid4_cache_sysfs_exit(cpu);
static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)

	unsigned int cpu = sys_dev->id;

	if (cpuid4_info[cpu] == NULL)

	for (i = 0; i < num_cache_leaves; i++) {
		cache_remove_shared_cpu_map(cpu, i);
		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
	kobject_unregister(cache_kobject[cpu]);
	cpuid4_cache_sysfs_exit(cpu);
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)

	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
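	/* CPU hotplug hook: build the cache sysfs tree when a CPU comes online
	   and remove it again when the CPU goes away. */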
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =

	.notifier_call = cacheinfo_cpu_callback,

static int __cpuinit cache_sysfs_init(void)

	if (num_cache_leaves == 0)

	register_hotcpu_notifier(&cacheinfo_cpu_notifier);

	for_each_online_cpu(i) {
		cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,

device_initcall(cache_sysfs_init);