/*
 * Routines to identify caches on Intel CPUs.
 *
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};
/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */
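/* Each entry maps a CPUID leaf 2 descriptor byte to a cache level/type and
   a size in KB; the leaf-2 path in init_intel_cacheinfo() below walks this
   table and sums the sizes per level. */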
static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, 6144 },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, 2048 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, 1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, 2048 },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, 8192 },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, 2048 },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
enum _cache_type {
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};
union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};
union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};
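/* The three unions above mirror the EAX/EBX/ECX register layouts of CPUID
   leaf 4: "full" gives the raw register, "split" the decoded bitfields.
   Note that the hardware reports ways, partitions, line size and sets
   minus one. */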
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};
/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
};
unsigned short num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT, etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy"). */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};
static const unsigned short __cpuinitconst assocs[] = {
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};
static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
	   union _cpuid4_leaf_ebx *ebx,
	   union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;
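	/* AMD reports L1D/L1I in CPUID 0x80000005 (ECX/EDX) and L2/L3 in
	   CPUID 0x80000006 (ECX/EDX); decode them via the unions above. */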
	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
	assoc = assocs[l1->assoc];
	line_size = l1->line_size;
	lines_per_tag = l1->lines_per_tag;
	size_in_kb = l1->size_in_kb;
	assoc = assocs[l2.assoc];
	line_size = l2.line_size;
	lines_per_tag = l2.lines_per_tag;
	/* cpu_data has errata corrections for K7 applied */
	size_in_kb = current_cpu_data.x86_cache_size;
	assoc = assocs[l3.assoc];
	line_size = l3.line_size;
	lines_per_tag = l3.lines_per_tag;
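	/* CPUID 0x80000006 encodes the L3 size in 512 KB units. */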
	size_in_kb = l3.size_encoded * 512;
	if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
		size_in_kb = size_in_kb >> 1;
		assoc = assoc >> 1;
	}
	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
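	/* CPUID4 reports sets minus one, so compute sets as
	   size / (line size * ways) and subtract one. */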
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
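	/* Mark L3 leaves on which individual cache indices may be disabled
	   through the K8 northbridge (see the cache_disable_[01] sysfs
	   attributes below); some families/steppings are excluded. */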
	if (boot_cpu_data.x86 == 0x11)
		return;

	/* see erratum #382 */
	if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
		return;

	this_leaf->can_disable = 1;
}
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned edx;
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}
	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */
	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
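	/* Total size = sets * line size * physical partitions * ways;
	   each CPUID field is stored minus one, hence the +1s. */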
	this_leaf->size = (ecx.split.number_of_sets + 1) *
			  (ebx.split.coherency_line_size + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity + 1);
	return 0;
}
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int	eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int		i = -1;
	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
	unsigned int cpu = c->cpu_index;
	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}
		/*
		 * Whenever possible use cpuid(4), the deterministic cache
		 * parameters leaf, to find the cache details.
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;
			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
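				/* Record the size per level and, for L2/L3,
				   derive an ID of the sharing domain: APIC
				   IDs of all threads sharing the cache agree
				   above the low log2(num_threads_sharing)
				   bits. */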
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 +
						this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 +
						this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache.
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;
		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;
		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;
			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;
				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}
						break;
					}
					k++;
				}
			}
		}
	}
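	/* cpu_llc_id identifies the last-level cache domain; prefer the L3
	   ID when one was found, otherwise fall back to the L2 ID. */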
	if (new_l2) {
		l2 = new_l2;
		per_cpu(cpu_llc_id, cpu) = l2_id;
	}

	if (new_l3) {
		l3 = new_l3;
		per_cpu(cpu_llc_id, cpu) = l3_id;
	}
	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(KERN_CONT ", L1 D cache: %dK\n", l1d);
	else
		printk(KERN_CONT "\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
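	/* For the AMD L3 (index 3), CPUID's num_threads_sharing is not
	   reliable across nodes; copy the precomputed llc_shared_map
	   instead. */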
	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		struct cpuinfo_x86 *d;

		for_each_online_cpu(i) {
			if (!per_cpu(cpuid4_info, i))
				continue;
			d = &cpu_data(i);
			this_leaf = CPUID4_INFO_IDX(i, index);
			cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
				     d->llc_shared_map);
		}
		return;
	}
= CPUID4_INFO_IDX(cpu
, index
);
539 num_threads_sharing
= 1 + this_leaf
->eax
.split
.num_threads_sharing
;
541 if (num_threads_sharing
== 1)
542 cpumask_set_cpu(cpu
, to_cpumask(this_leaf
->shared_cpu_map
));
544 index_msb
= get_count_order(num_threads_sharing
);
		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
}
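/* _cpuid4_info_regs is a layout-compatible prefix of _cpuid4_info (see the
   "subset" comment above), so a _cpuid4_info pointer can safely be downcast
   for the register lookup. */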
static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
}
static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();
	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;

		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int retval;
	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)
		return -ENOMEM;
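	/* CPUID describes the CPU it executes on, so run the lookup on the
	   target CPU via a cross-call. */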
	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;
	}

	return retval;
}
#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);
struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};
/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}
show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
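/* level is stored as-is; the other fields are reported minus one by CPUID,
   so the "+ 1" restores the true counts in sysfs. */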
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;
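	/* PTR_ALIGN rounds buf up to the next page boundary; the difference
	   is how much room is left in the PAGE_SIZE sysfs buffer. */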
	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}
static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}
static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}
#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int reg = 0;

	if (!this_leaf->can_disable)
		return -EINVAL;
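	/* The two L3 index-disable slots live at offsets 0x1BC and 0x1C0 of
	   the node's northbridge PCI config space (0x1BC + index * 4). */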
	if (!dev)
		return -EINVAL;

	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
	return sprintf(buf, "%x\n", reg);
}
#define SHOW_CACHE_DISABLE(index)					\
static ssize_t								\
show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return show_cache_disable(this_leaf, buf, index);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
				   unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned long val = 0;
	unsigned int scrubber = 0;
	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!dev)
		return -EINVAL;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;
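	/* Bits 28:24 of northbridge register 0x58 control the L3 scrub rate;
	   clear them to quiesce scrubbing while the disable bits change. */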
	pci_read_config_dword(dev, 0x58, &scrubber);
	scrubber &= ~0x1f000000;
	pci_write_config_dword(dev, 0x58, scrubber);

	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
	wbinvd();
	pci_write_config_dword(dev, 0x1BC + index * 4, val);
	return 1;
}
#define STORE_CACHE_DISABLE(index)					\
static ssize_t								\
store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
			    const char *buf, size_t count)		\
{									\
	return store_cache_disable(this_leaf, buf, count, index);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};
#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);
static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	&cache_disable_0.attr,
	&cache_disable_1.attr,
	NULL
};
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}
static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};
static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};
static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;
	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;
	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;
	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}
	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;
	if (per_cpu(cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;
	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};
static int __cpuinit cache_sysfs_init(void)
{
	int i, err = 0;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);