/*
 *	Routines to identify caches on Intel CPU.
 *
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/amd_nb.h>
#define LVL_1_INST	1	/* Instruction */
#define LVL_1_DATA	2	/* Data */
#define LVL_2		3	/* L2 */
#define LVL_3		4	/* L3 */
#define LVL_TRACE	5	/* Trace */

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};
#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */
static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2,      256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3,      MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3,      MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2,      192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2,      384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2,      256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2,      MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2,      MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3,      MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3,      MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2,      MB(3) },	/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3,      MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3,      MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3,      MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2,      MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE,  64 },	/* 8-way set assoc */
	{ 0x78, LVL_2,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2,      128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2,      256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2,      512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2,      512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2,      512 },	/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2,      256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2,      512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2,      MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2,      MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3,      MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3,      MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3,      MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3,      MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3,      MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3,      MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3,      MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_northbridge *nb;
};
struct _cpuid4_info {
	struct _cpuid4_info_regs base;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy"). */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};
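
/*
 * These unions mirror the ECX/EDX output of CPUID 0x80000005 (L1d/L1i)
 * and CPUID 0x80000006 (L2/L3), which amd_cpuid4() below decodes.
 */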
static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};
static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
	switch (leaf) {
	case 1:
		l1 = &l1i;
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}
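
	/*
	 * Repack the derived geometry into the CPUID(4)-style fields below;
	 * line size, associativity, partitions and set count are all stored
	 * as "value minus one".
	 */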
	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
			 unsigned int);
};

#ifdef CONFIG_AMD_NB

/*
 * L3 cache descriptors
 */
static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(nb->misc, 0x1C4, &val);
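
	/*
	 * Each enabled subcache contributes 1K of L3 indices; l3->indices
	 * ends up as the highest index that can be disabled below.
	 */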
	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
					int index)
{
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3)
		return;

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);
}
/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @l3: L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int slot)
{
	int index;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");
}
#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
			  unsigned int cpu)				\
{									\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
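
/*
 * Each L3 exposes two index-disable slots at offsets 0x1BC and 0x1C0
 * (0x1BC + slot * 4) of the northbridge misc PCI device; the top two
 * bits of a slot mark it as occupied (see the (3UL << 30) check above),
 * and the index is written once per enabled subcache below.
 */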
static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 *  disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache which indices we disable therefore a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}
/*
 * disable an L3 cache index by using a disable-slot
 *
 * @l3:    L3 cache descriptor
 * @cpu:   A CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
			    unsigned long index)
{
	int ret = 0;

	/*  check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);
	if (ret >= 0)
		return -EEXIST;

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EEXIST;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			printk(KERN_WARNING "L3 disable slot %d in use!\n",
					    slot);
		return err;
	}
	return count;
}

#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count,		\
			   unsigned int cpu)				\
{									\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)
static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);
static ssize_t
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
{
	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t
store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
		unsigned int cpu)
{
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	if (strict_strtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}

static struct _cache_attr subcaches =
	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);

#else	/* CONFIG_AMD_NB */
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB */
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */
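
	/*
	 * Total cache size follows from the leaf-4 geometry fields, which
	 * are all reported minus one: e.g. 64 sets of 8 ways with 64-byte
	 * lines and a single line partition give 64 * 8 * 64 = 32K.
	 */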
	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}

static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;

						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
#ifdef CONFIG_SMP

static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf;
	int i, sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (index == 3) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
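	/*
	 * On family 0x15 (Bulldozer), cache indices 1 (L1i) and 2 (L2) are
	 * shared by the two cores of a compute unit, which is what the
	 * sibling mask describes.
	 */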
	} else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
		for_each_cpu(i, cpu_sibling_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
	} else
		return 0;

	return 1;
}

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (cache_shared_amd_cpu_map_setup(cpu, index))
			return;
	}

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}

#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

		*retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}
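
/*
 * CPUID(4) has to run on the CPU whose caches are being enumerated, so
 * detect_cache_attributes() invokes get_cpu_leaves() on that CPU via
 * smp_call_function_single().
 */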
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int			retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
				unsigned int cpu)			\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}
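
/*
 * shared_cpu_map and shared_cpu_list share the helper below: type 0
 * prints the mask as a hex bitmap (cpumask_scnprintf), type 1 as a
 * human-readable CPU list (cpulist_scnprintf).
 */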
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
					  unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
					   unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	switch (this_leaf->base.eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	NULL
};
#ifdef CONFIG_AMD_NB
static struct attribute ** __cpuinit amd_l3_attrs(void)
{
	static struct attribute **attrs;
	int n;

	if (attrs)
		return attrs;

	n = sizeof (default_attrs) / sizeof (struct attribute *);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
	if (attrs == NULL)
		return attrs = default_attrs;

	for (n = 0; default_attrs[n]; n++)
		attrs[n] = default_attrs[n];

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		attrs[n++] = &cache_disable_0.attr;
		attrs[n++] = &cache_disable_1.attr;
	}

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		attrs[n++] = &subcaches.attr;

	return attrs;
}
#endif
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, this_leaf->cpu) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count, this_leaf->cpu) :
		0;
	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info   *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
		if (this_leaf->base.nb)
			ktype_cache.default_attrs = amd_l3_attrs();
#endif
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
static void __cpuinit cache_remove_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct device *dev = get_cpu_device(i);

		err = cache_add_dev(dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);