/*
 * Routines to identify caches on Intel CPU.
 *
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/smp.h>		/* smp_num_siblings, cpu_sibling_map */

#define LVL_1_INST	1	/* L1 instruction cache */
#define LVL_1_DATA	2	/* L1 data cache */
#define LVL_2		3	/* L2 cache */
#define LVL_3		4	/* L3 cache */
#define LVL_TRACE	5	/* trace (uop) cache */

struct _cache_table
{
	unsigned char descriptor;	/* descriptor byte from cpuid(2) */
	char cache_type;		/* LVL_* classification */
	short size;			/* size in KB */
};

/* all the cache descriptor types we care about (no TLB entries) */
static struct _cache_table cache_table[] __initdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0 }			/* terminator: descriptor 0 ends the lookup loop */
};

enum _cache_type
{
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

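/*
 * Example decode (illustrative value, not from any specific part):
 * eax.full = 0x121 gives split.type = 1 (CACHE_TYPE_DATA),
 * split.level = 1 and split.is_self_initializing = 1, i.e. a
 * self-initializing L1 data cache.
 */
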
union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;		/* computed cache size in bytes */
	cpumask_t shared_cpu_map;
};

#define MAX_CACHE_LEAVES		4
static unsigned short			__devinitdata num_cache_leaves;

static int __devinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;

	cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
	cache_eax.full = eax;
	if (cache_eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax.full = eax;
	this_leaf->ebx.full = ebx;
	this_leaf->ecx.full = ecx;
	this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) *
		(this_leaf->ebx.split.coherency_line_size + 1) *
		(this_leaf->ebx.split.physical_line_partition + 1) *
		(this_leaf->ebx.split.ways_of_associativity + 1);
	return 0;
}

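/*
 * Worked example for the size computation above (hypothetical leaf):
 * 4096 sets (number_of_sets = 4095), a 64 byte line
 * (coherency_line_size = 63), one line per sector
 * (physical_line_partition = 0) and 8 ways (ways_of_associativity = 7)
 * give (4095+1) * (63+1) * (0+1) * (7+1) = 2097152 bytes, i.e. 2MB.
 */
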
static int __init find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i;
	int			retval;

	retval = MAX_CACHE_LEAVES;
	/* Do cpuid(4) loop to find out num_cache_leaves */
	for (i = 0; i < MAX_CACHE_LEAVES; i++) {
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
		if (cache_eax.split.type == CACHE_TYPE_NULL) {
			/* First invalid leaf: the earlier ones are all there is */
			retval = i;
			break;
		}
	}
	return retval;
}

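/*
 * E.g. on an illustrative CPU with an L1I, an L1D and a unified L2
 * (three valid leaves), leaf 3 reports CACHE_TYPE_NULL and the
 * function returns 3.
 */
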
unsigned int __init init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */

	if (c->cpuid_level > 4) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;
			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					break;
				default:
					break;
				}
			}
		}
	}

	if (c->cpuid_level > 1) {
		/* supports eax=2 call */
		int i, j, n;
		int regs[4];	/* signed, so an invalid reg (bit 31 set) tests < 0 */
		unsigned char *dp = (unsigned char *)regs;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for ( i = 0 ; i < n ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for ( j = 0 ; j < 3 ; j++ ) {
				if ( regs[j] < 0 ) regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}
						break;
					}
					k++;
				}
			}
		}
	}

	/* cpuid(4) results, when available, override the descriptor table */
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2)
		l2 = new_l2;

	if (new_l3)
		l3 = new_l3;

	if ( trace )
		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if ( l1i )
		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
	if ( l1d )
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");
	if ( l2 )
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
	if ( l3 )
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	/*
	 * This assumes the L3 cache is shared; it typically lives in
	 * the northbridge. The L1 caches are included by the L2
	 * cache, and so should not be included for the purpose of
	 * SMP switching weights.
	 */
	c->x86_cache_size = l2 ? l2 : (l1i+l1d);

	return l2;
}

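/*
 * Illustration of the descriptor walk above, with a made-up cpuid(2)
 * result: if EAX came back as 0x43302c01, byte 0 (0x01) is the
 * iteration count and the remaining bytes are the descriptors
 * 0x2c (32K L1 data), 0x30 (32K L1 instruction) and 0x43 (512K L2),
 * so l1d += 32, l1i += 32 and l2 += 512.
 */
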
/* pointer to _cpuid4_info array (for each cache leaf) */
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#define CPUID4_INFO_IDX(x,y)	(&((cpuid4_info[x])[y]))
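/*
 * E.g. CPUID4_INFO_IDX(2, 1) points at the _cpuid4_info for cache
 * leaf 1 of CPU 2, i.e. &cpuid4_info[2][1].
 */
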
#ifdef CONFIG_SMP
static void __devinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf;
	unsigned long num_threads_sharing;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		/* only this CPU sees the cache */
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else if (num_threads_sharing == smp_num_siblings)
		/* all HT siblings of this CPU share it */
		this_leaf->shared_cpu_map = cpu_sibling_map[cpu];
	else
		printk(KERN_INFO "Number of CPUs sharing cache didn't match "
				"any known set of CPUs\n");
}
#else
static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
#endif
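/*
 * E.g. on a hypothetical Hyper-Threaded CPU with two siblings per core
 * that share their L2, num_threads_sharing is 2 == smp_num_siblings,
 * so shared_cpu_map becomes the sibling map of this CPU.
 */
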
static void free_cache_attributes(unsigned int cpu)
{
	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;
}

static int __devinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info	*this_leaf;
	unsigned long		j;
	int			retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	cpuid4_info[cpu] = kmalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(cpuid4_info[cpu] == NULL))
		return -ENOMEM;
	memset(cpuid4_info[cpu], 0,
	    sizeof(struct _cpuid4_info) * num_cache_leaves);

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0))
			goto err_out;
		cache_shared_cpu_map_setup(cpu, j);
	}
	return 0;

err_out:
	free_cache_attributes(cpu);
	return -ENOMEM;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static struct kobject * cache_kobject[NR_CPUS];

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x,y)	(&((index_kobject[x])[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
		(struct _cpuid4_info *this_leaf, char *buf)		\
{									\
	return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

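/*
 * The third macro argument ("val") compensates for cpuid(4)'s
 * minus-one encoding where it applies: line size, partitions, ways
 * and sets are reported as value-1 (hence val = 1), e.g. a raw
 * ways_of_associativity of 7 stands for an 8-way cache. The level
 * field is reported directly, so it uses val = 0.
 */
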
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf (buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
{
	char mask_str[NR_CPUS];
	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
	return sprintf(buf, "%s\n", mask_str);
}
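/*
 * cpumask_scnprintf() emits the mask in hex; e.g. if CPUs 0 and 1
 * share this cache, reading shared_cpu_map returns "00000003"
 * (illustrative output, assuming NR_CPUS <= 32 so the mask fits one
 * 32-bit word).
 */
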
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);

static struct attribute * default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)

static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)
{
	/* all attributes here are read-only (no ->store), so do nothing */
	return 0;
}

static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(cache_kobject[cpu]);
	kfree(index_kobject[cpu]);
	cache_kobject[cpu] = NULL;
	index_kobject[cpu] = NULL;
	free_cache_attributes(cpu);
}

static int __devinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	if (num_cache_leaves == 0)
		return -ENOENT;

	detect_cache_attributes(cpu);
	if (cpuid4_info[cpu] == NULL)
		return -ENOENT;

	/* Allocate all required memory */
	cache_kobject[cpu] = kmalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(cache_kobject[cpu] == NULL))
		goto err_out;
	memset(cache_kobject[cpu], 0, sizeof(struct kobject));

	index_kobject[cpu] = kmalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(index_kobject[cpu] == NULL))
		goto err_out;
	memset(index_kobject[cpu], 0,
	    sizeof(struct _index_kobject) * num_cache_leaves);

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

/* Add/Remove cache interface for CPU device */
static int __devinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	cache_kobject[cpu]->parent = &sys_dev->kobj;
	kobject_set_name(cache_kobject[cpu], "%s", "cache");
	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
	retval = kobject_register(cache_kobject[cpu]);

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu,i);
		this_object->cpu = cpu;
		this_object->index = i;
		this_object->kobj.parent = cache_kobject[cpu];
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &ktype_cache;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			/* unwind the indexN kobjects registered so far */
			for (j = 0; j < i; j++) {
				kobject_unregister(
					&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
			}
			kobject_unregister(cache_kobject[cpu]);
			cpuid4_cache_sysfs_exit(cpu);
			break;
		}
	}
	return retval;
}
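/*
 * Sketch of the resulting sysfs layout for a CPU with three cache
 * leaves (paths assume the standard sysdev location of CPU devices):
 *
 *   /sys/devices/system/cpu/cpu0/cache/
 *       index0/{type,level,size,coherency_line_size,
 *               physical_line_partition,ways_of_associativity,
 *               number_of_sets,shared_cpu_map}
 *       index1/...
 *       index2/...
 */
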
static int __devexit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	for (i = 0; i < num_cache_leaves; i++)
		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
	kobject_unregister(cache_kobject[cpu]);
	cpuid4_cache_sysfs_exit(cpu);
	return 0;
}

static struct sysdev_driver cache_sysdev_driver = {
	.add	= cache_add_dev,
	.remove	= __devexit_p(cache_remove_dev),
};

/* Register/Unregister the cpu_cache driver */
static int __devinit cache_register_driver(void)
{
	if (num_cache_leaves == 0)
		return 0;

	return sysdev_driver_register(&cpu_sysdev_class, &cache_sysdev_driver);
}

device_initcall(cache_register_driver);