[linux-ginger.git] arch/x86/kernel/cpu/intel_cacheinfo.c

/*
 *	Routines to identify caches on Intel CPU.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/k8.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */
static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, 6144 },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, 2048 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, 1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, 2048 },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, 8192 },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, 2048 },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};

enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
};

unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. - which is currently true on AMD CPUs.

   In theory the TLBs could be reported as a fake cache type too (the raw
   data ends up in "dummy" below). Maybe later. */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };

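/*
 * Build a CPUID(4)-style leaf for AMD CPUs from the legacy L1/L2/L3 cache
 * information returned by CPUID 0x80000005 and 0x80000006.
 */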
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

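/*
 * Mark the L3 leaf (index 3) as supporting cache index disable, except on
 * family 0x11 and on family 0x10 models below 8 (see erratum #382).
 */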
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	if (index < 3)
		return;

	if (boot_cpu_data.x86 == 0x11)
		return;

	/* see erratum #382 */
	if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
		return;

	this_leaf->can_disable = 1;
}

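/*
 * Fill in one _cpuid4_info_regs leaf: CPUID leaf 4 on Intel, or the
 * emulation above on AMD. The total cache size is reconstructed as
 * sets * line size * physical line partitions * ways.
 */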
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}

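/* Count CPUID(4) leaves by iterating until a CACHE_TYPE_NULL leaf is seen. */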
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}

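/*
 * Detect the cache sizes of this CPU: prefer the CPUID(4) leaves, fall back
 * to the CPUID(2) descriptor table (still needed for the P4 trace cache),
 * print a summary and return the L2 size in KB.
 */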
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(KERN_CONT ", L1 D cache: %dK\n", l1d);
	else
		printk(KERN_CONT "\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

#ifdef CONFIG_SYSFS

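/*
 * Everything below implements the /sys/devices/system/cpu/cpuX/cache
 * interface: one "cache" kobject per CPU, with one "indexY" directory per
 * cache leaf.
 */
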
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))

#ifdef CONFIG_SMP

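/*
 * Record which CPUs share this cache leaf: Intel leaves use
 * num_threads_sharing and the APIC ID, while the AMD L3 (index 3) copies
 * the precomputed llc_shared_map.
 */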
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		struct cpuinfo_x86 *d;
		for_each_online_cpu(i) {
			if (!per_cpu(cpuid4_info, i))
				continue;
			d = &cpu_data(i);
			this_leaf = CPUID4_INFO_IDX(i, index);
			cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
				     d->llc_shared_map);
		}
		return;
	}
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}

#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
}

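/*
 * Runs on the target CPU (via smp_call_function_single()) to fill in all
 * cache leaves for that CPU and set up the shared_cpu_map of each one.
 */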
static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int			retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(index_kobject, x))[y]))

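/*
 * show_one_plus() generates a sysfs show routine for one CPUID(4) bit-field;
 * "val" is added so that fields encoded as value-minus-one are reported in
 * their natural form.
 */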
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

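/*
 * The cache_disable_0/1 attributes below read and write the two L3 cache
 * index disable slots in the AMD northbridge's miscellaneous PCI function
 * (config space offsets 0x1BC and 0x1C0). They are only valid on leaves
 * flagged by amd_check_l3_disable().
 */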
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int reg = 0;

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!dev)
		return -EINVAL;

	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
	return sprintf(buf, "%x\n", reg);
}

#define SHOW_CACHE_DISABLE(index)					\
static ssize_t								\
show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return show_cache_disable(this_leaf, buf, index);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
	const char *buf, size_t count, unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned long val = 0;
	unsigned int scrubber = 0;

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!dev)
		return -EINVAL;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	val |= 0xc0000000;

	pci_read_config_dword(dev, 0x58, &scrubber);
	scrubber &= ~0x1f000000;
	pci_write_config_dword(dev, 0x58, scrubber);

	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
	wbinvd();
	pci_write_config_dword(dev, 0x1BC + index * 4, val);
	return count;
}

#define STORE_CACHE_DISABLE(index)					\
static ssize_t								\
store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
			    const char *buf, size_t count)		\
{									\
	return store_cache_disable(this_leaf, buf, count, index);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
					show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
					show_cache_disable_1, store_cache_disable_1);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	&cache_disable_0.attr,
	&cache_disable_1.attr,
	NULL
};

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

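/*
 * CPU hotplug callback: create or tear down the sysfs cache directories as
 * CPUs come online or go offline.
 */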
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif