arch/x86/kernel/cpu/intel_cacheinfo.c
/*
 *	Routines to identify caches on Intel CPU.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>
#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5
struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)
/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },		/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },		/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },		/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },		/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },		/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
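
/*
 * CPUID(4) "deterministic cache parameters" register layouts, used both for
 * the native Intel leaf and for the AMD emulation further below.
 */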
enum _cache_type {
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_northbridge *nb;
};

struct _cpuid4_info {
	struct _cpuid4_info_regs base;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

unsigned short			num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};
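
/*
 * Map the associativity encoding reported by the AMD extended CPUID leaves
 * to an actual way count (0xffff denotes a fully associative cache).
 */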
static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
			 unsigned int);
};
#ifdef CONFIG_AMD_NB

/*
 * L3 cache descriptors
 */
static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
					int index)
{
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3)
		return;

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);
}
/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @nb: the northbridge holding the L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int slot)
{
	int index;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");
}
#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
			  unsigned int cpu)				\
{									\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache which indices we disable therefore a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}
/*
 * disable an L3 cache index by using a disable-slot
 *
 * @nb:    the northbridge holding the L3 cache descriptor
 * @cpu:   A CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
			    unsigned long index)
{
	int ret = 0;

	/* check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);
	if (ret >= 0)
		return -EEXIST;

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EINVAL;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			printk(KERN_WARNING "L3 disable slot %d in use!\n",
			       slot);
		return err;
	}
	return count;
}
#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count,		\
			   unsigned int cpu)				\
{									\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);
static ssize_t
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
{
	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t
store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
		unsigned int cpu)
{
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	if (strict_strtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}

static struct _cache_attr subcaches =
	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);
#else	/* CONFIG_AMD_NB */
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB */
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}
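
/* Count the CPUID(4) sub-leaves until a NULL cache type terminates the list. */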
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
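
/*
 * Probe the cache hierarchy via cpuid(4) where available, falling back to
 * the legacy cpuid(2) descriptor table. Fills in c->x86_cache_size and
 * returns the L2 size in KB.
 */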
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
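/*
 * Record which logical CPUs share this cache leaf in shared_cpu_map, based
 * on the APIC ID topology (or on cpu_llc_shared_mask for the AMD L3 case).
 */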
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i, sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return;
	}
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

		*retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}
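
/*
 * Allocate the per-CPU _cpuid4_info array and fill it on the target CPU
 * via smp_call_function_single().
 */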
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int			retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}
#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
				unsigned int cpu)			\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
					  unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
					   unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	switch (this_leaf->base.eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}
#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	NULL
};

#ifdef CONFIG_AMD_NB
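/*
 * Build the attribute list once, appending the L3 index-disable and
 * subcache-partitioning files when the northbridge supports them.
 */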
static struct attribute ** __cpuinit amd_l3_attrs(void)
{
	static struct attribute **attrs;
	int n;

	if (attrs)
		return attrs;

	n = sizeof (default_attrs) / sizeof (struct attribute *);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
	if (attrs == NULL)
		return attrs = default_attrs;

	for (n = 0; default_attrs[n]; n++)
		attrs[n] = default_attrs[n];

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		attrs[n++] = &cache_disable_0.attr;
		attrs[n++] = &cache_disable_1.attr;
	}

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		attrs[n++] = &subcaches.attr;

	return attrs;
}
#endif
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, this_leaf->cpu) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count, this_leaf->cpu) :
		0;
	return ret;
}
static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info   *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
		if (this_leaf->base.nb)
			ktype_cache.default_attrs = amd_l3_attrs();
#endif
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}
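
/* CPU hotplug notifier: create or tear down the sysfs entries for a CPU. */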
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};
static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif