/*
 *	Routines to identify caches on Intel CPUs.
 *
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */
#include <linux/slab.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>
struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};
#define MB(x)	((x) * 1024)
/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */
static const struct _cache_table cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },		/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },		/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },		/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },		/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },		/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0 }
};
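/*
 * CPUID(4) "deterministic cache parameters" register layouts: each union
 * below exposes both the raw 32-bit register and its decoded bit-fields.
 */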
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};
union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};
union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_northbridge *nb;
};
unsigned short num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy"). */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};
union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};
union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};
static const unsigned short assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};
static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };
static const enum cache_type cache_type_map[] = {
	[CTYPE_NULL] = CACHE_TYPE_NOCACHE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INST] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};
static void
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
/*
 * L3 cache descriptors
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @l3: L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}
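
/*
 * Sysfs helper: show which L3 index (if any) is currently disabled via
 * disable slot @slot, or "FREE" when the slot is unused.
 */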
static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
				  unsigned int slot)
{
	int index;
	struct amd_northbridge *nb = this_leaf->priv;

	index = amd_get_l3_disable_slot(nb, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");
}
#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
cache_disable_##slot##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);		\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
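
/*
 * Disable L3 cache index @idx through disable slot @slot in every
 * populated subcache of the northbridge, flushing the caches on @cpu so
 * the index is actually evicted.
 */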
static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache which indices we disable therefore a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}
/*
 * disable an L3 cache index by using a disable-slot
 *
 * @l3:    L3 cache descriptor
 * @cpu:   A CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
			    unsigned long index)
{
	int ret = 0;

	/* check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);
	if (ret >= 0)
		return -EEXIST;

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EEXIST;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}
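
/*
 * Sysfs helper: parse a decimal index from @buf and try to disable it
 * through disable slot @slot. Requires CAP_SYS_ADMIN.
 */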
static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;
	struct amd_northbridge *nb = this_leaf->priv;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	cpu = cpumask_first(&this_leaf->shared_cpu_map);

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(nb, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			pr_warning("L3 slot %d in use/index already disabled!\n",
				   slot);
		return err;
	}
	return count;
}
#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
cache_disable_##slot##_store(struct device *dev,			\
			     struct device_attribute *attr,		\
			     const char *buf, size_t count)		\
{									\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);		\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)
static ssize_t subcaches_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	int cpu = cpumask_first(&this_leaf->shared_cpu_map);

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}
static ssize_t subcaches_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	int cpu = cpumask_first(&this_leaf->shared_cpu_map);
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}
static DEVICE_ATTR_RW(cache_disable_0);
static DEVICE_ATTR_RW(cache_disable_1);
static DEVICE_ATTR_RW(subcaches);
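
/*
 * Expose the AMD L3 attributes only when the leaf has a northbridge
 * attached and the corresponding feature is advertised.
 */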
static umode_t
cache_private_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

	if (!this_leaf->priv)
		return 0;

	if ((attr == &dev_attr_subcaches.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return mode;

	if ((attr == &dev_attr_cache_disable_0.attr ||
	     attr == &dev_attr_cache_disable_1.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return mode;

	return 0;
}
static struct attribute_group cache_private_group = {
	.is_visible = cache_private_attrs_is_visible,
};
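
/* Build the private attribute list once, based on the available L3 features. */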
static void init_amd_l3_attrs(void)
{
	int n = 1;
	static struct attribute **amd_l3_attrs;

	if (amd_l3_attrs) /* already initialized */
		return;

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
	if (!amd_l3_attrs)
		return;

	n = 0;
	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
		amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
	}
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		amd_l3_attrs[n++] = &dev_attr_subcaches.attr;

	cache_private_group.attrs = amd_l3_attrs;
}
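
/*
 * Hand the AMD-specific attribute group to the generic cacheinfo code for
 * L3 leaves that carry a northbridge pointer.
 */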
const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
{
	struct amd_northbridge *nb = this_leaf->priv;

	if (this_leaf->level < 3 || !nb)
		return NULL;

	if (nb && nb->l3_cache.indices)
		init_amd_l3_attrs();

	return &cache_private_group;
}
static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3)
		return;

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);
}
#else
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
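
/*
 * Fill *this_leaf from the deterministic cache parameters leaf: CPUID(4)
 * on Intel, CPUID(0x8000001d) or the amd_cpuid4() emulation on AMD.
 */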
static int
cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (cpu_has_topoext)
			cpuid_count(0x8000001d, index, &eax.full,
				    &ebx.full, &ecx.full, &edx);
		else
			amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CTYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}
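
/*
 * Walk the deterministic cache parameter subleaves until a CTYPE_NULL
 * entry terminates the list; the count is the number of cache leaves.
 */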
static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{
	unsigned int		eax, ebx, ecx, edx, op;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	if (c->x86_vendor == X86_VENDOR_AMD)
		op = 0x8000001d;
	else
		op = 4;

	do {
		++i;
		/* Do cpuid(op) loop to find out num_cache_leaves */
		cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CTYPE_NULL);
	return i;
}
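
/*
 * On AMD the number of cache leaves is either probed via the TOPOEXT
 * leaves or derived from whether CPUID 0x80000006 reports an L3.
 */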
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
	if (cpu_has_topoext) {
		num_cache_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}
}
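
/*
 * Determine L1/L2/L3 sizes for this CPU: prefer the deterministic CPUID(4)
 * leaves, fall back to the CPUID(2) descriptor table, and record the
 * last-level-cache ID used for topology. Returns the L2 size in KB.
 */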
unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
	unsigned int cpu = c->cpu_index;

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves(c);
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf = {};
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval < 0)
				continue;

			switch (this_leaf.eax.split.level) {
			case 1:
				if (this_leaf.eax.split.type == CTYPE_DATA)
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type == CTYPE_INST)
					new_l1i = this_leaf.size/1024;
				break;
			case 2:
				new_l2 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->apicid & ~((1 << index_msb) - 1);
				break;
			case 3:
				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->apicid & ~((1 << index_msb) - 1);
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * the trace cache.
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;

						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
		per_cpu(cpu_llc_id, cpu) = l2_id;
	}

	if (new_l3) {
		l3 = new_l3;
		per_cpu(cpu_llc_id, cpu) = l3_id;
	}

	/*
	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
	 * turn means that the only possibility is SMT (as indicated in
	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
	 * c->phys_proc_id.
	 */
	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
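
/*
 * AMD path for building shared_cpu_map: derive the sharing APIC-ID range
 * from the TOPOEXT sharing count, or use cpu_llc_shared_mask() for the L3
 * when TOPOEXT is not available.
 */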
static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
				    struct _cpuid4_info_regs *base)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf;
	int i, sibling;

	if (cpu_has_topoext) {
		unsigned int apicid, nshared, first, last;

		this_leaf = this_cpu_ci->info_list + index;
		nshared = base->eax.split.num_threads_sharing + 1;
		apicid = cpu_data(cpu).apicid;
		first = apicid - (apicid % nshared);
		last = first + nshared - 1;

		for_each_online_cpu(i) {
			this_cpu_ci = get_cpu_cacheinfo(i);
			if (!this_cpu_ci->info_list)
				continue;

			apicid = cpu_data(i).apicid;
			if ((apicid < first) || (apicid > last))
				continue;

			this_leaf = this_cpu_ci->info_list + index;

			for_each_online_cpu(sibling) {
				apicid = cpu_data(sibling).apicid;
				if ((apicid < first) || (apicid > last))
					continue;
				cpumask_set_cpu(sibling,
						&this_leaf->shared_cpu_map);
			}
		}
	} else if (index == 3) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			this_cpu_ci = get_cpu_cacheinfo(i);
			if (!this_cpu_ci->info_list)
				continue;
			this_leaf = this_cpu_ci->info_list + index;
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				cpumask_set_cpu(sibling,
						&this_leaf->shared_cpu_map);
			}
		}
	} else
		return 0;

	return 1;
}
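
/*
 * Build shared_cpu_map for one cache leaf. AMD CPUs take the vendor
 * specific path above; otherwise CPUs share the leaf when their APIC IDs
 * agree above the num_threads_sharing boundary.
 */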
static void __cache_cpumap_setup(unsigned int cpu, int index,
				 struct _cpuid4_info_regs *base)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (__cache_amd_cpumap_setup(cpu, index, base))
			return;
	}

	this_leaf = this_cpu_ci->info_list + index;
	num_threads_sharing = 1 + base->eax.split.num_threads_sharing;

	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
	if (num_threads_sharing == 1)
		return;

	index_msb = get_count_order(num_threads_sharing);

	for_each_online_cpu(i)
		if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sibling_leaf = sib_cpu_ci->info_list + index;
			cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);
		}
}
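
/* Translate one _cpuid4_info_regs entry into the generic struct cacheinfo. */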
static void ci_leaf_init(struct cacheinfo *this_leaf,
			 struct _cpuid4_info_regs *base)
{
	this_leaf->level = base->eax.split.level;
	this_leaf->type = cache_type_map[base->eax.split.type];
	this_leaf->coherency_line_size =
				base->ebx.split.coherency_line_size + 1;
	this_leaf->ways_of_associativity =
				base->ebx.split.ways_of_associativity + 1;
	this_leaf->size = base->size;
	this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1;
	this_leaf->physical_line_partition =
				base->ebx.split.physical_line_partition + 1;
	this_leaf->priv = base->nb;
}
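
/*
 * Callbacks invoked on the target CPU through the
 * DEFINE_SMP_CALL_CACHE_FUNCTION() wrappers at the end of the file.
 */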
static int __init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	if (!num_cache_leaves)
		return -ENOENT;
	if (!this_cpu_ci)
		return -EINVAL;
	this_cpu_ci->num_levels = 3;
	this_cpu_ci->num_leaves = num_cache_leaves;
	return 0;
}
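
/* Fill every cache leaf for @cpu and wire up its shared_cpu_map. */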
static int __populate_cache_leaves(unsigned int cpu)
{
	unsigned int idx, ret;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	struct _cpuid4_info_regs id4_regs = {};

	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
		ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
		if (ret)
			return ret;
		ci_leaf_init(this_leaf++, &id4_regs);
		__cache_cpumap_setup(cpu, idx, &id4_regs);
	}

	return 0;
}
DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)