// SPDX-License-Identifier: GPL-2.0
/*
 *	Routines to identify caches on Intel CPU.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/slab.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/sysfs.h>
#include <linux/pci.h>

#include <asm/cpufeature.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>

#include "cpu.h"

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

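/*
 * Legacy CPUID(2) descriptor table entry: maps one descriptor byte to a
 * cache level (LVL_*) and a size in KB (K-uops for the trace cache).
 */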
struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },		/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },		/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },		/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },		/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },		/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};

enum _cache_type {
	CTYPE_NULL = 0,
	CTYPE_DATA = 1,
	CTYPE_INST = 2,
	CTYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned int id;
	unsigned long size;
	struct amd_northbridge *nb;
};

static unsigned short num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

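/*
 * Map the associativity encoding used by the legacy AMD cache CPUID leaves
 * (0x80000005/0x80000006) to an actual number of ways.
 */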
static const unsigned short assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

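/*
 * CPUID(4)-style cache level and type for each emulated leaf
 * (0 = L1d, 1 = L1i, 2 = L2, 3 = L3).
 */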
static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };

static const enum cache_type cache_type_map[] = {
	[CTYPE_NULL] = CACHE_TYPE_NOCACHE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INST] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};

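/*
 * Build a CPUID(4)-style description of cache leaf @leaf from the legacy
 * AMD cache CPUID leaves 0x80000005 (L1) and 0x80000006 (L2/L3).
 */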
static void
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
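		/* fall through - leaves 0 and 1 differ only in which L1 they read */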
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)

/*
 * L3 cache descriptors
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}

/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @l3: L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}

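/*
 * sysfs "cache_disable_0"/"cache_disable_1" attributes: show which L3 index
 * (if any) a disable slot currently holds, or store an index to disable.
 */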
static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
				  unsigned int slot)
{
	int index;
	struct amd_northbridge *nb = this_leaf->priv;

	index = amd_get_l3_disable_slot(nb, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");
}

#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
cache_disable_##slot##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);		\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache which indices we disable therefore a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}

/*
 * disable a L3 cache index by using a disable-slot
 *
 * @l3:    L3 cache descriptor
 * @cpu:   A CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
				   unsigned slot, unsigned long index)
{
	int ret = 0;

	/* check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);
	if (ret >= 0)
		return -EEXIST;

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EEXIST;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}

static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;
	struct amd_northbridge *nb = this_leaf->priv;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	cpu = cpumask_first(&this_leaf->shared_cpu_map);

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(nb, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			pr_warn("L3 slot %d in use/index already disabled!\n",
				slot);
		return err;
	}
	return count;
}

#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
cache_disable_##slot##_store(struct device *dev,			\
			     struct device_attribute *attr,		\
			     const char *buf, size_t count)		\
{									\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);		\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

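/*
 * sysfs "subcaches" attribute: read/write the set of L3 subcaches assigned
 * to this node (only visible with the AMD_NB_L3_PARTITIONING feature).
 */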
static ssize_t subcaches_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	int cpu = cpumask_first(&this_leaf->shared_cpu_map);

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t subcaches_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	int cpu = cpumask_first(&this_leaf->shared_cpu_map);
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(cache_disable_0);
static DEVICE_ATTR_RW(cache_disable_1);
static DEVICE_ATTR_RW(subcaches);

static umode_t
cache_private_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

	if (!this_leaf->priv)
		return 0;

	if ((attr == &dev_attr_subcaches.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return mode;

	if ((attr == &dev_attr_cache_disable_0.attr ||
	     attr == &dev_attr_cache_disable_1.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return mode;

	return 0;
}

static struct attribute_group cache_private_group = {
	.is_visible = cache_private_attrs_is_visible,
};

static void init_amd_l3_attrs(void)
{
	int n = 1;
	static struct attribute **amd_l3_attrs;

	if (amd_l3_attrs) /* already initialized */
		return;

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
	if (!amd_l3_attrs)
		return;

	n = 0;
	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
		amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
	}
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		amd_l3_attrs[n++] = &dev_attr_subcaches.attr;

	cache_private_group.attrs = amd_l3_attrs;
}

const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
{
	struct amd_northbridge *nb = this_leaf->priv;

	if (this_leaf->level < 3 || !nb)
		return NULL;

	if (nb && nb->l3_cache.indices)
		init_amd_l3_attrs();

	return &cache_private_group;
}

static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3)
		return;

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);
}
#else
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */

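/*
 * Fill @this_leaf for cache leaf @index: use CPUID(4) on Intel,
 * CPUID(0x8000001d) on Hygon and on AMD with TOPOEXT, and the amd_cpuid4()
 * emulation otherwise.
 */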
static int
cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (boot_cpu_has(X86_FEATURE_TOPOEXT))
			cpuid_count(0x8000001d, index, &eax.full,
				    &ebx.full, &ecx.full, &edx);
		else
			amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		cpuid_count(0x8000001d, index, &eax.full,
			    &ebx.full, &ecx.full, &edx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CTYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}

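/*
 * Count the cache leaves by iterating the deterministic cache parameters
 * leaf (CPUID 4, or 0x8000001d on AMD/Hygon) until a CTYPE_NULL entry is
 * returned.
 */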
static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{
	unsigned int		eax, ebx, ecx, edx, op;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	if (c->x86_vendor == X86_VENDOR_AMD ||
	    c->x86_vendor == X86_VENDOR_HYGON)
		op = 0x8000001d;
	else
		op = 4;

	do {
		++i;
		/* Do cpuid(op) loop to find out num_cache_leaves */
		cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CTYPE_NULL);
	return i;
}

void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
{
	/*
	 * We may have multiple LLCs if L3 caches exist, so check if we
	 * have an L3 cache by looking at the L3 cache CPUID leaf.
	 */
	if (!cpuid_edx(0x80000006))
		return;

	if (c->x86 < 0x17) {
		/* LLC is at the node level. */
		per_cpu(cpu_llc_id, cpu) = node_id;
	} else if (c->x86 == 0x17 &&
		   c->x86_model >= 0 && c->x86_model <= 0x1F) {
		/*
		 * LLC is at the core complex level.
		 * Core complex ID is ApicId[3] for these processors.
		 */
		per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
	} else {
		/*
		 * LLC ID is calculated from the number of threads sharing the
		 * cache.
		 */
		u32 eax, ebx, ecx, edx, num_sharing_cache = 0;
		u32 llc_index = find_num_cache_leaves(c) - 1;

		cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx);
		if (eax)
			num_sharing_cache = ((eax >> 14) & 0xfff) + 1;

		if (num_sharing_cache) {
			int bits = get_count_order(num_sharing_cache);

			per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
		}
	}
}

void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
{
	/*
	 * We may have multiple LLCs if L3 caches exist, so check if we
	 * have an L3 cache by looking at the L3 cache CPUID leaf.
	 */
	if (!cpuid_edx(0x80000006))
		return;

	/*
	 * LLC is at the core complex level.
	 * Core complex ID is ApicId[3] for these processors.
	 */
	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
}

void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		num_cache_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}
}

void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
{
	num_cache_leaves = find_num_cache_leaves(c);
}

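/*
 * Derive the legacy cache size fields (and, under CONFIG_SMP, cpu_llc_id)
 * from CPUID(4) when it is available, falling back to the CPUID(2)
 * descriptor table for older CPUs and for the P4 trace cache.
 */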
void init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_SMP
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves(c);
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf = {};
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval < 0)
				continue;

			switch (this_leaf.eax.split.level) {
			case 1:
				if (this_leaf.eax.split.type == CTYPE_DATA)
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type == CTYPE_INST)
					new_l1i = this_leaf.size/1024;
				break;
			case 2:
				new_l2 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->apicid & ~((1 << index_msb) - 1);
				break;
			case 3:
				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->apicid & ~((1 << index_msb) - 1);
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;

						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_SMP
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_SMP
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

#ifdef CONFIG_SMP
	/*
	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
	 * turns means that the only possibility is SMT (as indicated in
	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
	 * c->phys_proc_id.
	 */
	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
#endif

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	if (!l2)
		cpu_detect_cache_sizes(c);
}

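/*
 * AMD/Hygon variant of the shared_cpu_map setup: L3 leaves use the
 * pre-computed cpu_llc_shared_mask, other levels group CPUs by APIC ID
 * range when TOPOEXT is available. Returns 1 if the map was set up here.
 */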
static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
				    struct _cpuid4_info_regs *base)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf;
	int i, sibling;

	/*
	 * For L3, always use the pre-calculated cpu_llc_shared_mask
	 * to derive shared_cpu_map.
	 */
	if (index == 3) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			this_cpu_ci = get_cpu_cacheinfo(i);
			if (!this_cpu_ci->info_list)
				continue;
			this_leaf = this_cpu_ci->info_list + index;
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				cpumask_set_cpu(sibling,
						&this_leaf->shared_cpu_map);
			}
		}
	} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		unsigned int apicid, nshared, first, last;

		nshared = base->eax.split.num_threads_sharing + 1;
		apicid = cpu_data(cpu).apicid;
		first = apicid - (apicid % nshared);
		last = first + nshared - 1;

		for_each_online_cpu(i) {
			this_cpu_ci = get_cpu_cacheinfo(i);
			if (!this_cpu_ci->info_list)
				continue;

			apicid = cpu_data(i).apicid;
			if ((apicid < first) || (apicid > last))
				continue;

			this_leaf = this_cpu_ci->info_list + index;

			for_each_online_cpu(sibling) {
				apicid = cpu_data(sibling).apicid;
				if ((apicid < first) || (apicid > last))
					continue;
				cpumask_set_cpu(sibling,
						&this_leaf->shared_cpu_map);
			}
		}
	} else
		return 0;

	return 1;
}

static void __cache_cpumap_setup(unsigned int cpu, int index,
				 struct _cpuid4_info_regs *base)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD ||
	    c->x86_vendor == X86_VENDOR_HYGON) {
		if (__cache_amd_cpumap_setup(cpu, index, base))
			return;
	}

	this_leaf = this_cpu_ci->info_list + index;
	num_threads_sharing = 1 + base->eax.split.num_threads_sharing;

	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
	if (num_threads_sharing == 1)
		return;

	index_msb = get_count_order(num_threads_sharing);

	for_each_online_cpu(i)
		if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue;/* skip if itself or no cacheinfo */

			sibling_leaf = sib_cpu_ci->info_list + index;
			cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);
		}
}

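/* Translate the raw CPUID(4) data into the generic struct cacheinfo leaf. */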
static void ci_leaf_init(struct cacheinfo *this_leaf,
			 struct _cpuid4_info_regs *base)
{
	this_leaf->id = base->id;
	this_leaf->attributes = CACHE_ID;
	this_leaf->level = base->eax.split.level;
	this_leaf->type = cache_type_map[base->eax.split.type];
	this_leaf->coherency_line_size =
				base->ebx.split.coherency_line_size + 1;
	this_leaf->ways_of_associativity =
				base->ebx.split.ways_of_associativity + 1;
	this_leaf->size = base->size;
	this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1;
	this_leaf->physical_line_partition =
				base->ebx.split.physical_line_partition + 1;
	this_leaf->priv = base->nb;
}

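/* Tell the generic cacheinfo code how many levels and leaves this CPU has. */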
static int __init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	if (!num_cache_leaves)
		return -ENOENT;
	if (!this_cpu_ci)
		return -EINVAL;
	this_cpu_ci->num_levels = 3;
	this_cpu_ci->num_leaves = num_cache_leaves;
	return 0;
}

/*
 * The max shared threads number comes from CPUID.4:EAX[25-14] with input
 * ECX as cache index. Then right shift apicid by the number's order to get
 * cache id for this cache node.
 */
static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	unsigned long num_threads_sharing;
	int index_msb;

	num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
	index_msb = get_count_order(num_threads_sharing);
	id4_regs->id = c->apicid >> index_msb;
}

static int __populate_cache_leaves(unsigned int cpu)
{
	unsigned int idx, ret;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	struct _cpuid4_info_regs id4_regs = {};

	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
		ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
		if (ret)
			return ret;
		get_cache_id(cpu, &id4_regs);
		ci_leaf_init(this_leaf++, &id4_regs);
		__cache_cpumap_setup(cpu, idx, &id4_regs);
	}
	this_cpu_ci->cpu_map_populated = true;

	return 0;
}

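/*
 * Hook the x86 implementations into the generic cacheinfo framework; the
 * DEFINE_SMP_CALL_CACHE_FUNCTION() wrappers from <linux/cacheinfo.h> run
 * __init_cache_level()/__populate_cache_leaves() on the target CPU.
 */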
DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)