// SPDX-License-Identifier: GPL-2.0
/*
 * Routines to identify caches on Intel CPU.
 *
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/slab.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/sysfs.h>
#include <linux/pci.h>
#include <linux/stop_machine.h>

#include <asm/cpufeature.h>
#include <asm/cacheinfo.h>
#include <asm/amd_nb.h>
#include <asm/tlbflush.h>
/* Shared last level cache maps */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);

/* Shared L2 cache maps */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);

static cpumask_var_t cpu_cacheinfo_mask;

/* Kernel controls MTRR and/or PAT MSRs. */
unsigned int memory_caching_control __ro_after_init;

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },		/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },		/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },		/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },		/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },		/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned int id;
	unsigned long size;
	struct amd_northbridge *nb;
};

static unsigned short num_cache_leaves;

/*
 * AMD doesn't have CPUID4. Emulate it here to report the same
 * information to the user. This makes some assumptions about the machine:
 * L2 not shared, no SMT etc. that is currently true on AMD CPUs.
 *
 * In theory the TLBs could be reported as fake type (they are in "dummy").
 */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

static const unsigned short assocs[] = {
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };

static const enum cache_type cache_type_map[] = {
	[CTYPE_NULL] = CACHE_TYPE_NOCACHE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INST] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};

static void
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
	   union _cpuid4_leaf_ebx *ebx,
	   union _cpuid4_leaf_ecx *ecx)
{
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	assoc		= assocs[l1->assoc];
	line_size	= l1->line_size;
	lines_per_tag	= l1->lines_per_tag;
	size_in_kb	= l1->size_in_kb;

	assoc		= assocs[l2.assoc];
	line_size	= l2.line_size;
	lines_per_tag	= l2.lines_per_tag;
	/* cpu_data has errata corrections for K7 applied */
	size_in_kb	= __this_cpu_read(cpu_info.x86_cache_size);

	assoc		= assocs[l3.assoc];
	line_size	= l3.line_size;
	lines_per_tag	= l3.lines_per_tag;
	size_in_kb	= l3.size_encoded * 512;
	if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
		size_in_kb = size_in_kb >> 1;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = topology_num_cores_per_package();

	eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
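
	/*
	 * Worked example (illustrative values, not taken from any spec text):
	 * a 512 KB, 16-way L2 with 64-byte lines and one line per tag is
	 * reported as coherency_line_size = 63, ways_of_associativity = 15,
	 * physical_line_partition = 0 and
	 * number_of_sets = (512 * 1024) / 64 / 16 - 1 = 511, i.e. 512 sets.
	 */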
}

#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)

/*
 * L3 cache descriptors
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
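
	/*
	 * Example (assumed register values): if all subcache-disable bits in
	 * the 0x1C4 register are clear on a family 0x15 part, sc0 and sc1
	 * each evaluate to 2, so l3->indices = (2 << 10) - 1 = 2047.
	 */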
}

/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @l3: L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}

static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
				  unsigned int slot)
{
	struct amd_northbridge *nb = this_leaf->priv;

	index = amd_get_l3_disable_slot(nb, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");
}

#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
cache_disable_##slot##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);		\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{
	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache which indices we disable therefore a simple wbinvd()
		 * is not sufficient.
		 */
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}

/*
 * disable a L3 cache index by using a disable-slot
 *
 * @l3:    L3 cache descriptor
 * @cpu:   A CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
				   unsigned slot, unsigned long index)
{
	/* check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EEXIST;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}

static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	struct amd_northbridge *nb = this_leaf->priv;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	cpu = cpumask_first(&this_leaf->shared_cpu_map);

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(nb, cpu, slot, val);
	if (err)
		pr_warn("L3 slot %d in use/index already disabled!\n",
			slot);

#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
cache_disable_##slot##_store(struct device *dev,			\
			     struct device_attribute *attr,		\
			     const char *buf, size_t count)		\
{									\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);		\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static ssize_t subcaches_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	int cpu = cpumask_first(&this_leaf->shared_cpu_map);

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t subcaches_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	int cpu = cpumask_first(&this_leaf->shared_cpu_map);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(cache_disable_0);
static DEVICE_ATTR_RW(cache_disable_1);
static DEVICE_ATTR_RW(subcaches);

static umode_t
cache_private_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

	if (!this_leaf->priv)
		return 0;

	if ((attr == &dev_attr_subcaches.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return mode;

	if ((attr == &dev_attr_cache_disable_0.attr ||
	     attr == &dev_attr_cache_disable_1.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return mode;

	return 0;
}

static struct attribute_group cache_private_group = {
	.is_visible = cache_private_attrs_is_visible,
};

static void init_amd_l3_attrs(void)
{
	static struct attribute **amd_l3_attrs;

	if (amd_l3_attrs) /* already initialized */
		return;

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
		amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
	}
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		amd_l3_attrs[n++] = &dev_attr_subcaches.attr;

	cache_private_group.attrs = amd_l3_attrs;
}

const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
{
	struct amd_northbridge *nb = this_leaf->priv;

	if (this_leaf->level < 3 || !nb)
		return NULL;

	if (nb && nb->l3_cache.indices)
		init_amd_l3_attrs();

	return &cache_private_group;
}

static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
	/* only for L3, and not in virtualized environments */

	node = topology_amd_node_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);
}
#else
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */

static int
cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (boot_cpu_has(X86_FEATURE_TOPOEXT))
			cpuid_count(0x8000001d, index, &eax.full,
				    &ebx.full, &ecx.full, &edx);
		else
			amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		cpuid_count(0x8000001d, index, &eax.full,
			    &ebx.full, &ecx.full, &edx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CTYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
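
	/*
	 * Example (made-up leaf values): 1024 sets, 64-byte lines, one line
	 * per physical partition and 16 ways give
	 * size = 1024 * 64 * 1 * 16 = 1 MB.
	 */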
	return 0;
}

static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{
	unsigned int		eax, ebx, ecx, edx, op;
	union _cpuid4_leaf_eax	cache_eax;

	if (c->x86_vendor == X86_VENDOR_AMD ||
	    c->x86_vendor == X86_VENDOR_HYGON)
		op = 0x8000001d;
	else
		op = 4;

	do {
		/* Do cpuid(op) loop to find out num_cache_leaves */
		cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CTYPE_NULL);

	return i;
}

void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, u16 die_id)
{
	/*
	 * We may have multiple LLCs if L3 caches exist, so check if we
	 * have an L3 cache by looking at the L3 cache CPUID leaf.
	 */
	if (!cpuid_edx(0x80000006))
		return;

	if (c->x86 < 0x17) {
		/* LLC is at the node level. */
		c->topo.llc_id = die_id;
	} else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
		/*
		 * LLC is at the core complex level.
		 * Core complex ID is ApicId[3] for these processors.
		 */
		c->topo.llc_id = c->topo.apicid >> 3;
	} else {
		/*
		 * LLC ID is calculated from the number of threads sharing the
		 * cache.
		 */
		u32 eax, ebx, ecx, edx, num_sharing_cache = 0;
		u32 llc_index = find_num_cache_leaves(c) - 1;

		cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx);

		num_sharing_cache = ((eax >> 14) & 0xfff) + 1;

		if (num_sharing_cache) {
			int bits = get_count_order(num_sharing_cache);

			c->topo.llc_id = c->topo.apicid >> bits;
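
			/*
			 * Example (hypothetical part): EAX[25:14] = 7 reports
			 * 8 threads sharing the L3, so bits = 3 and the LLC
			 * ID is the APIC ID with its low three bits stripped.
			 */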
		}
	}
}

void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c)
{
	/*
	 * We may have multiple LLCs if L3 caches exist, so check if we
	 * have an L3 cache by looking at the L3 cache CPUID leaf.
	 */
	if (!cpuid_edx(0x80000006))
		return;

	/*
	 * LLC is at the core complex level.
	 * Core complex ID is ApicId[3] for these processors.
	 */
	c->topo.llc_id = c->topo.apicid >> 3;
}

void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		num_cache_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
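		/*
		 * CPUID 0x80000006 EDX describes the L3 cache; the 0xf000
		 * mask below tests its associativity field (bits 15:12), so
		 * a non-zero value means an L3 exists and there are four
		 * cache leaves (L1d, L1i, L2, L3) instead of three.
		 */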
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}
}

void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
{
	num_cache_leaves = find_num_cache_leaves(c);
}

void init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves(c);
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf = {};

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval < 0)
				continue;

			switch (this_leaf.eax.split.level) {
			case 1:
				if (this_leaf.eax.split.type == CTYPE_DATA)
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type == CTYPE_INST)
					new_l1i = this_leaf.size/1024;
				break;
			case 2:
				new_l2 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->topo.apicid & ~((1 << index_msb) - 1);
				break;
			case 3:
				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->topo.apicid & ~((1 << index_msb) - 1);
				break;
			}
		}
	}

	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache only.
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;

						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l2) {
		l2 = new_l2;
		c->topo.llc_id = l2_id;
		c->topo.l2c_id = l2_id;
	}

	if (new_l3) {
		l3 = new_l3;
		c->topo.llc_id = l3_id;
	}

	/*
	 * If llc_id is not yet set, this means cpuid_level < 4 which in
	 * turn means that the only possibility is SMT (as indicated in
	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
	 * that SMT shares all caches, we can unconditionally set cpu_llc_id
	 * to c->topo.pkg_id.
	 */
	if (c->topo.llc_id == BAD_APICID)
		c->topo.llc_id = c->topo.pkg_id;

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	if (!l2)
		cpu_detect_cache_sizes(c);
}

static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
				    struct _cpuid4_info_regs *base)
{
	struct cpu_cacheinfo *this_cpu_ci;
	struct cacheinfo *this_leaf;

	/*
	 * For L3, always use the pre-calculated cpu_llc_shared_mask
	 * to derive shared_cpu_map.
	 */
	if (index == 3) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			this_cpu_ci = get_cpu_cacheinfo(i);
			if (!this_cpu_ci->info_list)
				continue;
			this_leaf = this_cpu_ci->info_list + index;
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				cpumask_set_cpu(sibling,
						&this_leaf->shared_cpu_map);
			}
		}
	} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		unsigned int apicid, nshared, first, last;

		nshared = base->eax.split.num_threads_sharing + 1;
		apicid = cpu_data(cpu).topo.apicid;
		first = apicid - (apicid % nshared);
		last = first + nshared - 1;
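
		/*
		 * Example (assumed values): with nshared = 8 and apicid = 19,
		 * first = 19 - (19 % 8) = 16 and last = 23, so APIC IDs
		 * 16..23 are treated as sharing this cache.
		 */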

		for_each_online_cpu(i) {
			this_cpu_ci = get_cpu_cacheinfo(i);
			if (!this_cpu_ci->info_list)
				continue;

			apicid = cpu_data(i).topo.apicid;
			if ((apicid < first) || (apicid > last))
				continue;

			this_leaf = this_cpu_ci->info_list + index;

			for_each_online_cpu(sibling) {
				apicid = cpu_data(sibling).topo.apicid;
				if ((apicid < first) || (apicid > last))
					continue;

				cpumask_set_cpu(sibling,
						&this_leaf->shared_cpu_map);
			}
		}
	} else
		return 0;

	return 1;
}

static void __cache_cpumap_setup(unsigned int cpu, int index,
				 struct _cpuid4_info_regs *base)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD ||
	    c->x86_vendor == X86_VENDOR_HYGON) {
		if (__cache_amd_cpumap_setup(cpu, index, base))
			return;
	}

	this_leaf = this_cpu_ci->info_list + index;
	num_threads_sharing = 1 + base->eax.split.num_threads_sharing;

	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
	if (num_threads_sharing == 1)
		return;

	index_msb = get_count_order(num_threads_sharing);

	for_each_online_cpu(i)
		if (cpu_data(i).topo.apicid >> index_msb == c->topo.apicid >> index_msb) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			sibling_leaf = sib_cpu_ci->info_list + index;
			cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);
		}
}

static void ci_leaf_init(struct cacheinfo *this_leaf,
			 struct _cpuid4_info_regs *base)
{
	this_leaf->id = base->id;
	this_leaf->attributes = CACHE_ID;
	this_leaf->level = base->eax.split.level;
	this_leaf->type = cache_type_map[base->eax.split.type];
	this_leaf->coherency_line_size =
				base->ebx.split.coherency_line_size + 1;
	this_leaf->ways_of_associativity =
				base->ebx.split.ways_of_associativity + 1;
	this_leaf->size = base->size;
	this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1;
	this_leaf->physical_line_partition =
				base->ebx.split.physical_line_partition + 1;
	this_leaf->priv = base->nb;
}

int init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	if (!num_cache_leaves)
		return -ENOENT;

	this_cpu_ci->num_levels = 3;
	this_cpu_ci->num_leaves = num_cache_leaves;

	return 0;
}

/*
 * The max shared threads number comes from CPUID.4:EAX[25-14] with input
 * ECX as cache index. Then right shift apicid by the number's order to get
 * cache id for this cache node.
 */
static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	unsigned long num_threads_sharing;

	num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
	index_msb = get_count_order(num_threads_sharing);
	id4_regs->id = c->topo.apicid >> index_msb;
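
	/*
	 * Example (assumed values): if 12 threads share the cache,
	 * num_threads_sharing = 12 and index_msb = get_count_order(12) = 4,
	 * so all threads whose APIC IDs agree above the low four bits get
	 * the same cache id.
	 */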
}

int populate_cache_leaves(unsigned int cpu)
{
	unsigned int idx, ret;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	struct _cpuid4_info_regs id4_regs = {};

	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
		ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
		if (ret)
			return ret;
		get_cache_id(cpu, &id4_regs);
		ci_leaf_init(this_leaf++, &id4_regs);
		__cache_cpumap_setup(cpu, idx, &id4_regs);
	}
	this_cpu_ci->cpu_map_populated = true;

	return 0;
}

/*
 * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
 *
 * Since we are disabling the cache, don't allow any interrupts;
 * they would run extremely slowly and would only increase the pain.
 *
 * The caller must ensure that local interrupts are disabled and
 * are reenabled after cache_enable() has been called.
 */
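
/*
 * Typical usage (see cache_cpu_init() below):
 *
 *	local_irq_save(flags);
 *	cache_disable();
 *	... reprogram MTRRs and/or PAT ...
 *	cache_enable();
 *	local_irq_restore(flags);
 */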
static unsigned long saved_cr4;
static DEFINE_RAW_SPINLOCK(cache_disable_lock);

void cache_disable(void) __acquires(cache_disable_lock)
{
	/*
	 * Note that this is not ideal, since the cache is only
	 * flushed/disabled for this CPU while the MTRRs are changed, but
	 * changing this requires more invasive changes to the way the
	 * kernel boots.
	 */
	raw_spin_lock(&cache_disable_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);

	/*
	 * Cache flushing is the most time-consuming step when programming
	 * the MTRRs. Fortunately, as per the Intel Software Development
	 * Manual, we can skip it if the processor supports cache self-
	 * snooping.
	 */
	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
		wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_feature_enabled(X86_FEATURE_PGE)) {
		saved_cr4 = __read_cr4();
		__write_cr4(saved_cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	flush_tlb_local();

	if (cpu_feature_enabled(X86_FEATURE_MTRR))
		mtrr_disable();

	/* Again, only flush caches if we have to. */
	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
		wbinvd();
}

void cache_enable(void) __releases(cache_disable_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	flush_tlb_local();

	if (cpu_feature_enabled(X86_FEATURE_MTRR))
		mtrr_enable();

	write_cr0(read_cr0() & ~X86_CR0_CD);

	/* Restore value of CR4 */
	if (cpu_feature_enabled(X86_FEATURE_PGE))
		__write_cr4(saved_cr4);

	raw_spin_unlock(&cache_disable_lock);
}

static void cache_cpu_init(void)
{
	unsigned long flags;

	local_irq_save(flags);

	if (memory_caching_control & CACHE_MTRR) {
		cache_disable();
		mtrr_generic_set_state();
		cache_enable();
	}

	if (memory_caching_control & CACHE_PAT)
		pat_cpu_init();

	local_irq_restore(flags);
}

static bool cache_aps_delayed_init = true;

void set_cache_aps_delayed_init(bool val)
{
	cache_aps_delayed_init = val;
}

bool get_cache_aps_delayed_init(void)
{
	return cache_aps_delayed_init;
}

static int cache_rendezvous_handler(void *unused)
{
	if (get_cache_aps_delayed_init() || !cpu_online(smp_processor_id()))
		cache_cpu_init();

	return 0;
}

void __init cache_bp_init(void)
{
	if (memory_caching_control)
		cache_cpu_init();
}

void cache_bp_restore(void)
{
	if (memory_caching_control)
		cache_cpu_init();
}

static int cache_ap_online(unsigned int cpu)
{
	cpumask_set_cpu(cpu, cpu_cacheinfo_mask);

	if (!memory_caching_control || get_cache_aps_delayed_init())
		return 0;

	/*
	 * Ideally we should hold mtrr_mutex here to avoid MTRR entries
	 * being changed, but this routine will be called at CPU boot time,
	 * and holding the lock would break it.
	 *
	 * This routine is called in two cases:
	 *
	 *   1. very early in software resume, when there absolutely are no
	 *      MTRR entry changes;
	 *
	 *   2. CPU hotadd time. We let mtrr_add/del_page hold the cpuhotplug
	 *      lock to prevent MTRR entry changes.
	 */
	stop_machine_from_inactive_cpu(cache_rendezvous_handler, NULL,
				       cpu_cacheinfo_mask);

	return 0;
}

static int cache_ap_offline(unsigned int cpu)
{
	cpumask_clear_cpu(cpu, cpu_cacheinfo_mask);

	return 0;
}

/*
 * Delayed cache initialization for all APs
 */
void cache_aps_init(void)
{
	if (!memory_caching_control || !get_cache_aps_delayed_init())
		return;

	stop_machine(cache_rendezvous_handler, NULL, cpu_online_mask);
	set_cache_aps_delayed_init(false);
}

static int __init cache_ap_register(void)
{
	zalloc_cpumask_var(&cpu_cacheinfo_mask, GFP_KERNEL);
	cpumask_set_cpu(smp_processor_id(), cpu_cacheinfo_mask);

	cpuhp_setup_state_nocalls(CPUHP_AP_CACHECTRL_STARTING,
				  "x86/cachectrl:starting",
				  cache_ap_online, cache_ap_offline);

	return 0;
}
early_initcall(cache_ap_register);