// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
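/*
 * Overview: on CPU online, detect_cache_attributes() builds a per-CPU array
 * of struct cacheinfo leaves (via the arch's init_cache_level() and
 * populate_cache_leaves(), with DT/ACPI filling in the gaps), and
 * cache_add_dev() then exposes each leaf under
 * /sys/devices/system/cpu/cpuX/cache/indexY/.
 */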
/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}
#ifdef CONFIG_OF
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->fw_token == this_leaf->fw_token;
}
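/*
 * Two leaves share a cache iff their fw_token points at the same firmware
 * node: for DT this is the cache's device_node, so e.g. two CPUs whose
 * next-level-cache phandles resolve to the same L2 node compare equal here.
 */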
/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};
static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}
static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}
static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}
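/*
 * Worked example of the associativity derivation used below, assuming the
 * (illustrative) values size = 32K, nr_sets = 128, line_size = 64:
 *
 *	ways = (size / nr_sets) / line_size = (32768 / 128) / 64 = 4
 *
 * nr_sets == 1 denotes a fully associative cache, in which case
 * ways_of_associativity is deliberately left at 0.
 */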
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}
static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must setup the cache level correctly
	 * overriding the architecturally specified levels, so
	 * if type is NONE at this stage, it should be unified
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}
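/*
 * cache_setup_of_node() walks the DT cache hierarchy for one CPU: level 1
 * leaves use the CPU node itself (which carries the i-cache-* and
 * d-cache-* properties), while each higher level follows the
 * next-level-cache phandle via of_find_next_cache_node(). Each visited
 * node is recorded as the leaf's fw_token with its refcount held; the
 * matching of_node_put() happens in cache_shared_cpu_map_remove().
 */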
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if fw_token is already populated */
	if (this_cpu_ci->info_list->fw_token)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np);/* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
	 * shared caches for all other levels. This will be used only if
	 * arch specific code has not populated shared_cpu_map
	 */
	return !(this_leaf->level == 1);
}
#endif
int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}
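/*
 * cache_shared_cpu_map_setup() fills in shared_cpu_map by comparing this
 * CPU's leaves against the same-index leaf of every other online CPU, so
 * sharing is discovered pairwise as CPUs come online. This assumes all
 * CPUs expose their cache levels at matching indices, as is the case for
 * the symmetric hierarchies described by DT/ACPI.
 */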
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue;/* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}
static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	/*
	 * populate_cache_leaves() may completely setup the cache leaves and
	 * shared_cpu_map or it may leave it partially setup.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}
show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);
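/*
 * For illustration, show_one(level, level) above expands to:
 *
 *	static ssize_t level_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct cacheinfo *this_leaf = dev_get_drvdata(dev);
 *		return sprintf(buf, "%u\n", this_leaf->level);
 *	}
 *
 * which DEVICE_ATTR_RO(level) below then pairs with a read-only sysfs
 * attribute named "level".
 */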
static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}
static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}
static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}
static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);
static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};
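/*
 * The is_visible callback below hides any attribute whose backing field
 * was never populated, so each indexY directory only shows what the
 * firmware or architecture actually reported. Note the deliberate
 * exception: ways_of_associativity stays visible whenever size is known,
 * since 0 ways is the convention for a fully associative cache.
 */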
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}
static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}
/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}
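/*
 * The resulting sysfs layout, for example on a CPU with two L1 leaves and
 * a unified L2 (illustrative):
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/   (L1 Data)
 *	/sys/devices/system/cpu/cpu0/cache/index1/   (L1 Instruction)
 *	/sys/devices/system/cpu/cpu0/cache/index2/   (L2 Unified)
 *
 * with each indexY directory containing the attributes defined above
 * (type, level, size, shared_cpu_map, ...).
 */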
static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}
static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}
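/*
 * Registering with CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state:
 * cacheinfo_cpu_online() runs on every CPU as it comes online (including
 * those already online at registration time), and cacheinfo_cpu_pre_down()
 * runs before a CPU is taken offline, keeping the sysfs tree and the
 * cacheinfo allocations in step with hotplug.
 */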
static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);