// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

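/*
 * DT-based cache topology discovery: a level 1 leaf is described by
 * properties on the CPU node itself, while higher levels are reached by
 * following the "next-level-cache" phandle chain, which is what
 * of_find_next_cache_node() walks below.
 */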
#ifdef CONFIG_OF
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np);/* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}

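/*
 * For reference, a minimal (hypothetical) DT fragment that the property
 * table below would match for a unified level 2 cache:
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-size = <0x80000>;
 *		cache-line-size = <64>;
 *		cache-sets = <512>;
 *	};
 */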
/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf)
{
	const char *propname;
	const __be32 *cache_size;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	cache_size = of_get_property(this_leaf->of_node, propname, NULL);
	if (cache_size)
		this_leaf->size = of_read_number(cache_size, 1);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf)
{
	const __be32 *line_size;
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		line_size = of_get_property(this_leaf->of_node, propname, NULL);
		if (line_size)
			break;
	}

	if (line_size)
		this_leaf->coherency_line_size = of_read_number(line_size, 1);
}

static void cache_nr_sets(struct cacheinfo *this_leaf)
{
	const char *propname;
	const __be32 *nr_sets;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
	if (nr_sets)
		this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}

static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

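/*
 * Worked example for the formula above: a 32KiB cache with 128 sets and
 * 64-byte lines yields (32768 / 128) / 64 = 4 ways. nr_sets == 1 means
 * fully associative, so the field is left at 0 in that case.
 */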
static bool cache_node_is_unified(struct cacheinfo *this_leaf)
{
	return of_property_read_bool(this_leaf->of_node, "cache-unified");
}

static void cache_of_override_properties(unsigned int cpu)
{
	int index;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		/*
		 * init_cache_level must setup the cache level correctly
		 * overriding the architecturally specified levels, so
		 * if type is NONE at this stage, it should be unified
		 */
		if (this_leaf->type == CACHE_TYPE_NOCACHE &&
		    cache_node_is_unified(this_leaf))
			this_leaf->type = CACHE_TYPE_UNIFIED;
		cache_size(this_leaf);
		cache_get_line_size(this_leaf);
		cache_nr_sets(this_leaf);
		cache_associativity(this_leaf);
	}
}

#else
static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume unique level 1 cache, system-wide
	 * shared caches for all other levels. This will be used only if
	 * arch specific code has not populated shared_cpu_map
	 */
	return !(this_leaf->level == 1);
}
#endif

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		/* No cache property/hierarchy support yet in ACPI */
		ret = -ENOTSUPP;
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue;/* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

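/*
 * Example of the symmetric update above: if cpu0 and cpu1 share an L2
 * leaf, onlining cpu1 sets cpu1 in cpu0's shared_cpu_map and cpu0 in
 * cpu1's, so both leaves report "0-1" in shared_cpu_list regardless of
 * the order in which the CPUs came online.
 */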
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void cache_override_properties(unsigned int cpu)
{
	if (of_have_populated_dt())
		return cache_of_override_properties(cpu);
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

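/*
 * The two __weak stubs above are expected to be overridden by the
 * architecture: init_cache_level() fills in the leaf/level counts and
 * populate_cache_leaves() fills the info_list entries. The -ENOENT
 * defaults make detect_cache_attributes() bail out cleanly on
 * architectures that provide neither.
 */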
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	cache_override_properties(cpu);
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

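/*
 * The resulting sysfs layout is one directory per cache leaf, e.g.:
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/{type,level,size,...}
 *	/sys/devices/system/cpu/cpu0/cache/index1/...
 */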
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

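/*
 * With the is_visible callback above, a single static attribute array
 * serves every cache leaf: attributes whose backing field was never
 * populated return 0 here and are simply hidden from sysfs.
 */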
static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

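/*
 * cache_get_priv_group() is a __weak hook: an architecture can override
 * it to supply one extra attribute group (e.g. vendor-specific leaf
 * attributes), which gets slotted into the placeholder entry of
 * cache_private_groups[] above.
 */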
/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

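/*
 * The two hotplug callbacks above tie everything together: onlining a
 * CPU detects its cache attributes and then registers the sysfs nodes;
 * taking it down unwinds in the reverse order, with cache_dev_map
 * recording which CPUs actually have sysfs state to tear down.
 */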
static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);
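
/*
 * Example of inspecting the resulting interface from userspace (paths
 * are illustrative; which files appear depends on is_visible above):
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/type
 *	Data
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/size
 *	32K
 */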