/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

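/*
 * Accessor used by architecture code and drivers to reach the cacheinfo
 * descriptors built by detect_cache_attributes() below.
 */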
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

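/*
 * Walk the device tree to attach an of_node to every cache leaf: level 1
 * leaves map to the CPU node itself, while higher levels follow the
 * next-level-cache phandles via of_find_next_cache_node().
 */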
#ifdef CONFIG_OF
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np);/* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume unique level 1 caches, system-wide
	 * shared caches for all other levels. This will be used only if
	 * arch specific code has not populated shared_cpu_map
	 */
	return !(this_leaf->level == 1);
}
#endif

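/*
 * Two leaves are considered shared when they refer to the same device
 * tree node; build shared_cpu_map by comparing this CPU's leaf against
 * the leaf at the same index on every other online CPU.
 */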
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret;

	ret = cache_setup_of_node(cpu);
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue;/* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;
			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

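/*
 * The weak stubs below are meant to be overridden by architecture code
 * that knows how to enumerate the cache hierarchy. A minimal sketch of
 * such an override, assuming a hypothetical fixed split L1 (data +
 * instruction) hierarchy:
 *
 *	int init_cache_level(unsigned int cpu)
 *	{
 *		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *
 *		this_cpu_ci->num_levels = 1;
 *		this_cpu_ci->num_leaves = 2;
 *		return 0;
 *	}
 *
 *	int populate_cache_leaves(unsigned int cpu)
 *	{
 *		struct cacheinfo *leaf = get_cpu_cacheinfo(cpu)->info_list;
 *
 *		leaf[0].level = 1;
 *		leaf[0].type = CACHE_TYPE_DATA;
 *		leaf[1].level = 1;
 *		leaf[1].type = CACHE_TYPE_INST;
 *		return 0;
 *	}
 */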
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
			cpu);
		goto free_ci;
	}
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

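/*
 * show_one() expands to a sysfs show() callback that prints a single
 * unsigned field of the cacheinfo leaf attached to the device as drvdata.
 */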
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

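/*
 * These attributes surface under each per-leaf sysfs directory, e.g.
 * (illustrative values):
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/level
 *	1
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size
 *	64
 */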
static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

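/*
 * All attributes are registered unconditionally;
 * cache_default_attrs_is_visible() below hides any file whose backing
 * field was left unpopulated by the architecture.
 */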
static struct attribute *cache_default_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

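/*
 * The placeholder slot in cache_private_groups above is filled at runtime
 * with whatever the architecture returns from cache_get_priv_group(). A
 * minimal sketch of an arch-side override, assuming a hypothetical
 * attribute group named arch_priv_group carrying extra arch-specific
 * files:
 *
 *	const struct attribute_group *
 *	cache_get_priv_group(struct cacheinfo *this_leaf)
 *	{
 *		return this_leaf->level == 2 ? &arch_priv_group : NULL;
 *	}
 */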
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

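/*
 * Create one "indexN" child device per cache leaf under cpuX/cache,
 * attaching the leaf as drvdata so the show() callbacks above can
 * retrieve it.
 */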
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static void cache_remove_dev(unsigned int cpu)
{
	if (!cpumask_test_cpu(cpu, &cache_dev_map))
		return;

	cpumask_clear_cpu(cpu, &cache_dev_map);
	cpu_cache_sysfs_exit(cpu);
}

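/*
 * Hotplug callback: rebuild the cacheinfo descriptors and sysfs nodes
 * when a CPU comes online, and tear them down when it goes dead.
 */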
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = detect_cache_attributes(cpu);
		if (!rc)
			rc = cache_add_dev(cpu);
		break;
	case CPU_DEAD:
		cache_remove_dev(cpu);
		if (per_cpu_cacheinfo(cpu))
			free_cache_attributes(cpu);
		break;
	}
	return notifier_from_errno(rc);
}

static int __init cacheinfo_sysfs_init(void)
{
	int cpu, rc = 0;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		rc = detect_cache_attributes(cpu);
		if (rc)
			goto out;
		rc = cache_add_dev(cpu);
		if (rc) {
			free_cache_attributes(cpu);
			pr_err("error populating cacheinfo..cpu%d\n", cpu);
			goto out;
		}
	}
	__hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
	cpu_notifier_register_done();
	return rc;
}

device_initcall(cacheinfo_sysfs_init);