/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

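/*
 * When the system provides a device tree, each cacheinfo leaf is tied to
 * its DT node below, so that leaves pointing at the same node can later
 * be recognised as shared between CPUs.
 */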
#ifdef CONFIG_OF
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (np && index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		this_leaf->of_node = np;
		index++;
	}
	return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume unique level 1 cache, system-wide
	 * shared caches for all other levels. This will be used only if
	 * arch specific code has not populated shared_cpu_map
	 */
	return !(this_leaf->level == 1);
}
#endif

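/*
 * Build shared_cpu_map for every leaf of this CPU by comparing it with
 * the leaf at the same index on each other online CPU; matching leaves
 * are marked in both masks so the mapping stays symmetric.
 */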
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret;

	ret = cache_setup_of_node(cpu);
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;
			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

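/*
 * Architectures override the two weak hooks above to describe their
 * cache hierarchy. A minimal sketch of such an override (hypothetical,
 * for illustration only; the leaf counts and the ci_leaf_init() helper
 * are assumed, not part of this file):
 *
 *	int init_cache_level(unsigned int cpu)
 *	{
 *		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *
 *		this_cpu_ci->num_levels = 2;	(say, L1 + L2)
 *		this_cpu_ci->num_leaves = 3;	(L1I + L1D + L2)
 *		return 0;
 *	}
 *
 *	int populate_cache_leaves(unsigned int cpu)
 *	{
 *		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *		struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 *
 *		ci_leaf_init(this_leaf++, CACHE_TYPE_INST, 1);
 *		ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, 1);
 *		ci_leaf_init(this_leaf, CACHE_TYPE_UNIFIED, 2);
 *		return 0;
 *	}
 */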
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret)
		goto free_ci;

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

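/*
 * The sysfs interface below exposes a "cache" directory under each cpuX
 * node (/sys/devices/system/cpu/cpuX/cache), with one indexY
 * subdirectory per detected cache leaf.
 */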
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

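/* cpus whose cache sysfs hierarchy has been fully populated */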
static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

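/*
 * show_one() stamps out one sysfs show() callback per scalar cacheinfo
 * field; each one prints the named member of the leaf attached to the
 * device as drvdata.
 */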
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

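/*
 * All attributes are registered unconditionally; is_visible() below
 * hides any file whose underlying field was left unpopulated by the
 * architecture.
 */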
static struct attribute *cache_default_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

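/*
 * An architecture can expose extra leaf attributes by overriding the
 * weak cache_get_priv_group() below; the group it returns is slotted
 * into the placeholder in cache_private_groups.
 */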
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static void cache_remove_dev(unsigned int cpu)
{
	if (!cpumask_test_cpu(cpu, &cache_dev_map))
		return;

	cpumask_clear_cpu(cpu, &cache_dev_map);
	cpu_cache_sysfs_exit(cpu);
}

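/*
 * Hotplug callback: build cacheinfo and its sysfs nodes when a CPU
 * comes online, and tear both down again when it goes away.
 */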
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = detect_cache_attributes(cpu);
		if (!rc)
			rc = cache_add_dev(cpu);
		break;
	case CPU_DEAD:
		cache_remove_dev(cpu);
		if (per_cpu_cacheinfo(cpu))
			free_cache_attributes(cpu);
		break;
	}
	return notifier_from_errno(rc);
}

static int __init cacheinfo_sysfs_init(void)
{
	int cpu, rc = 0;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		rc = detect_cache_attributes(cpu);
		if (rc)
			goto out;
		rc = cache_add_dev(cpu);
		if (rc) {
			free_cache_attributes(cpu);
			pr_err("error populating cacheinfo..cpu%d\n", cpu);
			goto out;
		}
	}
	__hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
	cpu_notifier_register_done();
	return rc;
}

device_initcall(cacheinfo_sysfs_init);