// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with the x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 */

#define pr_fmt(fmt) "cacheinfo: " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj;          /* bare (not embedded) kobject for cache
					  directory */
	struct cache_index_dir *index; /* list of index objects */
};

37 /* "index" object: each cpu's cache directory has an index
38 * subdirectory corresponding to a cache object associated with the
39 * cpu. This object's lifetime is managed via the embedded kobject.
41 struct cache_index_dir
{
43 struct cache_index_dir
*next
; /* next index in parent directory */
/* Template for determining which OF properties to query for a given
 * cache type.
 */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc. */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* The PowerPC Processor binding says the [di]-cache-*
		 * properties must be equal on unified caches, so just
		 * use the d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOFP(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}

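/*
 * Worked example with made-up numbers: size = 32768, nr_sets = 64 and
 * line_size = 64 give ways = (32768 / 64) / 64 = 8, i.e. an 8-way
 * set-associative cache.
 */
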
/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags.  Most embedded
 * systems use cache-size, etc. for the unified cache size, but open
 * firmware systems use d-cache-size, etc.  Check on initialization for
 * which type we have, and return the appropriate structure type.  Assume
 * it's embedded if it isn't open firmware.  If it's yet a 3rd type, then
 * there will be missing entries in
 * /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

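/*
 * Illustrative only: a hypothetical unified L2 node carrying
 * "d-cache-size" (the open firmware style) is classified as
 * CACHE_TYPE_UNIFIED_D here, while one carrying plain "cache-size"
 * (the embedded style) falls back to CACHE_TYPE_UNIFIED.
 */
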
static struct cache *cache_do_one_devnode_unified(struct device_node *node,
						  int level)
{
	pr_debug("creating L%d ucache for %pOFP\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOFP\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;

	/*
	 * The cache->next_local list sorts by level ascending:
	 * L1d -> L1i -> L2 -> L3 ...
	 */
	WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
		  (smaller->level > 1 && bigger->level != smaller->level + 1),
		  "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
		  smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ONCE(cache->level != 1,
		  "instantiating cache chain from L%d %s cache for "
		  "%pOFP instead of an L1\n", cache->level,
		  cache_type_string(cache), cache->ofnode);
	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
		  "instantiating cache chain from node %pOFP of type '%s' "
		  "instead of a cpu node\n", cache->ofnode,
		  of_node_get_device_type(cache->ofnode));
}

static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

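/*
 * Note: of_find_next_cache_node() is expected to locate the next cache
 * level by following the node's "l2-cache" (or "next-level-cache")
 * phandle, so the loop above walks outward from the CPU node one level
 * at a time.
 */
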
static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t
cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t
size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t
line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t
nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t
associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t
type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t
level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
{
	struct kobject *index_dir_kobj = &index->kobj;
	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
	struct device *dev = kobj_to_dev(cpu_dev_kobj);

	return dev->id;
}

/*
 * On big-core systems, each core has two groups of CPUs each of which
 * has its own L1-cache. The thread-siblings which share l1-cache with
 * @cpu can be obtained via cpu_smallcore_mask().
 *
 * On some big-core systems, the L2 cache is shared only between some
 * groups of siblings. This is already parsed and encoded in
 * cpu_l2_cache_mask().
 *
 * TODO: cache_lookup_or_instantiate() needs to be made aware of the
 *       "ibm,thread-groups" property so that cache->shared_cpu_map
 *       reflects the correct siblings on platforms that have this
 *       device-tree property. This helper function is only a stop-gap
 *       solution so that we report the correct siblings to the
 *       userspace via sysfs.
 */
static const struct cpumask *get_shared_cpu_map(struct cache_index_dir *index, struct cache *cache)
{
	if (has_big_cores) {
		int cpu = index_dir_to_cpu(index);
		if (cache->level == 1)
			return cpu_smallcore_mask(cpu);
		if (cache->level == 2 && thread_group_shares_l2)
			return cpu_l2_cache_mask(cpu);
	}

	return &cache->shared_cpu_map;
}

static ssize_t
show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	mask = get_shared_cpu_map(index, cache);

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t
shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, false);
}

static ssize_t
shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, true);
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

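/*
 * Sketch of the resulting sysfs layout for each online CPU; the
 * optional attributes appear only when the device tree supplies the
 * corresponding properties (see cacheinfo_create_index_opt_attrs):
 *
 *	/sys/devices/system/cpu/cpuN/cache/
 *		index0/		(e.g. L1d)
 *			type, level, shared_cpu_map, shared_cpu_list
 *			size, coherency_line_size, number_of_sets,
 *			ways_of_associativity	(optional)
 *		index1/		(e.g. L1i)
 *		index2/		(e.g. L2)
 *		...
 */
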
/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOFP(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOFP(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOFP(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir) {
		remove_cache_dir(cache_dir);
		per_cpu(cache_dir_pcpu, cpu_id) = NULL;
	}

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}

void cacheinfo_teardown(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_offline(cpu);
}

void cacheinfo_rebuild(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_online(cpu);
}

#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */