arch/powerpc/kernel/cacheinfo.c
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>

#include "cacheinfo.h"
/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA        2
static const struct cache_type_info cache_type_info[] = {
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
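
/* For reference, a minimal sketch of the device tree properties this
 * table is matched against -- the node and values below are
 * illustrative, not taken from any particular machine:
 *
 *	L2: l2-cache {
 *		device_type = "cache";
 *		cache-unified;
 *		d-cache-size = <0x80000>;	// 512KiB
 *		d-cache-block-size = <0x80>;	// 128-byte blocks
 *		d-cache-sets = <0x200>;
 *	};
 *
 * A split L1 carries the analogous d-cache-* and i-cache-* properties
 * directly on the cpu node.
 */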
/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);
static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %s(%s) refers to cache for %s(%s)\n",
			  iter->ofnode->full_name,
			  cache_type_string(iter),
			  cache->ofnode->full_name,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %s\n", cache->level,
		 cache_type_string(cache), cache->ofnode->full_name);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %s(%s)\n",
			  cpu, next->ofnode->full_name,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}
static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = *cache_size;
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const u32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = *line_size;
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = *nr_sets;
	return 0;
}
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}
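
/* Worked example with made-up numbers: size = 32768 (32KiB),
 * nr_sets = 128 and line_size = 128 gives (32768 / 128) / 128 = 2,
 * i.e. the cache is reported as 2-way set-associative. */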
/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}
static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
{
	struct cache *cache;

	pr_debug("creating L%d ucache for %s\n", level, node->full_name);

	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

	return cache;
}

static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %s\n", level,
		 node->full_name);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}
static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}

static void __cpuinit do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}
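
/* of_find_next_cache_node() follows a node's cache phandle
 * ("l2-cache" or "next-level-cache") to the next level, so starting
 * from the cpu node this walk visits e.g. L2 then L3, producing a
 * local list like L1d -> L1i -> L2 -> L3 via link_cache_lists(). */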
static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}
static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);
static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	int len;
	int n = 0;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;
	len = PAGE_SIZE - 2;

	if (len > 1) {
		n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
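
/* Example output, assuming cpus 0 and 1 share the cache and
 * NR_CPUS=32: reading shared_cpu_map yields "00000003" in
 * cpumask_scnprintf()'s hex format (the exact width depends on the
 * configured NR_CPUS). */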
/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};
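
/* Putting the pieces together, the sysfs layout this file creates for
 * each online cpu looks roughly like (optional attributes appear only
 * when the backing device tree properties exist):
 *
 *	/sys/devices/system/cpu/cpu0/cache/
 *		index0/			(e.g. L1 data)
 *			type  level  shared_cpu_map
 *			size  coherency_line_size
 *			number_of_sets  ways_of_associativity
 *		index1/			(e.g. L1 instruction)
 *		index2/			(e.g. L2)
 */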
static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_name;
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_name = cache->ofnode->full_name;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%s(%s) (rc = %zd)\n",
				 attr->attr.name, cache_name,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %s(%s)\n",
				 attr->attr.name, cache_name, cache_type);
	}

	kfree(buf);
}

static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		goto err;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc)
		goto err;

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);

	return;
err:
	kfree(index_dir);
}
static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}
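
/* cacheinfo_cpu_online() and its offline counterpart below are meant
 * to be invoked from the cpu hotplug path (the powerpc topology code
 * in arch/powerpc/kernel/sysfs.c calls them from its hotplug
 * notifier), which is what serializes access to cache_list. */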
#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %s(%s)\n",
			  cpu, cache->ofnode->full_name,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* CONFIG_HOTPLUG_CPU */