/*
 * Extract CPU cache information and expose them via sysfs.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/facility.h>

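/*
 * One instance per detected cache level and type, holding the geometry
 * reported by the ECAG instruction. "private" marks CPU-private caches
 * as opposed to shared ones; "type" indexes cache_type_string below.
 */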
struct cache {
	unsigned long size;
	unsigned int line_size;
	unsigned int associativity;
	unsigned int nr_sets;
	unsigned int level   : 3;
	unsigned int type    : 2;
	unsigned int private : 1;
	struct list_head list;
};

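/*
 * sysfs bookkeeping: a cache_dir represents one CPU's "cache" directory,
 * and each cache_index_dir one "indexN" subdirectory below it, chained
 * so everything can be torn down again on CPU removal.
 */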
struct cache_dir {
	struct kobject *kobj;
	struct cache_index_dir *index;
};

struct cache_index_dir {
	struct kobject kobj;
	int cpu;
	struct cache *cache;
	struct cache_index_dir *next;
};

enum {
	CACHE_SCOPE_NOTEXISTS,
	CACHE_SCOPE_PRIVATE,
	CACHE_SCOPE_SHARED,
	CACHE_SCOPE_RESERVED,
};

enum {
	CACHE_TYPE_SEPARATE,
	CACHE_TYPE_DATA,
	CACHE_TYPE_INSTRUCTION,
	CACHE_TYPE_UNIFIED,
};

enum {
	EXTRACT_TOPOLOGY,
	EXTRACT_LINE_SIZE,
	EXTRACT_SIZE,
	EXTRACT_ASSOCIATIVITY,
};

enum {
	CACHE_TI_UNIFIED = 0,
	CACHE_TI_INSTRUCTION = 1,
	CACHE_TI_DATA,
};

struct cache_info {
	unsigned char	    : 4;
	unsigned char scope : 2;
	unsigned char type  : 2;
};

#define CACHE_MAX_LEVEL 8

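/*
 * The EXTRACT_TOPOLOGY query returns one cache_info byte per cache
 * level, all eight packed into a 64-bit register; the union decodes the
 * raw ecag() result through the bit fields above.
 */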
union cache_topology {
	struct cache_info ci[CACHE_MAX_LEVEL];
	unsigned long long raw;
};

static const char * const cache_type_string[] = {
	"Data",
	"Instruction",
	"Unified",
};

static struct cache_dir *cache_dir_cpu[NR_CPUS];
static LIST_HEAD(cache_list);

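/* Emit one line per detected cache level to the given seq_file. */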
void show_cacheinfo(struct seq_file *m)
{
	struct cache *cache;
	int index = 0;

	list_for_each_entry(cache, &cache_list, list) {
		seq_printf(m, "cache%-11d: ", index);
		seq_printf(m, "level=%d ", cache->level);
		seq_printf(m, "type=%s ", cache_type_string[cache->type]);
		seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
		seq_printf(m, "size=%luK ", cache->size >> 10);
		seq_printf(m, "line_size=%u ", cache->line_size);
		seq_printf(m, "associativity=%d", cache->associativity);
		seq_puts(m, "\n");
		index++;
	}
}

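/*
 * ecag - Extract CPU Attribute (the ECAG instruction, here emitted via
 * a raw .insn encoding). The command word layout is
 * ai << 4 | li << 1 | ti: attribute indication (what to extract), level
 * indication (which cache level) and type indication (unified,
 * instruction or data cache).
 */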
static inline unsigned long ecag(int ai, int li, int ti)
{
	unsigned long cmd, val;

	cmd = ai << 4 | li << 1 | ti;
	asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
		     : "=d" (val) : "a" (cmd));
	return val;
}

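/*
 * Query one cache level via ecag() and add it to cache_list. The level
 * is stored one-based for output, and type - 1 maps CACHE_TYPE_DATA/
 * INSTRUCTION/UNIFIED onto the cache_type_string indices. nr_sets is
 * derived as size / (associativity * line_size).
 */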
static int __init cache_add(int level, int private, int type)
{
	struct cache *cache;
	int ti;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;
	if (type == CACHE_TYPE_INSTRUCTION)
		ti = CACHE_TI_INSTRUCTION;
	else
		ti = CACHE_TI_UNIFIED;
	cache->size = ecag(EXTRACT_SIZE, level, ti);
	cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
	cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
	cache->nr_sets = cache->size / cache->associativity;
	cache->nr_sets /= cache->line_size;
	cache->private = private;
	cache->level = level + 1;
	cache->type = type - 1;
	list_add_tail(&cache->list, &cache_list);
	return 0;
}

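/*
 * Decode the topology byte of each possible cache level; a "separate"
 * cache becomes two list entries (data plus instruction). If any
 * allocation fails, the list built so far is freed again.
 */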
static void __init cache_build_info(void)
{
	struct cache *cache, *next;
	union cache_topology ct;
	int level, private, rc;

	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	for (level = 0; level < CACHE_MAX_LEVEL; level++) {
		switch (ct.ci[level].scope) {
		case CACHE_SCOPE_SHARED:
			private = 0;
			break;
		case CACHE_SCOPE_PRIVATE:
			private = 1;
			break;
		default:
			return;
		}
		if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
			rc  = cache_add(level, private, CACHE_TYPE_DATA);
			rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
		} else {
			rc = cache_add(level, private, ct.ci[level].type);
		}
		if (rc)
			goto error;
	}
	return;
error:
	list_for_each_entry_safe(cache, next, &cache_list, list) {
		list_del(&cache->list);
		kfree(cache);
	}
}

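/* Create the "cache" kobject below /sys/devices/system/cpu/cpuN/. */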
static struct cache_dir *cache_create_cache_dir(int cpu)
{
	struct cache_dir *cache_dir;
	struct kobject *kobj = NULL;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (!dev)
		goto out;
	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto out;
	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto out;
	cache_dir->kobj = kobj;
	cache_dir_cpu[cpu] = cache_dir;
	return cache_dir;
out:
	kobject_put(kobj);
	return NULL;
}

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
{
	return container_of(kobj, struct cache_index_dir, kobj);
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);
	kfree(index);
}

static ssize_t cache_index_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);
	return kobj_attr->show(kobj, kobj_attr, buf);
}

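/*
 * Generate a read-only show function plus kobj_attribute per cache
 * property; _value is evaluated with "index" in scope, pointing to the
 * cache_index_dir that owns the attribute.
 */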
#define DEFINE_CACHE_ATTR(_name, _format, _value)			\
static ssize_t cache_##_name##_show(struct kobject *kobj,		\
				    struct kobj_attribute *attr,	\
				    char *buf)				\
{									\
	struct cache_index_dir *index;					\
									\
	index = kobj_to_cache_index_dir(kobj);				\
	return sprintf(buf, _format, _value);				\
}									\
static struct kobj_attribute cache_##_name##_attr =			\
	__ATTR(_name, 0444, cache_##_name##_show, NULL);

DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);

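/*
 * shared_cpu_map (type == 0) prints a hex CPU mask, shared_cpu_list
 * (type == 1) a CPU list; both report only the owning CPU, via
 * cpumask_of(index->cpu).
 */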
static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
{
	struct cache_index_dir *index;
	int len;

	index = kobj_to_cache_index_dir(kobj);
	len = type ?
		cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
		cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
	len += sprintf(&buf[len], "\n");
	return len;
}

static ssize_t shared_cpu_map_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return shared_cpu_map_func(kobj, 0, buf);
}
static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static ssize_t shared_cpu_list_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return shared_cpu_map_func(kobj, 1, buf);
}
static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_size_attr.attr,
	&cache_number_of_sets_attr.attr,
	&cache_ways_of_associativity_attr.attr,
	&cache_level_attr.attr,
	&cache_coherency_line_size_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.sysfs_ops = &cache_index_ops,
	.release = cache_index_release,
	.default_attrs = cache_index_default_attrs,
};

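/*
 * Create one "indexN" kobject below the CPU's cache directory and chain
 * it into cache_dir->index for later teardown.
 */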
static int cache_create_index_dir(struct cache_dir *cache_dir,
				  struct cache *cache, int index, int cpu)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return -ENOMEM;
	index_dir->cache = cache;
	index_dir->cpu = cpu;
	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc)
		goto out;
	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;
	return 0;
out:
	kfree(index_dir);
	return rc;
}

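/*
 * Build the complete sysfs tree for one CPU; nothing to do if no caches
 * were detected at boot.
 */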
static int cache_add_cpu(int cpu)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int rc, index = 0;

	if (list_empty(&cache_list))
		return 0;
	cache_dir = cache_create_cache_dir(cpu);
	if (!cache_dir)
		return -ENOMEM;
	list_for_each_entry(cache, &cache_list, list) {
		if (!cache->private)
			break;
		rc = cache_create_index_dir(cache_dir, cache, index, cpu);
		if (rc)
			return rc;
		index++;
	}
	return 0;
}

static void cache_remove_cpu(int cpu)
{
	struct cache_index_dir *index, *next;
	struct cache_dir *cache_dir;

	cache_dir = cache_dir_cpu[cpu];
	if (!cache_dir)
		return;
	index = cache_dir->index;
	while (index) {
		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
	kobject_put(cache_dir->kobj);
	kfree(cache_dir);
	cache_dir_cpu[cpu] = NULL;
}

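/*
 * CPU hotplug notifier: create the sysfs directories when a CPU comes
 * online, and remove them when it goes away or when creation failed
 * halfway.
 */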
static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
			 void *hcpu)
{
	int cpu = (long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = cache_add_cpu(cpu);
		if (rc)
			cache_remove_cpu(cpu);
		break;
	case CPU_DEAD:
		cache_remove_cpu(cpu);
		break;
	}
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

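/*
 * Facility 34 is the general-instructions-extension facility, which
 * introduced ECAG; without it no cache information can be extracted.
 */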
static int __init cache_init(void)
{
	int cpu;

	if (!test_facility(34))
		return 0;
	cache_build_info();

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		cache_add_cpu(cpu);
	__hotcpu_notifier(cache_hotplug, 0);
	cpu_notifier_register_done();
	return 0;
}

device_initcall(cache_init);