/*
 * Imported from: [linux/fpc-iii.git] / arch / sparc / kernel / sysfs.c
 * blob: 7f41d40b7e6e8ccf89b5ce12a9422bbf4e84ac2e
 * (commit subject in the scrape: "mm: hugetlb: fix hugepage memory leak
 *  caused by wrong reserve count")
 */
/* sysfs.c: Topology sysfs support code for sparc64.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */
5 #include <linux/sched.h>
6 #include <linux/device.h>
7 #include <linux/cpu.h>
8 #include <linux/smp.h>
9 #include <linux/percpu.h>
10 #include <linux/init.h>
12 #include <asm/cpudata.h>
13 #include <asm/hypervisor.h>
14 #include <asm/spitfire.h>
16 static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));
18 #define SHOW_MMUSTAT_ULONG(NAME) \
19 static ssize_t show_##NAME(struct device *dev, \
20 struct device_attribute *attr, char *buf) \
21 { \
22 struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
23 return sprintf(buf, "%lu\n", p->NAME); \
24 } \
25 static DEVICE_ATTR(NAME, 0444, show_##NAME, NULL)
/* One read-only attribute per TSB hit/tick counter: I-MMU then D-MMU,
 * context 0 then non-zero contexts, at each supported TTE page size.
 */
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);
60 static struct attribute *mmu_stat_attrs[] = {
61 &dev_attr_immu_tsb_hits_ctx0_8k_tte.attr,
62 &dev_attr_immu_tsb_ticks_ctx0_8k_tte.attr,
63 &dev_attr_immu_tsb_hits_ctx0_64k_tte.attr,
64 &dev_attr_immu_tsb_ticks_ctx0_64k_tte.attr,
65 &dev_attr_immu_tsb_hits_ctx0_4mb_tte.attr,
66 &dev_attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
67 &dev_attr_immu_tsb_hits_ctx0_256mb_tte.attr,
68 &dev_attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
69 &dev_attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
70 &dev_attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
71 &dev_attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
72 &dev_attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
73 &dev_attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
74 &dev_attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
75 &dev_attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
76 &dev_attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
77 &dev_attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
78 &dev_attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
79 &dev_attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
80 &dev_attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
81 &dev_attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
82 &dev_attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
83 &dev_attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
84 &dev_attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
85 &dev_attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
86 &dev_attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
87 &dev_attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
88 &dev_attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
89 &dev_attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
90 &dev_attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
91 &dev_attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
92 &dev_attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
93 NULL,
96 static struct attribute_group mmu_stat_group = {
97 .attrs = mmu_stat_attrs,
98 .name = "mmu_stats",
101 /* XXX convert to rusty's on_one_cpu */
102 static unsigned long run_on_cpu(unsigned long cpu,
103 unsigned long (*func)(unsigned long),
104 unsigned long arg)
106 cpumask_t old_affinity;
107 unsigned long ret;
109 cpumask_copy(&old_affinity, tsk_cpus_allowed(current));
110 /* should return -EINVAL to userspace */
111 if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
112 return 0;
114 ret = func(arg);
116 set_cpus_allowed_ptr(current, &old_affinity);
118 return ret;
/* Query the hypervisor: returns 1 if MMU statistics collection is
 * currently enabled on this cpu (non-zero stats RA), else 0.
 * Runs on the target cpu via run_on_cpu(); the argument is unused.
 */
static unsigned long read_mmustat_enable(unsigned long junk)
{
	unsigned long ra = 0;

	sun4v_mmustat_info(&ra);

	return ra != 0;
}
130 static unsigned long write_mmustat_enable(unsigned long val)
132 unsigned long ra, orig_ra;
134 if (val)
135 ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
136 else
137 ra = 0UL;
139 return sun4v_mmustat_conf(ra, &orig_ra);
142 static ssize_t show_mmustat_enable(struct device *s,
143 struct device_attribute *attr, char *buf)
145 unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
146 return sprintf(buf, "%lx\n", val);
149 static ssize_t store_mmustat_enable(struct device *s,
150 struct device_attribute *attr, const char *buf,
151 size_t count)
153 unsigned long val, err;
154 int ret = sscanf(buf, "%lu", &val);
156 if (ret != 1)
157 return -EINVAL;
159 err = run_on_cpu(s->id, write_mmustat_enable, val);
160 if (err)
161 return -EIO;
163 return count;
166 static DEVICE_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);
168 static int mmu_stats_supported;
170 static int register_mmu_stats(struct device *s)
172 if (!mmu_stats_supported)
173 return 0;
174 device_create_file(s, &dev_attr_mmustat_enable);
175 return sysfs_create_group(&s->kobj, &mmu_stat_group);
#ifdef CONFIG_HOTPLUG_CPU
/* Tear down what register_mmu_stats() created, in reverse order. */
static void unregister_mmu_stats(struct device *s)
{
	if (!mmu_stats_supported)
		return;

	sysfs_remove_group(&s->kobj, &mmu_stat_group);
	device_remove_file(s, &dev_attr_mmustat_enable);
}
#endif
/* Emit a sysfs show routine printing an unsigned long cpuinfo_sparc
 * member for the cpu that owns the device.
 */
#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct device *dev, \
			   struct device_attribute *attr, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%lu\n", c->MEMBER); \
}

/* Same as above, for an unsigned int member. */
#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct device *dev, \
			   struct device_attribute *attr, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%u\n", c->MEMBER); \
}
/* Per-cpu clock and cache geometry attributes. */
SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);
212 static struct device_attribute cpu_core_attrs[] = {
213 __ATTR(clock_tick, 0444, show_clock_tick, NULL),
214 __ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL),
215 __ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
216 __ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL),
217 __ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
218 __ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL),
219 __ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL),
222 static DEFINE_PER_CPU(struct cpu, cpu_devices);
224 static void register_cpu_online(unsigned int cpu)
226 struct cpu *c = &per_cpu(cpu_devices, cpu);
227 struct device *s = &c->dev;
228 int i;
230 for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
231 device_create_file(s, &cpu_core_attrs[i]);
233 register_mmu_stats(s);
#ifdef CONFIG_HOTPLUG_CPU
/* Remove the sysfs files for a cpu going offline, mirroring
 * register_cpu_online() in reverse order.
 */
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	int idx;

	unregister_mmu_stats(s);
	for (idx = 0; idx < ARRAY_SIZE(cpu_core_attrs); idx++)
		device_remove_file(s, &cpu_core_attrs[idx]);
}
#endif
249 static int sysfs_cpu_notify(struct notifier_block *self,
250 unsigned long action, void *hcpu)
252 unsigned int cpu = (unsigned int)(long)hcpu;
254 switch (action) {
255 case CPU_ONLINE:
256 case CPU_ONLINE_FROZEN:
257 register_cpu_online(cpu);
258 break;
259 #ifdef CONFIG_HOTPLUG_CPU
260 case CPU_DEAD:
261 case CPU_DEAD_FROZEN:
262 unregister_cpu_online(cpu);
263 break;
264 #endif
266 return NOTIFY_OK;
269 static struct notifier_block sysfs_cpu_nb = {
270 .notifier_call = sysfs_cpu_notify,
273 static void __init check_mmu_stats(void)
275 unsigned long dummy1, err;
277 if (tlb_type != hypervisor)
278 return;
280 err = sun4v_mmustat_info(&dummy1);
281 if (!err)
282 mmu_stats_supported = 1;
/* Register every possible NUMA node with the node sysfs layer.
 * Compiles to nothing on !CONFIG_NUMA kernels.
 */
static void register_nodes(void)
{
#ifdef CONFIG_NUMA
	int node;

	for (node = 0; node < MAX_NUMNODES; node++)
		register_one_node(node);
#endif
}
295 static int __init topology_init(void)
297 int cpu;
299 register_nodes();
301 check_mmu_stats();
303 cpu_notifier_register_begin();
305 for_each_possible_cpu(cpu) {
306 struct cpu *c = &per_cpu(cpu_devices, cpu);
308 register_cpu(c, cpu);
309 if (cpu_online(cpu))
310 register_cpu_online(cpu);
313 __register_cpu_notifier(&sysfs_cpu_nb);
315 cpu_notifier_register_done();
317 return 0;
320 subsys_initcall(topology_init);