/* mdesc.c: Sun4V machine description handling.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/bootmem.h>
#include <linux/export.h>

#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/prom.h>
#include <asm/uaccess.h>
#include <asm/oplib.h>
#include <asm/smp.h>

/* Unlike the OBP device tree, the machine description is a full-on
 * DAG.  An arbitrary number of ARCs are possible from one
 * node to other nodes and thus we can't use the OBP device_node
 * data structure to represent these nodes inside of the kernel.
 *
 * Actually, it isn't even a DAG, because there are back pointers
 * which create cycles in the graph.
 *
 * mdesc_hdr and mdesc_elem describe the layout of the data structure
 * we get from the Hypervisor.
 */
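/* Layout of the MD buffer as delivered by the hypervisor, as implied
 * by the node_block()/name_block()/data_block() accessors further
 * down in this file (a sketch, not normative):
 *
 *	+------------------+ <-- mdesc_hdr
 *	| header           |
 *	+------------------+ <-- node_block()
 *	| mdesc_elem array |  node_sz bytes, 16 bytes per element
 *	+------------------+ <-- name_block()
 *	| name strings     |  name_sz bytes
 *	+------------------+ <-- data_block()
 *	| property data    |  data_sz bytes
 *	+------------------+
 */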
struct mdesc_hdr {
	u32	version;	/* Transport version */
	u32	node_sz;	/* node block size */
	u32	name_sz;	/* name block size */
	u32	data_sz;	/* data block size */
} __attribute__((aligned(16)));

struct mdesc_elem {
	u8	tag;
#define MD_LIST_END	0x00
#define MD_NODE		0x4e
#define MD_NODE_END	0x45
#define MD_NOOP		0x20
#define MD_PROP_ARC	0x61
#define MD_PROP_VAL	0x76
#define MD_PROP_STR	0x73
#define MD_PROP_DATA	0x64
	u8	name_len;
	u16	resv;
	u32	name_offset;
	union {
		struct {
			u32	data_len;
			u32	data_offset;
		} data;
		u64	val;
	} d;
};

struct mdesc_mem_ops {
	struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
	void (*free)(struct mdesc_handle *handle);
};

struct mdesc_handle {
	struct list_head	list;
	struct mdesc_mem_ops	*mops;
	void			*self_base;
	atomic_t		refcnt;
	unsigned int		handle_size;
	struct mdesc_hdr	mdesc;
};

static void mdesc_handle_init(struct mdesc_handle *hp,
			      unsigned int handle_size,
			      void *base)
{
	BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));

	memset(hp, 0, handle_size);
	INIT_LIST_HEAD(&hp->list);
	hp->self_base = base;
	atomic_set(&hp->refcnt, 1);
	hp->handle_size = handle_size;
}
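/* Note: a fresh handle starts with a refcount of one; that initial
 * reference is the one the cur_mdesc pointer owns once the handle is
 * installed (see mdesc_update() and sun4v_mdesc_init() below).
 */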
static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size)
{
	unsigned int handle_size, alloc_size;
	struct mdesc_handle *hp;
	unsigned long paddr;

	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);
	alloc_size = PAGE_ALIGN(handle_size);

	paddr = memblock_alloc(alloc_size, PAGE_SIZE);

	hp = NULL;
	if (paddr) {
		hp = __va(paddr);
		mdesc_handle_init(hp, handle_size, hp);
	}
	return hp;
}

static void __init mdesc_memblock_free(struct mdesc_handle *hp)
{
	unsigned int alloc_size;
	unsigned long start;

	BUG_ON(atomic_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	alloc_size = PAGE_ALIGN(hp->handle_size);
	start = __pa(hp);
	free_bootmem_late(start, alloc_size);
}

static struct mdesc_mem_ops memblock_mdesc_ops = {
	.alloc = mdesc_memblock_alloc,
	.free  = mdesc_memblock_free,
};

static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
	unsigned int handle_size;
	struct mdesc_handle *hp;
	unsigned long addr;
	void *base;

	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);
	/*
	 * Allocation has to succeed because mdesc update would be missed
	 * and such events are not retransmitted.
	 */
	base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
	addr = (unsigned long)base;
	addr = (addr + 15UL) & ~15UL;
	hp = (struct mdesc_handle *) addr;

	mdesc_handle_init(hp, handle_size, base);

	return hp;
}

static void mdesc_kfree(struct mdesc_handle *hp)
{
	BUG_ON(atomic_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	kfree(hp->self_base);
}

static struct mdesc_mem_ops kmalloc_mdesc_memops = {
	.alloc = mdesc_kmalloc,
	.free  = mdesc_kfree,
};

static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size,
					struct mdesc_mem_ops *mops)
{
	struct mdesc_handle *hp = mops->alloc(mdesc_size);

	if (hp)
		hp->mops = mops;

	return hp;
}

static void mdesc_free(struct mdesc_handle *hp)
{
	hp->mops->free(hp);
}

static struct mdesc_handle *cur_mdesc;
static LIST_HEAD(mdesc_zombie_list);
static DEFINE_SPINLOCK(mdesc_lock);

struct mdesc_handle *mdesc_grab(void)
{
	struct mdesc_handle *hp;
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	hp = cur_mdesc;
	if (hp)
		atomic_inc(&hp->refcnt);
	spin_unlock_irqrestore(&mdesc_lock, flags);

	return hp;
}
EXPORT_SYMBOL(mdesc_grab);

void mdesc_release(struct mdesc_handle *hp)
{
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	if (atomic_dec_and_test(&hp->refcnt)) {
		list_del_init(&hp->list);
		hp->mops->free(hp);
	}
	spin_unlock_irqrestore(&mdesc_lock, flags);
}
EXPORT_SYMBOL(mdesc_release);
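
/* Typical consumer pattern (an illustrative sketch; the cpu iterators
 * later in this file follow the same shape):
 *
 *	struct mdesc_handle *hp = mdesc_grab();
 *	u64 node;
 *
 *	if (!hp)
 *		return;
 *	mdesc_for_each_node_by_name(hp, node, "cpu") {
 *		const u64 *id = mdesc_get_property(hp, node, "id", NULL);
 *		...
 *	}
 *	mdesc_release(hp);
 */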
static DEFINE_MUTEX(mdesc_mutex);
static struct mdesc_notifier_client *client_list;

void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
	u64 node;

	mutex_lock(&mdesc_mutex);
	client->next = client_list;
	client_list = client;

	mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
		client->add(cur_mdesc, node);

	mutex_unlock(&mdesc_mutex);
}

static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
{
	const u64 *id;
	u64 a;

	id = NULL;
	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
		u64 target;

		target = mdesc_arc_target(hp, a);
		id = mdesc_get_property(hp, target,
					"cfg-handle", NULL);
		if (id)
			break;
	}

	return id;
}

/* Run 'func' on nodes which are in A but not in B.  */
static void invoke_on_missing(const char *name,
			      struct mdesc_handle *a,
			      struct mdesc_handle *b,
			      void (*func)(struct mdesc_handle *, u64))
{
	u64 node;

	mdesc_for_each_node_by_name(a, node, name) {
		int found = 0, is_vdc_port = 0;
		const char *name_prop;
		const u64 *id;
		u64 fnode;

		name_prop = mdesc_get_property(a, node, "name", NULL);
		if (name_prop && !strcmp(name_prop, "vdc-port")) {
			is_vdc_port = 1;
			id = parent_cfg_handle(a, node);
		} else
			id = mdesc_get_property(a, node, "id", NULL);

		if (!id) {
			printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
			       (name_prop ? name_prop : name));
			continue;
		}

		mdesc_for_each_node_by_name(b, fnode, name) {
			const u64 *fid;

			if (is_vdc_port) {
				name_prop = mdesc_get_property(b, fnode,
							       "name", NULL);
				if (!name_prop ||
				    strcmp(name_prop, "vdc-port"))
					continue;
				fid = parent_cfg_handle(b, fnode);
				if (!fid) {
					printk(KERN_ERR "MD: Cannot find ID "
					       "for vdc-port node.\n");
					continue;
				}
			} else
				fid = mdesc_get_property(b, fnode,
							 "id", NULL);

			if (*id == *fid) {
				found = 1;
				break;
			}
		}
		if (!found)
			func(a, node);
	}
}

static void notify_one(struct mdesc_notifier_client *p,
		       struct mdesc_handle *old_hp,
		       struct mdesc_handle *new_hp)
{
	invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
	invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
}

static void mdesc_notify_clients(struct mdesc_handle *old_hp,
				 struct mdesc_handle *new_hp)
{
	struct mdesc_notifier_client *p = client_list;

	while (p) {
		notify_one(p, old_hp, new_hp);
		p = p->next;
	}
}
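
/* A rough outline of what mdesc_update() below does: fetch the new MD
 * into a kmalloc'ed handle, publish it as cur_mdesc under mdesc_lock,
 * diff old vs. new on behalf of the registered notifier clients, then
 * drop the original reference on the old handle.  If other references
 * are still outstanding, the old handle is parked on mdesc_zombie_list
 * and freed by the final mdesc_release().
 */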
void mdesc_update(void)
{
	unsigned long len, real_len, status;
	struct mdesc_handle *hp, *orig_hp;
	unsigned long flags;

	mutex_lock(&mdesc_mutex);

	(void) sun4v_mach_desc(0UL, 0UL, &len);

	hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
	if (!hp) {
		printk(KERN_ERR "MD: mdesc alloc fails\n");
		goto out;
	}

	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
		       status);
		atomic_dec(&hp->refcnt);
		mdesc_free(hp);
		goto out;
	}

	spin_lock_irqsave(&mdesc_lock, flags);
	orig_hp = cur_mdesc;
	cur_mdesc = hp;
	spin_unlock_irqrestore(&mdesc_lock, flags);

	mdesc_notify_clients(orig_hp, hp);

	spin_lock_irqsave(&mdesc_lock, flags);
	if (atomic_dec_and_test(&orig_hp->refcnt))
		mdesc_free(orig_hp);
	else
		list_add(&orig_hp->list, &mdesc_zombie_list);
	spin_unlock_irqrestore(&mdesc_lock, flags);

out:
	mutex_unlock(&mdesc_mutex);
}

static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
	return (struct mdesc_elem *) (mdesc + 1);
}

static void *name_block(struct mdesc_hdr *mdesc)
{
	return ((void *) node_block(mdesc)) + mdesc->node_sz;
}

static void *data_block(struct mdesc_hdr *mdesc)
{
	return ((void *) name_block(mdesc)) + mdesc->name_sz;
}

u64 mdesc_node_by_name(struct mdesc_handle *hp,
		       u64 from_node, const char *name)
{
	struct mdesc_elem *ep = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	u64 ret;

	if (from_node == MDESC_NODE_NULL) {
		ret = from_node = 0;
	} else if (from_node >= last_node) {
		return MDESC_NODE_NULL;
	} else {
		ret = ep[from_node].d.val;
	}

	while (ret < last_node) {
		if (ep[ret].tag != MD_NODE)
			return MDESC_NODE_NULL;
		if (!strcmp(names + ep[ret].name_offset, name))
			break;
		ret = ep[ret].d.val;
	}
	if (ret >= last_node)
		ret = MDESC_NODE_NULL;
	return ret;
}
EXPORT_SYMBOL(mdesc_node_by_name);
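
/* Callers normally use the mdesc_for_each_node_by_name() iterator from
 * asm/mdesc.h, which wraps the function above roughly like this
 * (a sketch; the header has the authoritative definition):
 *
 *	#define mdesc_for_each_node_by_name(__hdl, __node, __name) \
 *		for (__node = mdesc_node_by_name(__hdl, MDESC_NODE_NULL, __name); \
 *		     (__node) != MDESC_NODE_NULL; \
 *		     __node = mdesc_node_by_name(__hdl, __node, __name))
 */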
const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
			       const char *name, int *lenp)
{
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	void *data = data_block(&hp->mdesc);
	struct mdesc_elem *ep;

	if (node == MDESC_NODE_NULL || node >= last_node)
		return NULL;

	ep = node_block(&hp->mdesc) + node;
	ep++;
	for (; ep->tag != MD_NODE_END; ep++) {
		void *val = NULL;
		int len = 0;

		switch (ep->tag) {
		case MD_PROP_VAL:
			val = &ep->d.val;
			len = 8;
			break;

		case MD_PROP_STR:
		case MD_PROP_DATA:
			val = data + ep->d.data.data_offset;
			len = ep->d.data.data_len;
			break;

		default:
			break;
		}
		if (!val)
			continue;

		if (!strcmp(names + ep->name_offset, name)) {
			if (lenp)
				*lenp = len;
			return val;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(mdesc_get_property);

u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;

	if (from == MDESC_NODE_NULL || from >= last_node)
		return MDESC_NODE_NULL;

	ep = base + from;

	ep++;
	for (; ep->tag != MD_NODE_END; ep++) {
		if (ep->tag != MD_PROP_ARC)
			continue;

		if (strcmp(names + ep->name_offset, arc_type))
			continue;

		return ep - base;
	}

	return MDESC_NODE_NULL;
}
EXPORT_SYMBOL(mdesc_next_arc);
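
/* mdesc_for_each_arc() from asm/mdesc.h iterates arcs of a given type
 * by feeding each returned arc index back in as 'from', roughly
 * (a sketch; see the header for the authoritative definition):
 *
 *	#define mdesc_for_each_arc(__arc, __hdl, __node, __type) \
 *		for (__arc = mdesc_next_arc(__hdl, __node, __type); \
 *		     (__arc) != MDESC_NODE_NULL; \
 *		     __arc = mdesc_next_arc(__hdl, __arc, __type))
 */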
u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);

	ep = base + arc;

	return ep->d.val;
}
EXPORT_SYMBOL(mdesc_arc_target);

const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;

	if (node == MDESC_NODE_NULL || node >= last_node)
		return NULL;

	ep = base + node;
	if (ep->tag != MD_NODE)
		return NULL;

	return names + ep->name_offset;
}
EXPORT_SYMBOL(mdesc_node_name);

static u64 max_cpus = 64;

static void __init report_platform_properties(void)
{
	struct mdesc_handle *hp = mdesc_grab();
	u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	const char *s;
	const u64 *v;

	if (pn == MDESC_NODE_NULL) {
		prom_printf("No platform node in machine-description.\n");
		prom_halt();
	}

	s = mdesc_get_property(hp, pn, "banner-name", NULL);
	printk("PLATFORM: banner-name [%s]\n", s);
	s = mdesc_get_property(hp, pn, "name", NULL);
	printk("PLATFORM: name [%s]\n", s);

	v = mdesc_get_property(hp, pn, "hostid", NULL);
	if (v)
		printk("PLATFORM: hostid [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "serial#", NULL);
	if (v)
		printk("PLATFORM: serial# [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
	printk("PLATFORM: stick-frequency [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "mac-address", NULL);
	if (v)
		printk("PLATFORM: mac-address [%llx]\n", *v);
	v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
	if (v)
		printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v);
	v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
	if (v)
		printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v);
	v = mdesc_get_property(hp, pn, "max-cpus", NULL);
	if (v) {
		max_cpus = *v;
		printk("PLATFORM: max-cpus [%llu]\n", max_cpus);
	}

#ifdef CONFIG_SMP
	{
		int max_cpu, i;

		if (v) {
			max_cpu = *v;
			if (max_cpu > NR_CPUS)
				max_cpu = NR_CPUS;
		} else {
			max_cpu = NR_CPUS;
		}
		for (i = 0; i < max_cpu; i++)
			set_cpu_possible(i, true);
	}
#endif

	mdesc_release(hp);
}

static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
{
	const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
	const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
	const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
	const char *type;
	int type_len;

	type = mdesc_get_property(hp, mp, "type", &type_len);

	switch (*level) {
	case 1:
		if (of_find_in_proplist(type, "instn", type_len)) {
			c->icache_size = *size;
			c->icache_line_size = *line_size;
		} else if (of_find_in_proplist(type, "data", type_len)) {
			c->dcache_size = *size;
			c->dcache_line_size = *line_size;
		}
		break;

	case 2:
		c->ecache_size = *size;
		c->ecache_line_size = *line_size;
		break;

	default:
		break;
	}

	if (*level == 1) {
		u64 a;

		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
			u64 target = mdesc_arc_target(hp, a);
			const char *name = mdesc_node_name(hp, target);

			if (!strcmp(name, "cache"))
				fill_in_one_cache(c, hp, target);
		}
	}
}

static void find_back_node_value(struct mdesc_handle *hp, u64 node,
				 char *srch_val,
				 void (*func)(struct mdesc_handle *, u64, int),
				 u64 val, int depth)
{
	u64 arc;

	/* Since we have an estimate of recursion depth, do a sanity check. */
	if (depth == 0)
		return;

	mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
		u64 n = mdesc_arc_target(hp, arc);
		const char *name = mdesc_node_name(hp, n);

		if (!strcmp(srch_val, name))
			(*func)(hp, n, val);

		find_back_node_value(hp, n, srch_val, func, val, depth-1);
	}
}

static void __mark_core_id(struct mdesc_handle *hp, u64 node,
			   int core_id)
{
	const u64 *id = mdesc_get_property(hp, node, "id", NULL);

	if (*id < num_possible_cpus())
		cpu_data(*id).core_id = core_id;
}

static void __mark_sock_id(struct mdesc_handle *hp, u64 node,
			   int sock_id)
{
	const u64 *id = mdesc_get_property(hp, node, "id", NULL);

	if (*id < num_possible_cpus())
		cpu_data(*id).sock_id = sock_id;
}

static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
			  int core_id)
{
	find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
}

static void mark_sock_ids(struct mdesc_handle *hp, u64 mp,
			  int sock_id)
{
	find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10);
}

static void set_core_ids(struct mdesc_handle *hp)
{
	int idx;
	u64 mp;

	idx = 1;

	/* Identify unique cores by looking for cpus backpointed to by
	 * level 1 instruction caches.
	 */
	mdesc_for_each_node_by_name(hp, mp, "cache") {
		const u64 *level;
		const char *type;
		int len;

		level = mdesc_get_property(hp, mp, "level", NULL);
		if (*level != 1)
			continue;

		type = mdesc_get_property(hp, mp, "type", &len);
		if (!of_find_in_proplist(type, "instn", len))
			continue;

		mark_core_ids(hp, mp, idx);
		idx++;
	}
}

static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
{
	u64 mp;
	int idx = 1;
	int fnd = 0;

	/* Identify unique sockets by looking for cpus backpointed to by
	 * shared level n caches.
	 */
	mdesc_for_each_node_by_name(hp, mp, "cache") {
		const u64 *cur_lvl;

		cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
		if (*cur_lvl != level)
			continue;

		mark_sock_ids(hp, mp, idx);
		idx++;
		fnd = 1;
	}
	return fnd;
}

static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
{
	int idx = 1;

	mdesc_for_each_node_by_name(hp, mp, "socket") {
		u64 a;

		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
			u64 t = mdesc_arc_target(hp, a);
			const char *name;
			const u64 *id;

			name = mdesc_node_name(hp, t);
			if (strcmp(name, "cpu"))
				continue;

			id = mdesc_get_property(hp, t, "id", NULL);
			if (*id < num_possible_cpus())
				cpu_data(*id).sock_id = idx;
		}
		idx++;
	}
}

static void set_sock_ids(struct mdesc_handle *hp)
{
	u64 mp;

	/* If the machine description exposes socket data, use it.
	 * Otherwise fall back to shared L3 or L2 caches.
	 */
	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
	if (mp != MDESC_NODE_NULL)
		return set_sock_ids_by_socket(hp, mp);

	if (!set_sock_ids_by_cache(hp, 3))
		set_sock_ids_by_cache(hp, 2);
}

static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
	u64 a;

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
		u64 t = mdesc_arc_target(hp, a);
		const char *name;
		const u64 *id;

		name = mdesc_node_name(hp, t);
		if (strcmp(name, "cpu"))
			continue;

		id = mdesc_get_property(hp, t, "id", NULL);
		if (*id < NR_CPUS)
			cpu_data(*id).proc_id = proc_id;
	}
}

static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
	int idx;
	u64 mp;

	idx = 0;
	mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
		const char *type;
		int len;

		type = mdesc_get_property(hp, mp, "type", &len);
		if (!of_find_in_proplist(type, "int", len) &&
		    !of_find_in_proplist(type, "integer", len))
			continue;

		mark_proc_ids(hp, mp, idx);
		idx++;
	}
}

static void set_proc_ids(struct mdesc_handle *hp)
{
	__set_proc_ids(hp, "exec_unit");
	__set_proc_ids(hp, "exec-unit");
}

static void get_one_mondo_bits(const u64 *p, unsigned int *mask,
			       unsigned long def, unsigned long max)
{
	u64 val;

	if (!p)
		goto use_default;
	val = *p;

	if (!val || val >= 64)
		goto use_default;

	if (val > max)
		val = max;

	*mask = ((1U << val) * 64U) - 1U;
	return;

use_default:
	*mask = ((1U << def) * 64U) - 1U;
}
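
/* Worked example: with val == 7, the mask computed above is
 * (1U << 7) * 64U - 1U = 8191 (0x1fff), i.e. an 8192-byte queue
 * holding 128 entries of 64 bytes each.
 */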
static void get_mondo_data(struct mdesc_handle *hp, u64 mp,
			   struct trap_per_cpu *tb)
{
	static int printed;
	const u64 *val;

	val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
	get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2));

	val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
	get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8);

	val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
	get_one_mondo_bits(val, &tb->resum_qmask, 6, 7);

	val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
	get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2);

	if (!printed++) {
		pr_info("SUN4V: Mondo queue sizes "
			"[cpu(%u) dev(%u) r(%u) nr(%u)]\n",
			tb->cpu_mondo_qmask + 1,
			tb->dev_mondo_qmask + 1,
			tb->resum_qmask + 1,
			tb->nonresum_qmask + 1);
	}
}

static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
{
	struct mdesc_handle *hp = mdesc_grab();
	void *ret = NULL;
	u64 mp;

	mdesc_for_each_node_by_name(hp, mp, "cpu") {
		const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
		int cpuid = *id;

#ifdef CONFIG_SMP
		if (cpuid >= NR_CPUS) {
			printk(KERN_WARNING "Ignoring CPU %d which is "
			       ">= NR_CPUS (%d)\n",
			       cpuid, NR_CPUS);
			continue;
		}
		if (!cpumask_test_cpu(cpuid, mask))
			continue;
#endif

		ret = func(hp, mp, cpuid, arg);
		if (ret)
			goto out;
	}
out:
	mdesc_release(hp);
	return ret;
}

static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
			    void *arg)
{
	ncpus_probed++;
#ifdef CONFIG_SMP
	set_cpu_present(cpuid, true);
#endif
	return NULL;
}

void mdesc_populate_present_mask(cpumask_t *mask)
{
	if (tlb_type != hypervisor)
		return;

	ncpus_probed = 0;
	mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
}

static void * __init check_one_pgsz(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
{
	const u64 *pgsz_prop = mdesc_get_property(hp, mp, "mmu-page-size-list", NULL);
	unsigned long *pgsz_mask = arg;
	u64 val;

	val = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
	       HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
	if (pgsz_prop)
		val = *pgsz_prop;

	if (!*pgsz_mask)
		*pgsz_mask = val;
	else
		*pgsz_mask &= val;
	return NULL;
}

void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask)
{
	*pgsz_mask = 0;
	mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask);
}

static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
			     void *arg)
{
	const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
	struct trap_per_cpu *tb;
	cpuinfo_sparc *c;
	u64 a;

#ifndef CONFIG_SMP
	/* On uniprocessor we only want the values for the
	 * real physical cpu the kernel booted onto, however
	 * cpu_data() only has one entry at index 0.
	 */
	if (cpuid != real_hard_smp_processor_id())
		return NULL;
	cpuid = 0;
#endif

	c = &cpu_data(cpuid);
	c->clock_tick = *cfreq;

	tb = &trap_block[cpuid];
	get_mondo_data(hp, mp, tb);

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
		u64 j, t = mdesc_arc_target(hp, a);
		const char *t_name;

		t_name = mdesc_node_name(hp, t);
		if (!strcmp(t_name, "cache")) {
			fill_in_one_cache(c, hp, t);
			continue;
		}

		mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
			u64 n = mdesc_arc_target(hp, j);
			const char *n_name;

			n_name = mdesc_node_name(hp, n);
			if (!strcmp(n_name, "cache"))
				fill_in_one_cache(c, hp, n);
		}
	}

	c->core_id = 0;
	c->proc_id = -1;

	return NULL;
}

void mdesc_fill_in_cpu_data(cpumask_t *mask)
{
	struct mdesc_handle *hp;

	mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);

	hp = mdesc_grab();

	set_core_ids(hp);
	set_proc_ids(hp);
	set_sock_ids(hp);

	mdesc_release(hp);

	smp_fill_in_sib_core_maps();
}

/* mdesc_open() - Grab a reference to mdesc_handle when /dev/mdesc is
 * opened.  Hold this reference until /dev/mdesc is closed to ensure
 * the mdesc data structure is not released underneath us.  Store the
 * pointer to the mdesc structure in private_data for read and seek to use.
 */
static int mdesc_open(struct inode *inode, struct file *file)
{
	struct mdesc_handle *hp = mdesc_grab();

	if (!hp)
		return -ENODEV;

	file->private_data = hp;

	return 0;
}

static ssize_t mdesc_read(struct file *file, char __user *buf,
			  size_t len, loff_t *offp)
{
	struct mdesc_handle *hp = file->private_data;
	unsigned char *mdesc;
	int bytes_left, count = len;

	if (*offp >= hp->handle_size)
		return 0;

	bytes_left = hp->handle_size - *offp;
	if (count > bytes_left)
		count = bytes_left;

	mdesc = (unsigned char *)&hp->mdesc;
	mdesc += *offp;
	if (!copy_to_user(buf, mdesc, count)) {
		*offp += count;
		return count;
	} else {
		return -EFAULT;
	}
}

static loff_t mdesc_llseek(struct file *file, loff_t offset, int whence)
{
	struct mdesc_handle *hp;

	switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_SET:
		break;
	default:
		return -EINVAL;
	}

	hp = file->private_data;
	if (offset > hp->handle_size)
		return -EINVAL;
	else
		file->f_pos = offset;

	return offset;
}

/* mdesc_close() - /dev/mdesc is being closed, release the reference to
 * the mdesc structure.
 */
static int mdesc_close(struct inode *inode, struct file *file)
{
	mdesc_release(file->private_data);
	return 0;
}

static const struct file_operations mdesc_fops = {
	.open    = mdesc_open,
	.read    = mdesc_read,
	.llseek  = mdesc_llseek,
	.release = mdesc_close,
	.owner	 = THIS_MODULE,
};

static struct miscdevice mdesc_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "mdesc",
	.fops	= &mdesc_fops,
};

static int __init mdesc_misc_init(void)
{
	return misc_register(&mdesc_misc);
}

__initcall(mdesc_misc_init);
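
/* With the misc device registered, the raw machine description can be
 * dumped from userspace, e.g. (illustrative):
 *
 *	dd if=/dev/mdesc of=/tmp/md.bin bs=64k
 */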
void __init sun4v_mdesc_init(void)
{
	struct mdesc_handle *hp;
	unsigned long len, real_len, status;

	(void) sun4v_mach_desc(0UL, 0UL, &len);

	printk("MDESC: Size is %lu bytes.\n", len);

	hp = mdesc_alloc(len, &memblock_mdesc_ops);
	if (hp == NULL) {
		prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
		prom_halt();
	}

	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		prom_printf("sun4v_mach_desc fails, err(%lu), "
			    "len(%lu), real_len(%lu)\n",
			    status, len, real_len);
		mdesc_free(hp);
		prom_halt();
	}

	cur_mdesc = hp;

	report_platform_properties();
}