// SPDX-License-Identifier: GPL-2.0
/* mdesc.c: Sun4V machine description handling.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/refcount.h>

#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <linux/uaccess.h>
#include <asm/oplib.h>
/* Unlike the OBP device tree, the machine description is a full-on
 * DAG.  An arbitrary number of ARCs are possible from one
 * node to other nodes and thus we can't use the OBP device_node
 * data structure to represent these nodes inside of the kernel.
 *
 * Actually, it isn't even a DAG, because there are back pointers
 * which create cycles in the graph.
 *
 * mdesc_hdr and mdesc_elem describe the layout of the data structure
 * we get from the Hypervisor.
 */
struct mdesc_hdr {
	u32	version;	/* Transport version */
	u32	node_sz;	/* node block size */
	u32	name_sz;	/* name block size */
	u32	data_sz;	/* data block size */
} __attribute__((aligned(16)));
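
/* Layout sketch (added for clarity, not from the original source): the
 * hypervisor returns one contiguous buffer in which this header is
 * immediately followed by the node block (an array of 16-byte mdesc_elem
 * entries), then the name block, then the data block.  The node_block(),
 * name_block() and data_block() helpers further down locate each region
 * by simple pointer arithmetic:
 *
 *	node block = (void *)(hdr + 1)
 *	name block = node block + hdr->node_sz
 *	data block = name block + hdr->name_sz
 */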
struct mdesc_elem {
	u8	tag;
#define MD_LIST_END	0x00
#define MD_NODE		0x4e
#define MD_NODE_END	0x45
#define MD_PROP_ARC	0x61
#define MD_PROP_VAL	0x76
#define MD_PROP_STR	0x73
#define MD_PROP_DATA	0x64
	u8	name_len;
	u16	resv;
	u32	name_offset;
	union {
		struct {
			u32	data_len;
			u32	data_offset;
		} data;
		u64	val;
	} d;
};
struct mdesc_mem_ops {
	struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
	void (*free)(struct mdesc_handle *handle);
};
struct mdesc_handle {
	struct list_head	list;
	struct mdesc_mem_ops	*mops;
	void			*self_base;
	refcount_t		refcnt;
	unsigned int		handle_size;
	struct mdesc_hdr	mdesc;
};
typedef int (*mdesc_node_info_get_f)(struct mdesc_handle *, u64,
				     union md_node_info *);
typedef void (*mdesc_node_info_rel_f)(union md_node_info *);
typedef bool (*mdesc_node_match_f)(union md_node_info *, union md_node_info *);

struct md_node_ops {
	char			*name;
	mdesc_node_info_get_f	get_info;
	mdesc_node_info_rel_f	rel_info;
	mdesc_node_match_f	node_match;
};
static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
				   union md_node_info *node_info);
static void rel_vdev_port_node_info(union md_node_info *node_info);
static bool vdev_port_node_match(union md_node_info *a_node_info,
				 union md_node_info *b_node_info);

static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
				 union md_node_info *node_info);
static void rel_ds_port_node_info(union md_node_info *node_info);
static bool ds_port_node_match(union md_node_info *a_node_info,
			       union md_node_info *b_node_info);
/* supported node types which can be registered */
static struct md_node_ops md_node_ops_table[] = {
	{"virtual-device-port", get_vdev_port_node_info,
	 rel_vdev_port_node_info, vdev_port_node_match},
	{"domain-services-port", get_ds_port_node_info,
	 rel_ds_port_node_info, ds_port_node_match},
	{NULL, NULL, NULL, NULL}
};
static void mdesc_get_node_ops(const char *node_name,
			       mdesc_node_info_get_f *get_info_f,
			       mdesc_node_info_rel_f *rel_info_f,
			       mdesc_node_match_f *match_f)
{
	int i;

	for (i = 0; md_node_ops_table[i].name != NULL; i++) {
		if (strcmp(md_node_ops_table[i].name, node_name) == 0) {
			if (get_info_f)
				*get_info_f = md_node_ops_table[i].get_info;
			if (rel_info_f)
				*rel_info_f = md_node_ops_table[i].rel_info;
			if (match_f)
				*match_f = md_node_ops_table[i].node_match;
			return;
		}
	}

	/* Not a supported node type; report no ops to the caller. */
	if (get_info_f)
		*get_info_f = NULL;
	if (rel_info_f)
		*rel_info_f = NULL;
	if (match_f)
		*match_f = NULL;
}
static void mdesc_handle_init(struct mdesc_handle *hp,
			      unsigned int handle_size,
			      void *base)
{
	BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));

	memset(hp, 0, handle_size);
	INIT_LIST_HEAD(&hp->list);
	hp->self_base = base;
	refcount_set(&hp->refcnt, 1);
	hp->handle_size = handle_size;
}
static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size)
{
	unsigned int handle_size, alloc_size;
	struct mdesc_handle *hp;
	unsigned long paddr;

	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);
	alloc_size = PAGE_ALIGN(handle_size);

	paddr = memblock_phys_alloc(alloc_size, PAGE_SIZE);

	hp = NULL;
	if (paddr) {
		hp = __va(paddr);
		mdesc_handle_init(hp, handle_size, hp);
	}
	return hp;
}
static void __init mdesc_memblock_free(struct mdesc_handle *hp)
{
	unsigned int alloc_size;
	unsigned long start;

	BUG_ON(refcount_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	alloc_size = PAGE_ALIGN(hp->handle_size);
	start = __pa(hp);
	memblock_free_late(start, alloc_size);
}
static struct mdesc_mem_ops memblock_mdesc_ops = {
	.alloc = mdesc_memblock_alloc,
	.free  = mdesc_memblock_free,
};
static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
	unsigned int handle_size;
	struct mdesc_handle *hp;
	unsigned long addr;
	void *base;

	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);
	base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!base)
		return NULL;

	/* Align the handle so that hp->mdesc keeps 16-byte alignment. */
	addr = (unsigned long)base;
	addr = (addr + 15UL) & ~15UL;
	hp = (struct mdesc_handle *) addr;

	mdesc_handle_init(hp, handle_size, base);

	return hp;
}
static void mdesc_kfree(struct mdesc_handle *hp)
{
	BUG_ON(refcount_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	kfree(hp->self_base);
}
static struct mdesc_mem_ops kmalloc_mdesc_memops = {
	.alloc = mdesc_kmalloc,
	.free  = mdesc_kfree,
};
static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size,
					struct mdesc_mem_ops *mops)
{
	struct mdesc_handle *hp = mops->alloc(mdesc_size);

	if (hp)
		hp->mops = mops;

	return hp;
}

static void mdesc_free(struct mdesc_handle *hp)
{
	hp->mops->free(hp);
}
static struct mdesc_handle *cur_mdesc;
static LIST_HEAD(mdesc_zombie_list);
static DEFINE_SPINLOCK(mdesc_lock);
struct mdesc_handle *mdesc_grab(void)
{
	struct mdesc_handle *hp;
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	hp = cur_mdesc;
	if (hp)
		refcount_inc(&hp->refcnt);
	spin_unlock_irqrestore(&mdesc_lock, flags);

	return hp;
}
EXPORT_SYMBOL(mdesc_grab);
void mdesc_release(struct mdesc_handle *hp)
{
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	if (refcount_dec_and_test(&hp->refcnt)) {
		list_del_init(&hp->list);
		hp->mops->free(hp);
	}
	spin_unlock_irqrestore(&mdesc_lock, flags);
}
EXPORT_SYMBOL(mdesc_release);
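
/* Usage sketch (illustrative only, not taken verbatim from a caller):
 * readers pair mdesc_grab() with mdesc_release() so that a concurrent
 * mdesc_update() cannot free the handle underneath them, e.g.:
 *
 *	struct mdesc_handle *hp = mdesc_grab();
 *
 *	if (hp) {
 *		u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
 *		...
 *		mdesc_release(hp);
 *	}
 */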
static DEFINE_MUTEX(mdesc_mutex);
static struct mdesc_notifier_client *client_list;
void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
	bool supported = false;
	u64 node;
	int i;

	mutex_lock(&mdesc_mutex);

	/* check to see if the node is supported for registration */
	for (i = 0; md_node_ops_table[i].name != NULL; i++) {
		if (strcmp(md_node_ops_table[i].name, client->node_name) == 0) {
			supported = true;
			break;
		}
	}

	if (!supported) {
		pr_err("MD: %s node not supported\n", client->node_name);
		mutex_unlock(&mdesc_mutex);
		return;
	}

	client->next = client_list;
	client_list = client;

	mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
		client->add(cur_mdesc, node, client->node_name);

	mutex_unlock(&mdesc_mutex);
}
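
/* Registration sketch (hypothetical client, added for illustration): a
 * consumer fills in a struct mdesc_notifier_client with the node name it
 * cares about plus add/remove callbacks, then registers it.  Only the node
 * types listed in md_node_ops_table above are accepted.
 *
 *	static struct mdesc_notifier_client vdev_client = {
 *		.add		= my_add_cb,	// hypothetical callback
 *		.remove		= my_remove_cb,	// hypothetical callback
 *		.node_name	= "virtual-device-port",
 *	};
 *
 *	mdesc_register_notifier(&vdev_client);
 */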
static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
{
	const u64 *id = NULL;
	u64 a;

	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
		u64 target;

		target = mdesc_arc_target(hp, a);
		id = mdesc_get_property(hp, target,
					"cfg-handle", NULL);
		if (id)
			break;
	}

	return id;
}
static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
				   union md_node_info *node_info)
{
	const u64 *parent_cfg_hdlp;
	const char *name;
	const u64 *idp;

	/*
	 * Virtual device nodes are distinguished by:
	 * 1. "id" property
	 * 2. "name" property
	 * 3. parent node "cfg-handle" property
	 */
	idp = mdesc_get_property(md, node, "id", NULL);
	name = mdesc_get_property(md, node, "name", NULL);
	parent_cfg_hdlp = parent_cfg_handle(md, node);

	if (!idp || !name || !parent_cfg_hdlp)
		return -1;

	node_info->vdev_port.id = *idp;
	node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL);
	if (!node_info->vdev_port.name)
		return -1;
	node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp;

	return 0;
}
static void rel_vdev_port_node_info(union md_node_info *node_info)
{
	if (node_info && node_info->vdev_port.name) {
		kfree_const(node_info->vdev_port.name);
		node_info->vdev_port.name = NULL;
	}
}
static bool vdev_port_node_match(union md_node_info *a_node_info,
				 union md_node_info *b_node_info)
{
	if (a_node_info->vdev_port.id != b_node_info->vdev_port.id)
		return false;

	if (a_node_info->vdev_port.parent_cfg_hdl !=
	    b_node_info->vdev_port.parent_cfg_hdl)
		return false;

	if (strncmp(a_node_info->vdev_port.name,
		    b_node_info->vdev_port.name, MDESC_MAX_STR_LEN) != 0)
		return false;

	return true;
}
static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
				 union md_node_info *node_info)
{
	const u64 *idp;

	/* DS port nodes use the "id" property to distinguish them */
	idp = mdesc_get_property(md, node, "id", NULL);
	if (!idp)
		return -1;

	node_info->ds_port.id = *idp;

	return 0;
}

static void rel_ds_port_node_info(union md_node_info *node_info)
{
}
static bool ds_port_node_match(union md_node_info *a_node_info,
			       union md_node_info *b_node_info)
{
	if (a_node_info->ds_port.id != b_node_info->ds_port.id)
		return false;

	return true;
}
/* Run 'func' on nodes which are in A but not in B.  */
static void invoke_on_missing(const char *name,
			      struct mdesc_handle *a,
			      struct mdesc_handle *b,
			      void (*func)(struct mdesc_handle *, u64,
					   const char *node_name))
{
	mdesc_node_info_get_f get_info_func;
	mdesc_node_info_rel_f rel_info_func;
	mdesc_node_match_f node_match_func;
	union md_node_info a_node_info;
	union md_node_info b_node_info;
	bool found;
	u64 a_node;
	u64 b_node;
	int rv;

	/*
	 * Find the get_info, rel_info and node_match ops for the given
	 * node name.
	 */
	mdesc_get_node_ops(name, &get_info_func, &rel_info_func,
			   &node_match_func);

	/* If we didn't find a match, the node type is not supported */
	if (!get_info_func || !rel_info_func || !node_match_func) {
		pr_err("MD: %s node type is not supported\n", name);
		return;
	}

	mdesc_for_each_node_by_name(a, a_node, name) {
		found = false;

		rv = get_info_func(a, a_node, &a_node_info);
		if (rv != 0) {
			pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
			       name);
			continue;
		}

		/* Check each node in B for node matching a_node */
		mdesc_for_each_node_by_name(b, b_node, name) {
			rv = get_info_func(b, b_node, &b_node_info);
			if (rv != 0)
				continue;

			if (node_match_func(&a_node_info, &b_node_info)) {
				found = true;
				rel_info_func(&b_node_info);
				break;
			}

			rel_info_func(&b_node_info);
		}

		rel_info_func(&a_node_info);

		if (!found)
			func(a, a_node, name);
	}
}
static void notify_one(struct mdesc_notifier_client *p,
		       struct mdesc_handle *old_hp,
		       struct mdesc_handle *new_hp)
{
	invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
	invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
}
static void mdesc_notify_clients(struct mdesc_handle *old_hp,
				 struct mdesc_handle *new_hp)
{
	struct mdesc_notifier_client *p = client_list;

	while (p) {
		notify_one(p, old_hp, new_hp);
		p = p->next;
	}
}
void mdesc_update(void)
{
	unsigned long len, real_len, status;
	struct mdesc_handle *hp, *orig_hp;
	unsigned long flags;

	mutex_lock(&mdesc_mutex);

	(void) sun4v_mach_desc(0UL, 0UL, &len);

	hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
	if (!hp) {
		printk(KERN_ERR "MD: mdesc alloc fails\n");
		goto out;
	}

	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
		       status);
		refcount_dec(&hp->refcnt);
		mdesc_free(hp);
		goto out;
	}

	spin_lock_irqsave(&mdesc_lock, flags);
	orig_hp = cur_mdesc;
	cur_mdesc = hp;
	spin_unlock_irqrestore(&mdesc_lock, flags);

	mdesc_notify_clients(orig_hp, hp);

	spin_lock_irqsave(&mdesc_lock, flags);
	if (refcount_dec_and_test(&orig_hp->refcnt))
		mdesc_free(orig_hp);
	else
		list_add(&orig_hp->list, &mdesc_zombie_list);
	spin_unlock_irqrestore(&mdesc_lock, flags);

out:
	mutex_unlock(&mdesc_mutex);
}
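
/* Summary of the handoff above (added for clarity): the freshly read
 * description is installed as cur_mdesc under mdesc_lock, registered
 * clients are notified of the differences between the old and new
 * descriptions, and the old handle is then either freed (if its refcount
 * hit zero) or parked on mdesc_zombie_list until its last mdesc_release()
 * drops it.
 */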
u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
		   union md_node_info *node_info)
{
	mdesc_node_info_get_f get_info_func;
	mdesc_node_info_rel_f rel_info_func;
	mdesc_node_match_f node_match_func;
	union md_node_info hp_node_info;
	u64 hp_node;
	int rv;

	if (hp == NULL || node_name == NULL || node_info == NULL)
		return MDESC_NODE_NULL;

	/* Find the ops for the given node name */
	mdesc_get_node_ops(node_name, &get_info_func, &rel_info_func,
			   &node_match_func);

	/* If we didn't find ops for the given node name, it is not supported */
	if (!get_info_func || !rel_info_func || !node_match_func) {
		pr_err("MD: %s node is not supported\n", node_name);
		return MDESC_NODE_NULL;
	}

	/* Search the machine description for a node matching node_info */
	mdesc_for_each_node_by_name(hp, hp_node, node_name) {
		rv = get_info_func(hp, hp_node, &hp_node_info);
		if (rv != 0)
			continue;

		if (node_match_func(node_info, &hp_node_info))
			break;

		rel_info_func(&hp_node_info);
	}

	rel_info_func(&hp_node_info);

	return hp_node;
}
EXPORT_SYMBOL(mdesc_get_node);
int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
			const char *node_name, union md_node_info *node_info)
{
	mdesc_node_info_get_f get_info_func;
	int rv;

	if (hp == NULL || node == MDESC_NODE_NULL ||
	    node_name == NULL || node_info == NULL)
		return -EINVAL;

	/* Find the get_info op for the given node name */
	mdesc_get_node_ops(node_name, &get_info_func, NULL, NULL);

	/* If we didn't find a get_info_func, the node name is not supported */
	if (get_info_func == NULL) {
		pr_err("MD: %s node is not supported\n", node_name);
		return -EINVAL;
	}

	rv = get_info_func(hp, node, node_info);
	if (rv != 0) {
		pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
		       node_name);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(mdesc_get_node_info);
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
	return (struct mdesc_elem *) (mdesc + 1);
}

static void *name_block(struct mdesc_hdr *mdesc)
{
	return ((void *) node_block(mdesc)) + mdesc->node_sz;
}

static void *data_block(struct mdesc_hdr *mdesc)
{
	return ((void *) name_block(mdesc)) + mdesc->name_sz;
}
u64 mdesc_node_by_name(struct mdesc_handle *hp,
		       u64 from_node, const char *name)
{
	struct mdesc_elem *ep = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	u64 ret;

	if (from_node == MDESC_NODE_NULL) {
		ret = from_node = 0;
	} else if (from_node >= last_node) {
		return MDESC_NODE_NULL;
	} else {
		ret = ep[from_node].d.val;
	}

	while (ret < last_node) {
		if (ep[ret].tag != MD_NODE)
			return MDESC_NODE_NULL;
		if (!strcmp(names + ep[ret].name_offset, name))
			break;
		ret = ep[ret].d.val;
	}
	if (ret >= last_node)
		ret = MDESC_NODE_NULL;

	return ret;
}
EXPORT_SYMBOL(mdesc_node_by_name);
const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
			       const char *name, int *lenp)
{
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	void *data = data_block(&hp->mdesc);
	struct mdesc_elem *ep;

	if (node == MDESC_NODE_NULL || node >= last_node)
		return NULL;

	ep = node_block(&hp->mdesc) + node;
	ep++;
	for (; ep->tag != MD_NODE_END; ep++) {
		void *val = NULL;
		int len = 0;

		switch (ep->tag) {
		case MD_PROP_VAL:
			val = &ep->d.val;
			len = 8;
			break;

		case MD_PROP_STR:
		case MD_PROP_DATA:
			val = data + ep->d.data.data_offset;
			len = ep->d.data.data_len;
			break;

		default:
			break;
		}
		if (!val)
			continue;

		if (!strcmp(names + ep->name_offset, name)) {
			if (lenp)
				*lenp = len;
			return val;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(mdesc_get_property);
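
/* Usage sketch (illustrative only): node and property lookups are
 * typically combined as in report_platform_properties() below, e.g.:
 *
 *	u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
 *	const u64 *v = mdesc_get_property(hp, pn, "hostid", NULL);
 *
 *	if (v)
 *		pr_info("hostid %08llx\n", *v);
 */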
u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;

	if (from == MDESC_NODE_NULL || from >= last_node)
		return MDESC_NODE_NULL;

	ep = base + from;
	ep++;
	for (; ep->tag != MD_NODE_END; ep++) {
		if (ep->tag != MD_PROP_ARC)
			continue;

		if (strcmp(names + ep->name_offset, arc_type))
			continue;

		return ep - base;
	}

	return MDESC_NODE_NULL;
}
EXPORT_SYMBOL(mdesc_next_arc);
u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);

	ep = base + arc;

	return ep->d.val;
}
EXPORT_SYMBOL(mdesc_arc_target);
const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;

	if (node == MDESC_NODE_NULL || node >= last_node)
		return NULL;

	ep = base + node;
	if (ep->tag != MD_NODE)
		return NULL;

	return names + ep->name_offset;
}
EXPORT_SYMBOL(mdesc_node_name);
static u64 max_cpus = 64;
static void __init report_platform_properties(void)
{
	struct mdesc_handle *hp = mdesc_grab();
	u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	const char *s;
	const u64 *v;

	if (pn == MDESC_NODE_NULL) {
		prom_printf("No platform node in machine-description.\n");
		prom_halt();
	}

	s = mdesc_get_property(hp, pn, "banner-name", NULL);
	printk("PLATFORM: banner-name [%s]\n", s);
	s = mdesc_get_property(hp, pn, "name", NULL);
	printk("PLATFORM: name [%s]\n", s);

	v = mdesc_get_property(hp, pn, "hostid", NULL);
	if (v)
		printk("PLATFORM: hostid [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "serial#", NULL);
	if (v)
		printk("PLATFORM: serial# [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
	printk("PLATFORM: stick-frequency [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "mac-address", NULL);
	if (v)
		printk("PLATFORM: mac-address [%llx]\n", *v);
	v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
	if (v)
		printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v);
	v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
	if (v)
		printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v);
	v = mdesc_get_property(hp, pn, "max-cpus", NULL);
	if (v) {
		max_cpus = *v;
		printk("PLATFORM: max-cpus [%llu]\n", max_cpus);
	}

#ifdef CONFIG_SMP
	{
		int max_cpu, i;

		if (v) {
			max_cpu = *v;
			if (max_cpu > NR_CPUS)
				max_cpu = NR_CPUS;
		} else {
			max_cpu = NR_CPUS;
		}
		for (i = 0; i < max_cpu; i++)
			set_cpu_possible(i, true);
	}
#endif

	mdesc_release(hp);
}
static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
{
	const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
	const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
	const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
	const char *type;
	int type_len;

	type = mdesc_get_property(hp, mp, "type", &type_len);

	switch (*level) {
	case 1:
		if (of_find_in_proplist(type, "instn", type_len)) {
			c->icache_size = *size;
			c->icache_line_size = *line_size;
		} else if (of_find_in_proplist(type, "data", type_len)) {
			c->dcache_size = *size;
			c->dcache_line_size = *line_size;
		}
		break;

	case 2:
		c->ecache_size = *size;
		c->ecache_line_size = *line_size;
		break;

	default:
		break;
	}

	if (*level == 1) {
		u64 a;

		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
			u64 target = mdesc_arc_target(hp, a);
			const char *name = mdesc_node_name(hp, target);

			if (!strcmp(name, "cache"))
				fill_in_one_cache(c, hp, target);
		}
	}
}
static void find_back_node_value(struct mdesc_handle *hp, u64 node,
				 char *srch_val,
				 void (*func)(struct mdesc_handle *, u64, int),
				 u64 val, int depth)
{
	u64 arc;

	/* Since we have an estimate of recursion depth, do a sanity check. */
	if (depth == 0)
		return;

	mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
		u64 n = mdesc_arc_target(hp, arc);
		const char *name = mdesc_node_name(hp, n);

		if (!strcmp(srch_val, name))
			(*func)(hp, n, val);

		find_back_node_value(hp, n, srch_val, func, val, depth-1);
	}
}
static void __mark_core_id(struct mdesc_handle *hp, u64 node,
			   int core_id)
{
	const u64 *id = mdesc_get_property(hp, node, "id", NULL);

	if (*id < num_possible_cpus())
		cpu_data(*id).core_id = core_id;
}
static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
				int max_cache_id)
{
	const u64 *id = mdesc_get_property(hp, node, "id", NULL);

	if (*id < num_possible_cpus()) {
		cpu_data(*id).max_cache_id = max_cache_id;

		/*
		 * On systems without explicit socket descriptions socket
		 * is max_cache_id.
		 */
		cpu_data(*id).sock_id = max_cache_id;
	}
}
static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
			  int core_id)
{
	find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
}

static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp,
			       int max_cache_id)
{
	find_back_node_value(hp, mp, "cpu", __mark_max_cache_id,
			     max_cache_id, 10);
}
static void set_core_ids(struct mdesc_handle *hp)
{
	int idx;
	u64 mp;

	idx = 1;

	/* Identify unique cores by looking for cpus backpointed to by
	 * level 1 instruction caches.
	 */
	mdesc_for_each_node_by_name(hp, mp, "cache") {
		const u64 *level;
		const char *type;
		int len;

		level = mdesc_get_property(hp, mp, "level", NULL);
		if (*level != 1)
			continue;

		type = mdesc_get_property(hp, mp, "type", &len);
		if (!of_find_in_proplist(type, "instn", len))
			continue;

		mark_core_ids(hp, mp, idx);
		idx++;
	}
}
static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level)
{
	u64 mp;
	int idx = 1;
	int fnd = 0;

	/*
	 * Identify unique highest level of shared cache by looking for cpus
	 * backpointed to by shared level N caches.
	 */
	mdesc_for_each_node_by_name(hp, mp, "cache") {
		const u64 *cur_lvl;

		cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
		if (*cur_lvl != level)
			continue;
		mark_max_cache_ids(hp, mp, idx);
		idx++;
		fnd = 1;
	}
	return fnd;
}
static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
{
	int idx = 0;

	mdesc_for_each_node_by_name(hp, mp, "socket") {
		u64 a;

		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
			u64 t = mdesc_arc_target(hp, a);
			const char *name;
			const u64 *id;

			name = mdesc_node_name(hp, t);
			if (strcmp(name, "cpu"))
				continue;

			id = mdesc_get_property(hp, t, "id", NULL);
			if (*id < num_possible_cpus())
				cpu_data(*id).sock_id = idx;
		}
		idx++;
	}
}
static void set_sock_ids(struct mdesc_handle *hp)
{
	u64 mp;

	/*
	 * Find the highest level of shared cache which pre-T7 is also
	 * the socket.
	 */
	if (!set_max_cache_ids_by_cache(hp, 3))
		set_max_cache_ids_by_cache(hp, 2);

	/* If machine description exposes sockets data use it. */
	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
	if (mp != MDESC_NODE_NULL)
		set_sock_ids_by_socket(hp, mp);
}
static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
	u64 a;

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
		u64 t = mdesc_arc_target(hp, a);
		const char *name;
		const u64 *id;

		name = mdesc_node_name(hp, t);
		if (strcmp(name, "cpu"))
			continue;

		id = mdesc_get_property(hp, t, "id", NULL);
		if (*id < NR_CPUS)
			cpu_data(*id).proc_id = proc_id;
	}
}
static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
	int idx;
	u64 mp;

	idx = 0;
	mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
		const char *type;
		int len;

		type = mdesc_get_property(hp, mp, "type", &len);
		if (!of_find_in_proplist(type, "int", len) &&
		    !of_find_in_proplist(type, "integer", len))
			continue;

		mark_proc_ids(hp, mp, idx);
		idx++;
	}
}

static void set_proc_ids(struct mdesc_handle *hp)
{
	__set_proc_ids(hp, "exec_unit");
	__set_proc_ids(hp, "exec-unit");
}
static void get_one_mondo_bits(const u64 *p, unsigned int *mask,
			       unsigned long def, unsigned long max)
{
	u64 val;

	if (!p)
		goto use_default;
	val = *p;

	if (!val || val >= 64)
		goto use_default;

	if (val > max)
		val = max;

	*mask = ((1U << val) * 64U) - 1U;
	return;

use_default:
	*mask = ((1U << def) * 64U) - 1U;
}
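
/* Worked example (added for clarity): the "#bits" value is log2 of the
 * number of 64-byte queue entries, so val == 7 describes a 2^7 * 64 =
 * 8192 byte queue and the stored mask is 8192 - 1 = 0x1fff.  The pr_info
 * in get_mondo_data() below prints qmask + 1, i.e. the queue size in
 * bytes.
 */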
static void get_mondo_data(struct mdesc_handle *hp, u64 mp,
			   struct trap_per_cpu *tb)
{
	static int printed;
	const u64 *val;

	val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
	get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2));

	val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
	get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8);

	val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
	get_one_mondo_bits(val, &tb->resum_qmask, 6, 7);

	val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
	get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2);

	if (!printed++) {
		pr_info("SUN4V: Mondo queue sizes "
			"[cpu(%u) dev(%u) r(%u) nr(%u)]\n",
			tb->cpu_mondo_qmask + 1,
			tb->dev_mondo_qmask + 1,
			tb->resum_qmask + 1,
			tb->nonresum_qmask + 1);
	}
}
static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64,
						   int, void *), void *arg, cpumask_t *mask)
{
	struct mdesc_handle *hp = mdesc_grab();
	void *ret = NULL;
	u64 mp;

	mdesc_for_each_node_by_name(hp, mp, "cpu") {
		const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
		int cpuid = *id;

#ifdef CONFIG_SMP
		if (cpuid >= NR_CPUS) {
			printk(KERN_WARNING "Ignoring CPU %d which is "
			       ">= NR_CPUS (%d)\n",
			       cpuid, NR_CPUS);
			continue;
		}
		if (!cpumask_test_cpu(cpuid, mask))
			continue;
#endif

		ret = func(hp, mp, cpuid, arg);
		if (ret)
			goto out;
	}
out:
	mdesc_release(hp);
	return ret;
}
static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
			    void *arg)
{
	set_cpu_present(cpuid, true);
	return NULL;
}

void mdesc_populate_present_mask(cpumask_t *mask)
{
	if (tlb_type != hypervisor)
		return;

	mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
}
static void * __init check_one_pgsz(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
{
	const u64 *pgsz_prop = mdesc_get_property(hp, mp, "mmu-page-size-list", NULL);
	unsigned long *pgsz_mask = arg;
	u64 val;

	val = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
	       HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
	if (pgsz_prop)
		val = *pgsz_prop;

	/* Accumulate the intersection of the per-cpu page size lists. */
	if (!*pgsz_mask)
		*pgsz_mask = val;
	else
		*pgsz_mask &= val;
	return NULL;
}

void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask)
{
	*pgsz_mask = 0;
	mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask);
}
static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
			     void *arg)
{
	const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
	struct trap_per_cpu *tb;
	cpuinfo_sparc *c;
	u64 a;

#ifndef CONFIG_SMP
	/* On uniprocessor we only want the values for the
	 * real physical cpu the kernel booted onto, however
	 * cpu_data() only has one entry at index 0.
	 */
	if (cpuid != real_hard_smp_processor_id())
		return NULL;
	cpuid = 0;
#endif

	c = &cpu_data(cpuid);
	c->clock_tick = *cfreq;

	tb = &trap_block[cpuid];
	get_mondo_data(hp, mp, tb);

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
		u64 j, t = mdesc_arc_target(hp, a);
		const char *t_name;

		t_name = mdesc_node_name(hp, t);
		if (!strcmp(t_name, "cache")) {
			fill_in_one_cache(c, hp, t);
			continue;
		}

		mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
			u64 n = mdesc_arc_target(hp, j);
			const char *n_name;

			n_name = mdesc_node_name(hp, n);
			if (!strcmp(n_name, "cache"))
				fill_in_one_cache(c, hp, n);
		}
	}

	return NULL;
}

void mdesc_fill_in_cpu_data(cpumask_t *mask)
{
	struct mdesc_handle *hp;

	mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);

	hp = mdesc_grab();

	set_core_ids(hp);
	set_proc_ids(hp);
	set_sock_ids(hp);

	mdesc_release(hp);

	smp_fill_in_sib_core_maps();
}
/* mdesc_open() - Grab a reference to mdesc_handle when /dev/mdesc is
 * opened. Hold this reference until /dev/mdesc is closed to ensure
 * mdesc data structure is not released underneath us. Store the
 * pointer to mdesc structure in private_data for read and seek to use.
 */
static int mdesc_open(struct inode *inode, struct file *file)
{
	struct mdesc_handle *hp = mdesc_grab();

	if (!hp)
		return -ENODEV;

	file->private_data = hp;

	return 0;
}
static ssize_t mdesc_read(struct file *file, char __user *buf,
			  size_t len, loff_t *offp)
{
	struct mdesc_handle *hp = file->private_data;
	unsigned char *mdesc;
	int bytes_left, count = len;

	if (*offp >= hp->handle_size)
		return 0;

	bytes_left = hp->handle_size - *offp;
	if (count > bytes_left)
		count = bytes_left;

	mdesc = (unsigned char *)&hp->mdesc;
	mdesc += *offp;
	if (!copy_to_user(buf, mdesc, count)) {
		*offp += count;
		return count;
	} else {
		return -EFAULT;
	}
}
static loff_t mdesc_llseek(struct file *file, loff_t offset, int whence)
{
	struct mdesc_handle *hp = file->private_data;

	return no_seek_end_llseek_size(file, offset, whence, hp->handle_size);
}
/* mdesc_close() - /dev/mdesc is being closed, release the reference to
 * mdesc structure.
 */
static int mdesc_close(struct inode *inode, struct file *file)
{
	mdesc_release(file->private_data);
	return 0;
}
static const struct file_operations mdesc_fops = {
	.read	 = mdesc_read,
	.llseek	 = mdesc_llseek,
	.release = mdesc_close,
	.owner	 = THIS_MODULE,
	.open	 = mdesc_open,
};

static struct miscdevice mdesc_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "mdesc",
	.fops	= &mdesc_fops,
};
static int __init mdesc_misc_init(void)
{
	return misc_register(&mdesc_misc);
}

__initcall(mdesc_misc_init);
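
/* Usage note (illustrative, assuming the usual udev naming for the misc
 * device registered above): the raw machine description is exposed to
 * userspace as /dev/mdesc, so a consumer can capture it with something
 * like "dd if=/dev/mdesc of=md.bin" and seek within the first
 * handle_size bytes.
 */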
void __init sun4v_mdesc_init(void)
{
	struct mdesc_handle *hp;
	unsigned long len, real_len, status;

	(void) sun4v_mach_desc(0UL, 0UL, &len);

	printk("MDESC: Size is %lu bytes.\n", len);

	hp = mdesc_alloc(len, &memblock_mdesc_ops);
	if (hp == NULL) {
		prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
		prom_halt();
	}

	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		prom_printf("sun4v_mach_desc fails, err(%lu), "
			    "len(%lu), real_len(%lu)\n",
			    status, len, real_len);
		mdesc_free(hp);
		prom_halt();
	}

	cur_mdesc = hp;

	report_platform_properties();
}