/* mdesc.c: Sun4V machine description handling.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/memblock.h>
8 #include <linux/log2.h>
9 #include <linux/list.h>
10 #include <linux/slab.h>
12 #include <linux/miscdevice.h>
13 #include <linux/bootmem.h>
14 #include <linux/export.h>
16 #include <asm/cpudata.h>
17 #include <asm/hypervisor.h>
18 #include <asm/mdesc.h>
20 #include <asm/uaccess.h>
21 #include <asm/oplib.h>
/* Unlike the OBP device tree, the machine description is a full-on
 * DAG.  An arbitrary number of ARCs are possible from one
 * node to other nodes and thus we can't use the OBP device_node
 * data structure to represent these nodes inside of the kernel.
 *
 * Actually, it isn't even a DAG, because there are back pointers
 * which create cycles in the graph.
 *
 * mdesc_hdr and mdesc_elem describe the layout of the data structure
 * we get from the Hypervisor.
 */
36 u32 version
; /* Transport version */
37 u32 node_sz
; /* node block size */
38 u32 name_sz
; /* name block size */
39 u32 data_sz
; /* data block size */
40 } __attribute__((aligned(16)));
44 #define MD_LIST_END 0x00
46 #define MD_NODE_END 0x45
48 #define MD_PROP_ARC 0x61
49 #define MD_PROP_VAL 0x76
50 #define MD_PROP_STR 0x73
51 #define MD_PROP_DATA 0x64
/* Allocation strategy for mdesc_handle objects: early boot uses
 * memblock/bootmem, later updates use kmalloc.
 */
struct mdesc_mem_ops {
	struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
	void (*free)(struct mdesc_handle *handle);
};
70 struct list_head list
;
71 struct mdesc_mem_ops
*mops
;
74 unsigned int handle_size
;
75 struct mdesc_hdr mdesc
;
78 static void mdesc_handle_init(struct mdesc_handle
*hp
,
79 unsigned int handle_size
,
82 BUG_ON(((unsigned long)&hp
->mdesc
) & (16UL - 1));
84 memset(hp
, 0, handle_size
);
85 INIT_LIST_HEAD(&hp
->list
);
87 atomic_set(&hp
->refcnt
, 1);
88 hp
->handle_size
= handle_size
;
91 static struct mdesc_handle
* __init
mdesc_memblock_alloc(unsigned int mdesc_size
)
93 unsigned int handle_size
, alloc_size
;
94 struct mdesc_handle
*hp
;
97 handle_size
= (sizeof(struct mdesc_handle
) -
98 sizeof(struct mdesc_hdr
) +
100 alloc_size
= PAGE_ALIGN(handle_size
);
102 paddr
= memblock_alloc(alloc_size
, PAGE_SIZE
);
107 mdesc_handle_init(hp
, handle_size
, hp
);
112 static void __init
mdesc_memblock_free(struct mdesc_handle
*hp
)
114 unsigned int alloc_size
;
117 BUG_ON(atomic_read(&hp
->refcnt
) != 0);
118 BUG_ON(!list_empty(&hp
->list
));
120 alloc_size
= PAGE_ALIGN(hp
->handle_size
);
122 free_bootmem_late(start
, alloc_size
);
125 static struct mdesc_mem_ops memblock_mdesc_ops
= {
126 .alloc
= mdesc_memblock_alloc
,
127 .free
= mdesc_memblock_free
,
130 static struct mdesc_handle
*mdesc_kmalloc(unsigned int mdesc_size
)
132 unsigned int handle_size
;
133 struct mdesc_handle
*hp
;
137 handle_size
= (sizeof(struct mdesc_handle
) -
138 sizeof(struct mdesc_hdr
) +
142 * Allocation has to succeed because mdesc update would be missed
143 * and such events are not retransmitted.
145 base
= kmalloc(handle_size
+ 15, GFP_KERNEL
| __GFP_NOFAIL
);
146 addr
= (unsigned long)base
;
147 addr
= (addr
+ 15UL) & ~15UL;
148 hp
= (struct mdesc_handle
*) addr
;
150 mdesc_handle_init(hp
, handle_size
, base
);
155 static void mdesc_kfree(struct mdesc_handle
*hp
)
157 BUG_ON(atomic_read(&hp
->refcnt
) != 0);
158 BUG_ON(!list_empty(&hp
->list
));
160 kfree(hp
->self_base
);
163 static struct mdesc_mem_ops kmalloc_mdesc_memops
= {
164 .alloc
= mdesc_kmalloc
,
168 static struct mdesc_handle
*mdesc_alloc(unsigned int mdesc_size
,
169 struct mdesc_mem_ops
*mops
)
171 struct mdesc_handle
*hp
= mops
->alloc(mdesc_size
);
179 static void mdesc_free(struct mdesc_handle
*hp
)
/* Current machine description, a list of stale-but-referenced old
 * descriptions, and the lock protecting both.
 */
static struct mdesc_handle *cur_mdesc;
static LIST_HEAD(mdesc_zombie_list);
static DEFINE_SPINLOCK(mdesc_lock);
188 struct mdesc_handle
*mdesc_grab(void)
190 struct mdesc_handle
*hp
;
193 spin_lock_irqsave(&mdesc_lock
, flags
);
196 atomic_inc(&hp
->refcnt
);
197 spin_unlock_irqrestore(&mdesc_lock
, flags
);
201 EXPORT_SYMBOL(mdesc_grab
);
203 void mdesc_release(struct mdesc_handle
*hp
)
207 spin_lock_irqsave(&mdesc_lock
, flags
);
208 if (atomic_dec_and_test(&hp
->refcnt
)) {
209 list_del_init(&hp
->list
);
212 spin_unlock_irqrestore(&mdesc_lock
, flags
);
214 EXPORT_SYMBOL(mdesc_release
);
/* Serializes mdesc updates against notifier registration, and the
 * singly-linked list of registered notifier clients.
 */
static DEFINE_MUTEX(mdesc_mutex);
static struct mdesc_notifier_client *client_list;
219 void mdesc_register_notifier(struct mdesc_notifier_client
*client
)
223 mutex_lock(&mdesc_mutex
);
224 client
->next
= client_list
;
225 client_list
= client
;
227 mdesc_for_each_node_by_name(cur_mdesc
, node
, client
->node_name
)
228 client
->add(cur_mdesc
, node
);
230 mutex_unlock(&mdesc_mutex
);
233 static const u64
*parent_cfg_handle(struct mdesc_handle
*hp
, u64 node
)
239 mdesc_for_each_arc(a
, hp
, node
, MDESC_ARC_TYPE_BACK
) {
242 target
= mdesc_arc_target(hp
, a
);
243 id
= mdesc_get_property(hp
, target
,
252 /* Run 'func' on nodes which are in A but not in B. */
253 static void invoke_on_missing(const char *name
,
254 struct mdesc_handle
*a
,
255 struct mdesc_handle
*b
,
256 void (*func
)(struct mdesc_handle
*, u64
))
260 mdesc_for_each_node_by_name(a
, node
, name
) {
261 int found
= 0, is_vdc_port
= 0;
262 const char *name_prop
;
266 name_prop
= mdesc_get_property(a
, node
, "name", NULL
);
267 if (name_prop
&& !strcmp(name_prop
, "vdc-port")) {
269 id
= parent_cfg_handle(a
, node
);
271 id
= mdesc_get_property(a
, node
, "id", NULL
);
274 printk(KERN_ERR
"MD: Cannot find ID for %s node.\n",
275 (name_prop
? name_prop
: name
));
279 mdesc_for_each_node_by_name(b
, fnode
, name
) {
283 name_prop
= mdesc_get_property(b
, fnode
,
286 strcmp(name_prop
, "vdc-port"))
288 fid
= parent_cfg_handle(b
, fnode
);
290 printk(KERN_ERR
"MD: Cannot find ID "
291 "for vdc-port node.\n");
295 fid
= mdesc_get_property(b
, fnode
,
308 static void notify_one(struct mdesc_notifier_client
*p
,
309 struct mdesc_handle
*old_hp
,
310 struct mdesc_handle
*new_hp
)
312 invoke_on_missing(p
->node_name
, old_hp
, new_hp
, p
->remove
);
313 invoke_on_missing(p
->node_name
, new_hp
, old_hp
, p
->add
);
316 static void mdesc_notify_clients(struct mdesc_handle
*old_hp
,
317 struct mdesc_handle
*new_hp
)
319 struct mdesc_notifier_client
*p
= client_list
;
322 notify_one(p
, old_hp
, new_hp
);
327 void mdesc_update(void)
329 unsigned long len
, real_len
, status
;
330 struct mdesc_handle
*hp
, *orig_hp
;
333 mutex_lock(&mdesc_mutex
);
335 (void) sun4v_mach_desc(0UL, 0UL, &len
);
337 hp
= mdesc_alloc(len
, &kmalloc_mdesc_memops
);
339 printk(KERN_ERR
"MD: mdesc alloc fails\n");
343 status
= sun4v_mach_desc(__pa(&hp
->mdesc
), len
, &real_len
);
344 if (status
!= HV_EOK
|| real_len
> len
) {
345 printk(KERN_ERR
"MD: mdesc reread fails with %lu\n",
347 atomic_dec(&hp
->refcnt
);
352 spin_lock_irqsave(&mdesc_lock
, flags
);
355 spin_unlock_irqrestore(&mdesc_lock
, flags
);
357 mdesc_notify_clients(orig_hp
, hp
);
359 spin_lock_irqsave(&mdesc_lock
, flags
);
360 if (atomic_dec_and_test(&orig_hp
->refcnt
))
363 list_add(&orig_hp
->list
, &mdesc_zombie_list
);
364 spin_unlock_irqrestore(&mdesc_lock
, flags
);
367 mutex_unlock(&mdesc_mutex
);
370 static struct mdesc_elem
*node_block(struct mdesc_hdr
*mdesc
)
372 return (struct mdesc_elem
*) (mdesc
+ 1);
375 static void *name_block(struct mdesc_hdr
*mdesc
)
377 return ((void *) node_block(mdesc
)) + mdesc
->node_sz
;
380 static void *data_block(struct mdesc_hdr
*mdesc
)
382 return ((void *) name_block(mdesc
)) + mdesc
->name_sz
;
385 u64
mdesc_node_by_name(struct mdesc_handle
*hp
,
386 u64 from_node
, const char *name
)
388 struct mdesc_elem
*ep
= node_block(&hp
->mdesc
);
389 const char *names
= name_block(&hp
->mdesc
);
390 u64 last_node
= hp
->mdesc
.node_sz
/ 16;
393 if (from_node
== MDESC_NODE_NULL
) {
395 } else if (from_node
>= last_node
) {
396 return MDESC_NODE_NULL
;
398 ret
= ep
[from_node
].d
.val
;
401 while (ret
< last_node
) {
402 if (ep
[ret
].tag
!= MD_NODE
)
403 return MDESC_NODE_NULL
;
404 if (!strcmp(names
+ ep
[ret
].name_offset
, name
))
408 if (ret
>= last_node
)
409 ret
= MDESC_NODE_NULL
;
412 EXPORT_SYMBOL(mdesc_node_by_name
);
414 const void *mdesc_get_property(struct mdesc_handle
*hp
, u64 node
,
415 const char *name
, int *lenp
)
417 const char *names
= name_block(&hp
->mdesc
);
418 u64 last_node
= hp
->mdesc
.node_sz
/ 16;
419 void *data
= data_block(&hp
->mdesc
);
420 struct mdesc_elem
*ep
;
422 if (node
== MDESC_NODE_NULL
|| node
>= last_node
)
425 ep
= node_block(&hp
->mdesc
) + node
;
427 for (; ep
->tag
!= MD_NODE_END
; ep
++) {
439 val
= data
+ ep
->d
.data
.data_offset
;
440 len
= ep
->d
.data
.data_len
;
449 if (!strcmp(names
+ ep
->name_offset
, name
)) {
458 EXPORT_SYMBOL(mdesc_get_property
);
460 u64
mdesc_next_arc(struct mdesc_handle
*hp
, u64 from
, const char *arc_type
)
462 struct mdesc_elem
*ep
, *base
= node_block(&hp
->mdesc
);
463 const char *names
= name_block(&hp
->mdesc
);
464 u64 last_node
= hp
->mdesc
.node_sz
/ 16;
466 if (from
== MDESC_NODE_NULL
|| from
>= last_node
)
467 return MDESC_NODE_NULL
;
472 for (; ep
->tag
!= MD_NODE_END
; ep
++) {
473 if (ep
->tag
!= MD_PROP_ARC
)
476 if (strcmp(names
+ ep
->name_offset
, arc_type
))
482 return MDESC_NODE_NULL
;
484 EXPORT_SYMBOL(mdesc_next_arc
);
486 u64
mdesc_arc_target(struct mdesc_handle
*hp
, u64 arc
)
488 struct mdesc_elem
*ep
, *base
= node_block(&hp
->mdesc
);
494 EXPORT_SYMBOL(mdesc_arc_target
);
496 const char *mdesc_node_name(struct mdesc_handle
*hp
, u64 node
)
498 struct mdesc_elem
*ep
, *base
= node_block(&hp
->mdesc
);
499 const char *names
= name_block(&hp
->mdesc
);
500 u64 last_node
= hp
->mdesc
.node_sz
/ 16;
502 if (node
== MDESC_NODE_NULL
|| node
>= last_node
)
506 if (ep
->tag
!= MD_NODE
)
509 return names
+ ep
->name_offset
;
511 EXPORT_SYMBOL(mdesc_node_name
);
513 static u64 max_cpus
= 64;
515 static void __init
report_platform_properties(void)
517 struct mdesc_handle
*hp
= mdesc_grab();
518 u64 pn
= mdesc_node_by_name(hp
, MDESC_NODE_NULL
, "platform");
522 if (pn
== MDESC_NODE_NULL
) {
523 prom_printf("No platform node in machine-description.\n");
527 s
= mdesc_get_property(hp
, pn
, "banner-name", NULL
);
528 printk("PLATFORM: banner-name [%s]\n", s
);
529 s
= mdesc_get_property(hp
, pn
, "name", NULL
);
530 printk("PLATFORM: name [%s]\n", s
);
532 v
= mdesc_get_property(hp
, pn
, "hostid", NULL
);
534 printk("PLATFORM: hostid [%08llx]\n", *v
);
535 v
= mdesc_get_property(hp
, pn
, "serial#", NULL
);
537 printk("PLATFORM: serial# [%08llx]\n", *v
);
538 v
= mdesc_get_property(hp
, pn
, "stick-frequency", NULL
);
539 printk("PLATFORM: stick-frequency [%08llx]\n", *v
);
540 v
= mdesc_get_property(hp
, pn
, "mac-address", NULL
);
542 printk("PLATFORM: mac-address [%llx]\n", *v
);
543 v
= mdesc_get_property(hp
, pn
, "watchdog-resolution", NULL
);
545 printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v
);
546 v
= mdesc_get_property(hp
, pn
, "watchdog-max-timeout", NULL
);
548 printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v
);
549 v
= mdesc_get_property(hp
, pn
, "max-cpus", NULL
);
552 printk("PLATFORM: max-cpus [%llu]\n", max_cpus
);
561 if (max_cpu
> NR_CPUS
)
566 for (i
= 0; i
< max_cpu
; i
++)
567 set_cpu_possible(i
, true);
574 static void fill_in_one_cache(cpuinfo_sparc
*c
, struct mdesc_handle
*hp
, u64 mp
)
576 const u64
*level
= mdesc_get_property(hp
, mp
, "level", NULL
);
577 const u64
*size
= mdesc_get_property(hp
, mp
, "size", NULL
);
578 const u64
*line_size
= mdesc_get_property(hp
, mp
, "line-size", NULL
);
582 type
= mdesc_get_property(hp
, mp
, "type", &type_len
);
586 if (of_find_in_proplist(type
, "instn", type_len
)) {
587 c
->icache_size
= *size
;
588 c
->icache_line_size
= *line_size
;
589 } else if (of_find_in_proplist(type
, "data", type_len
)) {
590 c
->dcache_size
= *size
;
591 c
->dcache_line_size
= *line_size
;
596 c
->ecache_size
= *size
;
597 c
->ecache_line_size
= *line_size
;
607 mdesc_for_each_arc(a
, hp
, mp
, MDESC_ARC_TYPE_FWD
) {
608 u64 target
= mdesc_arc_target(hp
, a
);
609 const char *name
= mdesc_node_name(hp
, target
);
611 if (!strcmp(name
, "cache"))
612 fill_in_one_cache(c
, hp
, target
);
617 static void find_back_node_value(struct mdesc_handle
*hp
, u64 node
,
619 void (*func
)(struct mdesc_handle
*, u64
, int),
624 /* Since we have an estimate of recursion depth, do a sanity check. */
628 mdesc_for_each_arc(arc
, hp
, node
, MDESC_ARC_TYPE_BACK
) {
629 u64 n
= mdesc_arc_target(hp
, arc
);
630 const char *name
= mdesc_node_name(hp
, n
);
632 if (!strcmp(srch_val
, name
))
635 find_back_node_value(hp
, n
, srch_val
, func
, val
, depth
-1);
639 static void __mark_core_id(struct mdesc_handle
*hp
, u64 node
,
642 const u64
*id
= mdesc_get_property(hp
, node
, "id", NULL
);
644 if (*id
< num_possible_cpus())
645 cpu_data(*id
).core_id
= core_id
;
648 static void __mark_sock_id(struct mdesc_handle
*hp
, u64 node
,
651 const u64
*id
= mdesc_get_property(hp
, node
, "id", NULL
);
653 if (*id
< num_possible_cpus())
654 cpu_data(*id
).sock_id
= sock_id
;
657 static void mark_core_ids(struct mdesc_handle
*hp
, u64 mp
,
660 find_back_node_value(hp
, mp
, "cpu", __mark_core_id
, core_id
, 10);
663 static void mark_sock_ids(struct mdesc_handle
*hp
, u64 mp
,
666 find_back_node_value(hp
, mp
, "cpu", __mark_sock_id
, sock_id
, 10);
669 static void set_core_ids(struct mdesc_handle
*hp
)
676 /* Identify unique cores by looking for cpus backpointed to by
677 * level 1 instruction caches.
679 mdesc_for_each_node_by_name(hp
, mp
, "cache") {
684 level
= mdesc_get_property(hp
, mp
, "level", NULL
);
688 type
= mdesc_get_property(hp
, mp
, "type", &len
);
689 if (!of_find_in_proplist(type
, "instn", len
))
692 mark_core_ids(hp
, mp
, idx
);
697 static int set_sock_ids_by_cache(struct mdesc_handle
*hp
, int level
)
703 /* Identify unique sockets by looking for cpus backpointed to by
704 * shared level n caches.
706 mdesc_for_each_node_by_name(hp
, mp
, "cache") {
709 cur_lvl
= mdesc_get_property(hp
, mp
, "level", NULL
);
710 if (*cur_lvl
!= level
)
713 mark_sock_ids(hp
, mp
, idx
);
720 static void set_sock_ids_by_socket(struct mdesc_handle
*hp
, u64 mp
)
724 mdesc_for_each_node_by_name(hp
, mp
, "socket") {
727 mdesc_for_each_arc(a
, hp
, mp
, MDESC_ARC_TYPE_FWD
) {
728 u64 t
= mdesc_arc_target(hp
, a
);
732 name
= mdesc_node_name(hp
, t
);
733 if (strcmp(name
, "cpu"))
736 id
= mdesc_get_property(hp
, t
, "id", NULL
);
737 if (*id
< num_possible_cpus())
738 cpu_data(*id
).sock_id
= idx
;
744 static void set_sock_ids(struct mdesc_handle
*hp
)
748 /* If machine description exposes sockets data use it.
749 * Otherwise fallback to use shared L3 or L2 caches.
751 mp
= mdesc_node_by_name(hp
, MDESC_NODE_NULL
, "sockets");
752 if (mp
!= MDESC_NODE_NULL
)
753 return set_sock_ids_by_socket(hp
, mp
);
755 if (!set_sock_ids_by_cache(hp
, 3))
756 set_sock_ids_by_cache(hp
, 2);
759 static void mark_proc_ids(struct mdesc_handle
*hp
, u64 mp
, int proc_id
)
763 mdesc_for_each_arc(a
, hp
, mp
, MDESC_ARC_TYPE_BACK
) {
764 u64 t
= mdesc_arc_target(hp
, a
);
768 name
= mdesc_node_name(hp
, t
);
769 if (strcmp(name
, "cpu"))
772 id
= mdesc_get_property(hp
, t
, "id", NULL
);
774 cpu_data(*id
).proc_id
= proc_id
;
778 static void __set_proc_ids(struct mdesc_handle
*hp
, const char *exec_unit_name
)
784 mdesc_for_each_node_by_name(hp
, mp
, exec_unit_name
) {
788 type
= mdesc_get_property(hp
, mp
, "type", &len
);
789 if (!of_find_in_proplist(type
, "int", len
) &&
790 !of_find_in_proplist(type
, "integer", len
))
793 mark_proc_ids(hp
, mp
, idx
);
/* Both spellings of the execution-unit node name occur in the wild. */
static void set_proc_ids(struct mdesc_handle *hp)
{
	__set_proc_ids(hp, "exec_unit");
	__set_proc_ids(hp, "exec-unit");
}
804 static void get_one_mondo_bits(const u64
*p
, unsigned int *mask
,
805 unsigned long def
, unsigned long max
)
813 if (!val
|| val
>= 64)
819 *mask
= ((1U << val
) * 64U) - 1U;
823 *mask
= ((1U << def
) * 64U) - 1U;
826 static void get_mondo_data(struct mdesc_handle
*hp
, u64 mp
,
827 struct trap_per_cpu
*tb
)
832 val
= mdesc_get_property(hp
, mp
, "q-cpu-mondo-#bits", NULL
);
833 get_one_mondo_bits(val
, &tb
->cpu_mondo_qmask
, 7, ilog2(max_cpus
* 2));
835 val
= mdesc_get_property(hp
, mp
, "q-dev-mondo-#bits", NULL
);
836 get_one_mondo_bits(val
, &tb
->dev_mondo_qmask
, 7, 8);
838 val
= mdesc_get_property(hp
, mp
, "q-resumable-#bits", NULL
);
839 get_one_mondo_bits(val
, &tb
->resum_qmask
, 6, 7);
841 val
= mdesc_get_property(hp
, mp
, "q-nonresumable-#bits", NULL
);
842 get_one_mondo_bits(val
, &tb
->nonresum_qmask
, 2, 2);
844 pr_info("SUN4V: Mondo queue sizes "
845 "[cpu(%u) dev(%u) r(%u) nr(%u)]\n",
846 tb
->cpu_mondo_qmask
+ 1,
847 tb
->dev_mondo_qmask
+ 1,
849 tb
->nonresum_qmask
+ 1);
853 static void *mdesc_iterate_over_cpus(void *(*func
)(struct mdesc_handle
*, u64
, int, void *), void *arg
, cpumask_t
*mask
)
855 struct mdesc_handle
*hp
= mdesc_grab();
859 mdesc_for_each_node_by_name(hp
, mp
, "cpu") {
860 const u64
*id
= mdesc_get_property(hp
, mp
, "id", NULL
);
864 if (cpuid
>= NR_CPUS
) {
865 printk(KERN_WARNING
"Ignoring CPU %d which is "
870 if (!cpumask_test_cpu(cpuid
, mask
))
874 ret
= func(hp
, mp
, cpuid
, arg
);
883 static void *record_one_cpu(struct mdesc_handle
*hp
, u64 mp
, int cpuid
,
888 set_cpu_present(cpuid
, true);
893 void mdesc_populate_present_mask(cpumask_t
*mask
)
895 if (tlb_type
!= hypervisor
)
899 mdesc_iterate_over_cpus(record_one_cpu
, NULL
, mask
);
902 static void * __init
check_one_pgsz(struct mdesc_handle
*hp
, u64 mp
, int cpuid
, void *arg
)
904 const u64
*pgsz_prop
= mdesc_get_property(hp
, mp
, "mmu-page-size-list", NULL
);
905 unsigned long *pgsz_mask
= arg
;
908 val
= (HV_PGSZ_MASK_8K
| HV_PGSZ_MASK_64K
|
909 HV_PGSZ_MASK_512K
| HV_PGSZ_MASK_4MB
);
920 void __init
mdesc_get_page_sizes(cpumask_t
*mask
, unsigned long *pgsz_mask
)
923 mdesc_iterate_over_cpus(check_one_pgsz
, pgsz_mask
, mask
);
926 static void *fill_in_one_cpu(struct mdesc_handle
*hp
, u64 mp
, int cpuid
,
929 const u64
*cfreq
= mdesc_get_property(hp
, mp
, "clock-frequency", NULL
);
930 struct trap_per_cpu
*tb
;
935 /* On uniprocessor we only want the values for the
936 * real physical cpu the kernel booted onto, however
937 * cpu_data() only has one entry at index 0.
939 if (cpuid
!= real_hard_smp_processor_id())
944 c
= &cpu_data(cpuid
);
945 c
->clock_tick
= *cfreq
;
947 tb
= &trap_block
[cpuid
];
948 get_mondo_data(hp
, mp
, tb
);
950 mdesc_for_each_arc(a
, hp
, mp
, MDESC_ARC_TYPE_FWD
) {
951 u64 j
, t
= mdesc_arc_target(hp
, a
);
954 t_name
= mdesc_node_name(hp
, t
);
955 if (!strcmp(t_name
, "cache")) {
956 fill_in_one_cache(c
, hp
, t
);
960 mdesc_for_each_arc(j
, hp
, t
, MDESC_ARC_TYPE_FWD
) {
961 u64 n
= mdesc_arc_target(hp
, j
);
964 n_name
= mdesc_node_name(hp
, n
);
965 if (!strcmp(n_name
, "cache"))
966 fill_in_one_cache(c
, hp
, n
);
976 void mdesc_fill_in_cpu_data(cpumask_t
*mask
)
978 struct mdesc_handle
*hp
;
980 mdesc_iterate_over_cpus(fill_in_one_cpu
, NULL
, mask
);
990 smp_fill_in_sib_core_maps();
993 /* mdesc_open() - Grab a reference to mdesc_handle when /dev/mdesc is
994 * opened. Hold this reference until /dev/mdesc is closed to ensure
995 * mdesc data structure is not released underneath us. Store the
996 * pointer to mdesc structure in private_data for read and seek to use
998 static int mdesc_open(struct inode
*inode
, struct file
*file
)
1000 struct mdesc_handle
*hp
= mdesc_grab();
1005 file
->private_data
= hp
;
1010 static ssize_t
mdesc_read(struct file
*file
, char __user
*buf
,
1011 size_t len
, loff_t
*offp
)
1013 struct mdesc_handle
*hp
= file
->private_data
;
1014 unsigned char *mdesc
;
1015 int bytes_left
, count
= len
;
1017 if (*offp
>= hp
->handle_size
)
1020 bytes_left
= hp
->handle_size
- *offp
;
1021 if (count
> bytes_left
)
1024 mdesc
= (unsigned char *)&hp
->mdesc
;
1026 if (!copy_to_user(buf
, mdesc
, count
)) {
1034 static loff_t
mdesc_llseek(struct file
*file
, loff_t offset
, int whence
)
1036 struct mdesc_handle
*hp
;
1040 offset
+= file
->f_pos
;
1048 hp
= file
->private_data
;
1049 if (offset
> hp
->handle_size
)
1052 file
->f_pos
= offset
;
1057 /* mdesc_close() - /dev/mdesc is being closed, release the reference to
1060 static int mdesc_close(struct inode
*inode
, struct file
*file
)
1062 mdesc_release(file
->private_data
);
1066 static const struct file_operations mdesc_fops
= {
1069 .llseek
= mdesc_llseek
,
1070 .release
= mdesc_close
,
1071 .owner
= THIS_MODULE
,
1074 static struct miscdevice mdesc_misc
= {
1075 .minor
= MISC_DYNAMIC_MINOR
,
1077 .fops
= &mdesc_fops
,
1080 static int __init
mdesc_misc_init(void)
1082 return misc_register(&mdesc_misc
);
1085 __initcall(mdesc_misc_init
);
1087 void __init
sun4v_mdesc_init(void)
1089 struct mdesc_handle
*hp
;
1090 unsigned long len
, real_len
, status
;
1092 (void) sun4v_mach_desc(0UL, 0UL, &len
);
1094 printk("MDESC: Size is %lu bytes.\n", len
);
1096 hp
= mdesc_alloc(len
, &memblock_mdesc_ops
);
1098 prom_printf("MDESC: alloc of %lu bytes failed.\n", len
);
1102 status
= sun4v_mach_desc(__pa(&hp
->mdesc
), len
, &real_len
);
1103 if (status
!= HV_EOK
|| real_len
> len
) {
1104 prom_printf("sun4v_mach_desc fails, err(%lu), "
1105 "len(%lu), real_len(%lu)\n",
1106 status
, len
, real_len
);
1113 report_platform_properties();