/******************************************************************************
 * Talks to Xen Store to figure out what devices we have.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 * Copyright (C) 2005 Mike Wray, Hewlett-Packard
 * Copyright (C) 2005, 2006 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#define DPRINTK(fmt, args...)				\
	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
		 __func__, __LINE__, ##args)
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/xen-ops.h>
#include <xen/page.h>
#include <xen/hvm.h>

#include "xenbus.h"
static int xs_init_irq = -1;
int xen_store_evtchn;
EXPORT_SYMBOL_GPL(xen_store_evtchn);

struct xenstore_domain_interface *xen_store_interface;
EXPORT_SYMBOL_GPL(xen_store_interface);

#define XS_INTERFACE_READY \
	((xen_store_interface != NULL) && \
	 (xen_store_interface->connection == XENSTORE_CONNECTED))

enum xenstore_init xen_store_domain_type;
EXPORT_SYMBOL_GPL(xen_store_domain_type);

static unsigned long xen_store_gfn;

static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
/* If something in array of ids matches this device, return it. */
static const struct xenbus_device_id *
match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
{
	for (; *arr->devicetype != '\0'; arr++) {
		if (!strcmp(arr->devicetype, dev->devicetype))
			return arr;
	}
	return NULL;
}
int xenbus_match(struct device *_dev, const struct device_driver *_drv)
{
	const struct xenbus_driver *drv = to_xenbus_driver(_drv);

	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
}
EXPORT_SYMBOL_GPL(xenbus_match);
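
/*
 * Illustrative sketch (not part of this file): a driver supplies a table of
 * xenbus_device_id entries terminated by an empty devicetype, and
 * xenbus_match() walks it with match_device() above.  The "vif" devicetype
 * and the example_ids name are placeholders for whatever the real driver
 * claims.
 */
#if 0
static const struct xenbus_device_id example_ids[] = {
	{ "vif" },	/* compared against xenbus_device->devicetype */
	{ "" },		/* empty devicetype terminates the table */
};
#endif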
static void free_otherend_details(struct xenbus_device *dev)
{
	kfree(dev->otherend);
	dev->otherend = NULL;
}
static void free_otherend_watch(struct xenbus_device *dev)
{
	if (dev->otherend_watch.node) {
		unregister_xenbus_watch(&dev->otherend_watch);
		kfree(dev->otherend_watch.node);
		dev->otherend_watch.node = NULL;
	}
}
static int talk_to_otherend(struct xenbus_device *dev)
{
	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);

	free_otherend_watch(dev);
	free_otherend_details(dev);

	return drv->read_otherend_details(dev);
}
static int watch_otherend(struct xenbus_device *dev)
{
	struct xen_bus_type *bus =
		container_of(dev->dev.bus, struct xen_bus_type, bus);

	return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
				    bus->otherend_will_handle,
				    bus->otherend_changed,
				    "%s/%s", dev->otherend, "state");
}
int xenbus_read_otherend_details(struct xenbus_device *xendev,
				 char *id_node, char *path_node)
{
	int err = xenbus_gather(XBT_NIL, xendev->nodename,
				id_node, "%i", &xendev->otherend_id,
				path_node, NULL, &xendev->otherend,
				NULL);
	if (err) {
		xenbus_dev_fatal(xendev, err,
				 "reading other end details from %s",
				 xendev->nodename);
		return err;
	}

	if (strlen(xendev->otherend) == 0 ||
	    !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
		xenbus_dev_fatal(xendev, -ENOENT,
				 "unable to read other end from %s.  "
				 "missing or inaccessible.",
				 xendev->nodename);
		free_otherend_details(xendev);
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);
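
/*
 * Illustrative sketch (not part of this file): a frontend bus typically
 * implements its read_otherend_details hook as a thin wrapper around the
 * helper above, naming the xenstore keys that identify the other end.  The
 * function name and the "backend-id"/"backend" keys follow the usual
 * frontend convention and are an assumption here, not a quote of this file.
 */
#if 0
static int example_read_backend_details(struct xenbus_device *xendev)
{
	return xenbus_read_otherend_details(xendev, "backend-id", "backend");
}
#endif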
void xenbus_otherend_changed(struct xenbus_watch *watch,
			     const char *path, const char *token,
			     int ignore_on_shutdown)
{
	struct xenbus_device *dev =
		container_of(watch, struct xenbus_device, otherend_watch);
	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
	enum xenbus_state state;

	/* Protect us against watches firing on old details when the otherend
	   details change, say immediately after a resume. */
	if (!dev->otherend ||
	    strncmp(dev->otherend, path, strlen(dev->otherend))) {
		dev_dbg(&dev->dev, "Ignoring watch at %s\n", path);
		return;
	}

	state = xenbus_read_driver_state(dev->otherend);

	dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n",
		state, xenbus_strstate(state), dev->otherend_watch.node, path);

	/*
	 * Ignore xenbus transitions during shutdown. This prevents us doing
	 * work that can fail e.g., when the rootfs is gone.
	 */
	if (system_state > SYSTEM_RUNNING) {
		if (ignore_on_shutdown && (state == XenbusStateClosing))
			xenbus_frontend_closed(dev);
		return;
	}

	if (drv->otherend_changed)
		drv->otherend_changed(dev, state);
}
EXPORT_SYMBOL_GPL(xenbus_otherend_changed);
#define XENBUS_SHOW_STAT(name)						\
static ssize_t name##_show(struct device *_dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	struct xenbus_device *dev = to_xenbus_device(_dev);		\
									\
	return sprintf(buf, "%d\n", atomic_read(&dev->name));		\
}									\
static DEVICE_ATTR_RO(name)

XENBUS_SHOW_STAT(event_channels);
XENBUS_SHOW_STAT(events);
XENBUS_SHOW_STAT(spurious_events);
XENBUS_SHOW_STAT(jiffies_eoi_delayed);
static ssize_t spurious_threshold_show(struct device *_dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);

	return sprintf(buf, "%d\n", dev->spurious_threshold);
}

static ssize_t spurious_threshold_store(struct device *_dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;

	dev->spurious_threshold = val;

	return count;
}

static DEVICE_ATTR_RW(spurious_threshold);
static struct attribute *xenbus_attrs[] = {
	&dev_attr_event_channels.attr,
	&dev_attr_events.attr,
	&dev_attr_spurious_events.attr,
	&dev_attr_jiffies_eoi_delayed.attr,
	&dev_attr_spurious_threshold.attr,
	NULL
};

static const struct attribute_group xenbus_group = {
	.attrs = xenbus_attrs,
};
int xenbus_dev_probe(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
	const struct xenbus_device_id *id;
	int err;

	DPRINTK("%s", dev->nodename);

	if (!drv->probe) {
		err = -ENODEV;
		goto fail;
	}

	id = match_device(drv->ids, dev);
	if (!id) {
		err = -ENODEV;
		goto fail;
	}

	err = talk_to_otherend(dev);
	if (err) {
		dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
			 dev->nodename);
		return err;
	}

	if (!try_module_get(drv->driver.owner)) {
		dev_warn(&dev->dev, "failed to acquire module reference on '%s'\n",
			 drv->driver.name);
		err = -ESRCH;
		goto fail;
	}

	down(&dev->reclaim_sem);
	err = drv->probe(dev, id);
	up(&dev->reclaim_sem);
	if (err)
		goto fail_put;

	err = watch_otherend(dev);
	if (err) {
		dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
			 dev->nodename);
		goto fail_remove;
	}

	dev->spurious_threshold = 1;
	if (sysfs_create_group(&dev->dev.kobj, &xenbus_group))
		dev_warn(&dev->dev, "sysfs_create_group on %s failed.\n",
			 dev->nodename);

	return 0;
fail_remove:
	if (drv->remove) {
		down(&dev->reclaim_sem);
		drv->remove(dev);
		up(&dev->reclaim_sem);
	}
fail_put:
	module_put(drv->driver.owner);
fail:
	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_dev_probe);
void xenbus_dev_remove(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);

	DPRINTK("%s", dev->nodename);

	sysfs_remove_group(&dev->dev.kobj, &xenbus_group);

	free_otherend_watch(dev);

	if (drv->remove) {
		down(&dev->reclaim_sem);
		drv->remove(dev);
		up(&dev->reclaim_sem);
	}

	module_put(drv->driver.owner);

	free_otherend_details(dev);

	/*
	 * If the toolstack has forced the device state to closing then set
	 * the state to closed now to allow it to be cleaned up.
	 * Similarly, if the driver does not support re-bind, set the
	 * state to closed.
	 */
	if (!drv->allow_rebind ||
	    xenbus_read_driver_state(dev->nodename) == XenbusStateClosing)
		xenbus_switch_state(dev, XenbusStateClosed);
}
EXPORT_SYMBOL_GPL(xenbus_dev_remove);
int xenbus_register_driver_common(struct xenbus_driver *drv,
				  struct xen_bus_type *bus,
				  struct module *owner, const char *mod_name)
{
	drv->driver.name = drv->name ? drv->name : drv->ids[0].devicetype;
	drv->driver.bus = &bus->bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_register_driver_common);
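
/*
 * Illustrative sketch (not part of this file): a frontend driver normally
 * fills in a struct xenbus_driver and registers it with the
 * xenbus_register_frontend() wrapper from <xen/xenbus.h>, which funnels into
 * xenbus_register_driver_common() above.  The "examplefront" names and the
 * callback body are placeholders, not code from any real driver.
 */
#if 0
static const struct xenbus_device_id examplefront_ids[] = {
	{ "examplefront" },
	{ "" },
};

static int examplefront_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	/* Allocate per-device state, then drive the connection handshake. */
	return 0;
}

static struct xenbus_driver examplefront_driver = {
	.ids = examplefront_ids,
	.probe = examplefront_probe,
};

static int __init examplefront_init(void)
{
	return xenbus_register_frontend(&examplefront_driver);
}
module_init(examplefront_init);
#endif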
void xenbus_unregister_driver(struct xenbus_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
struct xb_find_info {
	struct xenbus_device *dev;
	const char *nodename;
};

static int cmp_dev(struct device *dev, void *data)
{
	struct xenbus_device *xendev = to_xenbus_device(dev);
	struct xb_find_info *info = data;

	if (!strcmp(xendev->nodename, info->nodename)) {
		info->dev = xendev;
		get_device(dev);
		return 1;
	}
	return 0;
}
static struct xenbus_device *xenbus_device_find(const char *nodename,
						struct bus_type *bus)
{
	struct xb_find_info info = { .dev = NULL, .nodename = nodename };

	bus_for_each_dev(bus, NULL, &info, cmp_dev);
	return info.dev;
}
static int cleanup_dev(struct device *dev, void *data)
{
	struct xenbus_device *xendev = to_xenbus_device(dev);
	struct xb_find_info *info = data;
	int len = strlen(info->nodename);

	DPRINTK("%s", info->nodename);

	/* Match the info->nodename path, or any subdirectory of that path. */
	if (strncmp(xendev->nodename, info->nodename, len))
		return 0;

	/* If the node name is longer, ensure it really is a subdirectory. */
	if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
		return 0;

	info->dev = xendev;
	get_device(dev);
	return 1;
}
static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
{
	struct xb_find_info info = { .nodename = path };

	do {
		info.dev = NULL;
		bus_for_each_dev(bus, NULL, &info, cleanup_dev);
		if (info.dev) {
			device_unregister(&info.dev->dev);
			put_device(&info.dev->dev);
		}
	} while (info.dev);
}
static void xenbus_dev_release(struct device *dev)
{
	if (dev)
		kfree(to_xenbus_device(dev));
}
static ssize_t nodename_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
static DEVICE_ATTR_RO(nodename);

static ssize_t devtype_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
static DEVICE_ATTR_RO(devtype);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s:%s\n", dev->bus->name,
		       to_xenbus_device(dev)->devicetype);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n",
		       xenbus_strstate(to_xenbus_device(dev)->state));
}
static DEVICE_ATTR_RO(state);
static struct attribute *xenbus_dev_attrs[] = {
	&dev_attr_nodename.attr,
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_state.attr,
	NULL,
};

static const struct attribute_group xenbus_dev_group = {
	.attrs = xenbus_dev_attrs,
};

const struct attribute_group *xenbus_dev_groups[] = {
	&xenbus_dev_group,
	NULL,
};
EXPORT_SYMBOL_GPL(xenbus_dev_groups);
int xenbus_probe_node(struct xen_bus_type *bus,
		      const char *type,
		      const char *nodename)
{
	char devname[XEN_BUS_ID_SIZE];
	int err;
	struct xenbus_device *xendev;
	size_t stringlen;
	char *tmpstring;

	enum xenbus_state state = xenbus_read_driver_state(nodename);

	if (state != XenbusStateInitialising) {
		/* Device is not new, so ignore it.  This can happen if a
		   device is going away after switching to Closed. */
		return 0;
	}

	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
	xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
	if (!xendev)
		return -ENOMEM;

	xendev->state = XenbusStateInitialising;

	/* Copy the strings into the extra space. */

	tmpstring = (char *)(xendev + 1);
	strcpy(tmpstring, nodename);
	xendev->nodename = tmpstring;

	tmpstring += strlen(tmpstring) + 1;
	strcpy(tmpstring, type);
	xendev->devicetype = tmpstring;
	init_completion(&xendev->down);

	xendev->dev.bus = &bus->bus;
	xendev->dev.release = xenbus_dev_release;

	err = bus->get_bus_id(devname, xendev->nodename);
	if (err)
		goto fail;

	dev_set_name(&xendev->dev, "%s", devname);
	sema_init(&xendev->reclaim_sem, 1);

	/* Register with generic device framework. */
	err = device_register(&xendev->dev);
	if (err) {
		put_device(&xendev->dev);
		xendev = NULL;
		goto fail;
	}

	return 0;
fail:
	kfree(xendev);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_probe_node);
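
/*
 * Layout note (an illustration, not code from this file): xenbus_probe_node()
 * allocates the xenbus_device and its two strings in one kzalloc() so that a
 * single kfree() in xenbus_dev_release() frees everything:
 *
 *   [ struct xenbus_device | "device/vif/0\0" | "vif\0" ]
 *                            ^ xendev->nodename ^ xendev->devicetype
 *
 * The "device/vif/0"/"vif" values are only an example of a typical frontend
 * node and device type.
 */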
static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
{
	int err = 0;
	char **dir;
	unsigned int i, dir_n = 0;

	dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	for (i = 0; i < dir_n; i++) {
		err = bus->probe(bus, type, dir[i]);
		if (err)
			break;
	}

	kfree(dir);
	return err;
}
int xenbus_probe_devices(struct xen_bus_type *bus)
{
	int err = 0;
	char **dir;
	unsigned int i, dir_n;

	dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	for (i = 0; i < dir_n; i++) {
		err = xenbus_probe_device_type(bus, dir[i]);
		if (err)
			break;
	}

	kfree(dir);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_probe_devices);
static unsigned int char_count(const char *str, char c)
{
	unsigned int i, ret = 0;

	for (i = 0; str[i]; i++)
		if (str[i] == c)
			ret++;
	return ret;
}

static int strsep_len(const char *str, char c, unsigned int len)
{
	unsigned int i;

	for (i = 0; str[i]; i++)
		if (str[i] == c) {
			if (len == 0)
				return i;
			len--;
		}
	return (len == 0) ? i : -ERANGE;
}
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
{
	int exists, rootlen;
	struct xenbus_device *dev;
	char type[XEN_BUS_ID_SIZE];
	const char *p, *root;

	if (char_count(node, '/') < 2)
		return;

	exists = xenbus_exists(XBT_NIL, node, "");
	if (!exists) {
		xenbus_cleanup_devices(node, &bus->bus);
		return;
	}

	/* backend/<type>/... or device/<type>/... */
	p = strchr(node, '/') + 1;
	snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
	type[XEN_BUS_ID_SIZE-1] = '\0';

	rootlen = strsep_len(node, '/', bus->levels);
	if (rootlen < 0)
		return;
	root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
	if (!root)
		return;

	dev = xenbus_device_find(root, &bus->bus);
	if (!dev)
		xenbus_probe_node(bus, type, root);
	else
		put_device(&dev->dev);

	kfree(root);
}
EXPORT_SYMBOL_GPL(xenbus_dev_changed);
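
/*
 * Worked example (an illustration, assuming a frontend-style bus with
 * bus->levels == 2): for a watch firing on node "device/vif/0/state",
 * char_count() counts three '/' so the node is deep enough, p points at
 * "vif/0/state" so type becomes "vif", and strsep_len(node, '/', 2) returns
 * the length of "device/vif/0", which becomes root.  If no xenbus_device
 * already exists for that root, xenbus_probe_node() creates one; otherwise
 * the extra reference taken by xenbus_device_find() is dropped again.
 */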
int xenbus_dev_suspend(struct device *dev)
{
	int err = 0;
	struct xenbus_driver *drv;
	struct xenbus_device *xdev
		= container_of(dev, struct xenbus_device, dev);

	DPRINTK("%s", xdev->nodename);

	if (dev->driver == NULL)
		return 0;
	drv = to_xenbus_driver(dev->driver);
	if (drv->suspend)
		err = drv->suspend(xdev);
	if (err)
		dev_warn(dev, "suspend failed: %i\n", err);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_suspend);
int xenbus_dev_resume(struct device *dev)
{
	int err;
	struct xenbus_driver *drv;
	struct xenbus_device *xdev
		= container_of(dev, struct xenbus_device, dev);

	DPRINTK("%s", xdev->nodename);

	if (dev->driver == NULL)
		return 0;
	drv = to_xenbus_driver(dev->driver);
	err = talk_to_otherend(xdev);
	if (err) {
		dev_warn(dev, "resume (talk_to_otherend) failed: %i\n", err);
		return err;
	}

	xdev->state = XenbusStateInitialising;

	if (drv->resume) {
		err = drv->resume(xdev);
		if (err) {
			dev_warn(dev, "resume failed: %i\n", err);
			return err;
		}
	}

	err = watch_otherend(xdev);
	if (err) {
		dev_warn(dev, "resume (watch_otherend) failed: %d\n", err);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_resume);
int xenbus_dev_cancel(struct device *dev)
{
	/* Do nothing */
	DPRINTK("cancel");
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_cancel);
/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready;

int register_xenstore_notifier(struct notifier_block *nb)
{
	int ret = 0;

	if (xenstored_ready > 0)
		ret = nb->notifier_call(nb, 0, NULL);
	else
		blocking_notifier_chain_register(&xenstore_chain, nb);

	return ret;
}
EXPORT_SYMBOL_GPL(register_xenstore_notifier);
void unregister_xenstore_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&xenstore_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
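
/*
 * Illustrative sketch (not part of this file): code that must wait for
 * xenstore to come up registers a notifier_block; the callback runs either
 * immediately (if xenstored is already ready) or later from
 * blocking_notifier_call_chain() in xenbus_probe() below.  The
 * example_ready() callback and its name are placeholders.
 */
#if 0
static int example_ready(struct notifier_block *nb, unsigned long event,
			 void *data)
{
	/* Safe to talk to xenstore from here. */
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_ready,
};

/* ... in some init path: register_xenstore_notifier(&example_nb); */
#endif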
static void xenbus_probe(void)
{
	xenstored_ready = 1;

	if (!xen_store_interface)
		xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
					       XEN_PAGE_SIZE, MEMREMAP_WB);
	/*
	 * Now it is safe to free the IRQ used for xenstore late
	 * initialization. No need to unbind: it is about to be
	 * bound again from xb_init_comms. Note that calling
	 * unbind_from_irqhandler now would result in xen_evtchn_close()
	 * being called and the event channel not being enabled again
	 * afterwards, resulting in missed event notifications.
	 */
	if (xs_init_irq >= 0)
		free_irq(xs_init_irq, &xb_waitq);

	xs_init_irq = -1;

	/*
	 * In the HVM case, xenbus_init() deferred its call to
	 * xs_init() in case callbacks were not operational yet.
	 */
	if (xen_store_domain_type == XS_HVM)
		xs_init();

	/* Notify others that xenstore is up */
	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}
/*
 * Returns true when XenStore init must be deferred in order to
 * allow the PCI platform device to be initialised, before we
 * can actually have event channel interrupts working.
 */
static bool xs_hvm_defer_init_for_callback(void)
{
#ifdef CONFIG_XEN_PVHVM
	return xen_store_domain_type == XS_HVM &&
		!xen_have_vector_callback;
#else
	return false;
#endif
}
static int xenbus_probe_thread(void *unused)
{
	DEFINE_WAIT(w);

	/*
	 * We actually just want to wait for *any* trigger of xb_waitq,
	 * and run xenbus_probe() the moment it occurs.
	 */
	prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
	schedule();
	finish_wait(&xb_waitq, &w);

	DPRINTK("probing");
	xenbus_probe();
	return 0;
}
static int __init xenbus_probe_initcall(void)
{
	if (!xen_domain())
		return -ENODEV;

	/*
	 * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
	 * need to wait for the platform PCI device to come up or
	 * xen_store_interface is not ready.
	 */
	if (xen_store_domain_type == XS_PV ||
	    (xen_store_domain_type == XS_HVM &&
	     !xs_hvm_defer_init_for_callback() &&
	     xen_store_interface != NULL))
		xenbus_probe();

	/*
	 * For XS_LOCAL or when xen_store_interface is not ready, spawn a
	 * thread which will wait for xenstored or a xenstore-stubdom to be
	 * started, then probe.  It will be triggered when communication
	 * starts happening, by waiting on xb_waitq.
	 */
	if (xen_store_domain_type == XS_LOCAL || !XS_INTERFACE_READY) {
		struct task_struct *probe_task;

		probe_task = kthread_run(xenbus_probe_thread, NULL,
					 "xenbus_probe");
		if (IS_ERR(probe_task))
			return PTR_ERR(probe_task);
	}

	return 0;
}
device_initcall(xenbus_probe_initcall);
int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	int ret;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;

	ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
	if (ret)
		return ret;

	/*
	 * If xenbus_probe_initcall() deferred the xenbus_probe()
	 * due to the callback not functioning yet, we can do it now.
	 */
	if (!xenstored_ready && xs_hvm_defer_init_for_callback())
		xenbus_probe();

	return ret;
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);
/* Set up event channel for xenstored which is run as a local process
 * (this is normally used only in dom0)
 */
static int __init xenstored_local_init(void)
{
	int err = -ENOMEM;
	unsigned long page = 0;
	struct evtchn_alloc_unbound alloc_unbound;

	/* Allocate Xenstore page */
	page = get_zeroed_page(GFP_KERNEL);
	if (!page)
		goto out_err;

	xen_store_gfn = virt_to_gfn((void *)page);

	/* Next allocate a local port which xenstored can bind to */
	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = DOMID_SELF;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err == -ENOSYS)
		goto out_err;

	BUG_ON(err);
	xen_store_evtchn = alloc_unbound.port;

	return 0;

 out_err:
	if (page != 0)
		free_page(page);
	return err;
}
*nb
,
917 unsigned long action
, void *data
)
921 if (xen_hvm_domain()) {
924 err
= hvm_get_parameter(HVM_PARAM_STORE_EVTCHN
, &v
);
926 xen_store_evtchn
= v
;
928 pr_warn("Cannot update xenstore event channel: %d\n",
931 xen_store_evtchn
= xen_start_info
->store_evtchn
;
936 static struct notifier_block xenbus_resume_nb
= {
937 .notifier_call
= xenbus_resume_cb
,
static irqreturn_t xenbus_late_init(int irq, void *unused)
{
	int err;
	uint64_t v = 0;

	err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
	if (err || !v || !~v)
		return IRQ_HANDLED;

	xen_store_gfn = (unsigned long)v;

	wake_up(&xb_waitq);
	return IRQ_HANDLED;
}
static int __init xenbus_init(void)
{
	int err;
	uint64_t v = 0;

	xen_store_domain_type = XS_UNKNOWN;

	if (!xen_domain())
		return -ENODEV;

	xenbus_ring_ops_init();

	if (xen_pv_domain())
		xen_store_domain_type = XS_PV;
	if (xen_hvm_domain())
		xen_store_domain_type = XS_HVM;
	if (xen_hvm_domain() && xen_initial_domain())
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && !xen_start_info->store_evtchn)
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && xen_start_info->store_evtchn)
		xenstored_ready = 1;

	switch (xen_store_domain_type) {
	case XS_LOCAL:
		err = xenstored_local_init();
		if (err)
			goto out_error;
		xen_store_interface = gfn_to_virt(xen_store_gfn);
		break;
	case XS_PV:
		xen_store_evtchn = xen_start_info->store_evtchn;
		xen_store_gfn = xen_start_info->store_mfn;
		xen_store_interface = gfn_to_virt(xen_store_gfn);
		break;
	case XS_HVM:
		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (err)
			goto out_error;
		xen_store_evtchn = (int)v;
		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
		if (err)
			goto out_error;
		/*
		 * Uninitialized hvm_params are zero and return no error.
		 * Although it is theoretically possible to have
		 * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
		 * not zero when valid. If zero, it means that Xenstore hasn't
		 * been properly initialized. Instead of attempting to map a
		 * wrong guest physical address return error.
		 *
		 * Also recognize all bits set as an invalid/uninitialized value.
		 */
		if (!v || !~v) {
			err = -ENOENT;
			goto out_error;
		}
		/* Avoid truncation on 32-bit. */
#if BITS_PER_LONG == 32
		if (v > ULONG_MAX) {
			pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
			       __func__, v);
			err = -EINVAL;
			goto out_error;
		}
#endif
		xen_store_gfn = (unsigned long)v;
		xen_store_interface =
			memremap(xen_store_gfn << XEN_PAGE_SHIFT,
				 XEN_PAGE_SIZE, MEMREMAP_WB);
		if (!xen_store_interface) {
			pr_err("%s: cannot map HVM_PARAM_STORE_PFN=%llx\n",
			       __func__, v);
			err = -EINVAL;
			goto out_error;
		}
		if (xen_store_interface->connection != XENSTORE_CONNECTED) {
			err = bind_evtchn_to_irqhandler(xen_store_evtchn,
							xenbus_late_init,
							0, "xenstore_late_init",
							&xb_waitq);
			if (err < 0) {
				pr_err("xenstore_late_init couldn't bind irq err=%d\n",
				       err);
				goto out_error;
			}

			xs_init_irq = err;
		}
		break;
	default:
		pr_warn("Xenstore state unknown\n");
		break;
	}

	/*
	 * HVM domains may not have a functional callback yet. In that
	 * case let xs_init() be called from xenbus_probe(), which will
	 * get invoked at an appropriate time.
	 */
	if (xen_store_domain_type != XS_HVM) {
		err = xs_init();
		if (err) {
			pr_warn("Error initializing xenstore comms: %i\n", err);
			goto out_error;
		}
	}

	if ((xen_store_domain_type != XS_LOCAL) &&
	    (xen_store_domain_type != XS_UNKNOWN))
		xen_resume_notifier_register(&xenbus_resume_nb);

#ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
	proc_create_mount_point("xen");
#endif
	return 0;

out_error:
	xen_store_domain_type = XS_UNKNOWN;
	return err;
}

postcore_initcall(xenbus_init);

MODULE_LICENSE("GPL");