/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This is the kernel equivalent of the "xs" library. We don't need everything
 * and we use xenbus_comms for communication.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/*
 * NOTE: To future maintainers of the Solaris version of this file:
 * I found the Linux version of this code to be very disgusting in
 * overloading pointers and error codes into void * return values.
 * The main difference you will find is that all such usage is changed
 * to pass pointers to void* to be filled in with return values and
 * the functions return error codes.
 */
67 #include <sys/errno.h>
68 #include <sys/types.h>
69 #include <sys/sysmacros.h>
71 #include <sys/mutex.h>
72 #include <sys/condvar.h>
73 #include <sys/rwlock.h>
76 #include <sys/sunddi.h>
77 #include <sys/avintr.h>
78 #include <sys/cmn_err.h>
79 #include <sys/mach_mmu.h>
80 #include <util/sscanf.h>
81 #define _XSD_ERRORS_DEFINED
83 #include <sys/xpv_support.h>
85 #include <sys/hypervisor.h>
86 #include <sys/taskq.h>
88 #include <xen/sys/xenbus_impl.h>
89 #include <xen/sys/xenbus_comms.h>
90 #include <xen/sys/xendev.h>
91 #include <xen/public/io/xs_wire.h>
/* True when the two NUL-terminated strings compare equal. */
#define	streq(a, b)	(strcmp((a), (b)) == 0)

/* True when a kernel list_t holds no elements. */
#define	list_empty(list)	(list_head(list) == NULL)
97 struct xs_stored_msg
{
100 struct xsd_sockmsg hdr
;
103 /* Queued replies. */
108 /* Queued watch events. */
110 struct xenbus_watch
*handle
;
112 unsigned int vec_size
;
117 static struct xs_handle
{
118 /* A list of replies. Currently only one will ever be outstanding. */
123 /* One request at a time. */
124 kmutex_t request_mutex
;
126 /* Protect transactions against save/restore. */
127 krwlock_t suspend_lock
;
130 static int last_req_id
;
133 * List of clients wanting a xenstore up notification, and a lock to protect it
135 static boolean_t xenstore_up
;
136 static list_t notify_list
;
137 static kmutex_t notify_list_lock
;
138 static taskq_t
*xenbus_taskq
;
140 /* List of registered watches, and a lock to protect it. */
141 static list_t watches
;
142 static kmutex_t watches_lock
;
144 /* List of pending watch callback events, and a lock to protect it. */
145 static list_t watch_events
;
146 static kmutex_t watch_events_lock
;
149 * Details of the xenwatch callback kernel thread. The thread waits on the
150 * watch_events_cv for work to do (queued on watch_events list). When it
151 * wakes up it acquires the xenwatch_mutex before reading the list and
154 static kmutex_t xenwatch_mutex
;
155 static kcondvar_t watch_events_cv
;
157 static int process_msg(void);
160 get_error(const char *errorstring
)
164 for (i
= 0; !streq(errorstring
, xsd_errors
[i
].errstring
); i
++) {
165 if (i
== (sizeof (xsd_errors
) / sizeof (xsd_errors
[0])) - 1) {
167 "XENBUS xen store gave: unknown error %s",
172 return (xsd_errors
[i
].errnum
);
176 * Read a synchronous reply from xenstore. Since we can return early before
177 * reading a relevant reply, we discard any messages not matching the request
178 * ID. Caller must free returned message on success.
181 read_reply(struct xsd_sockmsg
*req_hdr
, struct xs_stored_msg
**reply
)
183 extern int do_polled_io
;
185 mutex_enter(&xs_state
.reply_lock
);
188 while (list_empty(&xs_state
.reply_list
)) {
189 if (interrupts_unleashed
&& !do_polled_io
) {
190 if (cv_wait_sig(&xs_state
.reply_cv
,
191 &xs_state
.reply_lock
) == 0) {
192 mutex_exit(&xs_state
.reply_lock
);
196 } else { /* polled mode needed for early probes */
197 mutex_exit(&xs_state
.reply_lock
);
198 (void) HYPERVISOR_yield();
199 (void) process_msg();
200 mutex_enter(&xs_state
.reply_lock
);
204 *reply
= list_head(&xs_state
.reply_list
);
205 list_remove(&xs_state
.reply_list
, *reply
);
207 if ((*reply
)->hdr
.req_id
== req_hdr
->req_id
)
211 mutex_exit(&xs_state
.reply_lock
);
215 /* Emergency write. */
217 xenbus_debug_write(const char *str
, unsigned int count
)
219 struct xsd_sockmsg msg
= { 0 };
222 msg
.len
= sizeof ("print") + count
+ 1;
224 mutex_enter(&xs_state
.request_mutex
);
225 (void) xb_write(&msg
, sizeof (msg
));
226 (void) xb_write("print", sizeof ("print"));
227 (void) xb_write(str
, count
);
228 (void) xb_write("", 1);
229 mutex_exit(&xs_state
.request_mutex
);
233 * This is pretty unpleasant. First off, there's the horrible logic around
234 * suspend_lock and transactions. Also, we can be interrupted either before we
235 * write a message, or before we receive a reply. A client that wants to
236 * survive this can't know which case happened. Luckily all clients don't care
237 * about signals currently, and the alternative (a hard wait on a userspace
238 * daemon) isn't exactly preferable. Caller must free 'reply' on success.
241 xenbus_dev_request_and_reply(struct xsd_sockmsg
*msg
, void **reply
)
243 struct xsd_sockmsg req_msg
= *msg
;
244 struct xs_stored_msg
*reply_msg
= NULL
;
247 if (req_msg
.type
== XS_TRANSACTION_START
)
248 rw_enter(&xs_state
.suspend_lock
, RW_READER
);
250 mutex_enter(&xs_state
.request_mutex
);
252 msg
->req_id
= last_req_id
++;
254 err
= xb_write(msg
, sizeof (*msg
) + msg
->len
);
256 if (req_msg
.type
== XS_TRANSACTION_START
)
257 rw_exit(&xs_state
.suspend_lock
);
258 msg
->type
= XS_ERROR
;
263 err
= read_reply(msg
, &reply_msg
);
266 if (msg
->type
== XS_TRANSACTION_START
)
267 rw_exit(&xs_state
.suspend_lock
);
272 *reply
= reply_msg
->un
.reply
.body
;
273 *msg
= reply_msg
->hdr
;
275 if (reply_msg
->hdr
.type
== XS_TRANSACTION_END
)
276 rw_exit(&xs_state
.suspend_lock
);
279 if (reply_msg
!= NULL
)
280 kmem_free(reply_msg
, sizeof (*reply_msg
));
282 mutex_exit(&xs_state
.request_mutex
);
287 * Send message to xs, return errcode, rval filled in with pointer
288 * to kmem_alloc'ed reply.
291 xs_talkv(xenbus_transaction_t t
,
292 enum xsd_sockmsg_type type
,
293 const iovec_t
*iovec
,
294 unsigned int num_vecs
,
298 struct xsd_sockmsg msg
;
299 struct xs_stored_msg
*reply_msg
;
304 msg
.tx_id
= (uint32_t)(unsigned long)t
;
307 for (i
= 0; i
< num_vecs
; i
++)
308 msg
.len
+= iovec
[i
].iov_len
;
310 mutex_enter(&xs_state
.request_mutex
);
312 msg
.req_id
= last_req_id
++;
314 err
= xb_write(&msg
, sizeof (msg
));
316 mutex_exit(&xs_state
.request_mutex
);
320 for (i
= 0; i
< num_vecs
; i
++) {
321 err
= xb_write(iovec
[i
].iov_base
, iovec
[i
].iov_len
);
323 mutex_exit(&xs_state
.request_mutex
);
328 err
= read_reply(&msg
, &reply_msg
);
330 mutex_exit(&xs_state
.request_mutex
);
335 reply
= reply_msg
->un
.reply
.body
;
337 if (reply_msg
->hdr
.type
== XS_ERROR
) {
338 err
= get_error(reply
);
339 kmem_free(reply
, reply_msg
->hdr
.len
+ 1);
344 *len
= reply_msg
->hdr
.len
+ 1;
346 ASSERT(reply_msg
->hdr
.type
== type
);
351 kmem_free(reply
, reply_msg
->hdr
.len
+ 1);
354 kmem_free(reply_msg
, sizeof (*reply_msg
));
358 /* Simplified version of xs_talkv: single message. */
360 xs_single(xenbus_transaction_t t
,
361 enum xsd_sockmsg_type type
,
362 const char *string
, void **ret
,
367 iovec
.iov_base
= (char *)string
;
368 iovec
.iov_len
= strlen(string
) + 1;
369 return (xs_talkv(t
, type
, &iovec
, 1, ret
, len
));
/*
 * Count the NUL-terminated strings packed into the first 'len' bytes of
 * 'strings'.
 */
static unsigned int
count_strings(const char *strings, unsigned int len)
{
	const char *cur;
	unsigned int total = 0;

	for (cur = strings; cur < strings + len; cur += strlen(cur) + 1)
		total++;

	return (total);
}
384 /* Return the path to dir with /name appended. Buffer must be kmem_free()'ed */
386 join(const char *dir
, const char *name
)
391 slashlen
= streq(name
, "") ? 0 : 1;
392 buffer
= kmem_alloc(strlen(dir
) + slashlen
+ strlen(name
) + 1,
395 (void) strcpy(buffer
, dir
);
397 (void) strcat(buffer
, "/");
398 (void) strcat(buffer
, name
);
404 split(char *strings
, unsigned int len
, unsigned int *num
)
408 /* Count the strings. */
409 if ((*num
= count_strings(strings
, len
- 1)) == 0)
412 /* Transfer to one big alloc for easy freeing. */
413 ret
= kmem_alloc(*num
* sizeof (char *) + (len
- 1), KM_SLEEP
);
414 (void) memcpy(&ret
[*num
], strings
, len
- 1);
415 kmem_free(strings
, len
);
417 strings
= (char *)&ret
[*num
];
418 for (p
= strings
, *num
= 0; p
< strings
+ (len
- 1);
419 p
+= strlen(p
) + 1) {
427 xenbus_directory(xenbus_transaction_t t
,
428 const char *dir
, const char *node
, unsigned int *num
)
430 char *strings
, *path
;
434 path
= join(dir
, node
);
435 err
= xs_single(t
, XS_DIRECTORY
, path
, (void **)&strings
, &len
);
436 kmem_free(path
, strlen(path
) + 1);
437 if (err
!= 0 || strings
== NULL
) {
438 /* sigh, we lose error code info here */
443 return (split(strings
, len
, num
));
446 /* Check if a path exists. */
448 xenbus_exists(const char *dir
, const char *node
)
453 if (xenbus_read(XBT_NULL
, dir
, node
, &p
, &n
) != 0)
459 /* Check if a directory path exists. */
461 xenbus_exists_dir(const char *dir
, const char *node
)
467 d
= xenbus_directory(XBT_NULL
, dir
, node
, &dir_n
);
470 for (i
= 0, len
= 0; i
< dir_n
; i
++)
471 len
+= strlen(d
[i
]) + 1 + sizeof (char *);
477 * Get the value of a single file.
478 * Returns a kmem_alloced value in retp: call kmem_free() on it after use.
479 * len indicates length in bytes.
482 xenbus_read(xenbus_transaction_t t
,
483 const char *dir
, const char *node
, void **retp
, unsigned int *len
)
488 path
= join(dir
, node
);
489 err
= xs_single(t
, XS_READ
, path
, retp
, len
);
490 kmem_free(path
, strlen(path
) + 1);
495 xenbus_read_str(const char *dir
, const char *node
, char **retp
)
502 * Since we access the xenbus value immediatly we can't be
503 * part of a transaction.
505 if ((err
= xenbus_read(XBT_NULL
, dir
, node
, (void **)&str
, &n
)) != 0)
507 ASSERT((str
!= NULL
) && (n
> 0));
510 * Why bother with this? Because xenbus is truly annoying in the
511 * fact that when it returns a string, it doesn't guarantee that
512 * the memory that holds the string is of size strlen() + 1.
513 * This forces callers to keep track of the size of the memory
514 * containing the string. Ugh. We'll work around this by
515 * re-allocate strings to always be of size strlen() + 1.
523 * Write the value of a single file.
524 * Returns err on failure.
527 xenbus_write(xenbus_transaction_t t
,
528 const char *dir
, const char *node
, const char *string
)
534 path
= join(dir
, node
);
536 iovec
[0].iov_base
= (void *)path
;
537 iovec
[0].iov_len
= strlen(path
) + 1;
538 iovec
[1].iov_base
= (void *)string
;
539 iovec
[1].iov_len
= strlen(string
);
541 ret
= xs_talkv(t
, XS_WRITE
, iovec
, 2, NULL
, NULL
);
542 kmem_free(path
, iovec
[0].iov_len
);
546 /* Create a new directory. */
548 xenbus_mkdir(xenbus_transaction_t t
, const char *dir
, const char *node
)
553 path
= join(dir
, node
);
554 ret
= xs_single(t
, XS_MKDIR
, path
, NULL
, NULL
);
555 kmem_free(path
, strlen(path
) + 1);
559 /* Destroy a file or directory (directories must be empty). */
561 xenbus_rm(xenbus_transaction_t t
, const char *dir
, const char *node
)
566 path
= join(dir
, node
);
567 ret
= xs_single(t
, XS_RM
, path
, NULL
, NULL
);
568 kmem_free(path
, strlen(path
) + 1);
573 * Start a transaction: changes by others will not be seen during this
574 * transaction, and changes will not be visible to others until end.
577 xenbus_transaction_start(xenbus_transaction_t
*t
)
584 rw_enter(&xs_state
.suspend_lock
, RW_READER
);
586 err
= xs_single(XBT_NULL
, XS_TRANSACTION_START
, "", &id_str
, &len
);
588 rw_exit(&xs_state
.suspend_lock
);
592 (void) ddi_strtoul((char *)id_str
, NULL
, 0, &id
);
593 *t
= (xenbus_transaction_t
)id
;
594 kmem_free(id_str
, len
);
601 * If abandon is true, transaction is discarded instead of committed.
604 xenbus_transaction_end(xenbus_transaction_t t
, int abort
)
610 (void) strcpy(abortstr
, "F");
612 (void) strcpy(abortstr
, "T");
614 err
= xs_single(t
, XS_TRANSACTION_END
, abortstr
, NULL
, NULL
);
616 rw_exit(&xs_state
.suspend_lock
);
622 * Single read and scanf: returns errno or 0. This can only handle a single
623 * conversion specifier.
627 xenbus_scanf(xenbus_transaction_t t
,
628 const char *dir
, const char *node
, const char *fmt
, ...)
635 ret
= xenbus_read(t
, dir
, node
, (void **)&val
, &len
);
640 if (vsscanf(val
, fmt
, ap
) != 1)
647 /* Single printf and write: returns errno or 0. */
650 xenbus_printf(xenbus_transaction_t t
,
651 const char *dir
, const char *node
, const char *fmt
, ...)
655 #define PRINTF_BUFFER_SIZE 4096
658 printf_buffer
= kmem_alloc(PRINTF_BUFFER_SIZE
, KM_SLEEP
);
661 ret
= vsnprintf(printf_buffer
, PRINTF_BUFFER_SIZE
, fmt
, ap
);
664 ASSERT(ret
<= PRINTF_BUFFER_SIZE
-1);
665 ret
= xenbus_write(t
, dir
, node
, printf_buffer
);
667 kmem_free(printf_buffer
, PRINTF_BUFFER_SIZE
);
673 /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
675 xenbus_gather(xenbus_transaction_t t
, const char *dir
, ...)
683 while (ret
== 0 && (name
= va_arg(ap
, char *)) != NULL
) {
684 const char *fmt
= va_arg(ap
, char *);
685 void *result
= va_arg(ap
, void *);
688 ret
= xenbus_read(t
, dir
, name
, (void **)&p
, &len
);
692 ASSERT(result
!= NULL
);
693 if (sscanf(p
, fmt
, result
) != 1)
697 *(char **)result
= p
;
704 xs_watch(const char *path
, const char *token
)
708 iov
[0].iov_base
= (void *)path
;
709 iov
[0].iov_len
= strlen(path
) + 1;
710 iov
[1].iov_base
= (void *)token
;
711 iov
[1].iov_len
= strlen(token
) + 1;
713 return (xs_talkv(XBT_NULL
, XS_WATCH
, iov
, 2, NULL
, NULL
));
717 xs_unwatch(const char *path
, const char *token
)
721 iov
[0].iov_base
= (char *)path
;
722 iov
[0].iov_len
= strlen(path
) + 1;
723 iov
[1].iov_base
= (char *)token
;
724 iov
[1].iov_len
= strlen(token
) + 1;
726 return (xs_talkv(XBT_NULL
, XS_UNWATCH
, iov
, 2, NULL
, NULL
));
729 static struct xenbus_watch
*
730 find_watch(const char *token
)
732 struct xenbus_watch
*i
, *cmp
;
734 (void) ddi_strtoul(token
, NULL
, 16, (unsigned long *)&cmp
);
736 for (i
= list_head(&watches
); i
!= NULL
; i
= list_next(&watches
, i
))
743 /* Register a xenstore state notify callback */
745 xs_register_xenbus_callback(void (*callback
)(int))
747 struct xenbus_notify
*xbn
, *xnp
;
749 xbn
= kmem_alloc(sizeof (struct xenbus_notify
), KM_SLEEP
);
750 xbn
->notify_func
= callback
;
751 mutex_enter(¬ify_list_lock
);
753 * Make sure not already on the list
755 xnp
= list_head(¬ify_list
);
756 for (; xnp
!= NULL
; xnp
= list_next(¬ify_list
, xnp
)) {
757 if (xnp
->notify_func
== callback
) {
758 kmem_free(xbn
, sizeof (struct xenbus_notify
));
759 mutex_exit(¬ify_list_lock
);
764 list_insert_tail(¬ify_list
, xbn
);
767 xnp
->notify_func(XENSTORE_UP
);
768 mutex_exit(¬ify_list_lock
);
773 * Notify clients of xenstore state
776 do_notify_callbacks(void *arg
)
778 struct xenbus_notify
*xnp
;
780 mutex_enter(¬ify_list_lock
);
781 xnp
= list_head(¬ify_list
);
782 for (; xnp
!= NULL
; xnp
= list_next(¬ify_list
, xnp
)) {
783 xnp
->notify_func((int)((uintptr_t)arg
));
785 mutex_exit(¬ify_list_lock
);
789 xs_notify_xenstore_up(void)
791 xenstore_up
= B_TRUE
;
792 (void) taskq_dispatch(xenbus_taskq
, do_notify_callbacks
,
793 (void *)XENSTORE_UP
, 0);
797 xs_notify_xenstore_down(void)
799 xenstore_up
= B_FALSE
;
800 (void) taskq_dispatch(xenbus_taskq
, do_notify_callbacks
,
801 (void *)XENSTORE_DOWN
, 0);
804 /* Register callback to watch this node. */
806 register_xenbus_watch(struct xenbus_watch
*watch
)
808 /* Pointer in ascii is the token. */
809 char token
[sizeof (watch
) * 2 + 1];
813 (void) snprintf(token
, sizeof (token
), "%lX", (long)watch
);
815 rw_enter(&xs_state
.suspend_lock
, RW_READER
);
817 mutex_enter(&watches_lock
);
819 * May be re-registering a watch if xenstore daemon was restarted
821 if (find_watch(token
) == NULL
)
822 list_insert_tail(&watches
, watch
);
823 mutex_exit(&watches_lock
);
825 DTRACE_XPV3(xenbus__register__watch
, const char *, watch
->node
,
826 uintptr_t, watch
->callback
, struct xenbus_watch
*, watch
);
828 err
= xs_watch(watch
->node
, token
);
830 /* Ignore errors due to multiple registration. */
831 if ((err
!= 0) && (err
!= EEXIST
)) {
832 mutex_enter(&watches_lock
);
833 list_remove(&watches
, watch
);
834 mutex_exit(&watches_lock
);
837 rw_exit(&xs_state
.suspend_lock
);
843 free_stored_msg(struct xs_stored_msg
*msg
)
847 for (i
= 0; i
< msg
->un
.watch
.vec_size
; i
++)
848 len
+= strlen(msg
->un
.watch
.vec
[i
]) + 1 + sizeof (char *);
849 kmem_free(msg
->un
.watch
.vec
, len
);
850 kmem_free(msg
, sizeof (*msg
));
854 unregister_xenbus_watch(struct xenbus_watch
*watch
)
856 struct xs_stored_msg
*msg
;
857 char token
[sizeof (watch
) * 2 + 1];
860 (void) snprintf(token
, sizeof (token
), "%lX", (long)watch
);
862 rw_enter(&xs_state
.suspend_lock
, RW_READER
);
864 mutex_enter(&watches_lock
);
865 ASSERT(find_watch(token
));
866 list_remove(&watches
, watch
);
867 mutex_exit(&watches_lock
);
869 DTRACE_XPV3(xenbus__unregister__watch
, const char *, watch
->node
,
870 uintptr_t, watch
->callback
, struct xenbus_watch
*, watch
);
872 err
= xs_unwatch(watch
->node
, token
);
874 cmn_err(CE_WARN
, "XENBUS Failed to release watch %s: %d",
877 rw_exit(&xs_state
.suspend_lock
);
879 /* Cancel pending watch events. */
880 mutex_enter(&watch_events_lock
);
881 msg
= list_head(&watch_events
);
883 while (msg
!= NULL
) {
884 struct xs_stored_msg
*tmp
= list_next(&watch_events
, msg
);
885 if (msg
->un
.watch
.handle
== watch
) {
886 list_remove(&watch_events
, msg
);
887 free_stored_msg(msg
);
892 mutex_exit(&watch_events_lock
);
894 /* Flush any currently-executing callback, unless we are it. :-) */
895 if (mutex_owner(&xenwatch_mutex
) != curthread
) {
896 mutex_enter(&xenwatch_mutex
);
897 mutex_exit(&xenwatch_mutex
);
904 rw_enter(&xs_state
.suspend_lock
, RW_WRITER
);
905 mutex_enter(&xs_state
.request_mutex
);
913 struct xenbus_watch
*watch
;
914 char token
[sizeof (watch
) * 2 + 1];
916 mutex_exit(&xs_state
.request_mutex
);
921 /* No need for watches_lock: the suspend_lock is sufficient. */
922 for (watch
= list_head(&watches
); watch
!= NULL
;
923 watch
= list_next(&watches
, watch
)) {
924 (void) snprintf(token
, sizeof (token
), "%lX", (long)watch
);
925 (void) xs_watch(watch
->node
, token
);
928 rw_exit(&xs_state
.suspend_lock
);
932 xenwatch_thread(void)
934 struct xs_stored_msg
*msg
;
935 struct xenbus_watch
*watch
;
938 mutex_enter(&watch_events_lock
);
939 while (list_empty(&watch_events
))
940 cv_wait(&watch_events_cv
, &watch_events_lock
);
941 msg
= list_head(&watch_events
);
943 list_remove(&watch_events
, msg
);
944 watch
= msg
->un
.watch
.handle
;
945 mutex_exit(&watch_events_lock
);
947 mutex_enter(&xenwatch_mutex
);
949 DTRACE_XPV4(xenbus__fire__watch
,
950 const char *, watch
->node
,
951 uintptr_t, watch
->callback
,
952 struct xenbus_watch
*, watch
,
953 const char *, msg
->un
.watch
.vec
[XS_WATCH_PATH
]);
955 watch
->callback(watch
, (const char **)msg
->un
.watch
.vec
,
956 msg
->un
.watch
.vec_size
);
958 free_stored_msg(msg
);
959 mutex_exit(&xenwatch_mutex
);
966 struct xs_stored_msg
*msg
;
970 msg
= kmem_alloc(sizeof (*msg
), KM_SLEEP
);
972 err
= xb_read(&msg
->hdr
, sizeof (msg
->hdr
));
974 kmem_free(msg
, sizeof (*msg
));
978 mlen
= msg
->hdr
.len
+ 1;
979 body
= kmem_alloc(mlen
, KM_SLEEP
);
981 err
= xb_read(body
, msg
->hdr
.len
);
983 kmem_free(body
, mlen
);
984 kmem_free(msg
, sizeof (*msg
));
988 body
[mlen
- 1] = '\0';
990 if (msg
->hdr
.type
== XS_WATCH_EVENT
) {
992 msg
->un
.watch
.vec
= split(body
, msg
->hdr
.len
+ 1,
993 &msg
->un
.watch
.vec_size
);
994 if (msg
->un
.watch
.vec
== NULL
) {
995 kmem_free(msg
, sizeof (*msg
));
999 mutex_enter(&watches_lock
);
1000 token
= msg
->un
.watch
.vec
[XS_WATCH_TOKEN
];
1001 if ((msg
->un
.watch
.handle
= find_watch(token
)) != NULL
) {
1002 mutex_enter(&watch_events_lock
);
1004 DTRACE_XPV4(xenbus__enqueue__watch
,
1005 const char *, msg
->un
.watch
.handle
->node
,
1006 uintptr_t, msg
->un
.watch
.handle
->callback
,
1007 struct xenbus_watch
*, msg
->un
.watch
.handle
,
1008 const char *, msg
->un
.watch
.vec
[XS_WATCH_PATH
]);
1010 list_insert_tail(&watch_events
, msg
);
1011 cv_broadcast(&watch_events_cv
);
1012 mutex_exit(&watch_events_lock
);
1014 free_stored_msg(msg
);
1016 mutex_exit(&watches_lock
);
1018 msg
->un
.reply
.body
= body
;
1019 mutex_enter(&xs_state
.reply_lock
);
1020 list_insert_tail(&xs_state
.reply_list
, msg
);
1021 mutex_exit(&xs_state
.reply_lock
);
1022 cv_signal(&xs_state
.reply_cv
);
1034 * We have to wait for interrupts to be ready, so we don't clash
1035 * with the polled-IO code in read_reply().
1037 while (!interrupts_unleashed
)
1041 err
= process_msg();
1043 cmn_err(CE_WARN
, "XENBUS error %d while reading "
1049 * When setting up xenbus, dom0 and domU have to take different paths, which
1050 * makes this code a little confusing. For dom0:
1052 * xs_early_init - mutex init only
1053 * xs_dom0_init - called on xenbus dev attach: set up our xenstore page and
1054 * event channel; start xenbus threads for responding to interrupts.
1058 * xs_early_init - mutex init; set up our xenstore page and event channel
1059 * xs_domu_init - installation of IRQ handler; start xenbus threads.
1061 * We need an early init on domU so we can use xenbus in polled mode to
1062 * discover devices, VCPUs etc.
1064 * On resume, we use xb_init() and xb_setup_intr() to restore xenbus to a
1071 list_create(&xs_state
.reply_list
, sizeof (struct xs_stored_msg
),
1072 offsetof(struct xs_stored_msg
, list
));
1073 list_create(&watch_events
, sizeof (struct xs_stored_msg
),
1074 offsetof(struct xs_stored_msg
, list
));
1075 list_create(&watches
, sizeof (struct xenbus_watch
),
1076 offsetof(struct xenbus_watch
, list
));
1077 list_create(¬ify_list
, sizeof (struct xenbus_notify
),
1078 offsetof(struct xenbus_notify
, list
));
1079 mutex_init(&xs_state
.reply_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1080 mutex_init(&xs_state
.request_mutex
, NULL
, MUTEX_DEFAULT
, NULL
);
1081 mutex_init(¬ify_list_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1082 rw_init(&xs_state
.suspend_lock
, NULL
, RW_DEFAULT
, NULL
);
1083 cv_init(&xs_state
.reply_cv
, NULL
, CV_DEFAULT
, NULL
);
1085 if (DOMAIN_IS_INITDOMAIN(xen_info
))
1089 xenstore_up
= B_TRUE
;
1093 xs_thread_init(void)
1095 (void) thread_create(NULL
, 0, xenwatch_thread
, NULL
, 0, &p0
,
1096 TS_RUN
, minclsyspri
);
1097 (void) thread_create(NULL
, 0, xenbus_thread
, NULL
, 0, &p0
,
1098 TS_RUN
, minclsyspri
);
1099 xenbus_taskq
= taskq_create("xenbus_taskq", 1,
1100 maxclsyspri
- 1, 1, 1, TASKQ_PREPOPULATE
);
1101 ASSERT(xenbus_taskq
!= NULL
);
1107 if (DOMAIN_IS_INITDOMAIN(xen_info
))
1111 * Add interrupt handler for xenbus now, must wait till after
1112 * psm module is loaded. All use of xenbus is in polled mode
1113 * until xs_init is called since it is what kicks off the xs
1124 static boolean_t initialized
= B_FALSE
;
1126 ASSERT(DOMAIN_IS_INITDOMAIN(xen_info
));
1129 * The xenbus driver might be re-attaching.
1138 initialized
= B_TRUE
;