/******************************************************************************
 * xenbus_xs.c
 *
 * This is the kernel equivalent of the "xs" library.  We don't need everything
 * and we use xenbus_comms for communication.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/unistd.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/kthread.h>
#include <linux/rwsem.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <xen/xenbus.h>
#include "xenbus_comms.h"
struct xs_stored_msg {
	struct list_head list;

	struct xsd_sockmsg hdr;

	union {
		/* Queued replies. */
		struct {
			char *body;
		} reply;

		/* Queued watch events. */
		struct {
			struct xenbus_watch *handle;
			char **vec;
			unsigned int vec_size;
		} watch;
	} u;
};
struct xs_handle {
	/* A list of replies. Currently only one will ever be outstanding. */
	struct list_head reply_list;
	spinlock_t reply_lock;
	wait_queue_head_t reply_waitq;

	/*
	 * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
	 * response_mutex is never taken simultaneously with the other three.
	 */

	/* One request at a time. */
	struct mutex request_mutex;

	/* Protect xenbus reader thread against save/restore. */
	struct mutex response_mutex;

	/* Protect transactions against save/restore. */
	struct rw_semaphore transaction_mutex;

	/* Protect watch (de)register against save/restore. */
	struct rw_semaphore watch_mutex;
};

static struct xs_handle xs_state;

/* List of registered watches, and a lock to protect it. */
static LIST_HEAD(watches);
static DEFINE_SPINLOCK(watches_lock);

/* List of pending watch callback events, and a lock to protect it. */
static LIST_HEAD(watch_events);
static DEFINE_SPINLOCK(watch_events_lock);

/*
 * Details of the xenwatch callback kernel thread. The thread waits on the
 * watch_events_waitq for work to do (queued on watch_events list). When it
 * wakes up it acquires the xenwatch_mutex before reading the list and
 * carrying out work.
 */
static pid_t xenwatch_pid;
static DEFINE_MUTEX(xenwatch_mutex);
static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
static int get_error(const char *errorstring)
{
	unsigned int i;

	for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
		if (i == ARRAY_SIZE(xsd_errors) - 1) {
			printk(KERN_WARNING
			       "XENBUS xen store gave: unknown error %s",
			       errorstring);
			return EINVAL;
		}
	}
	return xsd_errors[i].errnum;
}
static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
{
	struct xs_stored_msg *msg;
	char *body;

	spin_lock(&xs_state.reply_lock);

	while (list_empty(&xs_state.reply_list)) {
		spin_unlock(&xs_state.reply_lock);
		/* XXX FIXME: Avoid synchronous wait for response here. */
		wait_event(xs_state.reply_waitq,
			   !list_empty(&xs_state.reply_list));
		spin_lock(&xs_state.reply_lock);
	}

	msg = list_entry(xs_state.reply_list.next,
			 struct xs_stored_msg, list);
	list_del(&msg->list);

	spin_unlock(&xs_state.reply_lock);

	*type = msg->hdr.type;
	if (len)
		*len = msg->hdr.len;
	body = msg->u.reply.body;

	kfree(msg);

	return body;
}
void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
{
	void *ret;
	struct xsd_sockmsg req_msg = *msg;
	int err;

	if (req_msg.type == XS_TRANSACTION_START)
		down_read(&xs_state.transaction_mutex);

	mutex_lock(&xs_state.request_mutex);

	err = xb_write(msg, sizeof(*msg) + msg->len);
	if (err) {
		msg->type = XS_ERROR;
		ret = ERR_PTR(err);
	} else
		ret = read_reply(&msg->type, &msg->len);

	mutex_unlock(&xs_state.request_mutex);

	if ((msg->type == XS_TRANSACTION_END) ||
	    ((req_msg.type == XS_TRANSACTION_START) &&
	     (msg->type == XS_ERROR)))
		up_read(&xs_state.transaction_mutex);

	return ret;
}
EXPORT_SYMBOL(xenbus_dev_request_and_reply);
/* Send message to xs, get kmalloc'ed reply.  ERR_PTR() on error. */
static void *xs_talkv(struct xenbus_transaction t,
		      enum xsd_sockmsg_type type,
		      const struct kvec *iovec,
		      unsigned int num_vecs,
		      unsigned int *len)
{
	struct xsd_sockmsg msg;
	void *ret = NULL;
	unsigned int i;
	int err;

	msg.tx_id = t.id;
	msg.req_id = 0;
	msg.type = type;
	msg.len = 0;
	for (i = 0; i < num_vecs; i++)
		msg.len += iovec[i].iov_len;

	mutex_lock(&xs_state.request_mutex);

	err = xb_write(&msg, sizeof(msg));
	if (err) {
		mutex_unlock(&xs_state.request_mutex);
		return ERR_PTR(err);
	}

	for (i = 0; i < num_vecs; i++) {
		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
		if (err) {
			mutex_unlock(&xs_state.request_mutex);
			return ERR_PTR(err);
		}
	}

	ret = read_reply(&msg.type, len);

	mutex_unlock(&xs_state.request_mutex);

	if (IS_ERR(ret))
		return ret;

	if (msg.type == XS_ERROR) {
		err = get_error(ret);
		kfree(ret);
		return ERR_PTR(-err);
	}

	if (msg.type != type) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "XENBUS unexpected type [%d], expected [%d]\n",
			       msg.type, type);
		kfree(ret);
		return ERR_PTR(-EINVAL);
	}
	return ret;
}
/* Simplified version of xs_talkv: single message. */
static void *xs_single(struct xenbus_transaction t,
		       enum xsd_sockmsg_type type,
		       const char *string,
		       unsigned int *len)
{
	struct kvec iovec;

	iovec.iov_base = (void *)string;
	iovec.iov_len = strlen(string) + 1;
	return xs_talkv(t, type, &iovec, 1, len);
}

/* Many commands only need an ack, don't care what it says. */
static int xs_error(char *reply)
{
	if (IS_ERR(reply))
		return PTR_ERR(reply);
	kfree(reply);
	return 0;
}

static unsigned int count_strings(const char *strings, unsigned int len)
{
	unsigned int num;
	const char *p;

	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
		num++;

	return num;
}
/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
static char *join(const char *dir, const char *name)
{
	char *buffer;

	if (strlen(name) == 0)
		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir);
	else
		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name);
	return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
}

static char **split(char *strings, unsigned int len, unsigned int *num)
{
	char *p, **ret;

	/* Count the strings. */
	*num = count_strings(strings, len);

	/* Transfer to one big alloc for easy freeing. */
	ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
	if (!ret) {
		kfree(strings);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(&ret[*num], strings, len);
	kfree(strings);

	strings = (char *)&ret[*num];
	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
		ret[(*num)++] = p;

	return ret;
}
char **xenbus_directory(struct xenbus_transaction t,
			const char *dir, const char *node, unsigned int *num)
{
	char *strings, *path;
	unsigned int len;

	path = join(dir, node);
	if (IS_ERR(path))
		return (char **)path;

	strings = xs_single(t, XS_DIRECTORY, path, &len);
	kfree(path);
	if (IS_ERR(strings))
		return (char **)strings;

	return split(strings, len, num);
}
EXPORT_SYMBOL_GPL(xenbus_directory);
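
/*
 * Illustrative sketch, not part of the original file: enumerating the
 * children of a xenstore directory with xenbus_directory().  The path
 * "device/vif" and the name example_list_vifs() are assumptions made
 * for illustration only.
 */
static void __maybe_unused example_list_vifs(void)
{
	char **dir;
	unsigned int i, dir_n = 0;

	dir = xenbus_directory(XBT_NIL, "device", "vif", &dir_n);
	if (IS_ERR(dir))
		return;

	/* The entry vector and its strings live in one allocation. */
	for (i = 0; i < dir_n; i++)
		printk(KERN_DEBUG "vif entry: %s\n", dir[i]);

	kfree(dir);
}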

/* Check if a path exists. Return 1 if it does. */
int xenbus_exists(struct xenbus_transaction t,
		  const char *dir, const char *node)
{
	char **d;
	int dir_n;

	d = xenbus_directory(t, dir, node, &dir_n);
	if (IS_ERR(d))
		return 0;
	kfree(d);
	return 1;
}
EXPORT_SYMBOL_GPL(xenbus_exists);
/* Get the value of a single file.
 * Returns a kmalloced value: call kfree() on it after use.
 * len indicates length in bytes.
 */
void *xenbus_read(struct xenbus_transaction t,
		  const char *dir, const char *node, unsigned int *len)
{
	char *path;
	void *ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return (void *)path;

	ret = xs_single(t, XS_READ, path, len);
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_read);
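
/*
 * Illustrative sketch, not part of the original file: reading a single
 * xenstore value.  The path "device/vif/0"/"backend" is an assumption;
 * real drivers build paths from their struct xenbus_device.
 */
static void __maybe_unused example_read_backend_path(void)
{
	char *backend;
	unsigned int len = 0;

	backend = xenbus_read(XBT_NIL, "device/vif/0", "backend", &len);
	if (IS_ERR(backend))
		return;

	printk(KERN_DEBUG "backend (%u bytes): %s\n", len, backend);
	kfree(backend);		/* value is kmalloc'ed by xenbus_read() */
}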

/* Write the value of a single file.
 * Returns -err on failure.
 */
int xenbus_write(struct xenbus_transaction t,
		 const char *dir, const char *node, const char *string)
{
	const char *path;
	struct kvec iovec[2];
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	iovec[0].iov_base = (void *)path;
	iovec[0].iov_len = strlen(path) + 1;
	iovec[1].iov_base = (void *)string;
	iovec[1].iov_len = strlen(string);

	ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_write);

/* Create a new directory. */
int xenbus_mkdir(struct xenbus_transaction t,
		 const char *dir, const char *node)
{
	char *path;
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_mkdir);

/* Destroy a file or directory (directories must be empty). */
int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
{
	char *path;
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = xs_error(xs_single(t, XS_RM, path, NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_rm);

/* Start a transaction: changes by others will not be seen during this
 * transaction, and changes will not be visible to others until end.
 */
int xenbus_transaction_start(struct xenbus_transaction *t)
{
	char *id_str;

	down_read(&xs_state.transaction_mutex);

	id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
	if (IS_ERR(id_str)) {
		up_read(&xs_state.transaction_mutex);
		return PTR_ERR(id_str);
	}

	t->id = simple_strtoul(id_str, NULL, 0);
	kfree(id_str);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_transaction_start);

/* End a transaction.
 * If abort is true, the transaction is discarded instead of committed.
 */
int xenbus_transaction_end(struct xenbus_transaction t, int abort)
{
	char abortstr[2];
	int err;

	if (abort)
		strcpy(abortstr, "F");
	else
		strcpy(abortstr, "T");

	err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));

	up_read(&xs_state.transaction_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_transaction_end);
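
/*
 * Illustrative sketch, not part of the original file: the usual
 * transaction pattern.  Several writes are made atomic by wrapping them
 * in xenbus_transaction_start()/xenbus_transaction_end(); -EAGAIN from
 * the end call means the transaction raced with another update and is
 * simply retried.  The paths and values are assumptions.
 */
static int __maybe_unused example_transactional_update(void)
{
	struct xenbus_transaction xbt;
	int err;

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		return err;

	err = xenbus_write(xbt, "device/vif/0", "state", "3");
	if (!err)
		err = xenbus_write(xbt, "device/vif/0", "feature-sg", "1");
	if (err) {
		xenbus_transaction_end(xbt, 1);	/* abort */
		return err;
	}

	err = xenbus_transaction_end(xbt, 0);	/* commit */
	if (err == -EAGAIN)
		goto again;
	return err;
}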

/* Single read and scanf: returns -errno or num scanned. */
int xenbus_scanf(struct xenbus_transaction t,
		 const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
	char *val;

	val = xenbus_read(t, dir, node, NULL);
	if (IS_ERR(val))
		return PTR_ERR(val);

	va_start(ap, fmt);
	ret = vsscanf(val, fmt, ap);
	va_end(ap);
	kfree(val);
	/* Distinctive errno. */
	if (ret == 0)
		return -ERANGE;
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_scanf);

/* Single printf and write: returns -errno or 0. */
int xenbus_printf(struct xenbus_transaction t,
		  const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
#define PRINTF_BUFFER_SIZE 4096
	char *printf_buffer;

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		return -ENOMEM;

	va_start(ap, fmt);
	ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
	va_end(ap);

	BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
	ret = xenbus_write(t, dir, node, printf_buffer);

	kfree(printf_buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_printf);
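
/*
 * Illustrative sketch, not part of the original file: the formatted
 * helpers.  A single integer is read back with xenbus_scanf() and a
 * feature flag is advertised with xenbus_printf().  The paths are
 * assumptions for illustration.
 */
static void __maybe_unused example_formatted_io(void)
{
	unsigned int state = 0;

	/* Returns the number of items scanned, or -errno (-ERANGE if none). */
	if (xenbus_scanf(XBT_NIL, "device/vif/0", "state", "%u", &state) < 0)
		return;

	/* Formats into a 4096-byte buffer and writes it in one go. */
	xenbus_printf(XBT_NIL, "device/vif/0", "feature-rx-notify", "%d", 1);
}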

/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
{
	va_list ap;
	const char *name;
	int ret = 0;

	va_start(ap, dir);
	while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
		const char *fmt = va_arg(ap, char *);
		void *result = va_arg(ap, void *);
		char *p;

		p = xenbus_read(t, dir, name, NULL);
		if (IS_ERR(p)) {
			ret = PTR_ERR(p);
			break;
		}
		if (fmt) {
			if (sscanf(p, fmt, result) == 0)
				ret = -EINVAL;
			kfree(p);
		} else
			*(char **)result = p;
	}
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_gather);
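
/*
 * Illustrative sketch, not part of the original file: collecting several
 * values in one call with xenbus_gather().  The argument list is tuples
 * of (name, scanf-format, result-pointer), NULL terminated; a NULL
 * format returns the raw kmalloc'ed string instead.  The path
 * "device/vbd/768" and the node names are assumptions.
 */
static int __maybe_unused example_gather_ring_info(void)
{
	unsigned long ring_ref = 0;
	unsigned int evtchn = 0;
	char *protocol = NULL;
	int err;

	err = xenbus_gather(XBT_NIL, "device/vbd/768",
			    "ring-ref", "%lu", &ring_ref,
			    "event-channel", "%u", &evtchn,
			    "protocol", NULL, &protocol,
			    NULL);
	if (err)
		return err;

	kfree(protocol);	/* raw string results must be freed */
	return 0;
}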

static int xs_watch(const char *path, const char *token)
{
	struct kvec iov[2];

	iov[0].iov_base = (void *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (void *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
				 ARRAY_SIZE(iov), NULL));
}

static int xs_unwatch(const char *path, const char *token)
{
	struct kvec iov[2];

	iov[0].iov_base = (char *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (char *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
				 ARRAY_SIZE(iov), NULL));
}

static struct xenbus_watch *find_watch(const char *token)
{
	struct xenbus_watch *i, *cmp;

	cmp = (void *)simple_strtoul(token, NULL, 16);

	list_for_each_entry(i, &watches, list)
		if (i == cmp)
			return i;

	return NULL;
}

/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
	/* Pointer in ascii is the token. */
	char token[sizeof(watch) * 2 + 1];
	int err;

	sprintf(token, "%lX", (long)watch);

	down_read(&xs_state.watch_mutex);

	spin_lock(&watches_lock);
	BUG_ON(find_watch(token));
	list_add(&watch->list, &watches);
	spin_unlock(&watches_lock);

	err = xs_watch(watch->node, token);

	/* Ignore errors due to multiple registration. */
	if ((err != 0) && (err != -EEXIST)) {
		spin_lock(&watches_lock);
		list_del(&watch->list);
		spin_unlock(&watches_lock);
	}

	up_read(&xs_state.watch_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(register_xenbus_watch);

void unregister_xenbus_watch(struct xenbus_watch *watch)
{
	struct xs_stored_msg *msg, *tmp;
	char token[sizeof(watch) * 2 + 1];
	int err;

	sprintf(token, "%lX", (long)watch);

	down_read(&xs_state.watch_mutex);

	spin_lock(&watches_lock);
	BUG_ON(!find_watch(token));
	list_del(&watch->list);
	spin_unlock(&watches_lock);

	err = xs_unwatch(watch->node, token);
	if (err)
		printk(KERN_WARNING
		       "XENBUS Failed to release watch %s: %i\n",
		       watch->node, err);

	up_read(&xs_state.watch_mutex);

	/* Make sure there are no callbacks running currently (unless
	   it's us). */
	if (current->pid != xenwatch_pid)
		mutex_lock(&xenwatch_mutex);

	/* Cancel pending watch events. */
	spin_lock(&watch_events_lock);
	list_for_each_entry_safe(msg, tmp, &watch_events, list) {
		if (msg->u.watch.handle != watch)
			continue;
		list_del(&msg->list);
		kfree(msg->u.watch.vec);
		kfree(msg);
	}
	spin_unlock(&watch_events_lock);

	if (current->pid != xenwatch_pid)
		mutex_unlock(&xenwatch_mutex);
}
EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
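
/*
 * Illustrative sketch, not part of the original file: registering a
 * xenbus watch.  The callback runs in the xenwatch kernel thread, with
 * vec[XS_WATCH_PATH] holding the path that fired.  The node string and
 * the example_* names are assumptions for illustration.
 */
static void example_backend_changed(struct xenbus_watch *watch,
				    const char **vec, unsigned int len)
{
	printk(KERN_DEBUG "xenstore node %s changed\n", vec[XS_WATCH_PATH]);
}

static struct xenbus_watch example_watch = {
	.node = "device/vif/0/backend-id",
	.callback = example_backend_changed,
};

static int __maybe_unused example_setup_watch(void)
{
	int err;

	err = register_xenbus_watch(&example_watch);
	if (err)
		return err;

	/* ... later, before the watch structure goes away ... */
	unregister_xenbus_watch(&example_watch);
	return 0;
}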

void xs_suspend(void)
{
	down_write(&xs_state.transaction_mutex);
	down_write(&xs_state.watch_mutex);
	mutex_lock(&xs_state.request_mutex);
	mutex_lock(&xs_state.response_mutex);
}

void xs_resume(void)
{
	struct xenbus_watch *watch;
	char token[sizeof(watch) * 2 + 1];

	xb_init_comms();

	mutex_unlock(&xs_state.response_mutex);
	mutex_unlock(&xs_state.request_mutex);
	up_write(&xs_state.transaction_mutex);

	/* No need for watches_lock: the watch_mutex is sufficient. */
	list_for_each_entry(watch, &watches, list) {
		sprintf(token, "%lX", (long)watch);
		xs_watch(watch->node, token);
	}

	up_write(&xs_state.watch_mutex);
}

void xs_suspend_cancel(void)
{
	mutex_unlock(&xs_state.response_mutex);
	mutex_unlock(&xs_state.request_mutex);
	up_write(&xs_state.watch_mutex);
	up_write(&xs_state.transaction_mutex);
}

static int xenwatch_thread(void *unused)
{
	struct list_head *ent;
	struct xs_stored_msg *msg;

	for (;;) {
		wait_event_interruptible(watch_events_waitq,
					 !list_empty(&watch_events));

		if (kthread_should_stop())
			break;

		mutex_lock(&xenwatch_mutex);

		spin_lock(&watch_events_lock);
		ent = watch_events.next;
		if (ent != &watch_events)
			list_del(ent);
		spin_unlock(&watch_events_lock);

		if (ent != &watch_events) {
			msg = list_entry(ent, struct xs_stored_msg, list);
			msg->u.watch.handle->callback(
				msg->u.watch.handle,
				(const char **)msg->u.watch.vec,
				msg->u.watch.vec_size);
			kfree(msg->u.watch.vec);
			kfree(msg);
		}

		mutex_unlock(&xenwatch_mutex);
	}

	return 0;
}

static int process_msg(void)
{
	struct xs_stored_msg *msg;
	char *body;
	int err;

	/*
	 * We must disallow save/restore while reading a xenstore message.
	 * A partial read across s/r leaves us out of sync with xenstored.
	 */
	for (;;) {
		err = xb_wait_for_data_to_read();
		if (err)
			return err;
		mutex_lock(&xs_state.response_mutex);
		if (xb_data_to_read())
			break;
		/* We raced with save/restore: pending data 'disappeared'. */
		mutex_unlock(&xs_state.response_mutex);
	}

	msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH);
	if (msg == NULL) {
		err = -ENOMEM;
		goto out;
	}

	err = xb_read(&msg->hdr, sizeof(msg->hdr));
	if (err) {
		kfree(msg);
		goto out;
	}

	body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
	if (body == NULL) {
		kfree(msg);
		err = -ENOMEM;
		goto out;
	}

	err = xb_read(body, msg->hdr.len);
	if (err) {
		kfree(body);
		kfree(msg);
		goto out;
	}
	body[msg->hdr.len] = '\0';

	if (msg->hdr.type == XS_WATCH_EVENT) {
		msg->u.watch.vec = split(body, msg->hdr.len,
					 &msg->u.watch.vec_size);
		if (IS_ERR(msg->u.watch.vec)) {
			err = PTR_ERR(msg->u.watch.vec);
			kfree(msg);
			goto out;
		}

		spin_lock(&watches_lock);
		msg->u.watch.handle = find_watch(
			msg->u.watch.vec[XS_WATCH_TOKEN]);
		if (msg->u.watch.handle != NULL) {
			spin_lock(&watch_events_lock);
			list_add_tail(&msg->list, &watch_events);
			wake_up(&watch_events_waitq);
			spin_unlock(&watch_events_lock);
		} else {
			kfree(msg->u.watch.vec);
			kfree(msg);
		}
		spin_unlock(&watches_lock);
	} else {
		msg->u.reply.body = body;
		spin_lock(&xs_state.reply_lock);
		list_add_tail(&msg->list, &xs_state.reply_list);
		spin_unlock(&xs_state.reply_lock);
		wake_up(&xs_state.reply_waitq);
	}

 out:
	mutex_unlock(&xs_state.response_mutex);
	return err;
}

static int xenbus_thread(void *unused)
{
	int err;

	for (;;) {
		err = process_msg();
		if (err)
			printk(KERN_WARNING "XENBUS error %d while reading "
			       "message\n", err);
		if (kthread_should_stop())
			break;
	}

	return 0;
}

int xs_init(void)
{
	int err;
	struct task_struct *task;

	INIT_LIST_HEAD(&xs_state.reply_list);
	spin_lock_init(&xs_state.reply_lock);
	init_waitqueue_head(&xs_state.reply_waitq);

	mutex_init(&xs_state.request_mutex);
	mutex_init(&xs_state.response_mutex);
	init_rwsem(&xs_state.transaction_mutex);
	init_rwsem(&xs_state.watch_mutex);

	/* Initialize the shared memory rings to talk to xenstored */
	err = xb_init_comms();
	if (err)
		return err;

	task = kthread_run(xenwatch_thread, NULL, "xenwatch");
	if (IS_ERR(task))
		return PTR_ERR(task);
	xenwatch_pid = task->pid;

	task = kthread_run(xenbus_thread, NULL, "xenbus");
	if (IS_ERR(task))
		return PTR_ERR(task);

	return 0;
}