/* drivers/xen/xenbus/xenbus_client.c */
/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus_probe.h"
struct xenbus_map_node {
	struct list_head next;
	union {
		struct vm_struct *area; /* PV */
		struct page *page;      /* HVM */
	};
	grant_handle_t handle;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;
const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed       ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
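
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * might use xenbus_strstate() in debug output when its peer changes state.
 * The "backend_changed" callback shown here is hypothetical.
 *
 *	static void backend_changed(struct xenbus_device *dev,
 *				    enum xenbus_state backend_state)
 *	{
 *		dev_dbg(&dev->dev, "backend now %s\n",
 *			xenbus_strstate(backend_state));
 *	}
 */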
/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
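
/*
 * Example (hedged sketch): a frontend could watch its backend's state node.
 * The "info" structure, its "watch" member, and "backend_changed_cb" are
 * hypothetical names; only xenbus_watch_path() itself comes from this file.
 * Note that @path must remain valid for the lifetime of the watch.
 *
 *	path = kasprintf(GFP_KERNEL, "%s/state", dev->otherend);
 *	if (!path)
 *		return -ENOMEM;
 *	err = xenbus_watch_path(dev, path, &info->watch, backend_changed_cb);
 *	if (err)
 *		kfree(path);
 */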
/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt, using the given
 * xenbus_watch structure for storage, and the given @callback function as the
 * callback.  Return 0 on success, or -errno on error.  On success, the watched
 * path (built from @pathfmt and its arguments) will be saved as @watch->node,
 * and becomes the caller's to kfree().  On error, watch->node will be NULL, so
 * the caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char **, unsigned int),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
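
/*
 * Example (hedged sketch): the same watch as in the previous example, but
 * letting xenbus_watch_pathfmt() build and own the path string.  "info" and
 * "backend_changed_cb" are hypothetical names.
 *
 *	err = xenbus_watch_pathfmt(dev, &info->watch, backend_changed_cb,
 *				   "%s/state", dev->otherend);
 */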
static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new
 * @state.  Return 0 on success, or -errno on error.  On error, the device
 * will switch to XenbusStateClosing, and the error will be saved in the
 * store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
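
/*
 * Example (hedged sketch): a typical driver walks through the handshake by
 * calling xenbus_switch_state() as it finishes each setup phase, e.g. after
 * publishing its ring reference and event channel in a hypothetical
 * "setup_rings()" step:
 *
 *	err = setup_rings(dev);		// hypothetical helper
 *	if (err)
 *		return err;
 *	return xenbus_switch_state(dev, XenbusStateInitialised);
 */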
int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
/*
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	int ret;
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

fail:
	kfree(printf_buffer);
	kfree(path_buffer);
}
/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
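
/*
 * Example (hedged sketch): reporting a fatal probe-time failure.  The
 * "feature-foo" node is hypothetical; note that xenbus_dev_fatal() both
 * records the error under error/<nodename> and begins an orderly shutdown
 * via XenbusStateClosing.
 *
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "feature-foo", "%d", 1);
 *	if (err) {
 *		xenbus_dev_fatal(dev, err, "writing feature-foo");
 *		return err;
 *	}
 */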
/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoid recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}
/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.
 * Return a non-negative grant reference on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
	if (err < 0)
		xenbus_dev_fatal(dev, err, "granting access to ring page");
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
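
/*
 * Example (hedged sketch): sharing a freshly allocated ring page with the
 * peer.  "sring" and "ref" are hypothetical locals; virt_to_mfn() is how
 * existing frontends derive the mfn to grant.
 *
 *	sring = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
 *	if (!sring)
 *		return -ENOMEM;
 *	err = xenbus_grant_ring(dev, virt_to_mfn(sring));
 *	if (err < 0)
 *		free_page((unsigned long)sring);
 *	else
 *		ref = err;	// grant reference to publish in xenstore
 */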
/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
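
/*
 * Example (hedged sketch): allocating an unbound channel and publishing the
 * port for the peer to bind to.  "event-channel" follows the conventional
 * xenstore node name; the transaction handling around xenbus_printf() is
 * elided ("...").
 *
 *	err = xenbus_alloc_evtchn(dev, &evtchn);
 *	if (err)
 *		return err;
 *	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", evtchn);
 *	...
 */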
/**
 * Bind to an existing interdomain event channel in another domain.  Returns 0
 * on success and stores the local port in *port.  On error, returns -errno,
 * switches the device to XenbusStateClosing, and saves the error in XenStore.
 */
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = dev->otherend_id;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		xenbus_dev_fatal(dev, err,
				 "binding to event channel %d from domain %d",
				 remote_port, dev->otherend_id);
	else
		*port = bind_interdomain.local_port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
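
/*
 * Example (hedged sketch): a backend binding to the port its frontend
 * advertised.  Reading "event-channel" from the other end is the
 * conventional pattern; "remote_port" and "local_port" are hypothetical
 * locals.
 *
 *	err = xenbus_scanf(XBT_NIL, dev->otherend, "event-channel", "%u",
 *			   &remote_port);
 *	if (err != 1)
 *		return -EINVAL;
 *	err = xenbus_bind_evtchn(dev, remote_port, &local_port);
 */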
/**
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	return ring_ops->map(dev, gnt_ref, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
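
/*
 * Example (hedged sketch): a backend mapping the frontend's shared ring.
 * "ring_ref" would have been read from xenstore beforehand; "be->ring" is a
 * hypothetical place to keep the mapping for the later
 * xenbus_unmap_ring_vfree() call.
 *
 *	err = xenbus_map_ring_valloc(dev, ring_ref, &be->ring);
 *	if (err)
 *		return err;	// error already reported via xenbus_dev_fatal
 */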
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
				     int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct xenbus_map_node *node;
	struct vm_struct *area;
	pte_t *pte;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	area = alloc_vm_area(PAGE_SIZE, &pte);
	if (!area) {
		kfree(node);
		return -ENOMEM;
	}

	op.host_addr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_batch_map(&op, 1);

	if (op.status != GNTST_okay) {
		free_vm_area(area);
		kfree(node);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	node->handle = op.handle;
	node->area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	return 0;
}
static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
				      int gnt_ref, void **vaddr)
{
	struct xenbus_map_node *node;
	int err;
	void *addr;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
	if (err)
		goto out_err;

	addr = pfn_to_kaddr(page_to_pfn(node->page));

	err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
	if (err)
		goto out_err_free_ballooned_pages;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	return 0;

 out_err_free_ballooned_pages:
	free_xenballooned_pages(1, &node->page);
 out_err:
	kfree(node);
	return err;
}
/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!).  It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
			  dev->otherend_id);

	gnttab_batch_map(&op, 1);

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);
/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
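
/*
 * Example (hedged sketch): tearing down the mapping made in the
 * xenbus_map_ring_valloc() example above; "be->ring" is the same
 * hypothetical pointer filled in there.
 *
 *	if (be->ring) {
 *		xenbus_unmap_ring_vfree(dev, be->ring);
 *		be->ring = NULL;
 *	}
 */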
static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};
	unsigned int level;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	op.handle = node->handle;
	op.host_addr = arbitrary_virt_to_machine(
		lookup_address((unsigned long)vaddr, &level)).maddr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		free_vm_area(node->area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 node->handle, op.status);

	kfree(node);
	return op.status;
}
static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = pfn_to_kaddr(page_to_pfn(node->page));
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	rv = xenbus_unmap_ring(dev, node->handle, addr);

	if (!rv)
		free_xenballooned_pages(1, &node->page);
	else
		WARN(1, "Leaking %p\n", vaddr);

	kfree(node);
	return rv;
}
/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op;

	gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
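
/*
 * Example (hedged sketch): polling the peer's state, e.g. from a driver's
 * resume or close path; dev->otherend names the peer's xenstore directory.
 *
 *	enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
 *	if (state == XenbusStateClosed)
 *		...	// peer is gone; finish tearing down
 */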
static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
};

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
};

void __init xenbus_ring_ops_init(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
		ring_ops = &ring_ops_hvm;
}