/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <asm/xen/hypervisor.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed       ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
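
/*
 * Illustrative sketch only (not part of this file): a caller normally embeds
 * the xenbus_watch in its per-device state and watches the other end's
 * directory.  The example_info structure and callback name are hypothetical.
 *
 *	struct example_info {
 *		struct xenbus_watch watch;
 *	};
 *
 *	static void example_otherend_changed(struct xenbus_watch *watch,
 *					     const char **vec, unsigned int len)
 *	{
 *		pr_debug("%s changed\n", vec[XS_WATCH_PATH]);
 *	}
 *
 *	err = xenbus_watch_path(dev, dev->otherend, &info->watch,
 *				example_otherend_changed);
 *
 * Because the path is stored by reference rather than copied, it must stay
 * valid until the watch is removed with unregister_xenbus_watch().
 */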

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from the printf-style @pathfmt and its
 * arguments, using the given xenbus_watch structure for storage, and the
 * given @callback function as the callback.  Return 0 on success, or -errno
 * on error.  On success, the formatted path will be saved as @watch->node,
 * and becomes the caller's to kfree().  On error, @watch->node will be NULL,
 * so the caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char **, unsigned int),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);
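
/*
 * Illustrative sketch only (not part of this file): drivers advertise their
 * progress through the connection handshake with successive calls such as
 *
 *	xenbus_switch_state(dev, XenbusStateInitialised);
 *	xenbus_switch_state(dev, XenbusStateConnected);
 *
 * at the appropriate points of their probe/connect paths.  Failures are
 * reported via xenbus_switch_fatal(), which records the error in XenStore
 * and moves the device towards XenbusStateClosing.
 */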

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

/**
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}


static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	int ret;
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

fail:
	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
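
/*
 * Illustrative sketch only (not part of this file): a typical use is giving
 * up after a required XenStore key cannot be read.  The ring_ref variable is
 * hypothetical.
 *
 *	err = xenbus_gather(XBT_NIL, dev->otherend,
 *			    "ring-ref", "%lu", &ring_ref, NULL);
 *	if (err) {
 *		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
 *				 dev->otherend);
 *		return err;
 *	}
 *
 * The formatted message ends up under the device's error node in XenStore
 * and in the kernel log, and the device is switched to XenbusStateClosing.
 */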

/**
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps to avoid
 * recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.
 * Return a non-negative grant reference on success, or -errno on error.
 * On error, the device will switch to XenbusStateClosing, and the error
 * will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
	if (err < 0)
		xenbus_dev_fatal(dev, err, "granting access to ring page");
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);

/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
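
/*
 * Illustrative sketch only (not part of this file): the usual frontend setup
 * grants its ring page and allocates an unbound event channel, then
 * advertises both to the backend.  The info fields are hypothetical.
 *
 *	info->ring = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
 *	if (!info->ring)
 *		return -ENOMEM;
 *
 *	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring));
 *	if (err < 0)
 *		goto fail;
 *	info->ring_ref = err;
 *
 *	err = xenbus_alloc_evtchn(dev, &info->evtchn);
 *	if (err)
 *		goto fail;
 *
 * The grant reference and port are then typically written to the frontend's
 * XenStore directory (e.g. "ring-ref" and "event-channel") inside a
 * transaction so the backend can map the ring and bind the channel.
 */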

/**
 * Bind to an existing interdomain event channel in another domain.  Returns 0
 * on success and stores the local port in *port.  On error, returns -errno,
 * switches the device to XenbusStateClosing, and saves the error in XenStore.
 */
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = dev->otherend_id;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		xenbus_dev_fatal(dev, err,
				 "binding to event channel %d from domain %d",
				 remote_port, dev->otherend_id);
	else
		*port = bind_interdomain.local_port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);

/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
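
/*
 * Illustrative sketch only (not part of this file): the port allocated or
 * bound above is released again on disconnect.  The info->evtchn field is
 * hypothetical.
 *
 *	if (info->evtchn) {
 *		xenbus_free_evtchn(dev, info->evtchn);
 *		info->evtchn = 0;
 *	}
 */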

/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct vm_struct *area;

	*vaddr = NULL;

	area = xen_alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xen_free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stuff the handle in an unused field */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
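
/*
 * Illustrative sketch only (not part of this file): a backend maps the page
 * the frontend granted and keeps only the virtual address, since the grant
 * handle is stashed in the vm_struct.  ring_ref and info are hypothetical.
 *
 *	void *addr;
 *
 *	err = xenbus_map_ring_valloc(dev, ring_ref, &addr);
 *	if (err)
 *		return err;
 *	info->ring = addr;
 *
 * and the matching teardown on disconnect is simply:
 *
 *	xenbus_unmap_ring_vfree(dev, info->ring);
 */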

/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.flags     = GNTMAP_host_map,
		.ref       = gnt_ref,
		.dom       = dev->otherend_id,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
	 * method so that we don't have to muck with vmalloc internals here.
	 * We could force the user to hang on to their struct vm_struct from
	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
	 * this API.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	op.handle = (grant_handle_t)area->phys_addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		xen_free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.handle    = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);

/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
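
/*
 * Illustrative sketch only (not part of this file): a driver can poll the
 * other end's state directly, for instance while waiting for the peer to
 * finish closing:
 *
 *	while (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed)
 *		msleep(100);
 *
 * Most drivers instead react in their otherend_changed callback, which is
 * handed the state already read for them by the xenbus core.
 */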