/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
	unsigned int nr_handles;
};

struct map_ring_valloc {
	struct xenbus_map_node *node;

	/* Why do we need two arrays? See comment of __xenbus_map_ring */
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];

	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];

	unsigned int idx;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[XenbusStateUnknown] = "Unknown",
		[XenbusStateInitialising] = "Initialising",
		[XenbusStateInitWait] = "InitWait",
		[XenbusStateInitialised] = "Initialised",
		[XenbusStateConnected] = "Connected",
		[XenbusStateClosing] = "Closing",
		[XenbusStateClosed] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};

	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @will_handle: callback to determine whether an event should be queued
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, the @will_handle function as the callback to determine whether
 * each event needs to be queued, and the given @callback function as the
 * callback.  On success, the given @path will be saved as @watch->node, and
 * remains the caller's to free.  On error, @watch->node will be NULL, the
 * device will switch to %XenbusStateClosing, and the error will be saved in
 * the store.
 *
 * Returns: %0 on success or -errno on error
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      bool (*will_handle)(struct xenbus_watch *,
					  const char *, const char *),
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int err;

	watch->node = path;
	watch->will_handle = will_handle;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->will_handle = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @will_handle: callback to determine whether an event should be queued
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt and the following
 * arguments, using the given xenbus_watch structure for storage, the
 * @will_handle function as the callback to determine whether each event
 * needs to be queued, and the given @callback function as the callback.
 * On success, the watched path will be saved as @watch->node, and becomes
 * the caller's to kfree().  On error, @watch->node will be NULL, so the
 * caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 *
 * Returns: %0 on success or -errno on error
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 bool (*will_handle)(struct xenbus_watch *,
					     const char *, const char *),
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, will_handle, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state - save the new state of a driver
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new_state.
 * On error, the device will switch to XenbusStateClosing, and the error
 * will be saved in the store.
 *
 * Returns: %0 on success or -errno on error
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer;
	char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!printf_buffer)
		return;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (path_buffer)
		xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error - place an error message into the store
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
/**
 * xenbus_dev_fatal - put an error message into the store and then shutdown
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_setup_ring - allocate and grant pages for a shared ring
 * @dev: xenbus device
 * @gfp: memory allocation flags
 * @vaddr: pointer to starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Allocate physically contiguous pages for a shared ring buffer and grant it
 * to the peer of the given device.  The ring buffer is initially filled with
 * zeroes.  The virtual address of the ring is stored at @vaddr and the
 * grant references are stored in the @grefs array.  In case of error @vaddr
 * will be set to NULL and @grefs will be filled with INVALID_GRANT_REF.
 */
int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
	grant_ref_t gref_head;
	unsigned int i;
	void *addr;
	int ret;

	addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
	if (!*vaddr) {
		ret = -ENOMEM;
		goto err;
	}

	ret = gnttab_alloc_grant_references(nr_pages, &gref_head);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "granting access to %u ring pages",
				 nr_pages);
		goto err;
	}

	for (i = 0; i < nr_pages; i++) {
		unsigned long gfn;

		if (is_vmalloc_addr(*vaddr))
			gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
		else
			gfn = virt_to_gfn(addr);

		grefs[i] = gnttab_claim_grant_reference(&gref_head);
		gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
						gfn, 0);

		addr += XEN_PAGE_SIZE;
	}

	return 0;

 err:
	if (*vaddr)
		free_pages_exact(*vaddr, ring_size);
	for (i = 0; i < nr_pages; i++)
		grefs[i] = INVALID_GRANT_REF;
	*vaddr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_setup_ring);
/**
 * xenbus_teardown_ring - remove grants and free a shared ring buffer
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages
 * @grefs: grant reference array
 *
 * Remove grants for the shared ring buffer and free the associated memory.
 * On return the grant reference array is filled with INVALID_GRANT_REF.
 */
void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
			  grant_ref_t *grefs)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		if (grefs[i] != INVALID_GRANT_REF) {
			gnttab_end_foreign_access(grefs[i], NULL);
			grefs[i] = INVALID_GRANT_REF;
		}
	}

	if (*vaddr)
		free_pages_exact(*vaddr, nr_pages * XEN_PAGE_SIZE);
	*vaddr = NULL;
}
EXPORT_SYMBOL_GPL(xenbus_teardown_ring);
/*
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
/*
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %u", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
/**
 * xenbus_map_ring_valloc - allocate & map pages of VA space
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and sets
 * *vaddr to that address.  If an error is returned, the device will switch
 * to XenbusStateClosing and the error message will be saved in XenStore.
 *
 * Returns: %0 on success or -errno on error
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	int err;
	struct map_ring_valloc *info;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
	if (!info->node)
		err = -ENOMEM;
	else
		err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

	kfree(info->node);
	kfree(info);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
 * long), e.g. 32-on-64.  The caller is responsible for preparing the
 * right array to feed into this function. */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     struct map_ring_valloc *info,
			     unsigned int flags,
			     bool *leaked)
{
	int i, j;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
				  gnt_refs[i], dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(info->map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (info->map[i].status != GNTST_okay) {
			xenbus_dev_fatal(dev, info->map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = info->map[i].handle;
	}

	return 0;

 fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			gnttab_set_unmap_op(&info->unmap[j],
					    info->addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j));

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (info->unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return -ENOENT;
}

/**
 * xenbus_unmap_ring - unmap memory from another domain
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 *
 * Returns: %0 on success or GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
			     unsigned int nr_handles, unsigned long *vaddrs)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int i;
	int err = 0;

	if (nr_handles > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_handles; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i));

	for (i = 0; i < nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	return err;
}

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
					    unsigned int goffset,
					    unsigned int len,
					    void *data)
{
	struct map_ring_valloc *info = data;
	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
}

static int xenbus_map_ring_hvm(struct xenbus_device *dev,
			       struct map_ring_valloc *info,
			       grant_ref_t *gnt_ref,
			       unsigned int nr_grefs,
			       void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	int err;
	void *addr;
	bool leaked = false;
	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

	err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
	if (err)
		goto out_err;

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm,
			     info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	info->node = NULL;
	*vaddr = addr;
	return 0;

 out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_pages);
 out_free_ballooned_pages:
	if (!leaked)
		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
 out_err:
	return err;
}

/**
 * xenbus_unmap_ring_vfree - unmap a page of memory from another domain
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another
 * domain.  Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 *
 * Returns: %0 on success or GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
#ifdef CONFIG_XEN_PV
static int map_ring_apply(pte_t *pte, unsigned long addr, void *data)
{
	struct map_ring_valloc *info = data;

	info->phys_addrs[info->idx++] = arbitrary_virt_to_machine(pte).maddr;
	return 0;
}

static int xenbus_map_ring_pv(struct xenbus_device *dev,
			      struct map_ring_valloc *info,
			      grant_ref_t *gnt_refs,
			      unsigned int nr_grefs,
			      void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	struct vm_struct *area;
	bool leaked = false;
	int err = -ENOMEM;

	area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);
	if (!area)
		return -ENOMEM;
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))
		goto failed;
	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				info, GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	info->node = NULL;
	*vaddr = area->addr;
	return 0;

failed:
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	return err;
}

static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int level;
	int i;
	bool leaked = false;
	int err;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++) {
		unsigned long addr;

		memset(&unmap[i], 0, sizeof(unmap[i]));
		addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
		unmap[i].host_addr = arbitrary_virt_to_machine(
			lookup_address(addr, &level)).maddr;
		unmap[i].dev_bus_addr = 0;
		unmap[i].handle = node->handles[i];
	}

	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i));

	err = GNTST_okay;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_pv,
	.unmap = xenbus_unmap_ring_pv,
};
#endif

struct unmap_ring_hvm {
	unsigned int idx;
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
					      unsigned int goffset,
					      unsigned int len,
					      void *data)
{
	struct unmap_ring_hvm *info = data;

	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
}

static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	if (!rv) {
		vunmap(vaddr);
		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
	} else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

	kfree(node);
	return rv;
}

/**
 * xenbus_read_driver_state - read state from a store path
 * @path: path for driver
 *
 * Returns: the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);

	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_hvm,
	.unmap = xenbus_unmap_ring_hvm,
};

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}