/*
 * QEMU Xen emulation: Shared/overlay pages support
 *
 * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qom/object.h"
#include "migration/vmstate.h"

#include "hw/sysbus.h"
#include "hw/xen/xen.h"
#include "hw/xen/xen_backend_ops.h"
#include "xen_overlay.h"
#include "xen_evtchn.h"
#include "xen_primary_console.h"
#include "xen_xenstore.h"

#include "sysemu/kvm.h"
#include "sysemu/kvm_xen.h"

#include "xenstore_impl.h"

#include "hw/xen/interface/io/xs_wire.h"
#include "hw/xen/interface/event_channel.h"
#include "hw/xen/interface/grant_table.h"

#define TYPE_XEN_XENSTORE "xen-xenstore"
OBJECT_DECLARE_SIMPLE_TYPE(XenXenstoreState, XEN_XENSTORE)

#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
#define ENTRIES_PER_FRAME_V2 (XEN_PAGE_SIZE / sizeof(grant_entry_v2_t))

#define XENSTORE_HEADER_SIZE ((unsigned int)sizeof(struct xsd_sockmsg))
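
/*
 * Each message on the ring is a fixed struct xsd_sockmsg header followed
 * by up to XENSTORE_PAYLOAD_MAX bytes of payload, so the req_data/rsp_data
 * buffers below are sized to hold exactly one complete message.
 */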

struct XenXenstoreState {
    /*< private >*/
    SysBusDevice busdev;
    /*< public >*/

    XenstoreImplState *impl;
    GList *watch_events; /* for the guest */

    MemoryRegion xenstore_page;
    struct xenstore_domain_interface *xs;
    uint8_t req_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint8_t rsp_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint32_t req_offset;
    uint32_t rsp_offset;
    bool rsp_pending;
    bool fatal_error;

    evtchn_port_t guest_port;
    evtchn_port_t be_port;
    struct xenevtchn_handle *eh;

    uint8_t *impl_state;
    uint32_t impl_state_size;

    struct xengntdev_handle *gt;
    void *granted_xs;
};

struct XenXenstoreState *xen_xenstore_singleton;

static void xen_xenstore_event(void *opaque);
static void fire_watch_cb(void *opaque, const char *path, const char *token);

static struct xenstore_backend_ops emu_xenstore_backend_ops;
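
/*
 * Helper to write a node at a path relative to /local/domain/<xen_domid>
 * and then set its permissions. It is only used to populate the default
 * tree at realize/reset time, when the xs_impl_* calls are not expected
 * to fail (hence the asserts).
 */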
static void G_GNUC_PRINTF (4, 5) relpath_printf(XenXenstoreState *s,
                                                GList *perms,
                                                const char *relpath,
                                                const char *fmt, ...)
{
    gchar *abspath;
    gchar *value;
    va_list args;
    GByteArray *data;
    int err;

    abspath = g_strdup_printf("/local/domain/%u/%s", xen_domid, relpath);
    va_start(args, fmt);
    value = g_strdup_vprintf(fmt, args);
    va_end(args);

    data = g_byte_array_new_take((void *)value, strlen(value));

    err = xs_impl_write(s->impl, DOMID_QEMU, XBT_NULL, abspath, data);
    assert(!err);

    g_byte_array_unref(data);

    err = xs_impl_set_perms(s->impl, DOMID_QEMU, XBT_NULL, abspath, perms);
    assert(!err);

    g_free(abspath);
}

static void xen_xenstore_realize(DeviceState *dev, Error **errp)
{
    XenXenstoreState *s = XEN_XENSTORE(dev);
    GList *perms;

    if (xen_mode != XEN_EMULATE) {
        error_setg(errp, "Xen xenstore support is for Xen emulation");
        return;
    }
    memory_region_init_ram(&s->xenstore_page, OBJECT(dev), "xen:xenstore_page",
                           XEN_PAGE_SIZE, &error_abort);
    memory_region_set_enabled(&s->xenstore_page, true);
    s->xs = memory_region_get_ram_ptr(&s->xenstore_page);
    memset(s->xs, 0, XEN_PAGE_SIZE);

    /* We can't map it this early as KVM isn't ready */
    xen_xenstore_singleton = s;

    s->eh = xen_be_evtchn_open();
    if (!s->eh) {
        error_setg(errp, "Xenstore evtchn port init failed");
        return;
    }
    aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh),
                       xen_xenstore_event, NULL, NULL, NULL, s);

    s->impl = xs_impl_create(xen_domid);

    /* Populate the default nodes */

    /* Nodes owned by 'dom0' but readable by the guest */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU));
    perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid));

    relpath_printf(s, perms, "", "%s", "");

    relpath_printf(s, perms, "domid", "%u", xen_domid);

    relpath_printf(s, perms, "control/platform-feature-xs_reset_watches", "%u", 1);
    relpath_printf(s, perms, "control/platform-feature-multiprocessor-suspend", "%u", 1);

    relpath_printf(s, perms, "platform/acpi", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_s3", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_s4", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_laptop_slate", "%u", 0);

    g_list_free_full(perms, g_free);

    /* Nodes owned by the guest */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, xen_domid));

    relpath_printf(s, perms, "attr", "%s", "");

    relpath_printf(s, perms, "control/shutdown", "%s", "");
    relpath_printf(s, perms, "control/feature-poweroff", "%u", 1);
    relpath_printf(s, perms, "control/feature-reboot", "%u", 1);
    relpath_printf(s, perms, "control/feature-suspend", "%u", 1);
    relpath_printf(s, perms, "control/feature-s3", "%u", 1);
    relpath_printf(s, perms, "control/feature-s4", "%u", 1);

    relpath_printf(s, perms, "data", "%s", "");
    relpath_printf(s, perms, "device", "%s", "");
    relpath_printf(s, perms, "drivers", "%s", "");
    relpath_printf(s, perms, "error", "%s", "");
    relpath_printf(s, perms, "feature", "%s", "");

    g_list_free_full(perms, g_free);

    xen_xenstore_ops = &emu_xenstore_backend_ops;
}

static bool xen_xenstore_is_needed(void *opaque)
{
    return xen_mode == XEN_EMULATE;
}

static int xen_xenstore_pre_save(void *opaque)
{
    XenXenstoreState *s = opaque;
    GByteArray *save;

    if (s->eh) {
        s->guest_port = xen_be_evtchn_get_guest_port(s->eh);
    }

    g_free(s->impl_state);
    save = xs_impl_serialize(s->impl);
    s->impl_state = save->data;
    s->impl_state_size = save->len;
    g_byte_array_free(save, false);

    return 0;
}

static int xen_xenstore_post_load(void *opaque, int ver)
{
    XenXenstoreState *s = opaque;
    GByteArray *save;
    int ret;

    /*
     * As qemu/dom0, rebind to the guest's port. The Windows drivers may
     * unbind the XenStore evtchn and rebind to it, having obtained the
     * "remote" port through EVTCHNOP_status. In the case that migration
     * occurs while it's unbound, the "remote" port needs to be the same
     * as before so that the guest can find it, but should remain unbound.
     */
    if (s->guest_port) {
        int be_port = xen_be_evtchn_bind_interdomain(s->eh, xen_domid,
                                                     s->guest_port);
        if (be_port < 0) {
            return be_port;
        }
        s->be_port = be_port;
    }

    save = g_byte_array_new_take(s->impl_state, s->impl_state_size);
    s->impl_state = NULL;
    s->impl_state_size = 0;

    ret = xs_impl_deserialize(s->impl, save, xen_domid, fire_watch_cb, s);
    return ret;
}
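
/*
 * The xenstore tree itself travels as the opaque byte array produced by
 * xs_impl_serialize() in pre_save; impl_state_size drives the
 * VMSTATE_VARRAY_UINT32_ALLOC field in the description below.
 */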
static const VMStateDescription xen_xenstore_vmstate = {
    .name = "xen_xenstore",
    .unmigratable = 1, /* The PV back ends don't migrate yet */
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xen_xenstore_is_needed,
    .pre_save = xen_xenstore_pre_save,
    .post_load = xen_xenstore_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, req_data)),
        VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, rsp_data)),
        VMSTATE_UINT32(req_offset, XenXenstoreState),
        VMSTATE_UINT32(rsp_offset, XenXenstoreState),
        VMSTATE_BOOL(rsp_pending, XenXenstoreState),
        VMSTATE_UINT32(guest_port, XenXenstoreState),
        VMSTATE_BOOL(fatal_error, XenXenstoreState),
        VMSTATE_UINT32(impl_state_size, XenXenstoreState),
        VMSTATE_VARRAY_UINT32_ALLOC(impl_state, XenXenstoreState,
                                    impl_state_size, 0,
                                    vmstate_info_uint8, uint8_t),
        VMSTATE_END_OF_LIST()
    }
};

static void xen_xenstore_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xen_xenstore_realize;
    dc->vmsd = &xen_xenstore_vmstate;
}

static const TypeInfo xen_xenstore_info = {
    .name = TYPE_XEN_XENSTORE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XenXenstoreState),
    .class_init = xen_xenstore_class_init,
};

void xen_xenstore_create(void)
{
    DeviceState *dev = sysbus_create_simple(TYPE_XEN_XENSTORE, -1, NULL);

    xen_xenstore_singleton = XEN_XENSTORE(dev);

    /*
     * Defer the init (xen_xenstore_reset()) until KVM is set up and the
     * overlay page can be mapped.
     */
}

static void xen_xenstore_register_types(void)
{
    type_register_static(&xen_xenstore_info);
}

type_init(xen_xenstore_register_types)

uint16_t xen_xenstore_get_port(void)
{
    XenXenstoreState *s = xen_xenstore_singleton;
    if (!s) {
        return 0;
    }
    return s->guest_port;
}

static bool req_pending(XenXenstoreState *s)
{
    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;

    return s->req_offset == XENSTORE_HEADER_SIZE + req->len;
}

static void reset_req(XenXenstoreState *s)
{
    memset(s->req_data, 0, sizeof(s->req_data));
    s->req_offset = 0;
}

static void reset_rsp(XenXenstoreState *s)
{
    s->rsp_pending = false;

    memset(s->rsp_data, 0, sizeof(s->rsp_data));
    s->rsp_offset = 0;
}
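
/*
 * Note that there is only ever one request and one response in flight:
 * req_data/rsp_data each hold a single message, and process_req() asserts
 * that no response is pending before a new request is handled.
 */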

static void xs_error(XenXenstoreState *s, unsigned int id,
                     xs_transaction_t tx_id, int errnum)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    const char *errstr = NULL;

    for (unsigned int i = 0; i < ARRAY_SIZE(xsd_errors); i++) {
        const struct xsd_errors *xsd_error = &xsd_errors[i];

        if (xsd_error->errnum == errnum) {
            errstr = xsd_error->errstring;
            break;
        }
    }
    assert(errstr);

    trace_xenstore_error(id, tx_id, errstr);

    rsp->type = XS_ERROR;
    rsp->req_id = id;
    rsp->tx_id = tx_id;
    rsp->len = (uint32_t)strlen(errstr) + 1;

    memcpy(&rsp[1], errstr, rsp->len);
}

static void xs_ok(XenXenstoreState *s, unsigned int type, unsigned int req_id,
                  xs_transaction_t tx_id)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    const char *okstr = "OK";

    rsp->type = type;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = (uint32_t)strlen(okstr) + 1;

    memcpy(&rsp[1], okstr, rsp->len);
}

/*
 * The correct request and response formats are documented in xen.git:
 * docs/misc/xenstore.txt. A summary is given below for convenience.
 * The '|' symbol represents a NUL character.
 *
 * ---------- Database read, write and permissions operations ----------
 *
 * READ                    <path>|                 <value|>
 * WRITE                   <path>|<value|>
 *         Store and read the octet string <value> at <path>.
 *         WRITE creates any missing parent paths, with empty values.
 *
 * MKDIR                   <path>|
 *         Ensures that the <path> exists, if necessary by creating
 *         it and any missing parents with empty values. If <path>
 *         or any parent already exists, its value is left unchanged.
 *
 * RM                      <path>|
 *         Ensures that the <path> does not exist, by deleting
 *         it and all of its children. It is not an error if <path> does
 *         not exist, but it _is_ an error if <path>'s immediate parent
 *         does not exist either.
 *
 * DIRECTORY               <path>|                 <child-leaf-name>|*
 *         Gives a list of the immediate children of <path>, as only the
 *         leafnames. The resulting children are each named
 *         <path>/<child-leaf-name>.
 *
 * DIRECTORY_PART          <path>|<offset>         <gencnt>|<child-leaf-name>|*
 *         Same as DIRECTORY, but to be used for children lists longer than
 *         XENSTORE_PAYLOAD_MAX. Input are <path> and the byte offset into
 *         the list of children to return. Return values are the generation
 *         count <gencnt> of the node (to be used to ensure the node hasn't
 *         changed between two reads: <gencnt> being the same for multiple
 *         reads guarantees the node hasn't changed) and the list of children
 *         starting at the specified <offset> of the complete list.
 *
 * GET_PERMS               <path>|                 <perm-as-string>|+
 * SET_PERMS               <path>|<perm-as-string>|+?
 *         <perm-as-string> is one of the following
 *                 w<domid>        write only
 *                 r<domid>        read only
 *                 b<domid>        both read and write
 *                 n<domid>        no access
 *         See https://wiki.xen.org/wiki/XenBus section
 *         `Permissions' for details of the permissions system.
 *         It is possible to set permissions for the special watch paths
 *         "@introduceDomain" and "@releaseDomain" to enable receiving those
 *         watches in unprivileged domains.
 *
 * ---------- Watches ----------
 *
 * WATCH                   <wpath>|<token>|?
 *         Adds a watch.
 *
 *         When a <path> is modified (including path creation, removal,
 *         contents change or permissions change) this generates an event
 *         on the changed <path>. Changes made in transactions cause an
 *         event only if and when committed. Each occurring event is
 *         matched against all the watches currently set up, and each
 *         matching watch results in a WATCH_EVENT message (see below).
 *
 *         The event's path matches the watch's <wpath> if it is a child
 *         of <wpath>.
 *
 *         <wpath> can be a <path> to watch or @<wspecial>. In the
 *         latter case <wspecial> may have any syntax but it matches
 *         (according to the rules above) only the following special
 *         events which are invented by xenstored:
 *             @introduceDomain    occurs on INTRODUCE
 *             @releaseDomain      occurs on any domain crash or
 *                                 shutdown, and also on RELEASE
 *                                 and domain destruction
 *         <wspecial> events are sent to privileged callers or explicitly
 *         via SET_PERMS enabled domains only.
 *
 *         When a watch is first set up it is triggered once straight
 *         away, with <path> equal to <wpath>. Watches may be triggered
 *         spuriously. The tx_id in a WATCH request is ignored.
 *
 *         Watches are supposed to be restricted by the permissions
 *         system but in practice the implementation is imperfect.
 *         Applications should not rely on being sent a notification for
 *         paths that they cannot read; however, an application may rely
 *         on being sent a watch when a path which it _is_ able to read
 *         is deleted even if that leaves only a nonexistent unreadable
 *         parent. A notification may be omitted if a node's permissions
 *         are changed so as to make it unreadable, in which case future
 *         notifications may be suppressed (and if the node is later made
 *         readable, some notifications may have been lost).
 *
 * WATCH_EVENT             <epath>|<token>|
 *         Unsolicited `reply' generated for matching modification events
 *         as described above. req_id and tx_id are both 0.
 *
 *         <epath> is the event's path, ie the actual path that was
 *         modified; however if the event was the recursive removal of
 *         a parent of <wpath>, <epath> is just
 *         <wpath> (rather than the actual path which was removed). So
 *         <epath> is a child of <wpath>, regardless.
 *
 *         Iff <wpath> for the watch was specified as a relative pathname,
 *         the <epath> path will also be relative (with the same base,
 *         obviously).
 *
 * UNWATCH                 <wpath>|<token>|?
 *
 * RESET_WATCHES           |
 *         Reset all watches and transactions of the caller.
 *
 * ---------- Transactions ----------
 *
 * TRANSACTION_START       |                       <transid>|
 *         <transid> is an opaque uint32_t allocated by xenstored
 *         represented as unsigned decimal. After this, transaction may
 *         be referenced by using <transid> (as 32-bit binary) in the
 *         tx_id request header field. When transaction is started whole
 *         db is copied; reads and writes happen on the copy.
 *         It is not legal to send non-0 tx_id in TRANSACTION_START.
 *
 * TRANSACTION_END         T|
 * TRANSACTION_END         F|
 *         tx_id must refer to existing transaction. After this
 *         request the tx_id is no longer valid and may be reused by
 *         xenstore. If F, the transaction is discarded. If T,
 *         it is committed: if there were any other intervening writes
 *         then our END gets EAGAIN.
 *
 *         The plan is that in the future only intervening `conflicting'
 *         writes cause EAGAIN, meaning only writes or other commits
 *         which changed paths which were read or written in the
 *         transaction at hand.
 */
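
/*
 * As a concrete illustration (not taken from the spec above): a client
 * reading "/local/domain/1/domid" outside a transaction would place on
 * the ring:
 *
 *     struct xsd_sockmsg hdr = {
 *         .type = XS_READ,
 *         .req_id = 1,    // any value; echoed back in the reply
 *         .tx_id = 0,
 *         .len = 22,      // strlen("/local/domain/1/domid") + 1
 *     };
 *
 * followed by the NUL-terminated path. The reply echoes req_id and tx_id
 * and carries the raw value bytes ("1") as its payload; as xs_read()
 * below shows, no NUL is appended to the value.
 */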

static void xs_read(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
{
    const char *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    g_autoptr(GByteArray) data = g_byte_array_new();
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_read(tx_id, path);
    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_READ;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    len = data->len;
    if (len > XENSTORE_PAYLOAD_MAX) {
        xs_error(s, req_id, tx_id, E2BIG);
        return;
    }

    memcpy(&rsp_data[rsp->len], data->data, len);
    rsp->len += len;
}

static void xs_write(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    g_autoptr(GByteArray) data = g_byte_array_new();
    const char *path;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    g_byte_array_append(data, req_data, len);

    trace_xenstore_write(tx_id, path);
    err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_WRITE, req_id, tx_id);
}

static void xs_mkdir(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    g_autoptr(GByteArray) data = g_byte_array_new();
    const char *path;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    trace_xenstore_mkdir(tx_id, path);
    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
    if (err == ENOENT) {
        err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
    }

    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_MKDIR, req_id, tx_id);
}

static void xs_append_strings(XenXenstoreState *s, struct xsd_sockmsg *rsp,
                              GList *strings, unsigned int start, bool truncate)
{
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    GList *l;

    for (l = strings; l; l = l->next) {
        size_t len = strlen(l->data) + 1; /* Including the NUL termination */
        char *str = l->data;

        if (rsp->len + len > XENSTORE_PAYLOAD_MAX) {
            if (truncate) {
                len = XENSTORE_PAYLOAD_MAX - rsp->len;
                if (!len) {
                    return;
                }
            } else {
                xs_error(s, rsp->req_id, rsp->tx_id, E2BIG);
                return;
            }
        }

        if (start) {
            if (start >= len) {
                start -= len;
                continue;
            }

            str += start;
            len -= start;
            start = 0;
        }

        memcpy(&rsp_data[rsp->len], str, len);
        rsp->len += len;
    }

    /* XS_DIRECTORY_PART wants an extra NUL to indicate the end */
    if (truncate && rsp->len < XENSTORE_PAYLOAD_MAX) {
        rsp_data[rsp->len++] = '\0';
    }
}
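
/*
 * With truncate == false (DIRECTORY, GET_PERMS), overflowing the payload
 * is an E2BIG error; with truncate == true (DIRECTORY_PART), the list is
 * silently cut at XENSTORE_PAYLOAD_MAX and terminated with an extra NUL,
 * and the client is expected to continue from a higher <offset>.
 */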

static void xs_directory(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    GList *items = NULL;
    const char *path;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    trace_xenstore_directory(tx_id, path);
    err = xs_impl_directory(s->impl, xen_domid, tx_id, path, NULL, &items);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_DIRECTORY;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    xs_append_strings(s, rsp, items, 0, false);

    g_list_free_full(items, g_free);
}

static void xs_directory_part(XenXenstoreState *s, unsigned int req_id,
                              xs_transaction_t tx_id, uint8_t *req_data,
                              unsigned int len)
{
    const char *offset_str, *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    char *rsp_data = (char *)&rsp[1];
    uint64_t gencnt = 0;
    unsigned int offset;
    GList *items = NULL;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    offset_str = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    if (len) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    if (qemu_strtoui(offset_str, NULL, 10, &offset) < 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_directory_part(tx_id, path, offset);
    err = xs_impl_directory(s->impl, xen_domid, tx_id, path, &gencnt, &items);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_DIRECTORY_PART;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%" PRIu64, gencnt) + 1;

    xs_append_strings(s, rsp, items, offset, true);

    g_list_free_full(items, g_free);
}

static void xs_transaction_start(XenXenstoreState *s, unsigned int req_id,
                                 xs_transaction_t tx_id, uint8_t *req_data,
                                 unsigned int len)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    char *rsp_data = (char *)&rsp[1];
    int err;

    if (len != 1 || req_data[0] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    rsp->type = XS_TRANSACTION_START;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    err = xs_impl_transaction_start(s->impl, xen_domid, &tx_id);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    trace_xenstore_transaction_start(tx_id);

    rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%u", tx_id);
    assert(rsp->len < XENSTORE_PAYLOAD_MAX);
    rsp->len++;
}

static void xs_transaction_end(XenXenstoreState *s, unsigned int req_id,
                               xs_transaction_t tx_id, uint8_t *req_data,
                               unsigned int len)
{
    bool commit;
    int err;

    if (len != 2 || req_data[1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    switch (req_data[0]) {
    case 'T':
        commit = true;
        break;
    case 'F':
        commit = false;
        break;
    default:
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_transaction_end(tx_id, commit);
    err = xs_impl_transaction_end(s->impl, xen_domid, tx_id, commit);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_TRANSACTION_END, req_id, tx_id);
}

static void xs_rm(XenXenstoreState *s, unsigned int req_id,
                  xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
{
    const char *path = (const char *)req_data;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_rm(tx_id, path);
    err = xs_impl_rm(s->impl, xen_domid, tx_id, path);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_RM, req_id, tx_id);
}

static void xs_get_perms(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    const char *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    GList *perms = NULL;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_get_perms(tx_id, path);
    err = xs_impl_get_perms(s->impl, xen_domid, tx_id, path, &perms);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_GET_PERMS;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    xs_append_strings(s, rsp, perms, 0, false);

    g_list_free_full(perms, g_free);
}

static void xs_set_perms(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    const char *path = (const char *)req_data;
    uint8_t *perm;
    GList *perms = NULL;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    perm = req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            perms = g_list_append(perms, perm);
            perm = req_data;
        }
    }

    /*
     * Note that there may be trailing garbage at the end of the buffer.
     * This is explicitly permitted by the '?' at the end of the definition:
     *
     *    SET_PERMS         <path>|<perm-as-string>|+?
     */

    trace_xenstore_set_perms(tx_id, path);
    err = xs_impl_set_perms(s->impl, xen_domid, tx_id, path, perms);
    g_list_free(perms);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_SET_PERMS, req_id, tx_id);
}

static void xs_watch(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    const char *token, *path = (const char *)req_data;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    token = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    /*
     * Note that there may be trailing garbage at the end of the buffer.
     * This is explicitly permitted by the '?' at the end of the definition:
     *
     *    WATCH             <wpath>|<token>|?
     */

    trace_xenstore_watch(path, token);
    err = xs_impl_watch(s->impl, xen_domid, path, token, fire_watch_cb, s);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_WATCH, req_id, tx_id);
}

static void xs_unwatch(XenXenstoreState *s, unsigned int req_id,
                       xs_transaction_t tx_id, uint8_t *req_data,
                       unsigned int len)
{
    const char *token, *path = (const char *)req_data;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    token = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    trace_xenstore_unwatch(path, token);
    err = xs_impl_unwatch(s->impl, xen_domid, path, token, fire_watch_cb, s);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_UNWATCH, req_id, tx_id);
}

static void xs_reset_watches(XenXenstoreState *s, unsigned int req_id,
                             xs_transaction_t tx_id, uint8_t *req_data,
                             unsigned int len)
{
    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_reset_watches();
    xs_impl_reset_watches(s->impl, xen_domid);

    xs_ok(s, XS_RESET_WATCHES, req_id, tx_id);
}

static void xs_priv(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *data,
                    unsigned int len)
{
    xs_error(s, req_id, tx_id, EACCES);
}

static void xs_unimpl(XenXenstoreState *s, unsigned int req_id,
                      xs_transaction_t tx_id, uint8_t *data,
                      unsigned int len)
{
    xs_error(s, req_id, tx_id, ENOSYS);
}

typedef void (*xs_impl)(XenXenstoreState *s, unsigned int req_id,
                        xs_transaction_t tx_id, uint8_t *data,
                        unsigned int len);

struct xsd_req {
    const char *name;
    xs_impl fn;
};
#define XSD_REQ(_type, _fn) \
    [_type] = { .name = #_type, .fn = _fn }

struct xsd_req xsd_reqs[] = {
    XSD_REQ(XS_READ, xs_read),
    XSD_REQ(XS_WRITE, xs_write),
    XSD_REQ(XS_MKDIR, xs_mkdir),
    XSD_REQ(XS_DIRECTORY, xs_directory),
    XSD_REQ(XS_DIRECTORY_PART, xs_directory_part),
    XSD_REQ(XS_TRANSACTION_START, xs_transaction_start),
    XSD_REQ(XS_TRANSACTION_END, xs_transaction_end),
    XSD_REQ(XS_RM, xs_rm),
    XSD_REQ(XS_GET_PERMS, xs_get_perms),
    XSD_REQ(XS_SET_PERMS, xs_set_perms),
    XSD_REQ(XS_WATCH, xs_watch),
    XSD_REQ(XS_UNWATCH, xs_unwatch),
    XSD_REQ(XS_CONTROL, xs_priv),
    XSD_REQ(XS_INTRODUCE, xs_priv),
    XSD_REQ(XS_RELEASE, xs_priv),
    XSD_REQ(XS_IS_DOMAIN_INTRODUCED, xs_priv),
    XSD_REQ(XS_RESUME, xs_priv),
    XSD_REQ(XS_SET_TARGET, xs_priv),
    XSD_REQ(XS_RESET_WATCHES, xs_reset_watches),
};
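
/*
 * Operations reserved to the privileged dom0/xenstored tooling map to
 * xs_priv() (EACCES); any request type outside the table, or without an
 * entry in it, falls back to xs_unimpl() (ENOSYS) in process_req() below.
 */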

static void process_req(XenXenstoreState *s)
{
    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
    xs_impl handler = NULL;

    assert(req_pending(s));
    assert(!s->rsp_pending);

    if (req->type < ARRAY_SIZE(xsd_reqs)) {
        handler = xsd_reqs[req->type].fn;
    }
    if (!handler) {
        handler = &xs_unimpl;
    }

    handler(s, req->req_id, req->tx_id, (uint8_t *)&req[1], req->len);

    s->rsp_pending = true;
    reset_req(s);
}

static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr,
                                   unsigned int len)
{
    if (!len) {
        return 0;
    }

    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod);
    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons);
    unsigned int copied = 0;

    /* Ensure the ring contents don't cross the req_prod access. */
    smp_rmb();

    while (len) {
        unsigned int avail = prod - cons;
        unsigned int offset = MASK_XENSTORE_IDX(cons);
        unsigned int copylen = avail;

        if (avail > XENSTORE_RING_SIZE) {
            error_report("XenStore ring handling error");
            s->fatal_error = true;
            break;
        } else if (avail == 0) {
            break;
        }

        if (copylen > len) {
            copylen = len;
        }
        if (copylen > XENSTORE_RING_SIZE - offset) {
            copylen = XENSTORE_RING_SIZE - offset;
        }

        memcpy(ptr, &s->xs->req[offset], copylen);
        ptr += copylen;

        cons += copylen;
        len -= copylen;
        copied += copylen;
    }

    /*
     * Not sure this ever mattered except on Alpha, but this barrier
     * is to ensure that the update to req_cons is globally visible
     * only after we have consumed all the data from the ring, and we
     * don't end up seeing data written to the ring *after* the other
     * end sees the update and writes more to the ring. Xen's own
     * xenstored has the same barrier here (although with no comment
     * at all, obviously, because it's Xen code).
     */
    smp_mb();

    qatomic_set(&s->xs->req_cons, cons);

    return copied;
}
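
/*
 * The ring indices are free-running 32-bit counters, not offsets: the
 * unsigned subtraction prod - cons gives the number of unconsumed bytes
 * even across wrap (e.g. prod == 3, cons == 0xFFFFFFFC yields 7), and
 * MASK_XENSTORE_IDX() reduces an index to a position within the ring.
 */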

static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr,
                                 unsigned int len)
{
    if (!len) {
        return 0;
    }

    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons);
    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod);
    unsigned int copied = 0;

    /*
     * This matches the barrier in copy_from_ring() (or the guest's
     * equivalent) between reading the data from the ring and updating
     * rsp_cons. It protects against the pathological case (which
     * again I think never happened except on Alpha) where our
     * subsequent writes to the ring could *cross* the read of
     * rsp_cons and the guest could see the new data when it was
     * intending to read the old.
     */
    smp_mb();

    while (len) {
        unsigned int avail = cons + XENSTORE_RING_SIZE - prod;
        unsigned int offset = MASK_XENSTORE_IDX(prod);
        unsigned int copylen = len;

        if (avail > XENSTORE_RING_SIZE) {
            error_report("XenStore ring handling error");
            s->fatal_error = true;
            break;
        } else if (avail == 0) {
            break;
        }

        if (copylen > avail) {
            copylen = avail;
        }
        if (copylen > XENSTORE_RING_SIZE - offset) {
            copylen = XENSTORE_RING_SIZE - offset;
        }

        memcpy(&s->xs->rsp[offset], ptr, copylen);
        ptr += copylen;

        prod += copylen;
        len -= copylen;
        copied += copylen;
    }

    /* Ensure the ring contents are seen before rsp_prod update. */
    smp_wmb();

    qatomic_set(&s->xs->rsp_prod, prod);

    return copied;
}

static unsigned int get_req(XenXenstoreState *s)
{
    unsigned int copied = 0;

    if (s->fatal_error) {
        return 0;
    }

    assert(!req_pending(s));

    if (s->req_offset < XENSTORE_HEADER_SIZE) {
        void *ptr = s->req_data + s->req_offset;
        unsigned int len = XENSTORE_HEADER_SIZE;
        unsigned int copylen = copy_from_ring(s, ptr, len);

        copied += copylen;
        s->req_offset += copylen;
    }

    if (s->req_offset >= XENSTORE_HEADER_SIZE) {
        struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;

        if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) {
            error_report("Illegal XenStore request");
            s->fatal_error = true;
            return 0;
        }

        void *ptr = s->req_data + s->req_offset;
        unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset;
        unsigned int copylen = copy_from_ring(s, ptr, len);

        copied += copylen;
        s->req_offset += copylen;
    }

    return copied;
}

static unsigned int put_rsp(XenXenstoreState *s)
{
    if (s->fatal_error) {
        return 0;
    }

    assert(s->rsp_pending);

    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len);

    void *ptr = s->rsp_data + s->rsp_offset;
    unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset;
    unsigned int copylen = copy_to_ring(s, ptr, len);

    s->rsp_offset += copylen;

    /* Have we produced a complete response? */
    if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len) {
        reset_rsp(s);
    }

    return copylen;
}

static void deliver_watch(XenXenstoreState *s, const char *path,
                          const char *token)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    unsigned int len;

    assert(!s->rsp_pending);

    trace_xenstore_watch_event(path, token);

    rsp->type = XS_WATCH_EVENT;
    rsp->req_id = 0;
    rsp->tx_id = 0;
    rsp->len = 0;

    len = strlen(path);

    /* XENSTORE_ABS/REL_PATH_MAX should ensure there can be no overflow */
    assert(rsp->len + len < XENSTORE_PAYLOAD_MAX);

    memcpy(&rsp_data[rsp->len], path, len);
    rsp->len += len;
    rsp_data[rsp->len] = '\0';
    rsp->len++;

    len = strlen(token);
    /*
     * It is possible for the guest to have chosen a token that will
     * not fit (along with the path) into a watch event. We have no
     * choice but to drop the event if this is the case.
     */
    if (rsp->len + len >= XENSTORE_PAYLOAD_MAX) {
        return;
    }

    memcpy(&rsp_data[rsp->len], token, len);
    rsp->len += len;
    rsp_data[rsp->len] = '\0';
    rsp->len++;

    s->rsp_pending = true;
}

struct watch_event {
    char *path;
    char *token;
};

static void free_watch_event(struct watch_event *ev)
{
    if (ev) {
        g_free(ev->path);
        g_free(ev->token);
        g_free(ev);
    }
}

static void queue_watch(XenXenstoreState *s, const char *path,
                        const char *token)
{
    struct watch_event *ev = g_new0(struct watch_event, 1);

    ev->path = g_strdup(path);
    ev->token = g_strdup(token);

    s->watch_events = g_list_append(s->watch_events, ev);
}
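
/*
 * Events queued here are replayed by process_watch_events() once the
 * shared response buffer becomes free again; see xen_xenstore_event().
 */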

static void fire_watch_cb(void *opaque, const char *path, const char *token)
{
    XenXenstoreState *s = opaque;

    assert(bql_locked());

    /*
     * If there's a response pending, we obviously can't scribble over
     * it. But if there's a request pending, it has dibs on the buffer
     * too.
     *
     * In the common case of a watch firing due to backend activity
     * when the ring was otherwise idle, we should be able to copy the
     * strings directly into the rsp_data and thence the actual ring,
     * without needing to perform any allocations and queue them.
     */
    if (s->rsp_pending || req_pending(s)) {
        queue_watch(s, path, token);
    } else {
        deliver_watch(s, path, token);
        /*
         * Attempt to queue the message into the actual ring, and send
         * the event channel notification if any bytes are copied.
         */
        if (s->rsp_pending && put_rsp(s) > 0) {
            xen_be_evtchn_notify(s->eh, s->be_port);
        }
    }
}

static void process_watch_events(XenXenstoreState *s)
{
    struct watch_event *ev = s->watch_events->data;

    deliver_watch(s, ev->path, ev->token);

    s->watch_events = g_list_remove(s->watch_events, ev);
    free_watch_event(ev);
}

static void xen_xenstore_event(void *opaque)
{
    XenXenstoreState *s = opaque;
    evtchn_port_t port = xen_be_evtchn_pending(s->eh);
    unsigned int copied_to, copied_from;
    bool processed, notify = false;

    if (port != s->be_port) {
        return;
    }

    /* We know this is a no-op. */
    xen_be_evtchn_unmask(s->eh, port);

    copied_to = copied_from = 0;
    processed = false;

    do {
        if (!s->rsp_pending && s->watch_events) {
            process_watch_events(s);
        }

        if (s->rsp_pending) {
            copied_to = put_rsp(s);
        }

        if (!req_pending(s)) {
            copied_from = get_req(s);
        }

        if (req_pending(s) && !s->rsp_pending && !s->watch_events) {
            process_req(s);
            processed = true;
        }

        notify |= copied_to || copied_from;
    } while (copied_to || copied_from || processed);

    if (notify) {
        xen_be_evtchn_notify(s->eh, s->be_port);
    }
}

static void alloc_guest_port(XenXenstoreState *s)
{
    struct evtchn_alloc_unbound alloc = {
        .dom = DOMID_SELF,
        .remote_dom = DOMID_QEMU,
    };

    if (!xen_evtchn_alloc_unbound_op(&alloc)) {
        s->guest_port = alloc.port;
    }
}
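
/*
 * This mirrors what real xenstored setup looks like to the guest: the
 * port is allocated as if the guest had issued EVTCHNOP_alloc_unbound
 * with DOMID_QEMU as the remote domain.
 */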

int xen_xenstore_reset(void)
{
    XenXenstoreState *s = xen_xenstore_singleton;
    int console_port;
    GList *perms;
    int err;

    if (!s) {
        return -ENOTSUP;
    }

    s->req_offset = s->rsp_offset = 0;
    s->rsp_pending = false;

    if (!memory_region_is_mapped(&s->xenstore_page)) {
        uint64_t gpa = XEN_SPECIAL_PFN(XENSTORE) << TARGET_PAGE_BITS;
        xen_overlay_do_map_page(&s->xenstore_page, gpa);
    }

    alloc_guest_port(s);

    /*
     * As qemu/dom0, bind to the guest's port. For incoming migration, this
     * will be unbound as the guest's evtchn table is overwritten. We then
     * rebind to the correct guest port in xen_xenstore_post_load().
     */
    err = xen_be_evtchn_bind_interdomain(s->eh, xen_domid, s->guest_port);
    if (err < 0) {
        return err;
    }
    s->be_port = err;

    /* Create frontend store nodes */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU));
    perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid));

    relpath_printf(s, perms, "store/port", "%u", s->guest_port);
    relpath_printf(s, perms, "store/ring-ref", "%lu",
                   XEN_SPECIAL_PFN(XENSTORE));

    console_port = xen_primary_console_get_port();
    if (console_port) {
        relpath_printf(s, perms, "console/ring-ref", "%lu",
                       XEN_SPECIAL_PFN(CONSOLE));
        relpath_printf(s, perms, "console/port", "%u", console_port);
        relpath_printf(s, perms, "console/state", "%u", XenbusStateInitialised);
    }

    g_list_free_full(perms, g_free);

    /*
     * We don't actually access the guest's page through the grant, because
     * this isn't real Xen, and we can just use the page we gave it in the
     * first place. Map the grant anyway, mostly for cosmetic purposes so
     * it *looks* like it's in use in the guest-visible grant table.
     */
    s->gt = qemu_xen_gnttab_open();
    uint32_t xs_gntref = GNTTAB_RESERVED_XENSTORE;
    s->granted_xs = qemu_xen_gnttab_map_refs(s->gt, 1, xen_domid, &xs_gntref,
                                             PROT_READ | PROT_WRITE);

    return 0;
}
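
/*
 * The rest of this file is the emulated replacement for libxenstore as
 * used by QEMU's own PV backends: an implementation of struct
 * xenstore_backend_ops which talks directly to the same XenstoreImplState
 * as DOMID_QEMU, rather than going over the guest-visible ring.
 */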

struct qemu_xs_handle {
    XenstoreImplState *impl;
    GList *watches;
    QEMUBH *watch_bh;
};

struct qemu_xs_watch {
    struct qemu_xs_handle *h;
    char *path;
    xs_watch_fn fn;
    void *opaque;
    GList *events;
};

static char *xs_be_get_domain_path(struct qemu_xs_handle *h, unsigned int domid)
{
    return g_strdup_printf("/local/domain/%u", domid);
}

static char **xs_be_directory(struct qemu_xs_handle *h, xs_transaction_t t,
                              const char *path, unsigned int *num)
{
    GList *items = NULL, *l;
    unsigned int i = 0;
    char **items_ret;
    int err;

    err = xs_impl_directory(h->impl, DOMID_QEMU, t, path, NULL, &items);
    if (err) {
        errno = err;
        return NULL;
    }

    items_ret = g_new0(char *, g_list_length(items) + 1);
    *num = 0;
    for (l = items; l; l = l->next) {
        items_ret[i++] = l->data;
        (*num)++;
    }
    g_list_free(items);
    return items_ret;
}

*h
, xs_transaction_t t
,
1543 const char *path
, unsigned int *len
)
1545 GByteArray
*data
= g_byte_array_new();
1546 bool free_segment
= false;
1549 err
= xs_impl_read(h
->impl
, DOMID_QEMU
, t
, path
, data
);
1551 free_segment
= true;
1557 /* The xen-bus-helper code expects to get NUL terminated string! */
1558 g_byte_array_append(data
, (void *)"", 1);
1561 return g_byte_array_free(data
, free_segment
);
static bool xs_be_write(struct qemu_xs_handle *h, xs_transaction_t t,
                        const char *path, const void *data, unsigned int len)
{
    GByteArray *gdata = g_byte_array_new();
    int err;

    g_byte_array_append(gdata, data, len);
    err = xs_impl_write(h->impl, DOMID_QEMU, t, path, gdata);
    g_byte_array_unref(gdata);
    if (err) {
        errno = err;
        return false;
    }
    return true;
}

*h
, xs_transaction_t t
,
1581 unsigned int owner
, unsigned int domid
,
1582 unsigned int perms
, const char *path
)
1584 g_autoptr(GByteArray
) data
= g_byte_array_new();
1585 GList
*perms_list
= NULL
;
1588 /* mkdir does this */
1589 err
= xs_impl_read(h
->impl
, DOMID_QEMU
, t
, path
, data
);
1590 if (err
== ENOENT
) {
1591 err
= xs_impl_write(h
->impl
, DOMID_QEMU
, t
, path
, data
);
1598 perms_list
= g_list_append(perms_list
,
1599 xs_perm_as_string(XS_PERM_NONE
, owner
));
1600 perms_list
= g_list_append(perms_list
,
1601 xs_perm_as_string(perms
, domid
));
1603 err
= xs_impl_set_perms(h
->impl
, DOMID_QEMU
, t
, path
, perms_list
);
1604 g_list_free_full(perms_list
, g_free
);
1612 static bool xs_be_destroy(struct qemu_xs_handle
*h
, xs_transaction_t t
,
1615 int err
= xs_impl_rm(h
->impl
, DOMID_QEMU
, t
, path
);
static void be_watch_bh(void *_h)
{
    struct qemu_xs_handle *h = _h;
    GList *l;

    for (l = h->watches; l; l = l->next) {
        struct qemu_xs_watch *w = l->data;

        while (w->events) {
            struct watch_event *ev = w->events->data;

            w->fn(w->opaque, ev->path);

            w->events = g_list_remove(w->events, ev);
            free_watch_event(ev);
        }
    }
}

static void xs_be_watch_cb(void *opaque, const char *path, const char *token)
{
    struct watch_event *ev = g_new0(struct watch_event, 1);
    struct qemu_xs_watch *w = opaque;

    /* We don't care about the token */
    ev->path = g_strdup(path);
    w->events = g_list_append(w->events, ev);

    qemu_bh_schedule(w->h->watch_bh);
}

static struct qemu_xs_watch *xs_be_watch(struct qemu_xs_handle *h,
                                         const char *path, xs_watch_fn fn,
                                         void *opaque)
{
    struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1);
    int err;

    w->h = h;
    w->fn = fn;
    w->opaque = opaque;

    err = xs_impl_watch(h->impl, DOMID_QEMU, path, NULL, xs_be_watch_cb, w);
    if (err) {
        errno = err;
        g_free(w);
        return NULL;
    }

    w->path = g_strdup(path);
    h->watches = g_list_append(h->watches, w);
    return w;
}

static void xs_be_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w)
{
    xs_impl_unwatch(h->impl, DOMID_QEMU, w->path, NULL, xs_be_watch_cb, w);

    h->watches = g_list_remove(h->watches, w);
    g_list_free_full(w->events, (GDestroyNotify)free_watch_event);
    g_free(w->path);
    g_free(w);
}

static xs_transaction_t xs_be_transaction_start(struct qemu_xs_handle *h)
{
    unsigned int new_tx = XBT_NULL;
    int err = xs_impl_transaction_start(h->impl, DOMID_QEMU, &new_tx);
    if (err) {
        errno = err;
        return XBT_NULL;
    }
    return new_tx;
}

static bool xs_be_transaction_end(struct qemu_xs_handle *h, xs_transaction_t t,
                                  bool abort)
{
    int err = xs_impl_transaction_end(h->impl, DOMID_QEMU, t, !abort);
    if (err) {
        errno = err;
        return false;
    }
    return true;
}

static struct qemu_xs_handle *xs_be_open(void)
{
    XenXenstoreState *s = xen_xenstore_singleton;
    struct qemu_xs_handle *h;

    if (!s || !s->impl) {
        errno = ENOSYS;
        return NULL;
    }

    h = g_new0(struct qemu_xs_handle, 1);
    h->impl = s->impl;

    h->watch_bh = aio_bh_new(qemu_get_aio_context(), be_watch_bh, h);

    return h;
}

static void xs_be_close(struct qemu_xs_handle *h)
{
    while (h->watches) {
        struct qemu_xs_watch *w = h->watches->data;
        xs_be_unwatch(h, w);
    }

    qemu_bh_delete(h->watch_bh);
    g_free(h);
}

static struct xenstore_backend_ops emu_xenstore_backend_ops = {
    .open = xs_be_open,
    .close = xs_be_close,
    .get_domain_path = xs_be_get_domain_path,
    .directory = xs_be_directory,
    .read = xs_be_read,
    .write = xs_be_write,
    .create = xs_be_create,
    .destroy = xs_be_destroy,
    .watch = xs_be_watch,
    .unwatch = xs_be_unwatch,
    .transaction_start = xs_be_transaction_start,
    .transaction_end = xs_be_transaction_end,
};