/*
 * QEMU Xen backend support
 *
 * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef QEMU_XEN_BACKEND_OPS_H
#define QEMU_XEN_BACKEND_OPS_H

#include "hw/xen/xen.h"
#include "hw/xen/interface/xen.h"
#include "hw/xen/interface/io/xenbus.h"

/*
 * For the time being, these operations map fairly closely to the API of
 * the actual Xen libraries, e.g. libxenevtchn. As we complete the migration
 * from XenLegacyDevice back ends to the new XenDevice model, they may
 * evolve to slightly higher-level APIs.
 *
 * The internal emulations do not emulate the Xen APIs entirely faithfully;
 * only enough to be used by the Xen backend devices. For example, only one
 * event channel can be bound to each handle, since that's sufficient for
 * the device support (only the true Xen HVM backend uses more). And the
 * behaviour of unmask() and pending() is different too because the device
 * backends don't care.
 */
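
/*
 * Illustration (a sketch, not part of the original header): a provider
 * fills in one of the *_backend_ops structures declared below and points
 * the corresponding global at it during startup. The provider name
 * "example_evtchn_ops" and its members are hypothetical.
 *
 *   static struct evtchn_backend_ops example_evtchn_ops = {
 *       .open = example_evtchn_open,
 *       .bind_interdomain = example_evtchn_bind_interdomain,
 *       ...
 *   };
 *
 *   void setup_example_backend_ops(void)
 *   {
 *       xen_evtchn_ops = &example_evtchn_ops;
 *   }
 */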

typedef struct xenevtchn_handle xenevtchn_handle;
typedef int xenevtchn_port_or_error_t;
typedef uint32_t evtchn_port_t;
typedef uint16_t domid_t;
typedef uint32_t grant_ref_t;

#define XEN_PAGE_SHIFT 12
#define XEN_PAGE_SIZE (1UL << XEN_PAGE_SHIFT)
#define XEN_PAGE_MASK (~(XEN_PAGE_SIZE - 1))
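
/*
 * Illustration (assumption, not from the original header): these macros
 * split an address into a Xen page frame number and an in-page offset:
 *
 *   xen_pfn_t pfn = addr >> XEN_PAGE_SHIFT;
 *   size_t offset = addr & ~XEN_PAGE_MASK;
 */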

#ifndef xen_rmb
#define xen_rmb() smp_rmb()
#endif
#ifndef xen_wmb
#define xen_wmb() smp_wmb()
#endif
#ifndef xen_mb
#define xen_mb() smp_mb()
#endif

struct evtchn_backend_ops {
    xenevtchn_handle *(*open)(void);
    int (*bind_interdomain)(xenevtchn_handle *xc, uint32_t domid,
                            evtchn_port_t guest_port);
    int (*unbind)(xenevtchn_handle *xc, evtchn_port_t port);
    int (*close)(struct xenevtchn_handle *xc);
    int (*get_fd)(struct xenevtchn_handle *xc);
    int (*notify)(struct xenevtchn_handle *xc, evtchn_port_t port);
    int (*unmask)(struct xenevtchn_handle *xc, evtchn_port_t port);
    int (*pending)(struct xenevtchn_handle *xc);
};

extern struct evtchn_backend_ops *xen_evtchn_ops;

static inline xenevtchn_handle *qemu_xen_evtchn_open(void)
{
    if (!xen_evtchn_ops) {
        return NULL;
    }
    return xen_evtchn_ops->open();
}

static inline int qemu_xen_evtchn_bind_interdomain(xenevtchn_handle *xc,
                                                   uint32_t domid,
                                                   evtchn_port_t guest_port)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->bind_interdomain(xc, domid, guest_port);
}

static inline int qemu_xen_evtchn_unbind(xenevtchn_handle *xc,
                                         evtchn_port_t port)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->unbind(xc, port);
}

static inline int qemu_xen_evtchn_close(xenevtchn_handle *xc)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->close(xc);
}

static inline int qemu_xen_evtchn_fd(xenevtchn_handle *xc)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->get_fd(xc);
}

static inline int qemu_xen_evtchn_notify(xenevtchn_handle *xc,
                                         evtchn_port_t port)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->notify(xc, port);
}

static inline int qemu_xen_evtchn_unmask(xenevtchn_handle *xc,
                                         evtchn_port_t port)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->unmask(xc, port);
}

static inline int qemu_xen_evtchn_pending(xenevtchn_handle *xc)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->pending(xc);
}
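
/*
 * Usage sketch (illustrative, not part of the original header): a backend
 * typically binds the guest's port, waits for the handle's fd to become
 * readable, then acknowledges and re-enables delivery before kicking the
 * frontend. "domid" and "guest_port" are assumed to come from the
 * frontend's xenstore entries; as noted above, the exact pending()/unmask()
 * semantics differ between the real library and the internal emulation.
 *
 *   xenevtchn_handle *eh = qemu_xen_evtchn_open();
 *   int port = qemu_xen_evtchn_bind_interdomain(eh, domid, guest_port);
 *   ... wait for qemu_xen_evtchn_fd(eh) to become readable ...
 *   if (qemu_xen_evtchn_pending(eh) == port) {
 *       qemu_xen_evtchn_unmask(eh, port);
 *       ... process the shared ring ...
 *       qemu_xen_evtchn_notify(eh, port);
 *   }
 */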

typedef struct xengntdev_handle xengnttab_handle;

typedef struct XenGrantCopySegment {
    union {
        void *virt;
        struct {
            uint32_t ref;
            off_t offset;
        } foreign;
    } source, dest;
    size_t len;
} XenGrantCopySegment;
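
/*
 * Illustration (a sketch, assuming "xgt", "domid", "ref", "buf", "len" and
 * "errp" exist in the caller): to copy from a guest grant into a local
 * buffer, fill the source from the foreign side of the union and the
 * destination from the virtual side, then call qemu_xen_gnttab_grant_copy()
 * (declared below) with to_domain = false:
 *
 *   XenGrantCopySegment seg = {
 *       .source.foreign = { .ref = ref, .offset = 0 },
 *       .dest.virt = buf,
 *       .len = len,
 *   };
 *   int rc = qemu_xen_gnttab_grant_copy(xgt, false, domid, &seg, 1, errp);
 */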

#define XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE (1U << 0)

struct gnttab_backend_ops {
    uint32_t features;
    xengnttab_handle *(*open)(void);
    int (*close)(xengnttab_handle *xgt);
    int (*grant_copy)(xengnttab_handle *xgt, bool to_domain, uint32_t domid,
                      XenGrantCopySegment *segs, uint32_t nr_segs,
                      Error **errp);
    int (*set_max_grants)(xengnttab_handle *xgt, uint32_t nr_grants);
    void *(*map_refs)(xengnttab_handle *xgt, uint32_t count, uint32_t domid,
                      uint32_t *refs, int prot);
    int (*unmap)(xengnttab_handle *xgt, void *start_address, uint32_t *refs,
                 uint32_t count);
};

extern struct gnttab_backend_ops *xen_gnttab_ops;

static inline bool qemu_xen_gnttab_can_map_multi(void)
{
    return xen_gnttab_ops &&
           !!(xen_gnttab_ops->features & XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE);
}

static inline xengnttab_handle *qemu_xen_gnttab_open(void)
{
    if (!xen_gnttab_ops) {
        return NULL;
    }
    return xen_gnttab_ops->open();
}

static inline int qemu_xen_gnttab_close(xengnttab_handle *xgt)
{
    if (!xen_gnttab_ops) {
        return -ENOSYS;
    }
    return xen_gnttab_ops->close(xgt);
}

static inline int qemu_xen_gnttab_grant_copy(xengnttab_handle *xgt,
                                             bool to_domain, uint32_t domid,
                                             XenGrantCopySegment *segs,
                                             uint32_t nr_segs, Error **errp)
{
    if (!xen_gnttab_ops) {
        return -ENOSYS;
    }

    return xen_gnttab_ops->grant_copy(xgt, to_domain, domid, segs, nr_segs,
                                      errp);
}

static inline int qemu_xen_gnttab_set_max_grants(xengnttab_handle *xgt,
                                                 uint32_t nr_grants)
{
    if (!xen_gnttab_ops) {
        return -ENOSYS;
    }
    return xen_gnttab_ops->set_max_grants(xgt, nr_grants);
}

static inline void *qemu_xen_gnttab_map_refs(xengnttab_handle *xgt,
                                             uint32_t count, uint32_t domid,
                                             uint32_t *refs, int prot)
{
    if (!xen_gnttab_ops) {
        return NULL;
    }
    return xen_gnttab_ops->map_refs(xgt, count, domid, refs, prot);
}

static inline int qemu_xen_gnttab_unmap(xengnttab_handle *xgt,
                                        void *start_address, uint32_t *refs,
                                        uint32_t count)
{
    if (!xen_gnttab_ops) {
        return -ENOSYS;
    }
    return xen_gnttab_ops->unmap(xgt, start_address, refs, count);
}
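
/*
 * Usage sketch (illustrative, assuming a grant reference "ref" advertised
 * by the frontend): map a single ring page, use it, then unmap it.
 * PROT_READ/PROT_WRITE are the usual <sys/mman.h> protection flags.
 *
 *   void *ring = qemu_xen_gnttab_map_refs(xgt, 1, domid, &ref,
 *                                         PROT_READ | PROT_WRITE);
 *   if (ring) {
 *       ... the XEN_PAGE_SIZE bytes at "ring" now alias the guest page ...
 *       qemu_xen_gnttab_unmap(xgt, ring, &ref, 1);
 *   }
 */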

struct foreignmem_backend_ops {
    void *(*map)(uint32_t dom, void *addr, int prot, size_t pages,
                 xen_pfn_t *pfns, int *errs);
    int (*unmap)(void *addr, size_t pages);
};

extern struct foreignmem_backend_ops *xen_foreignmem_ops;

static inline void *qemu_xen_foreignmem_map(uint32_t dom, void *addr, int prot,
                                            size_t pages, xen_pfn_t *pfns,
                                            int *errs)
{
    if (!xen_foreignmem_ops) {
        return NULL;
    }
    return xen_foreignmem_ops->map(dom, addr, prot, pages, pfns, errs);
}

static inline int qemu_xen_foreignmem_unmap(void *addr, size_t pages)
{
    if (!xen_foreignmem_ops) {
        return -ENOSYS;
    }
    return xen_foreignmem_ops->unmap(addr, pages);
}
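
/*
 * Usage sketch (illustrative, assuming "pfns" holds "n" guest frame
 * numbers): map the frames, check any per-page status reported through
 * "errs", and unmap when done.
 *
 *   void *p = qemu_xen_foreignmem_map(domid, NULL, PROT_READ | PROT_WRITE,
 *                                     n, pfns, errs);
 *   if (p) {
 *       ...
 *       qemu_xen_foreignmem_unmap(p, n);
 *   }
 */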

typedef void (*xs_watch_fn)(void *opaque, const char *path);

struct qemu_xs_handle;
struct qemu_xs_watch;
typedef uint32_t xs_transaction_t;

#define XBT_NULL 0

#define XS_PERM_NONE 0x00
#define XS_PERM_READ 0x01
#define XS_PERM_WRITE 0x02

struct xenstore_backend_ops {
    struct qemu_xs_handle *(*open)(void);
    void (*close)(struct qemu_xs_handle *h);
    char *(*get_domain_path)(struct qemu_xs_handle *h, unsigned int domid);
    char **(*directory)(struct qemu_xs_handle *h, xs_transaction_t t,
                        const char *path, unsigned int *num);
    void *(*read)(struct qemu_xs_handle *h, xs_transaction_t t,
                  const char *path, unsigned int *len);
    bool (*write)(struct qemu_xs_handle *h, xs_transaction_t t,
                  const char *path, const void *data, unsigned int len);
    bool (*create)(struct qemu_xs_handle *h, xs_transaction_t t,
                   unsigned int owner, unsigned int domid,
                   unsigned int perms, const char *path);
    bool (*destroy)(struct qemu_xs_handle *h, xs_transaction_t t,
                    const char *path);
    struct qemu_xs_watch *(*watch)(struct qemu_xs_handle *h, const char *path,
                                   xs_watch_fn fn, void *opaque);
    void (*unwatch)(struct qemu_xs_handle *h, struct qemu_xs_watch *w);
    xs_transaction_t (*transaction_start)(struct qemu_xs_handle *h);
    bool (*transaction_end)(struct qemu_xs_handle *h, xs_transaction_t t,
                            bool abort);
};

extern struct xenstore_backend_ops *xen_xenstore_ops;

static inline struct qemu_xs_handle *qemu_xen_xs_open(void)
{
    if (!xen_xenstore_ops) {
        return NULL;
    }
    return xen_xenstore_ops->open();
}

static inline void qemu_xen_xs_close(struct qemu_xs_handle *h)
{
    if (!xen_xenstore_ops) {
        return;
    }
    xen_xenstore_ops->close(h);
}

static inline char *qemu_xen_xs_get_domain_path(struct qemu_xs_handle *h,
                                                unsigned int domid)
{
    if (!xen_xenstore_ops) {
        return NULL;
    }
    return xen_xenstore_ops->get_domain_path(h, domid);
}

static inline char **qemu_xen_xs_directory(struct qemu_xs_handle *h,
                                           xs_transaction_t t, const char *path,
                                           unsigned int *num)
{
    if (!xen_xenstore_ops) {
        return NULL;
    }
    return xen_xenstore_ops->directory(h, t, path, num);
}

static inline void *qemu_xen_xs_read(struct qemu_xs_handle *h,
                                     xs_transaction_t t, const char *path,
                                     unsigned int *len)
{
    if (!xen_xenstore_ops) {
        return NULL;
    }
    return xen_xenstore_ops->read(h, t, path, len);
}

static inline bool qemu_xen_xs_write(struct qemu_xs_handle *h,
                                     xs_transaction_t t, const char *path,
                                     const void *data, unsigned int len)
{
    if (!xen_xenstore_ops) {
        return false;
    }
    return xen_xenstore_ops->write(h, t, path, data, len);
}

static inline bool qemu_xen_xs_create(struct qemu_xs_handle *h,
                                      xs_transaction_t t, unsigned int owner,
                                      unsigned int domid, unsigned int perms,
                                      const char *path)
{
    if (!xen_xenstore_ops) {
        return false;
    }
    return xen_xenstore_ops->create(h, t, owner, domid, perms, path);
}

static inline bool qemu_xen_xs_destroy(struct qemu_xs_handle *h,
                                       xs_transaction_t t, const char *path)
{
    if (!xen_xenstore_ops) {
        return false;
    }
    return xen_xenstore_ops->destroy(h, t, path);
}

static inline struct qemu_xs_watch *qemu_xen_xs_watch(struct qemu_xs_handle *h,
                                                      const char *path,
                                                      xs_watch_fn fn,
                                                      void *opaque)
{
    if (!xen_xenstore_ops) {
        return NULL;
    }
    return xen_xenstore_ops->watch(h, path, fn, opaque);
}

static inline void qemu_xen_xs_unwatch(struct qemu_xs_handle *h,
                                       struct qemu_xs_watch *w)
{
    if (!xen_xenstore_ops) {
        return;
    }
    xen_xenstore_ops->unwatch(h, w);
}

static inline xs_transaction_t qemu_xen_xs_transaction_start(struct qemu_xs_handle *h)
{
    if (!xen_xenstore_ops) {
        return XBT_NULL;
    }
    return xen_xenstore_ops->transaction_start(h);
}

static inline bool qemu_xen_xs_transaction_end(struct qemu_xs_handle *h,
                                               xs_transaction_t t, bool abort)
{
    if (!xen_xenstore_ops) {
        return false;
    }
    return xen_xenstore_ops->transaction_end(h, t, abort);
}
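
/*
 * Usage sketch (illustrative, assuming the usual libxenstore convention
 * that a commit which loses a race with another writer fails with errno
 * set to EAGAIN): a transactional write is retried until the commit
 * succeeds or fails for a reason other than a conflict. "h", "path" and
 * "val" are assumed to exist in the caller.
 *
 *   xs_transaction_t t;
 *   do {
 *       t = qemu_xen_xs_transaction_start(h);
 *       if (t == XBT_NULL) {
 *           break;
 *       }
 *       qemu_xen_xs_write(h, t, path, val, strlen(val));
 *   } while (!qemu_xen_xs_transaction_end(h, t, false) && errno == EAGAIN);
 */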

void setup_xen_backend_ops(void);

#endif /* QEMU_XEN_BACKEND_OPS_H */