/*
 * Xen 9p backend
 *
 * Copyright Aporeto 2017
 *
 * Authors:
 *  Stefano Stabellini <stefano@aporeto.com>
 *
 */

/*
 * Not so fast! You might want to read the 9p developer docs first:
 * https://wiki.qemu.org/Documentation/9p
 */
#include "qemu/osdep.h"

#include "hw/9pfs/9p.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/9pfs/xen-9pfs.h"
#include "qapi/error.h"
#include "qemu/config-file.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/iov.h"
#include "fsdev/qemu-fsdev.h"
#define VERSIONS "1"
#define MAX_RINGS 8
#define MAX_RING_ORDER 9
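
/*
 * Per-ring state: the mapped data interface page and data pages shared
 * with the frontend, the event channel used for notifications, and
 * bookkeeping for the request currently being processed.
 */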
typedef struct Xen9pfsRing {
    struct Xen9pfsDev *priv;

    int ref;
    xenevtchn_handle *evtchndev;
    int evtchn;
    int local_port;
    int ring_order;
    struct xen_9pfs_data_intf *intf;
    unsigned char *data;
    struct xen_9pfs_data ring;

    struct iovec *sg;
    QEMUBH *bh;
    Coroutine *co;

    /* local copies, so that we can read/write PDU data directly from
     * the ring */
    RING_IDX out_cons, out_size, in_cons;
    bool inprogress;
} Xen9pfsRing;
typedef struct Xen9pfsDev {
    struct XenLegacyDevice xendev;  /* must be first */
    V9fsState state;
    char *path;
    char *security_model;
    char *tag;
    char *id;

    int num_rings;
    Xen9pfsRing *rings;
} Xen9pfsDev;
static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev);
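
/*
 * Fill in_sg with up to two iovecs describing the free space of the in
 * ring (where replies are written); two entries are needed when the
 * free space wraps around the end of the ring buffer.
 */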
static void xen_9pfs_in_sg(Xen9pfsRing *ring,
                           struct iovec *in_sg,
                           int *num,
                           uint32_t idx,
                           uint32_t size)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->in_cons;
    prod = ring->intf->in_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_prod < masked_cons) {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = masked_cons - masked_prod;
        *num = 1;
    } else {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod;
        in_sg[1].iov_base = ring->ring.in;
        in_sg[1].iov_len = masked_cons;
        *num = 2;
    }
}
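
/*
 * Fill out_sg with up to two iovecs describing the next ring->out_size
 * bytes of request data on the out ring, splitting the descriptor in
 * two when the data wraps around the end of the ring buffer.
 */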
static void xen_9pfs_out_sg(Xen9pfsRing *ring,
                            struct iovec *out_sg,
                            int *num,
                            uint32_t idx)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_cons < masked_prod) {
        out_sg[0].iov_base = ring->ring.out + masked_cons;
        out_sg[0].iov_len = ring->out_size;
        *num = 1;
    } else {
        if (ring->out_size >
            (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) -
                                masked_cons;
            out_sg[1].iov_base = ring->ring.out;
            out_sg[1].iov_len = ring->out_size -
                                (XEN_FLEX_RING_SIZE(ring->ring_order) -
                                 masked_cons);
            *num = 2;
        } else {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = ring->out_size;
            *num = 1;
        }
    }
}
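
/*
 * V9fsTransport op: marshal a reply directly into the in ring. The ring
 * is chosen by pdu->tag modulo the number of rings.
 */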
static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
                                     size_t offset,
                                     const char *fmt,
                                     va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec in_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                   in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));

    ret = v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to encode VirtFS reply type %d\n",
                      pdu->id + 1);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}
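
/*
 * V9fsTransport op: unmarshal a request directly from the out ring. On
 * decode failure, move the device to XenbusStateClosing and disconnect.
 */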
static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
                                       size_t offset,
                                       const char *fmt,
                                       va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec out_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                    out_sg, &num, pdu->idx);

    ret = v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to decode VirtFS request type %d\n", pdu->id);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}
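
/*
 * V9fsTransport op: hand the 9p core an iovec array that points
 * straight at the request payload on the out ring, avoiding a copy.
 */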
static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
                                           struct iovec **piov,
                                           unsigned int *pniov,
                                           size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;

    g_free(ring->sg);

    ring->sg = g_new0(struct iovec, 2);
    xen_9pfs_out_sg(ring, ring->sg, &num, pdu->idx);
    *piov = ring->sg;
    *pniov = num;
}
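
/*
 * V9fsTransport op: provide an iovec array covering the reply buffer on
 * the in ring. If the ring does not yet have 'size' bytes free, publish
 * the current coroutine in ring->co and yield; xen_9pfs_bh re-enters
 * the coroutine once the frontend has consumed more replies.
 */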
static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                          struct iovec **piov,
                                          unsigned int *pniov,
                                          size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;
    size_t buf_size;

    g_free(ring->sg);

    ring->sg = g_new0(struct iovec, 2);
    ring->co = qemu_coroutine_self();
    /* make sure other threads see ring->co changes before continuing */
    smp_wmb();

again:
    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
    buf_size = iov_size(ring->sg, num);
    if (buf_size < size) {
        qemu_coroutine_yield();
        goto again;
    }
    ring->co = NULL;
    /* make sure other threads see ring->co changes before continuing */
    smp_wmb();

    *piov = ring->sg;
    *pniov = num;
}
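
/*
 * V9fsTransport op: complete a request/reply cycle. Advance the shared
 * out_cons and in_prod indices, notify the frontend via the event
 * channel, and schedule the bottom half to pick up the next request.
 */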
static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
{
    RING_IDX prod;
    Xen9pfsDev *priv = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings];

    g_free(ring->sg);
    ring->sg = NULL;

    ring->intf->out_cons = ring->out_cons;
    xen_wmb();

    prod = ring->intf->in_prod;
    xen_rmb();
    ring->intf->in_prod = prod + pdu->size;
    xen_wmb();

    ring->inprogress = false;
    qemu_xen_evtchn_notify(ring->evtchndev, ring->local_port);

    qemu_bh_schedule(ring->bh);
}
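
/* Transport ops handed to the generic 9p code by xen_9pfs_connect() */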
static const V9fsTransport xen_9p_transport = {
    .pdu_vmarshal = xen_9pfs_pdu_vmarshal,
    .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal,
    .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu,
    .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu,
    .push_and_notify = xen_9pfs_push_and_notify,
};
static int xen_9pfs_init(struct XenLegacyDevice *xendev)
{
    return 0;
}
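
/*
 * Read the next request header from the out ring and submit the PDU to
 * the 9p core. Only one request per ring is handled at a time
 * (ring->inprogress); if a request is already in flight or has not been
 * fully queued by the frontend yet, return early.
 */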
static int xen_9pfs_receive(Xen9pfsRing *ring)
{
    P9MsgHeader h;
    RING_IDX cons, prod, masked_prod, masked_cons, queued;
    V9fsPDU *pdu;

    if (ring->inprogress) {
        return 0;
    }

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();

    queued = xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < sizeof(h)) {
        return 0;
    }
    ring->inprogress = true;

    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h),
                         masked_prod, &masked_cons,
                         XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < le32_to_cpu(h.size_le)) {
        return 0;
    }

    /* cannot fail, because we only handle one request per ring at a time */
    pdu = pdu_alloc(&ring->priv->state);
    ring->out_size = le32_to_cpu(h.size_le);
    ring->out_cons = cons + le32_to_cpu(h.size_le);

    pdu_submit(pdu, &h);

    return 0;
}
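
/*
 * Bottom half: if a coroutine registered itself in ring->co but has not
 * yielded yet, spin until it has; then re-enter it so it can re-check
 * the ring, and finally look for a new request.
 */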
static void xen_9pfs_bh(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    bool wait;

again:
    wait = ring->co != NULL && qemu_coroutine_entered(ring->co);
    /* paired with the smp_wmb barriers in xen_9pfs_init_in_iov_from_pdu */
    smp_rmb();
    if (wait) {
        cpu_relax();
        goto again;
    }

    if (ring->co != NULL) {
        qemu_coroutine_enter_if_inactive(ring->co);
    }
    xen_9pfs_receive(ring);
}
static void xen_9pfs_evtchn_event(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    evtchn_port_t port;

    port = qemu_xen_evtchn_pending(ring->evtchndev);
    qemu_xen_evtchn_unmask(ring->evtchndev, port);

    qemu_bh_schedule(ring->bh);
}
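
/*
 * Unbind the event channels and drop their fd handlers. The grant
 * mappings and per-ring memory are released by xen_9pfs_free(), which
 * also calls this function first if needed.
 */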
static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].evtchndev != NULL) {
            qemu_set_fd_handler(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev),
                                NULL, NULL, NULL);
            qemu_xen_evtchn_unbind(xen_9pdev->rings[i].evtchndev,
                                   xen_9pdev->rings[i].local_port);
            xen_9pdev->rings[i].evtchndev = NULL;
        }
    }
}
static int xen_9pfs_free(struct XenLegacyDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    if (xen_9pdev->rings[0].evtchndev != NULL) {
        xen_9pfs_disconnect(xendev);
    }

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].data != NULL) {
            xen_be_unmap_grant_refs(&xen_9pdev->xendev,
                                    xen_9pdev->rings[i].data,
                                    xen_9pdev->rings[i].intf->ref,
                                    (1 << xen_9pdev->rings[i].ring_order));
        }
        if (xen_9pdev->rings[i].intf != NULL) {
            xen_be_unmap_grant_ref(&xen_9pdev->xendev,
                                   xen_9pdev->rings[i].intf,
                                   xen_9pdev->rings[i].ref);
        }
        if (xen_9pdev->rings[i].bh != NULL) {
            qemu_bh_delete(xen_9pdev->rings[i].bh);
        }
    }

    g_free(xen_9pdev->id);
    g_free(xen_9pdev->tag);
    g_free(xen_9pdev->path);
    g_free(xen_9pdev->security_model);
    g_free(xen_9pdev->rings);
    return 0;
}
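
/*
 * .initialise op: read the ring references and event channels from the
 * frontend's xenstore directory, map the grants, bind the event
 * channels, then create the fsdev from the backend's path and
 * security_model nodes and realize the 9p device.
 */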
static int xen_9pfs_connect(struct XenLegacyDevice *xendev)
{
    Error *err = NULL;
    int i;
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    V9fsState *s = &xen_9pdev->state;
    QemuOpts *fsdev;

    if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
                             &xen_9pdev->num_rings) == -1 ||
        xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
        return -1;
    }
    xen_9pdev->rings = g_new0(Xen9pfsRing, xen_9pdev->num_rings);
    for (i = 0; i < xen_9pdev->num_rings; i++) {
        char *str;
        int ring_order;

        xen_9pdev->rings[i].priv = xen_9pdev;
        xen_9pdev->rings[i].evtchn = -1;
        xen_9pdev->rings[i].local_port = -1;

        str = g_strdup_printf("ring-ref%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].ref) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);
        str = g_strdup_printf("event-channel-%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].evtchn) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);

        xen_9pdev->rings[i].intf =
            xen_be_map_grant_ref(&xen_9pdev->xendev,
                                 xen_9pdev->rings[i].ref,
                                 PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].intf) {
            goto out;
        }
        ring_order = xen_9pdev->rings[i].intf->ring_order;
        if (ring_order > MAX_RING_ORDER) {
            goto out;
        }
        xen_9pdev->rings[i].ring_order = ring_order;
        xen_9pdev->rings[i].data =
            xen_be_map_grant_refs(&xen_9pdev->xendev,
                                  xen_9pdev->rings[i].intf->ref,
                                  (1 << ring_order),
                                  PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].data) {
            goto out;
        }
        xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data;
        xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data +
                                       XEN_FLEX_RING_SIZE(ring_order);

        xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
        xen_9pdev->rings[i].out_cons = 0;
        xen_9pdev->rings[i].out_size = 0;
        xen_9pdev->rings[i].inprogress = false;

        xen_9pdev->rings[i].evtchndev = qemu_xen_evtchn_open();
        if (xen_9pdev->rings[i].evtchndev == NULL) {
            goto out;
        }
        qemu_set_cloexec(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev));
        xen_9pdev->rings[i].local_port = qemu_xen_evtchn_bind_interdomain
                                         (xen_9pdev->rings[i].evtchndev,
                                          xendev->dom,
                                          xen_9pdev->rings[i].evtchn);
        if (xen_9pdev->rings[i].local_port == -1) {
            xen_pv_printf(xendev, 0,
                          "xenevtchn_bind_interdomain failed port=%d\n",
                          xen_9pdev->rings[i].evtchn);
            goto out;
        }
        xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
        qemu_set_fd_handler(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev),
                            xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
    }
    xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model");
    xen_9pdev->path = xenstore_read_be_str(xendev, "path");
    xen_9pdev->id = s->fsconf.fsdev_id =
        g_strdup_printf("xen9p%d", xendev->dev);
    xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, "tag");
    fsdev = qemu_opts_create(qemu_find_opts("fsdev"),
                             s->fsconf.tag,
                             1, NULL);
    qemu_opt_set(fsdev, "fsdriver", "local", NULL);
    qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL);
    qemu_opt_set(fsdev, "security_model", xen_9pdev->security_model, NULL);
    qemu_opts_set_id(fsdev, s->fsconf.fsdev_id);
    qemu_fsdev_add(fsdev, &err);
    if (err) {
        error_report_err(err);
    }
    v9fs_device_realize_common(s, &xen_9p_transport, NULL);

    return 0;

out:
    xen_9pfs_free(xendev);
    return -1;
}
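
/*
 * Advertise the backend's limits in xenstore before the frontend
 * connects: supported protocol versions, maximum ring count and
 * maximum ring page order.
 */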
static void xen_9pfs_alloc(struct XenLegacyDevice *xendev)
{
    xenstore_write_be_str(xendev, "versions", VERSIONS);
    xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
    xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
}
struct XenDevOps xen_9pfs_ops = {
    .size       = sizeof(Xen9pfsDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = xen_9pfs_alloc,
    .init       = xen_9pfs_init,
    .initialise = xen_9pfs_connect,
    .disconnect = xen_9pfs_disconnect,
    .free       = xen_9pfs_free,
};