/*
 * linux/fs/9p/trans_xen
 *
 * Copyright (C) 2017 by Stefano Stabellini <stefano@aporeto.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/interface/io/9pfs.h>

#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#define XEN_9PFS_NUM_RINGS 2
#define XEN_9PFS_RING_ORDER 9
#define XEN_9PFS_RING_SIZE(ring) XEN_FLEX_RING_SIZE(ring->intf->ring_order)

/* 9P message header: size[4] id[1] tag[2] */
struct xen_9pfs_header {
	uint32_t size;
	uint8_t id;
	uint16_t tag;

	/* uint8_t sdata[]; */
} __attribute__((packed));

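/*
 * Each data ring is backed by (1 << ring_order) Xen pages, split evenly into
 * an incoming ("in") and an outgoing ("out") flex ring of
 * XEN_FLEX_RING_SIZE(ring_order) bytes each; the grant references and
 * producer/consumer indices live in a separate shared interface page
 * (struct xen_9pfs_data_intf).
 */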
/* One per ring, more than one per 9pfs share */
struct xen_9pfs_dataring {
	struct xen_9pfs_front_priv *priv;

	struct xen_9pfs_data_intf *intf;
	grant_ref_t ref;
	int evtchn;
	int irq;
	/* protect a ring from concurrent accesses */
	spinlock_t lock;

	struct xen_9pfs_data data;
	wait_queue_head_t wq;
	struct work_struct work;
};

/* One per 9pfs share */
struct xen_9pfs_front_priv {
	struct list_head list;
	struct xenbus_device *dev;
	char *tag;
	struct p9_client *client;

	int num_rings;
	struct xen_9pfs_dataring *rings;
};

static LIST_HEAD(xen_9pfs_devs);
static DEFINE_RWLOCK(xen_9pfs_lock);

/* We don't currently allow canceling of requests */
static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;
}

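/*
 * Attach the 9P client to the device whose xenstore "tag" matches the mount
 * address, if such a share has been probed.
 */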
static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
{
	struct xen_9pfs_front_priv *priv;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (!strcmp(priv->tag, addr)) {
			priv->client = client;
			read_unlock(&xen_9pfs_lock);
			return 0;
		}
	}
	read_unlock(&xen_9pfs_lock);

	return -EINVAL;
}

static void p9_xen_close(struct p9_client *client)
{
	struct xen_9pfs_front_priv *priv;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (priv->client == client) {
			priv->client = NULL;
			read_unlock(&xen_9pfs_lock);
			return;
		}
	}
	read_unlock(&xen_9pfs_lock);
}

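/*
 * Return true when the outgoing ring has at least @size bytes free, i.e. the
 * ring size minus the bytes queued between out_cons and out_prod can hold
 * the request.
 */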
static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
{
	RING_IDX cons, prod;

	cons = ring->intf->out_cons;
	prod = ring->intf->out_prod;
	virt_mb();

	return XEN_9PFS_RING_SIZE(ring) -
		xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) >= size;
}

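/*
 * Send a 9P request: pick a ring based on the request tag, wait until the
 * outgoing ring has room for the whole message, copy it in, then publish the
 * new producer index and kick the backend via the ring's event channel.
 */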
static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
{
	struct xen_9pfs_front_priv *priv = NULL;
	RING_IDX cons, prod, masked_cons, masked_prod;
	unsigned long flags;
	u32 size = p9_req->tc.size;
	struct xen_9pfs_dataring *ring;
	int num;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (priv->client == client)
			break;
	}
	read_unlock(&xen_9pfs_lock);
	if (!priv || priv->client != client)
		return -EINVAL;

	num = p9_req->tc.tag % priv->num_rings;
	ring = &priv->rings[num];

again:
	while (wait_event_killable(ring->wq,
				   p9_xen_write_todo(ring, size)) != 0)
		schedule();

	spin_lock_irqsave(&ring->lock, flags);
	cons = ring->intf->out_cons;
	prod = ring->intf->out_prod;
	virt_mb();

	/* recheck the free space under the lock; wait again if it is gone */
	if (XEN_9PFS_RING_SIZE(ring) -
	    xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) < size) {
		spin_unlock_irqrestore(&ring->lock, flags);
		goto again;
	}

	masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
	masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));

	xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size,
			      &masked_prod, masked_cons,
			      XEN_9PFS_RING_SIZE(ring));

	p9_req->status = REQ_STATUS_SENT;
	virt_wmb();			/* write ring before updating pointer */
	prod += size;
	ring->intf->out_prod = prod;
	spin_unlock_irqrestore(&ring->lock, flags);
	notify_remote_via_irq(ring->irq);

	return 0;
}

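/*
 * Work item scheduled from the event channel handler: drain complete 9P
 * replies from the incoming ring, match them to pending requests by tag and
 * complete them through p9_client_cb().
 */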
static void p9_xen_response(struct work_struct *work)
{
	struct xen_9pfs_front_priv *priv;
	struct xen_9pfs_dataring *ring;
	RING_IDX cons, prod, masked_cons, masked_prod;
	struct xen_9pfs_header h;
	struct p9_req_t *req;
	int status;

	ring = container_of(work, struct xen_9pfs_dataring, work);
	priv = ring->priv;

	while (1) {
		cons = ring->intf->in_cons;
		prod = ring->intf->in_prod;
		virt_rmb();

		if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) <
		    sizeof(h)) {
			notify_remote_via_irq(ring->irq);
			return;
		}

		masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));

		/* First, read just the header */
		xen_9pfs_read_packet(&h, ring->data.in, sizeof(h),
				     masked_prod, &masked_cons,
				     XEN_9PFS_RING_SIZE(ring));

		req = p9_tag_lookup(priv->client, h.tag);
		if (!req || req->status != REQ_STATUS_SENT) {
			dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag);
			cons += h.size;
			virt_mb();
			ring->intf->in_cons = cons;
			continue;
		}

		memcpy(&req->rc, &h, sizeof(h));

		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
		/* Then, read the whole packet (including the header) */
		xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size,
				     masked_prod, &masked_cons,
				     XEN_9PFS_RING_SIZE(ring));

		/* consume the reply before publishing the new consumer index */
		virt_mb();
		cons += h.size;
		ring->intf->in_cons = cons;

		status = (req->status != REQ_STATUS_ERROR) ?
			REQ_STATUS_RCVD : REQ_STATUS_ERROR;

		p9_client_cb(priv->client, req, status);
	}
}

static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
{
	struct xen_9pfs_dataring *ring = r;

	if (!ring || !ring->priv->client) {
		/* ignore spurious interrupt */
		return IRQ_HANDLED;
	}

	wake_up_interruptible(&ring->wq);
	schedule_work(&ring->work);

	return IRQ_HANDLED;
}

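/*
 * 9P transport module definition. Once registered, a guest can mount a share
 * exported by the backend with something like (illustrative example, using
 * the xenstore-provided tag as the mount source):
 *
 *   mount -t 9p -o trans=xen <tag> /mnt
 */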
static struct p9_trans_module p9_xen_trans = {
	.name = "xen",
	.maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT - 2),
	.create = p9_xen_create,
	.close = p9_xen_close,
	.request = p9_xen_request,
	.cancel = p9_xen_cancel,
	.owner = THIS_MODULE,
};

static const struct xenbus_device_id xen_9pfs_front_ids[] = {
	{ "9pfs" },
	{ "" }
};

static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
{
	int i, j;

	write_lock(&xen_9pfs_lock);
	list_del(&priv->list);
	write_unlock(&xen_9pfs_lock);

	for (i = 0; i < priv->num_rings; i++) {
		if (!priv->rings[i].intf)
			break;
		if (priv->rings[i].irq > 0)
			unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
		if (priv->rings[i].data.in) {
			for (j = 0;
			     j < (1 << priv->rings[i].intf->ring_order);
			     j++) {
				grant_ref_t ref;

				ref = priv->rings[i].intf->ref[j];
				gnttab_end_foreign_access(ref, 0, 0);
			}
			free_pages((unsigned long)priv->rings[i].data.in,
				   priv->rings[i].intf->ring_order -
				   (PAGE_SHIFT - XEN_PAGE_SHIFT));
		}
		gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
		free_page((unsigned long)priv->rings[i].intf);
	}
	kfree(priv->rings);
	kfree(priv->tag);
	kfree(priv);
}

static int xen_9pfs_front_remove(struct xenbus_device *dev)
{
	struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);

	dev_set_drvdata(&dev->dev, NULL);
	xen_9pfs_front_free(priv);

	return 0;
}

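/*
 * Allocate one data ring: a zeroed interface page granted to the backend,
 * (1 << order) payload pages whose grant references are stored in the
 * interface page, and an event channel bound to
 * xen_9pfs_front_event_handler().
 */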
static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
					 struct xen_9pfs_dataring *ring,
					 unsigned int order)
{
	int i = 0;
	int ret = -ENOMEM;
	void *bytes = NULL;

	init_waitqueue_head(&ring->wq);
	spin_lock_init(&ring->lock);
	INIT_WORK(&ring->work, p9_xen_response);

	ring->intf = (struct xen_9pfs_data_intf *)get_zeroed_page(GFP_KERNEL);
	if (!ring->intf)
		return ret;
	ret = gnttab_grant_foreign_access(dev->otherend_id,
					  virt_to_gfn(ring->intf), 0);
	if (ret < 0)
		goto out;
	ring->ref = ret;
	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 order - (PAGE_SHIFT - XEN_PAGE_SHIFT));
	if (!bytes) {
		ret = -ENOMEM;
		goto out;
	}
	for (; i < (1 << order); i++) {
		ret = gnttab_grant_foreign_access(
				dev->otherend_id, virt_to_gfn(bytes) + i, 0);
		if (ret < 0)
			goto out;
		ring->intf->ref[i] = ret;
	}
	ring->intf->ring_order = order;
	ring->data.in = bytes;
	ring->data.out = bytes + XEN_FLEX_RING_SIZE(order);

	ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
	if (ret)
		goto out;
	ring->irq = bind_evtchn_to_irqhandler(ring->evtchn,
					      xen_9pfs_front_event_handler,
					      0, "xen_9pfs-frontend", ring);
	if (ring->irq >= 0)
		return 0;

	xenbus_free_evtchn(dev, ring->evtchn);
	ret = ring->irq;

out:
	if (bytes) {
		for (i--; i >= 0; i--)
			gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
		free_pages((unsigned long)bytes,
			   ring->intf->ring_order -
			   (PAGE_SHIFT - XEN_PAGE_SHIFT));
	}
	gnttab_end_foreign_access(ring->ref, 0, 0);
	free_page((unsigned long)ring->intf);
	return ret;
}

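/*
 * Probe: negotiate "versions", "max-rings" and "max-ring-page-order" with
 * the backend, allocate the data rings, then publish "version", "num-rings",
 * "ring-ref<i>" and "event-channel-<i>" in a single xenbus transaction and
 * read back the share's "tag".
 */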
static int xen_9pfs_front_probe(struct xenbus_device *dev,
				const struct xenbus_device_id *id)
{
	int ret, i;
	struct xenbus_transaction xbt;
	struct xen_9pfs_front_priv *priv = NULL;
	char *versions;
	unsigned int max_rings, max_ring_order, len = 0;

	versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
	if (IS_ERR(versions))
		return PTR_ERR(versions);
	if (strcmp(versions, "1")) {
		kfree(versions);
		return -EINVAL;
	}
	kfree(versions);

	max_rings = xenbus_read_unsigned(dev->otherend, "max-rings", 0);
	if (max_rings < XEN_9PFS_NUM_RINGS)
		return -EINVAL;
	max_ring_order = xenbus_read_unsigned(dev->otherend,
					      "max-ring-page-order", 0);
	if (max_ring_order > XEN_9PFS_RING_ORDER)
		max_ring_order = XEN_9PFS_RING_ORDER;
	if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order))
		p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->num_rings = XEN_9PFS_NUM_RINGS;
	priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
			      GFP_KERNEL);
	if (!priv->rings) {
		kfree(priv);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rings; i++) {
		priv->rings[i].priv = priv;
		ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i],
						    max_ring_order);
		if (ret < 0)
			goto error;
	}

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		goto error;
	}
	ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "num-rings", "%u",
			    priv->num_rings);
	if (ret)
		goto error_xenbus;
	for (i = 0; i < priv->num_rings; i++) {
		char str[16];

		BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
		sprintf(str, "ring-ref%d", i);
		ret = xenbus_printf(xbt, dev->nodename, str, "%d",
				    priv->rings[i].ref);
		if (ret)
			goto error_xenbus;

		sprintf(str, "event-channel-%d", i);
		ret = xenbus_printf(xbt, dev->nodename, str, "%u",
				    priv->rings[i].evtchn);
		if (ret)
			goto error_xenbus;
	}
	priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
	if (IS_ERR(priv->tag)) {
		ret = PTR_ERR(priv->tag);
		goto error_xenbus;
	}
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		goto error;
	}

	write_lock(&xen_9pfs_lock);
	list_add_tail(&priv->list, &xen_9pfs_devs);
	write_unlock(&xen_9pfs_lock);
	dev_set_drvdata(&dev->dev, priv);
	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
error:
	dev_set_drvdata(&dev->dev, NULL);
	xen_9pfs_front_free(priv);
	return ret;
}

static int xen_9pfs_front_resume(struct xenbus_device *dev)
{
	dev_warn(&dev->dev, "suspend/resume unsupported\n");

	return 0;
}

static void xen_9pfs_front_changed(struct xenbus_device *dev,
				   enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		break;

	case XenbusStateConnected:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough;	/* Missed the backend's CLOSING state */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static struct xenbus_driver xen_9pfs_front_driver = {
	.ids = xen_9pfs_front_ids,
	.probe = xen_9pfs_front_probe,
	.remove = xen_9pfs_front_remove,
	.resume = xen_9pfs_front_resume,
	.otherend_changed = xen_9pfs_front_changed,
};

static int p9_trans_xen_init(void)
{
	int rc;

	pr_info("Initialising Xen transport for 9pfs\n");

	v9fs_register_trans(&p9_xen_trans);
	rc = xenbus_register_frontend(&xen_9pfs_front_driver);
	if (rc)
		v9fs_unregister_trans(&p9_xen_trans);

	return rc;
}
module_init(p9_trans_xen_init);

static void p9_trans_xen_exit(void)
{
	v9fs_unregister_trans(&p9_xen_trans);
	xenbus_unregister_driver(&xen_9pfs_front_driver);
}
module_exit(p9_trans_xen_exit);

MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>");
MODULE_DESCRIPTION("Xen Transport for 9P");
MODULE_LICENSE("GPL");