// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/9p/trans_xen
 *
 * Xen transport layer.
 *
 * Copyright (C) 2017 by Stefano Stabellini <stefano@aporeto.com>
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/9pfs.h>

#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

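/*
 * Each 9pfs share uses XEN_9PFS_NUM_RINGS shared rings.  A ring covers
 * 2^XEN_9PFS_RING_ORDER pages of data, split evenly between the in and
 * out directions by XEN_FLEX_RING_SIZE().
 */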
#define XEN_9PFS_NUM_RINGS 2
#define XEN_9PFS_RING_ORDER 9
#define XEN_9PFS_RING_SIZE(ring) XEN_FLEX_RING_SIZE(ring->intf->ring_order)

struct xen_9pfs_header {
        uint32_t size;
        uint8_t id;
        uint16_t tag;

        /* uint8_t sdata[]; */
} __attribute__((packed));

/* One per ring, more than one per 9pfs share */
struct xen_9pfs_dataring {
        struct xen_9pfs_front_priv *priv;

        struct xen_9pfs_data_intf *intf;
        grant_ref_t ref;
        int evtchn;
        int irq;
        /* protect a ring from concurrent accesses */
        spinlock_t lock;

        struct xen_9pfs_data data;
        wait_queue_head_t wq;
        struct work_struct work;
};

/* One per 9pfs share */
struct xen_9pfs_front_priv {
        struct list_head list;
        struct xenbus_device *dev;
        char *tag;
        struct p9_client *client;

        struct xen_9pfs_dataring *rings;
};

static LIST_HEAD(xen_9pfs_devs);
static DEFINE_RWLOCK(xen_9pfs_lock);

/* We don't currently allow canceling of requests */
static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
{
        return 1;
}

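/*
 * p9_xen_create() - look up the frontend whose xenstore tag matches the
 * mount address and attach the 9p client to it.
 */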
static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
{
        struct xen_9pfs_front_priv *priv;

        if (addr == NULL)
                return -EINVAL;

        read_lock(&xen_9pfs_lock);
        list_for_each_entry(priv, &xen_9pfs_devs, list) {
                if (!strcmp(priv->tag, addr)) {
                        priv->client = client;
                        read_unlock(&xen_9pfs_lock);
                        return 0;
                }
        }
        read_unlock(&xen_9pfs_lock);
        return -EINVAL;
}

static void p9_xen_close(struct p9_client *client)
{
        struct xen_9pfs_front_priv *priv;

        read_lock(&xen_9pfs_lock);
        list_for_each_entry(priv, &xen_9pfs_devs, list) {
                if (priv->client == client) {
                        priv->client = NULL;
                        read_unlock(&xen_9pfs_lock);
                        return;
                }
        }
        read_unlock(&xen_9pfs_lock);
}

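/*
 * p9_xen_write_todo() - check whether the out ring has at least @size
 * bytes of free space for a new request.
 */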
static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
{
        RING_IDX cons, prod;

        cons = ring->intf->out_cons;
        prod = ring->intf->out_prod;
        virt_mb();

        return XEN_9PFS_RING_SIZE(ring) -
                xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) >= size;
}

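/*
 * p9_xen_request() - pick a ring based on the request tag, wait for
 * enough free space, copy the request into the out ring and notify the
 * backend.
 */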
static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
{
        struct xen_9pfs_front_priv *priv;
        RING_IDX cons, prod, masked_cons, masked_prod;
        unsigned long flags;
        u32 size = p9_req->tc.size;
        struct xen_9pfs_dataring *ring;
        int num;

        read_lock(&xen_9pfs_lock);
        list_for_each_entry(priv, &xen_9pfs_devs, list) {
                if (priv->client == client)
                        break;
        }
        read_unlock(&xen_9pfs_lock);
        if (list_entry_is_head(priv, &xen_9pfs_devs, list))
                return -EINVAL;

        num = p9_req->tc.tag % XEN_9PFS_NUM_RINGS;
        ring = &priv->rings[num];

again:
        while (wait_event_killable(ring->wq,
                                   p9_xen_write_todo(ring, size)) != 0)
                schedule();

        spin_lock_irqsave(&ring->lock, flags);
        cons = ring->intf->out_cons;
        prod = ring->intf->out_prod;
        virt_mb();

        if (XEN_9PFS_RING_SIZE(ring) -
            xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) < size) {
                spin_unlock_irqrestore(&ring->lock, flags);
                goto again;
        }

        masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
        masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));

        xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size,
                              &masked_prod, masked_cons,
                              XEN_9PFS_RING_SIZE(ring));

        WRITE_ONCE(p9_req->status, REQ_STATUS_SENT);
        virt_wmb();                     /* write ring before updating pointer */
        prod += size;
        ring->intf->out_prod = prod;
        spin_unlock_irqrestore(&ring->lock, flags);
        notify_remote_via_irq(ring->irq);
        p9_req_put(client, p9_req);

        return 0;
}

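/*
 * p9_xen_response() - work handler: drain the in ring, match each
 * response header against a pending request by tag and complete it via
 * p9_client_cb().
 */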
static void p9_xen_response(struct work_struct *work)
{
        struct xen_9pfs_front_priv *priv;
        struct xen_9pfs_dataring *ring;
        RING_IDX cons, prod, masked_cons, masked_prod;
        struct xen_9pfs_header h;
        struct p9_req_t *req;
        int status;

        ring = container_of(work, struct xen_9pfs_dataring, work);
        priv = ring->priv;

        while (1) {
                cons = ring->intf->in_cons;
                prod = ring->intf->in_prod;
                virt_rmb();

                if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) <
                    sizeof(h)) {
                        notify_remote_via_irq(ring->irq);
                        return;
                }

                masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
                masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));

                /* First, read just the header */
                xen_9pfs_read_packet(&h, ring->data.in, sizeof(h),
                                     masked_prod, &masked_cons,
                                     XEN_9PFS_RING_SIZE(ring));

                req = p9_tag_lookup(priv->client, h.tag);
                if (!req || req->status != REQ_STATUS_SENT) {
                        dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag);
                        cons += h.size;
                        virt_mb();
                        ring->intf->in_cons = cons;
                        continue;
                }

                if (h.size > req->rc.capacity) {
                        dev_warn(&priv->dev->dev,
                                 "requested packet size too big: %d for tag %d with capacity %zd\n",
                                 h.size, h.tag, req->rc.capacity);
                        WRITE_ONCE(req->status, REQ_STATUS_ERROR);
                        goto recv_error;
                }

                req->rc.size = h.size;
                req->rc.id = h.id;
                req->rc.tag = h.tag;
                req->rc.offset = 0;

                masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
                /* Then, read the whole packet (including the header) */
                xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size,
                                     masked_prod, &masked_cons,
                                     XEN_9PFS_RING_SIZE(ring));

recv_error:
                virt_mb();
                cons += h.size;
                ring->intf->in_cons = cons;

                status = (req->status != REQ_STATUS_ERROR) ?
                        REQ_STATUS_RCVD : REQ_STATUS_ERROR;

                p9_client_cb(priv->client, req, status);
        }
}

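/*
 * Interrupt handler for the per-ring event channel: wake up writers
 * waiting for ring space and schedule the response work.
 */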
static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
{
        struct xen_9pfs_dataring *ring = r;

        if (!ring || !ring->priv->client) {
                /* ignore spurious interrupt */
                return IRQ_HANDLED;
        }

        wake_up_interruptible(&ring->wq);
        schedule_work(&ring->work);

        return IRQ_HANDLED;
}

static struct p9_trans_module p9_xen_trans = {
        .name = "xen",
        .maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT - 2),
        .pooled_rbuffers = false,
        .def = 1,
        .create = p9_xen_create,
        .close = p9_xen_close,
        .request = p9_xen_request,
        .cancel = p9_xen_cancel,
        .owner = THIS_MODULE,
};

static const struct xenbus_device_id xen_9pfs_front_ids[] = {
        { "9pfs" },
        { "" }
};

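/*
 * xen_9pfs_front_free() - unlink the device, stop the response work,
 * revoke all grant references and free the ring memory.
 */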
static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
{
        int i, j;

        write_lock(&xen_9pfs_lock);
        list_del(&priv->list);
        write_unlock(&xen_9pfs_lock);

        for (i = 0; i < XEN_9PFS_NUM_RINGS; i++) {
                struct xen_9pfs_dataring *ring = &priv->rings[i];

                cancel_work_sync(&ring->work);

                if (!priv->rings[i].intf)
                        break;
                if (priv->rings[i].irq > 0)
                        unbind_from_irqhandler(priv->rings[i].irq, ring);
                if (priv->rings[i].data.in) {
                        for (j = 0;
                             j < (1 << priv->rings[i].intf->ring_order);
                             j++) {
                                grant_ref_t ref;

                                ref = priv->rings[i].intf->ref[j];
                                gnttab_end_foreign_access(ref, NULL);
                        }
                        free_pages_exact(priv->rings[i].data.in,
                                         1UL << (priv->rings[i].intf->ring_order +
                                                 XEN_PAGE_SHIFT));
                }
                gnttab_end_foreign_access(priv->rings[i].ref, NULL);
                free_page((unsigned long)priv->rings[i].intf);
        }
        kfree(priv->rings);
        kfree(priv->tag);
        kfree(priv);
}

static void xen_9pfs_front_remove(struct xenbus_device *dev)
{
        struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);

        dev_set_drvdata(&dev->dev, NULL);
        xen_9pfs_front_free(priv);
}

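/*
 * xen_9pfs_front_alloc_dataring() - allocate one shared ring: grant the
 * interface page and the data pages to the backend and bind an event
 * channel to it.
 */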
static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
                                         struct xen_9pfs_dataring *ring,
                                         unsigned int order)
{
        int i = 0;
        int ret = -ENOMEM;
        void *bytes = NULL;

        init_waitqueue_head(&ring->wq);
        spin_lock_init(&ring->lock);
        INIT_WORK(&ring->work, p9_xen_response);

        ring->intf = (struct xen_9pfs_data_intf *)get_zeroed_page(GFP_KERNEL);
        if (!ring->intf)
                return ret;
        ret = gnttab_grant_foreign_access(dev->otherend_id,
                                          virt_to_gfn(ring->intf), 0);
        if (ret < 0)
                goto out;
        ring->ref = ret;
        bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
                                  GFP_KERNEL | __GFP_ZERO);
        if (!bytes) {
                ret = -ENOMEM;
                goto out;
        }
        for (; i < (1 << order); i++) {
                ret = gnttab_grant_foreign_access(
                                dev->otherend_id, virt_to_gfn(bytes) + i, 0);
                if (ret < 0)
                        goto out;
                ring->intf->ref[i] = ret;
        }
        ring->intf->ring_order = order;
        ring->data.in = bytes;
        ring->data.out = bytes + XEN_FLEX_RING_SIZE(order);

        ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
        if (ret)
                goto out;
        ring->irq = bind_evtchn_to_irqhandler(ring->evtchn,
                                              xen_9pfs_front_event_handler,
                                              0, "xen_9pfs-frontend", ring);
        if (ring->irq >= 0)
                return 0;

        xenbus_free_evtchn(dev, ring->evtchn);
        ret = ring->irq;
out:
        if (bytes) {
                for (i--; i >= 0; i--)
                        gnttab_end_foreign_access(ring->intf->ref[i], NULL);
                free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
        }
        gnttab_end_foreign_access(ring->ref, NULL);
        free_page((unsigned long)ring->intf);
        return ret;
}

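/*
 * xen_9pfs_front_init() - negotiate the protocol with the backend via
 * xenstore, allocate the rings and publish their grant references and
 * event channels.
 */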
static int xen_9pfs_front_init(struct xenbus_device *dev)
{
        int ret, i;
        struct xenbus_transaction xbt;
        struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
        char *versions, *v;
        unsigned int max_rings, max_ring_order, len = 0;

        versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
        if (IS_ERR(versions))
                return PTR_ERR(versions);
        for (v = versions; *v; v++) {
                if (simple_strtoul(v, &v, 10) == 1) {
                        v = NULL;
                        break;
                }
        }
        if (v) {
                kfree(versions);
                return -EINVAL;
        }
        kfree(versions);
        max_rings = xenbus_read_unsigned(dev->otherend, "max-rings", 0);
        if (max_rings < XEN_9PFS_NUM_RINGS)
                return -EINVAL;
        max_ring_order = xenbus_read_unsigned(dev->otherend,
                                              "max-ring-page-order", 0);
        if (max_ring_order > XEN_9PFS_RING_ORDER)
                max_ring_order = XEN_9PFS_RING_ORDER;
        if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order))
                p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2;

        priv->rings = kcalloc(XEN_9PFS_NUM_RINGS, sizeof(*priv->rings),
                              GFP_KERNEL);
        if (!priv->rings) {
                kfree(priv);
                return -ENOMEM;
        }

        for (i = 0; i < XEN_9PFS_NUM_RINGS; i++) {
                priv->rings[i].priv = priv;
                ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i],
                                                    max_ring_order);
                if (ret < 0)
                        goto error;
        }

again:
        ret = xenbus_transaction_start(&xbt);
        if (ret) {
                xenbus_dev_fatal(dev, ret, "starting transaction");
                goto error;
        }
        ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
        if (ret)
                goto error_xenbus;
        ret = xenbus_printf(xbt, dev->nodename, "num-rings", "%u",
                            XEN_9PFS_NUM_RINGS);
        if (ret)
                goto error_xenbus;

        for (i = 0; i < XEN_9PFS_NUM_RINGS; i++) {
                char str[16];

                BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
                sprintf(str, "ring-ref%d", i);
                ret = xenbus_printf(xbt, dev->nodename, str, "%d",
                                    priv->rings[i].ref);
                if (ret)
                        goto error_xenbus;

                sprintf(str, "event-channel-%d", i);
                ret = xenbus_printf(xbt, dev->nodename, str, "%u",
                                    priv->rings[i].evtchn);
                if (ret)
                        goto error_xenbus;
        }
        priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
        if (IS_ERR(priv->tag)) {
                ret = PTR_ERR(priv->tag);
                goto error_xenbus;
        }
        ret = xenbus_transaction_end(xbt, 0);
        if (ret) {
                if (ret == -EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, ret, "completing transaction");
                goto error;
        }

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;

error_xenbus:
        xenbus_transaction_end(xbt, 1);
        xenbus_dev_fatal(dev, ret, "writing xenstore");
error:
        xen_9pfs_front_free(priv);
        return ret;
}

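/*
 * xen_9pfs_front_probe() - allocate the per-device state and add it to
 * the global device list; ring setup is deferred to xen_9pfs_front_init().
 */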
static int xen_9pfs_front_probe(struct xenbus_device *dev,
                                const struct xenbus_device_id *id)
{
        struct xen_9pfs_front_priv *priv = NULL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->dev = dev;
        dev_set_drvdata(&dev->dev, priv);

        write_lock(&xen_9pfs_lock);
        list_add_tail(&priv->list, &xen_9pfs_devs);
        write_unlock(&xen_9pfs_lock);

        return 0;
}

static int xen_9pfs_front_resume(struct xenbus_device *dev)
{
        dev_warn(&dev->dev, "suspend/resume unsupported\n");
        return 0;
}

static void xen_9pfs_front_changed(struct xenbus_device *dev,
                                   enum xenbus_state backend_state)
{
        switch (backend_state) {
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
        case XenbusStateInitialising:
        case XenbusStateInitialised:
        case XenbusStateUnknown:
                break;

        case XenbusStateInitWait:
                if (dev->state != XenbusStateInitialising)
                        break;

                xen_9pfs_front_init(dev);
                break;

        case XenbusStateConnected:
                xenbus_switch_state(dev, XenbusStateConnected);
                break;

        case XenbusStateClosed:
                if (dev->state == XenbusStateClosed)
                        break;
                fallthrough;    /* Missed the backend's CLOSING state */
        case XenbusStateClosing:
                xenbus_frontend_closed(dev);
                break;
        }
}

static struct xenbus_driver xen_9pfs_front_driver = {
        .ids = xen_9pfs_front_ids,
        .probe = xen_9pfs_front_probe,
        .remove = xen_9pfs_front_remove,
        .resume = xen_9pfs_front_resume,
        .otherend_changed = xen_9pfs_front_changed,
};

static int __init p9_trans_xen_init(void)
{
        int rc;

        if (!xen_domain())
                return -ENODEV;

        pr_info("Initialising Xen transport for 9pfs\n");

        v9fs_register_trans(&p9_xen_trans);
        rc = xenbus_register_frontend(&xen_9pfs_front_driver);
        if (rc < 0)
                v9fs_unregister_trans(&p9_xen_trans);

        return rc;
}
module_init(p9_trans_xen_init);
MODULE_ALIAS_9P("xen");

static void __exit p9_trans_xen_exit(void)
{
        v9fs_unregister_trans(&p9_xen_trans);
        return xenbus_unregister_driver(&xen_9pfs_front_driver);
}
module_exit(p9_trans_xen_exit);

MODULE_ALIAS("xen:9pfs");
MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>");
MODULE_DESCRIPTION("Xen Transport for 9P");
MODULE_LICENSE("GPL");