// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Xenbus code for netif backend
 *
 * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
 * Copyright (C) 2005 XenSource Ltd
 */
10 #include <linux/vmalloc.h>
11 #include <linux/rtnetlink.h>
/* Forward declarations for the xenbus backend state machine. */
static int connect_data_rings(struct backend_info *be,
			      struct xenvif_queue *queue);
static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static int backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void xen_unregister_watchers(struct xenvif *vif);
static void set_backend_state(struct backend_info *be,
			      enum xenbus_state state);
23 #ifdef CONFIG_DEBUG_FS
/* Root of the xen-netback debugfs hierarchy; NULL until created in
 * xenvif_debugfs_addif().  Static-storage objects are zero-initialised
 * by the C standard, so no explicit "= NULL" is needed (checkpatch:
 * "do not initialise globals to NULL").
 */
struct dentry *xen_netback_dbg_root;
26 static int xenvif_read_io_ring(struct seq_file
*m
, void *v
)
28 struct xenvif_queue
*queue
= m
->private;
29 struct xen_netif_tx_back_ring
*tx_ring
= &queue
->tx
;
30 struct xen_netif_rx_back_ring
*rx_ring
= &queue
->rx
;
31 struct netdev_queue
*dev_queue
;
34 struct xen_netif_tx_sring
*sring
= tx_ring
->sring
;
36 seq_printf(m
, "Queue %d\nTX: nr_ents %u\n", queue
->id
,
38 seq_printf(m
, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
40 sring
->req_prod
- sring
->rsp_prod
,
42 tx_ring
->req_cons
- sring
->rsp_prod
,
44 sring
->req_event
- sring
->rsp_prod
);
45 seq_printf(m
, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
47 tx_ring
->rsp_prod_pvt
,
48 tx_ring
->rsp_prod_pvt
- sring
->rsp_prod
,
50 sring
->rsp_event
- sring
->rsp_prod
);
51 seq_printf(m
, "pending prod %u pending cons %u nr_pending_reqs %u\n",
54 nr_pending_reqs(queue
));
55 seq_printf(m
, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
58 queue
->dealloc_prod
- queue
->dealloc_cons
);
62 struct xen_netif_rx_sring
*sring
= rx_ring
->sring
;
64 seq_printf(m
, "RX: nr_ents %u\n", rx_ring
->nr_ents
);
65 seq_printf(m
, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
67 sring
->req_prod
- sring
->rsp_prod
,
69 rx_ring
->req_cons
- sring
->rsp_prod
,
71 sring
->req_event
- sring
->rsp_prod
);
72 seq_printf(m
, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
74 rx_ring
->rsp_prod_pvt
,
75 rx_ring
->rsp_prod_pvt
- sring
->rsp_prod
,
77 sring
->rsp_event
- sring
->rsp_prod
);
80 seq_printf(m
, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
81 "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
82 "remaining: %lu, expires: %lu, now: %lu\n",
83 queue
->napi
.state
, queue
->napi
.weight
,
84 skb_queue_len(&queue
->tx_queue
),
85 timer_pending(&queue
->credit_timeout
),
88 queue
->remaining_credit
,
89 queue
->credit_timeout
.expires
,
92 dev_queue
= netdev_get_tx_queue(queue
->vif
->dev
, queue
->id
);
94 seq_printf(m
, "\nRx internal queue: len %u max %u pkts %u %s\n",
95 queue
->rx_queue_len
, queue
->rx_queue_max
,
96 skb_queue_len(&queue
->rx_queue
),
97 netif_tx_queue_stopped(dev_queue
) ? "stopped" : "running");
/* Command string accepted by the io_ring debugfs write handler. */
#define XENVIF_KICK_STR "kick"
/* Size of the scratch buffer used for debugfs writes. */
#define BUFFER_SIZE 32
106 xenvif_write_io_ring(struct file
*filp
, const char __user
*buf
, size_t count
,
109 struct xenvif_queue
*queue
=
110 ((struct seq_file
*)filp
->private_data
)->private;
112 char write
[BUFFER_SIZE
];
114 /* don't allow partial writes and check the length */
117 if (count
>= sizeof(write
))
120 len
= simple_write_to_buffer(write
,
130 if (!strncmp(write
, XENVIF_KICK_STR
, sizeof(XENVIF_KICK_STR
) - 1))
131 xenvif_interrupt(0, (void *)queue
);
133 pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
140 static int xenvif_io_ring_open(struct inode
*inode
, struct file
*filp
)
145 if (inode
->i_private
)
146 queue
= inode
->i_private
;
147 ret
= single_open(filp
, xenvif_read_io_ring
, queue
);
148 filp
->f_mode
|= FMODE_PWRITE
;
152 static const struct file_operations xenvif_dbg_io_ring_ops_fops
= {
153 .owner
= THIS_MODULE
,
154 .open
= xenvif_io_ring_open
,
157 .release
= single_release
,
158 .write
= xenvif_write_io_ring
,
161 static int xenvif_ctrl_show(struct seq_file
*m
, void *v
)
163 struct xenvif
*vif
= m
->private;
165 xenvif_dump_hash_info(vif
, m
);
169 DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl
);
171 static void xenvif_debugfs_addif(struct xenvif
*vif
)
175 vif
->xenvif_dbg_root
= debugfs_create_dir(vif
->dev
->name
,
176 xen_netback_dbg_root
);
177 for (i
= 0; i
< vif
->num_queues
; ++i
) {
178 char filename
[sizeof("io_ring_q") + 4];
180 snprintf(filename
, sizeof(filename
), "io_ring_q%d", i
);
181 debugfs_create_file(filename
, 0600, vif
->xenvif_dbg_root
,
183 &xenvif_dbg_io_ring_ops_fops
);
187 debugfs_create_file("ctrl", 0400, vif
->xenvif_dbg_root
, vif
,
191 static void xenvif_debugfs_delif(struct xenvif
*vif
)
193 debugfs_remove_recursive(vif
->xenvif_dbg_root
);
194 vif
->xenvif_dbg_root
= NULL
;
196 #endif /* CONFIG_DEBUG_FS */
199 * Handle the creation of the hotplug script environment. We add the script
200 * and vif variables to the environment, for the benefit of the vif-* hotplug
203 static int netback_uevent(struct xenbus_device
*xdev
,
204 struct kobj_uevent_env
*env
)
206 struct backend_info
*be
= dev_get_drvdata(&xdev
->dev
);
211 if (add_uevent_var(env
, "script=%s", be
->hotplug_script
))
217 return add_uevent_var(env
, "vif=%s", be
->vif
->dev
->name
);
221 static int backend_create_xenvif(struct backend_info
*be
)
225 struct xenbus_device
*dev
= be
->dev
;
231 err
= xenbus_scanf(XBT_NIL
, dev
->nodename
, "handle", "%li", &handle
);
233 xenbus_dev_fatal(dev
, err
, "reading handle");
234 return (err
< 0) ? err
: -EINVAL
;
237 vif
= xenvif_alloc(&dev
->dev
, dev
->otherend_id
, handle
);
240 xenbus_dev_fatal(dev
, err
, "creating interface");
246 kobject_uevent(&dev
->dev
.kobj
, KOBJ_ONLINE
);
250 static void backend_disconnect(struct backend_info
*be
)
252 struct xenvif
*vif
= be
->vif
;
255 unsigned int num_queues
= vif
->num_queues
;
256 unsigned int queue_index
;
258 xen_unregister_watchers(vif
);
259 #ifdef CONFIG_DEBUG_FS
260 xenvif_debugfs_delif(vif
);
261 #endif /* CONFIG_DEBUG_FS */
262 xenvif_disconnect_data(vif
);
264 /* At this point some of the handlers may still be active
265 * so we need to have additional synchronization here.
270 for (queue_index
= 0; queue_index
< num_queues
; ++queue_index
)
271 xenvif_deinit_queue(&vif
->queues
[queue_index
]);
276 xenvif_disconnect_ctrl(vif
);
280 static void backend_connect(struct backend_info
*be
)
286 static inline void backend_switch_state(struct backend_info
*be
,
287 enum xenbus_state state
)
289 struct xenbus_device
*dev
= be
->dev
;
291 pr_debug("%s -> %s\n", dev
->nodename
, xenbus_strstate(state
));
294 /* If we are waiting for a hotplug script then defer the
295 * actual xenbus state change.
297 if (!be
->have_hotplug_status_watch
)
298 xenbus_switch_state(dev
, state
);
301 /* Handle backend state transitions:
303 * The backend state starts in Initialising and the following transitions are
306 * Initialising -> InitWait -> Connected
318 * The state argument specifies the eventual state of the backend and the
319 * function transitions to that state via the shortest path.
321 static void set_backend_state(struct backend_info
*be
,
322 enum xenbus_state state
)
324 while (be
->state
!= state
) {
326 case XenbusStateInitialising
:
328 case XenbusStateInitWait
:
329 case XenbusStateConnected
:
330 case XenbusStateClosing
:
331 backend_switch_state(be
, XenbusStateInitWait
);
333 case XenbusStateClosed
:
334 backend_switch_state(be
, XenbusStateClosed
);
340 case XenbusStateClosed
:
342 case XenbusStateInitWait
:
343 case XenbusStateConnected
:
344 backend_switch_state(be
, XenbusStateInitWait
);
346 case XenbusStateClosing
:
347 backend_switch_state(be
, XenbusStateClosing
);
353 case XenbusStateInitWait
:
355 case XenbusStateConnected
:
357 backend_switch_state(be
, XenbusStateConnected
);
359 case XenbusStateClosing
:
360 case XenbusStateClosed
:
361 backend_switch_state(be
, XenbusStateClosing
);
367 case XenbusStateConnected
:
369 case XenbusStateInitWait
:
370 case XenbusStateClosing
:
371 case XenbusStateClosed
:
372 backend_disconnect(be
);
373 backend_switch_state(be
, XenbusStateClosing
);
379 case XenbusStateClosing
:
381 case XenbusStateInitWait
:
382 case XenbusStateConnected
:
383 case XenbusStateClosed
:
384 backend_switch_state(be
, XenbusStateClosed
);
396 static void read_xenbus_frontend_xdp(struct backend_info
*be
,
397 struct xenbus_device
*dev
)
399 struct xenvif
*vif
= be
->vif
;
403 err
= xenbus_scanf(XBT_NIL
, dev
->otherend
,
404 "xdp-headroom", "%hu", &headroom
);
406 vif
->xdp_headroom
= 0;
409 if (headroom
> XEN_NETIF_MAX_XDP_HEADROOM
)
410 headroom
= XEN_NETIF_MAX_XDP_HEADROOM
;
411 vif
->xdp_headroom
= headroom
;
415 * Callback received when the frontend's state changes.
417 static void frontend_changed(struct xenbus_device
*dev
,
418 enum xenbus_state frontend_state
)
420 struct backend_info
*be
= dev_get_drvdata(&dev
->dev
);
422 pr_debug("%s -> %s\n", dev
->otherend
, xenbus_strstate(frontend_state
));
424 be
->frontend_state
= frontend_state
;
426 switch (frontend_state
) {
427 case XenbusStateInitialising
:
428 set_backend_state(be
, XenbusStateInitWait
);
431 case XenbusStateInitialised
:
434 case XenbusStateConnected
:
435 set_backend_state(be
, XenbusStateConnected
);
438 case XenbusStateReconfiguring
:
439 read_xenbus_frontend_xdp(be
, dev
);
440 xenbus_switch_state(dev
, XenbusStateReconfigured
);
443 case XenbusStateClosing
:
444 set_backend_state(be
, XenbusStateClosing
);
447 case XenbusStateClosed
:
448 set_backend_state(be
, XenbusStateClosed
);
449 if (xenbus_dev_is_online(dev
))
451 fallthrough
; /* if not online */
452 case XenbusStateUnknown
:
453 set_backend_state(be
, XenbusStateClosed
);
454 device_unregister(&dev
->dev
);
458 xenbus_dev_fatal(dev
, -EINVAL
, "saw state %d at frontend",
465 static void xen_net_read_rate(struct xenbus_device
*dev
,
466 unsigned long *bytes
, unsigned long *usec
)
472 /* Default to unlimited bandwidth. */
476 ratestr
= xenbus_read(XBT_NIL
, dev
->nodename
, "rate", NULL
);
481 b
= simple_strtoul(s
, &e
, 10);
482 if ((s
== e
) || (*e
!= ','))
486 u
= simple_strtoul(s
, &e
, 10);
487 if ((s
== e
) || (*e
!= '\0'))
497 pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
501 static int xen_net_read_mac(struct xenbus_device
*dev
, u8 mac
[])
503 char *s
, *e
, *macstr
;
506 macstr
= s
= xenbus_read(XBT_NIL
, dev
->nodename
, "mac", NULL
);
508 return PTR_ERR(macstr
);
510 for (i
= 0; i
< ETH_ALEN
; i
++) {
511 mac
[i
] = simple_strtoul(s
, &e
, 16);
512 if ((s
== e
) || (*e
!= ((i
== ETH_ALEN
-1) ? '\0' : ':'))) {
523 static void xen_net_rate_changed(struct xenbus_watch
*watch
,
524 const char *path
, const char *token
)
526 struct xenvif
*vif
= container_of(watch
, struct xenvif
, credit_watch
);
527 struct xenbus_device
*dev
= xenvif_to_xenbus_device(vif
);
528 unsigned long credit_bytes
;
529 unsigned long credit_usec
;
530 unsigned int queue_index
;
532 xen_net_read_rate(dev
, &credit_bytes
, &credit_usec
);
533 for (queue_index
= 0; queue_index
< vif
->num_queues
; queue_index
++) {
534 struct xenvif_queue
*queue
= &vif
->queues
[queue_index
];
536 queue
->credit_bytes
= credit_bytes
;
537 queue
->credit_usec
= credit_usec
;
538 if (!mod_timer_pending(&queue
->credit_timeout
, jiffies
) &&
539 queue
->remaining_credit
> queue
->credit_bytes
) {
540 queue
->remaining_credit
= queue
->credit_bytes
;
545 static int xen_register_credit_watch(struct xenbus_device
*dev
,
550 unsigned maxlen
= strlen(dev
->nodename
) + sizeof("/rate");
552 if (vif
->credit_watch
.node
)
555 node
= kmalloc(maxlen
, GFP_KERNEL
);
558 snprintf(node
, maxlen
, "%s/rate", dev
->nodename
);
559 vif
->credit_watch
.node
= node
;
560 vif
->credit_watch
.will_handle
= NULL
;
561 vif
->credit_watch
.callback
= xen_net_rate_changed
;
562 err
= register_xenbus_watch(&vif
->credit_watch
);
564 pr_err("Failed to set watcher %s\n", vif
->credit_watch
.node
);
566 vif
->credit_watch
.node
= NULL
;
567 vif
->credit_watch
.will_handle
= NULL
;
568 vif
->credit_watch
.callback
= NULL
;
573 static void xen_unregister_credit_watch(struct xenvif
*vif
)
575 if (vif
->credit_watch
.node
) {
576 unregister_xenbus_watch(&vif
->credit_watch
);
577 kfree(vif
->credit_watch
.node
);
578 vif
->credit_watch
.node
= NULL
;
582 static void xen_mcast_ctrl_changed(struct xenbus_watch
*watch
,
583 const char *path
, const char *token
)
585 struct xenvif
*vif
= container_of(watch
, struct xenvif
,
587 struct xenbus_device
*dev
= xenvif_to_xenbus_device(vif
);
589 vif
->multicast_control
= !!xenbus_read_unsigned(dev
->otherend
,
590 "request-multicast-control", 0);
593 static int xen_register_mcast_ctrl_watch(struct xenbus_device
*dev
,
598 unsigned maxlen
= strlen(dev
->otherend
) +
599 sizeof("/request-multicast-control");
601 if (vif
->mcast_ctrl_watch
.node
) {
602 pr_err_ratelimited("Watch is already registered\n");
606 node
= kmalloc(maxlen
, GFP_KERNEL
);
608 pr_err("Failed to allocate memory for watch\n");
611 snprintf(node
, maxlen
, "%s/request-multicast-control",
613 vif
->mcast_ctrl_watch
.node
= node
;
614 vif
->mcast_ctrl_watch
.will_handle
= NULL
;
615 vif
->mcast_ctrl_watch
.callback
= xen_mcast_ctrl_changed
;
616 err
= register_xenbus_watch(&vif
->mcast_ctrl_watch
);
618 pr_err("Failed to set watcher %s\n",
619 vif
->mcast_ctrl_watch
.node
);
621 vif
->mcast_ctrl_watch
.node
= NULL
;
622 vif
->mcast_ctrl_watch
.will_handle
= NULL
;
623 vif
->mcast_ctrl_watch
.callback
= NULL
;
628 static void xen_unregister_mcast_ctrl_watch(struct xenvif
*vif
)
630 if (vif
->mcast_ctrl_watch
.node
) {
631 unregister_xenbus_watch(&vif
->mcast_ctrl_watch
);
632 kfree(vif
->mcast_ctrl_watch
.node
);
633 vif
->mcast_ctrl_watch
.node
= NULL
;
/* Install both per-vif xenstore watches (rate limit and multicast
 * control).  Each helper is a no-op if its watch already exists.
 */
static void xen_register_watchers(struct xenbus_device *dev,
				  struct xenvif *vif)
{
	xen_register_credit_watch(dev, vif);
	xen_register_mcast_ctrl_watch(dev, vif);
}
/* Remove both per-vif xenstore watches, in the reverse order of
 * registration.
 */
static void xen_unregister_watchers(struct xenvif *vif)
{
	xen_unregister_mcast_ctrl_watch(vif);
	xen_unregister_credit_watch(vif);
}
650 static void unregister_hotplug_status_watch(struct backend_info
*be
)
652 if (be
->have_hotplug_status_watch
) {
653 unregister_xenbus_watch(&be
->hotplug_status_watch
);
654 kfree(be
->hotplug_status_watch
.node
);
656 be
->have_hotplug_status_watch
= 0;
659 static void hotplug_status_changed(struct xenbus_watch
*watch
,
663 struct backend_info
*be
= container_of(watch
,
665 hotplug_status_watch
);
669 str
= xenbus_read(XBT_NIL
, be
->dev
->nodename
, "hotplug-status", &len
);
672 if (len
== sizeof("connected")-1 && !memcmp(str
, "connected", len
)) {
673 /* Complete any pending state change */
674 xenbus_switch_state(be
->dev
, be
->state
);
676 /* Not interested in this watch anymore. */
677 unregister_hotplug_status_watch(be
);
678 xenbus_rm(XBT_NIL
, be
->dev
->nodename
, "hotplug-status");
683 static int connect_ctrl_ring(struct backend_info
*be
)
685 struct xenbus_device
*dev
= be
->dev
;
686 struct xenvif
*vif
= be
->vif
;
688 grant_ref_t ring_ref
;
692 err
= xenbus_scanf(XBT_NIL
, dev
->otherend
,
693 "ctrl-ring-ref", "%u", &val
);
695 goto done
; /* The frontend does not have a control ring */
699 err
= xenbus_scanf(XBT_NIL
, dev
->otherend
,
700 "event-channel-ctrl", "%u", &val
);
702 xenbus_dev_fatal(dev
, err
,
703 "reading %s/event-channel-ctrl",
710 err
= xenvif_connect_ctrl(vif
, ring_ref
, evtchn
);
712 xenbus_dev_fatal(dev
, err
,
713 "mapping shared-frame %u port %u",
725 static void connect(struct backend_info
*be
)
728 struct xenbus_device
*dev
= be
->dev
;
729 unsigned long credit_bytes
, credit_usec
;
730 unsigned int queue_index
;
731 unsigned int requested_num_queues
;
732 struct xenvif_queue
*queue
;
734 /* Check whether the frontend requested multiple queues
735 * and read the number requested.
737 requested_num_queues
= xenbus_read_unsigned(dev
->otherend
,
738 "multi-queue-num-queues", 1);
739 if (requested_num_queues
> xenvif_max_queues
) {
740 /* buggy or malicious guest */
741 xenbus_dev_fatal(dev
, -EINVAL
,
742 "guest requested %u queues, exceeding the maximum of %u.",
743 requested_num_queues
, xenvif_max_queues
);
747 err
= xen_net_read_mac(dev
, be
->vif
->fe_dev_addr
);
749 xenbus_dev_fatal(dev
, err
, "parsing %s/mac", dev
->nodename
);
753 xen_net_read_rate(dev
, &credit_bytes
, &credit_usec
);
754 xen_unregister_watchers(be
->vif
);
755 xen_register_watchers(dev
, be
->vif
);
756 read_xenbus_vif_flags(be
);
758 err
= connect_ctrl_ring(be
);
760 xenbus_dev_fatal(dev
, err
, "connecting control ring");
764 /* Use the number of queues requested by the frontend */
765 be
->vif
->queues
= vzalloc(array_size(requested_num_queues
,
766 sizeof(struct xenvif_queue
)));
767 if (!be
->vif
->queues
) {
768 xenbus_dev_fatal(dev
, -ENOMEM
,
769 "allocating queues");
773 be
->vif
->num_queues
= requested_num_queues
;
774 be
->vif
->stalled_queues
= requested_num_queues
;
776 for (queue_index
= 0; queue_index
< requested_num_queues
; ++queue_index
) {
777 queue
= &be
->vif
->queues
[queue_index
];
778 queue
->vif
= be
->vif
;
779 queue
->id
= queue_index
;
780 snprintf(queue
->name
, sizeof(queue
->name
), "%s-q%u",
781 be
->vif
->dev
->name
, queue
->id
);
783 err
= xenvif_init_queue(queue
);
785 /* xenvif_init_queue() cleans up after itself on
786 * failure, but we need to clean up any previously
787 * initialised queues. Set num_queues to i so that
788 * earlier queues can be destroyed using the regular
791 be
->vif
->num_queues
= queue_index
;
795 queue
->credit_bytes
= credit_bytes
;
796 queue
->remaining_credit
= credit_bytes
;
797 queue
->credit_usec
= credit_usec
;
799 err
= connect_data_rings(be
, queue
);
801 /* connect_data_rings() cleans up after itself on
802 * failure, but we need to clean up after
803 * xenvif_init_queue() here, and also clean up any
804 * previously initialised queues.
806 xenvif_deinit_queue(queue
);
807 be
->vif
->num_queues
= queue_index
;
812 #ifdef CONFIG_DEBUG_FS
813 xenvif_debugfs_addif(be
->vif
);
814 #endif /* CONFIG_DEBUG_FS */
816 /* Initialisation completed, tell core driver the number of
820 netif_set_real_num_tx_queues(be
->vif
->dev
, requested_num_queues
);
821 netif_set_real_num_rx_queues(be
->vif
->dev
, requested_num_queues
);
824 xenvif_carrier_on(be
->vif
);
826 unregister_hotplug_status_watch(be
);
827 err
= xenbus_watch_pathfmt(dev
, &be
->hotplug_status_watch
, NULL
,
828 hotplug_status_changed
,
829 "%s/%s", dev
->nodename
, "hotplug-status");
831 be
->have_hotplug_status_watch
= 1;
833 netif_tx_wake_all_queues(be
->vif
->dev
);
838 if (be
->vif
->num_queues
> 0)
839 xenvif_disconnect_data(be
->vif
); /* Clean up existing queues */
840 for (queue_index
= 0; queue_index
< be
->vif
->num_queues
; ++queue_index
)
841 xenvif_deinit_queue(&be
->vif
->queues
[queue_index
]);
842 vfree(be
->vif
->queues
);
843 be
->vif
->queues
= NULL
;
844 be
->vif
->num_queues
= 0;
845 xenvif_disconnect_ctrl(be
->vif
);
850 static int connect_data_rings(struct backend_info
*be
,
851 struct xenvif_queue
*queue
)
853 struct xenbus_device
*dev
= be
->dev
;
854 unsigned int num_queues
= queue
->vif
->num_queues
;
855 unsigned long tx_ring_ref
, rx_ring_ref
;
856 unsigned int tx_evtchn
, rx_evtchn
;
860 const size_t xenstore_path_ext_size
= 11; /* sufficient for "/queue-NNN" */
862 /* If the frontend requested 1 queue, or we have fallen back
863 * to single queue due to lack of frontend support for multi-
864 * queue, expect the remaining XenStore keys in the toplevel
865 * directory. Otherwise, expect them in a subdirectory called
868 if (num_queues
== 1) {
869 xspath
= kzalloc(strlen(dev
->otherend
) + 1, GFP_KERNEL
);
871 xenbus_dev_fatal(dev
, -ENOMEM
,
872 "reading ring references");
875 strcpy(xspath
, dev
->otherend
);
877 xspathsize
= strlen(dev
->otherend
) + xenstore_path_ext_size
;
878 xspath
= kzalloc(xspathsize
, GFP_KERNEL
);
880 xenbus_dev_fatal(dev
, -ENOMEM
,
881 "reading ring references");
884 snprintf(xspath
, xspathsize
, "%s/queue-%u", dev
->otherend
,
888 err
= xenbus_gather(XBT_NIL
, xspath
,
889 "tx-ring-ref", "%lu", &tx_ring_ref
,
890 "rx-ring-ref", "%lu", &rx_ring_ref
, NULL
);
892 xenbus_dev_fatal(dev
, err
,
893 "reading %s/ring-ref",
898 /* Try split event channels first, then single event channel. */
899 err
= xenbus_gather(XBT_NIL
, xspath
,
900 "event-channel-tx", "%u", &tx_evtchn
,
901 "event-channel-rx", "%u", &rx_evtchn
, NULL
);
903 err
= xenbus_scanf(XBT_NIL
, xspath
,
904 "event-channel", "%u", &tx_evtchn
);
906 xenbus_dev_fatal(dev
, err
,
907 "reading %s/event-channel(-tx/rx)",
911 rx_evtchn
= tx_evtchn
;
914 /* Map the shared frame, irq etc. */
915 err
= xenvif_connect_data(queue
, tx_ring_ref
, rx_ring_ref
,
916 tx_evtchn
, rx_evtchn
);
918 xenbus_dev_fatal(dev
, err
,
919 "mapping shared-frames %lu/%lu port tx %u rx %u",
920 tx_ring_ref
, rx_ring_ref
,
921 tx_evtchn
, rx_evtchn
);
926 err
: /* Regular return falls through with err == 0 */
931 static int read_xenbus_vif_flags(struct backend_info
*be
)
933 struct xenvif
*vif
= be
->vif
;
934 struct xenbus_device
*dev
= be
->dev
;
935 unsigned int rx_copy
;
938 err
= xenbus_scanf(XBT_NIL
, dev
->otherend
, "request-rx-copy", "%u",
940 if (err
== -ENOENT
) {
945 xenbus_dev_fatal(dev
, err
, "reading %s/request-rx-copy",
952 if (!xenbus_read_unsigned(dev
->otherend
, "feature-rx-notify", 0)) {
953 /* - Reduce drain timeout to poll more frequently for
955 * - Disable Rx stall detection.
957 be
->vif
->drain_timeout
= msecs_to_jiffies(30);
958 be
->vif
->stall_timeout
= 0;
961 vif
->can_sg
= !!xenbus_read_unsigned(dev
->otherend
, "feature-sg", 0);
965 if (xenbus_read_unsigned(dev
->otherend
, "feature-gso-tcpv4", 0))
966 vif
->gso_mask
|= GSO_BIT(TCPV4
);
968 if (xenbus_read_unsigned(dev
->otherend
, "feature-gso-tcpv6", 0))
969 vif
->gso_mask
|= GSO_BIT(TCPV6
);
971 vif
->ip_csum
= !xenbus_read_unsigned(dev
->otherend
,
972 "feature-no-csum-offload", 0);
974 vif
->ipv6_csum
= !!xenbus_read_unsigned(dev
->otherend
,
975 "feature-ipv6-csum-offload", 0);
977 read_xenbus_frontend_xdp(be
, dev
);
982 static int netback_remove(struct xenbus_device
*dev
)
984 struct backend_info
*be
= dev_get_drvdata(&dev
->dev
);
986 unregister_hotplug_status_watch(be
);
988 kobject_uevent(&dev
->dev
.kobj
, KOBJ_OFFLINE
);
989 backend_disconnect(be
);
990 xenvif_free(be
->vif
);
993 kfree(be
->hotplug_script
);
995 dev_set_drvdata(&dev
->dev
, NULL
);
1000 * Entry point to this code when a new device is created. Allocate the basic
1001 * structures and switch to InitWait.
1003 static int netback_probe(struct xenbus_device
*dev
,
1004 const struct xenbus_device_id
*id
)
1006 const char *message
;
1007 struct xenbus_transaction xbt
;
1011 struct backend_info
*be
= kzalloc(sizeof(*be
), GFP_KERNEL
);
1014 xenbus_dev_fatal(dev
, -ENOMEM
,
1015 "allocating backend structure");
1020 dev_set_drvdata(&dev
->dev
, be
);
1025 err
= xenbus_transaction_start(&xbt
);
1027 xenbus_dev_fatal(dev
, err
, "starting transaction");
1031 err
= xenbus_printf(xbt
, dev
->nodename
, "feature-sg", "%d", sg
);
1033 message
= "writing feature-sg";
1034 goto abort_transaction
;
1037 err
= xenbus_printf(xbt
, dev
->nodename
, "feature-gso-tcpv4",
1040 message
= "writing feature-gso-tcpv4";
1041 goto abort_transaction
;
1044 err
= xenbus_printf(xbt
, dev
->nodename
, "feature-gso-tcpv6",
1047 message
= "writing feature-gso-tcpv6";
1048 goto abort_transaction
;
1051 /* We support partial checksum setup for IPv6 packets */
1052 err
= xenbus_printf(xbt
, dev
->nodename
,
1053 "feature-ipv6-csum-offload",
1056 message
= "writing feature-ipv6-csum-offload";
1057 goto abort_transaction
;
1060 /* We support rx-copy path. */
1061 err
= xenbus_printf(xbt
, dev
->nodename
,
1062 "feature-rx-copy", "%d", 1);
1064 message
= "writing feature-rx-copy";
1065 goto abort_transaction
;
1068 /* we can adjust a headroom for netfront XDP processing */
1069 err
= xenbus_printf(xbt
, dev
->nodename
,
1070 "feature-xdp-headroom", "%d",
1071 provides_xdp_headroom
);
1073 message
= "writing feature-xdp-headroom";
1074 goto abort_transaction
;
1077 /* We don't support rx-flip path (except old guests who
1078 * don't grok this feature flag).
1080 err
= xenbus_printf(xbt
, dev
->nodename
,
1081 "feature-rx-flip", "%d", 0);
1083 message
= "writing feature-rx-flip";
1084 goto abort_transaction
;
1087 /* We support dynamic multicast-control. */
1088 err
= xenbus_printf(xbt
, dev
->nodename
,
1089 "feature-multicast-control", "%d", 1);
1091 message
= "writing feature-multicast-control";
1092 goto abort_transaction
;
1095 err
= xenbus_printf(xbt
, dev
->nodename
,
1096 "feature-dynamic-multicast-control",
1099 message
= "writing feature-dynamic-multicast-control";
1100 goto abort_transaction
;
1103 err
= xenbus_transaction_end(xbt
, 0);
1104 } while (err
== -EAGAIN
);
1107 xenbus_dev_fatal(dev
, err
, "completing transaction");
1111 /* Split event channels support, this is optional so it is not
1112 * put inside the above loop.
1114 err
= xenbus_printf(XBT_NIL
, dev
->nodename
,
1115 "feature-split-event-channels",
1116 "%u", separate_tx_rx_irq
);
1118 pr_debug("Error writing feature-split-event-channels\n");
1120 /* Multi-queue support: This is an optional feature. */
1121 err
= xenbus_printf(XBT_NIL
, dev
->nodename
,
1122 "multi-queue-max-queues", "%u", xenvif_max_queues
);
1124 pr_debug("Error writing multi-queue-max-queues\n");
1126 err
= xenbus_printf(XBT_NIL
, dev
->nodename
,
1127 "feature-ctrl-ring",
1130 pr_debug("Error writing feature-ctrl-ring\n");
1132 backend_switch_state(be
, XenbusStateInitWait
);
1134 script
= xenbus_read(XBT_NIL
, dev
->nodename
, "script", NULL
);
1135 if (IS_ERR(script
)) {
1136 err
= PTR_ERR(script
);
1137 xenbus_dev_fatal(dev
, err
, "reading script");
1141 be
->hotplug_script
= script
;
1143 /* This kicks hotplug scripts, so do it immediately. */
1144 err
= backend_create_xenvif(be
);
1151 xenbus_transaction_end(xbt
, 1);
1152 xenbus_dev_fatal(dev
, err
, "%s", message
);
1154 pr_debug("failed\n");
1155 netback_remove(dev
);
1159 static const struct xenbus_device_id netback_ids
[] = {
1164 static struct xenbus_driver netback_driver
= {
1166 .probe
= netback_probe
,
1167 .remove
= netback_remove
,
1168 .uevent
= netback_uevent
,
1169 .otherend_changed
= frontend_changed
,
1170 .allow_rebind
= true,
1173 int xenvif_xenbus_init(void)
1175 return xenbus_register_backend(&netback_driver
);
1178 void xenvif_xenbus_fini(void)
1180 return xenbus_unregister_driver(&netback_driver
);