// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Xenbus code for netif backend
 *
 * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
 * Copyright (C) 2005 XenSource Ltd
 */

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
static int connect_data_rings(struct backend_info *be,
                              struct xenvif_queue *queue);
static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static int backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void xen_unregister_watchers(struct xenvif *vif);
static void set_backend_state(struct backend_info *be,
                              enum xenbus_state state);
#ifdef CONFIG_DEBUG_FS
struct dentry *xen_netback_dbg_root = NULL;

static int xenvif_read_io_ring(struct seq_file *m, void *v)
{
        struct xenvif_queue *queue = m->private;
        struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
        struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
        struct netdev_queue *dev_queue;

        if (tx_ring->sring) {
                struct xen_netif_tx_sring *sring = tx_ring->sring;

                seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
                           tx_ring->nr_ents);
                seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
                           sring->req_prod,
                           sring->req_prod - sring->rsp_prod,
                           tx_ring->req_cons,
                           tx_ring->req_cons - sring->rsp_prod,
                           sring->req_event,
                           sring->req_event - sring->rsp_prod);
                seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
                           sring->rsp_prod,
                           tx_ring->rsp_prod_pvt,
                           tx_ring->rsp_prod_pvt - sring->rsp_prod,
                           sring->rsp_event,
                           sring->rsp_event - sring->rsp_prod);
                seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
                           queue->pending_prod,
                           queue->pending_cons,
                           nr_pending_reqs(queue));
                seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
                           queue->dealloc_prod,
                           queue->dealloc_cons,
                           queue->dealloc_prod - queue->dealloc_cons);
        }

        if (rx_ring->sring) {
                struct xen_netif_rx_sring *sring = rx_ring->sring;

                seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
                seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
                           sring->req_prod,
                           sring->req_prod - sring->rsp_prod,
                           rx_ring->req_cons,
                           rx_ring->req_cons - sring->rsp_prod,
                           sring->req_event,
                           sring->req_event - sring->rsp_prod);
                seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
                           sring->rsp_prod,
                           rx_ring->rsp_prod_pvt,
                           rx_ring->rsp_prod_pvt - sring->rsp_prod,
                           sring->rsp_event,
                           sring->rsp_event - sring->rsp_prod);
        }

        seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
                   "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
                   "remaining: %lu, expires: %lu, now: %lu\n",
                   queue->napi.state, queue->napi.weight,
                   skb_queue_len(&queue->tx_queue),
                   timer_pending(&queue->credit_timeout),
                   queue->credit_bytes,
                   queue->credit_usec,
                   queue->remaining_credit,
                   queue->credit_timeout.expires,
                   jiffies);

        dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);

        seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
                   queue->rx_queue_len, queue->rx_queue_max,
                   skb_queue_len(&queue->rx_queue),
                   netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");

        return 0;
}
#define XENVIF_KICK_STR "kick"
#define BUFFER_SIZE 32
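/*
 * Usage sketch, for illustration only (the absolute debugfs path depends on
 * where xen_netback_dbg_root is created and on the vif's netdev name):
 *
 *   echo kick > /sys/kernel/debug/xen-netback/vif1.0/io_ring_q0
 *
 * writes XENVIF_KICK_STR to the per-queue io_ring file handled below, which
 * re-triggers that queue's interrupt handler via xenvif_interrupt().
 */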
static ssize_t
xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
                     loff_t *ppos)
{
        struct xenvif_queue *queue =
                ((struct seq_file *)filp->private_data)->private;
        int len;
        char write[BUFFER_SIZE];

        /* don't allow partial writes and check the length */
        if (*ppos != 0)
                return 0;
        if (count >= sizeof(write))
                return -ENOSPC;

        len = simple_write_to_buffer(write,
                                     sizeof(write) - 1,
                                     ppos,
                                     buf,
                                     count);
        if (len < 0)
                return len;

        write[len] = '\0';

        if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
                xenvif_interrupt(0, (void *)queue);
        else {
                pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
                        queue->id);
                count = -EINVAL;
        }
        return count;
}
static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
{
        int ret;
        struct xenvif_queue *queue = NULL;

        if (inode->i_private)
                queue = inode->i_private;
        ret = single_open(filp, xenvif_read_io_ring, queue);
        filp->f_mode |= FMODE_PWRITE;
        return ret;
}
static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
        .owner = THIS_MODULE,
        .open = xenvif_io_ring_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = xenvif_write_io_ring,
};
static int xenvif_ctrl_show(struct seq_file *m, void *v)
{
        struct xenvif *vif = m->private;

        xenvif_dump_hash_info(vif, m);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);
static void xenvif_debugfs_addif(struct xenvif *vif)
{
        int i;

        vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
                                                  xen_netback_dbg_root);
        for (i = 0; i < vif->num_queues; ++i) {
                char filename[sizeof("io_ring_q") + 4];

                snprintf(filename, sizeof(filename), "io_ring_q%d", i);
                debugfs_create_file(filename, 0600, vif->xenvif_dbg_root,
                                    &vif->queues[i],
                                    &xenvif_dbg_io_ring_ops_fops);
        }

        debugfs_create_file("ctrl", 0400, vif->xenvif_dbg_root, vif,
                            &xenvif_ctrl_fops);
}
static void xenvif_debugfs_delif(struct xenvif *vif)
{
        debugfs_remove_recursive(vif->xenvif_dbg_root);
        vif->xenvif_dbg_root = NULL;
}
#endif /* CONFIG_DEBUG_FS */
/*
 * Handle the creation of the hotplug script environment.  We add the script
 * and vif variables to the environment, for the benefit of the vif-* hotplug
 * scripts.
 */
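/*
 * For illustration only (the values are hypothetical and depend on the
 * toolstack configuration), the resulting uevent environment might contain:
 *
 *   script=/etc/xen/scripts/vif-bridge
 *   vif=vif3.0
 *
 * which the vif-* hotplug scripts consume when plumbing the backend device.
 */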
static int netback_uevent(const struct xenbus_device *xdev,
                          struct kobj_uevent_env *env)
{
        struct backend_info *be = dev_get_drvdata(&xdev->dev);

        if (!be)
                return 0;

        if (add_uevent_var(env, "script=%s", be->hotplug_script))
                return -ENOMEM;

        if (!be->vif)
                return 0;

        return add_uevent_var(env, "vif=%s", be->vif->dev->name);
}
static int backend_create_xenvif(struct backend_info *be)
{
        int err;
        long handle;
        struct xenbus_device *dev = be->dev;
        struct xenvif *vif;

        if (be->vif != NULL)
                return 0;

        err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
        if (err != 1) {
                xenbus_dev_fatal(dev, err, "reading handle");
                return (err < 0) ? err : -EINVAL;
        }

        vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
        if (IS_ERR(vif)) {
                err = PTR_ERR(vif);
                xenbus_dev_fatal(dev, err, "creating interface");
                return err;
        }
        be->vif = vif;

        kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
        return 0;
}
static void backend_disconnect(struct backend_info *be)
{
        struct xenvif *vif = be->vif;

        if (vif) {
                unsigned int num_queues = vif->num_queues;
                unsigned int queue_index;

                xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
                xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
                xenvif_disconnect_data(vif);

                /* At this point some of the handlers may still be active
                 * so we need to have additional synchronization here.
                 */
                vif->num_queues = 0;
                synchronize_net();

                for (queue_index = 0; queue_index < num_queues; ++queue_index)
                        xenvif_deinit_queue(&vif->queues[queue_index]);

                vfree(vif->queues);
                vif->queues = NULL;

                xenvif_disconnect_ctrl(vif);
        }
}
static void backend_connect(struct backend_info *be)
{
        if (be->vif)
                connect(be);
}
static inline void backend_switch_state(struct backend_info *be,
                                        enum xenbus_state state)
{
        struct xenbus_device *dev = be->dev;

        pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
        be->state = state;

        /* If we are waiting for a hotplug script then defer the
         * actual xenbus state change.
         */
        if (!be->have_hotplug_status_watch)
                xenbus_switch_state(dev, state);
}
/* Handle backend state transitions:
 *
 * The backend state starts in Initialising and the following transitions are
 * allowed:
 *
 * Initialising -> InitWait -> Connected
 *
 * InitWait and Connected may drop to Closing and then Closed, and a Closed
 * backend may return to InitWait when the frontend reconnects.
 *
 * The state argument specifies the eventual state of the backend and the
 * function transitions to that state via the shortest path.
 */
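/*
 * Worked example, derived from the switch cases below rather than from the
 * comment above: with be->state == Connected and a target of Closed, the
 * loop disconnects the vif and steps Connected -> Closing, then on the next
 * iteration steps Closing -> Closed, calling backend_switch_state() at each
 * hop so the frontend observes every intermediate xenbus state.
 */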
static void set_backend_state(struct backend_info *be,
                              enum xenbus_state state)
{
        while (be->state != state) {
                switch (be->state) {
                case XenbusStateInitialising:
                        switch (state) {
                        case XenbusStateInitWait:
                        case XenbusStateConnected:
                        case XenbusStateClosing:
                                backend_switch_state(be, XenbusStateInitWait);
                                break;
                        case XenbusStateClosed:
                                backend_switch_state(be, XenbusStateClosed);
                                break;
                        default:
                                BUG();
                        }
                        break;
                case XenbusStateClosed:
                        switch (state) {
                        case XenbusStateInitWait:
                        case XenbusStateConnected:
                                backend_switch_state(be, XenbusStateInitWait);
                                break;
                        case XenbusStateClosing:
                                backend_switch_state(be, XenbusStateClosing);
                                break;
                        default:
                                BUG();
                        }
                        break;
                case XenbusStateInitWait:
                        switch (state) {
                        case XenbusStateConnected:
                                backend_connect(be);
                                backend_switch_state(be, XenbusStateConnected);
                                break;
                        case XenbusStateClosing:
                        case XenbusStateClosed:
                                backend_switch_state(be, XenbusStateClosing);
                                break;
                        default:
                                BUG();
                        }
                        break;
                case XenbusStateConnected:
                        switch (state) {
                        case XenbusStateInitWait:
                        case XenbusStateClosing:
                        case XenbusStateClosed:
                                backend_disconnect(be);
                                backend_switch_state(be, XenbusStateClosing);
                                break;
                        default:
                                BUG();
                        }
                        break;
                case XenbusStateClosing:
                        switch (state) {
                        case XenbusStateInitWait:
                        case XenbusStateConnected:
                        case XenbusStateClosed:
                                backend_switch_state(be, XenbusStateClosed);
                                break;
                        default:
                                BUG();
                        }
                        break;
                default:
                        BUG();
                }
        }
}
static void read_xenbus_frontend_xdp(struct backend_info *be,
                                     struct xenbus_device *dev)
{
        struct xenvif *vif = be->vif;
        u16 headroom;
        int err;

        err = xenbus_scanf(XBT_NIL, dev->otherend,
                           "xdp-headroom", "%hu", &headroom);
        if (err != 1) {
                vif->xdp_headroom = 0;
                return;
        }
        if (headroom > XEN_NETIF_MAX_XDP_HEADROOM)
                headroom = XEN_NETIF_MAX_XDP_HEADROOM;
        vif->xdp_headroom = headroom;
}
/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
                             enum xenbus_state frontend_state)
{
        struct backend_info *be = dev_get_drvdata(&dev->dev);

        pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));

        be->frontend_state = frontend_state;

        switch (frontend_state) {
        case XenbusStateInitialising:
                set_backend_state(be, XenbusStateInitWait);
                break;

        case XenbusStateInitialised:
                break;

        case XenbusStateConnected:
                set_backend_state(be, XenbusStateConnected);
                break;

        case XenbusStateReconfiguring:
                read_xenbus_frontend_xdp(be, dev);
                xenbus_switch_state(dev, XenbusStateReconfigured);
                break;

        case XenbusStateClosing:
                set_backend_state(be, XenbusStateClosing);
                break;

        case XenbusStateClosed:
                set_backend_state(be, XenbusStateClosed);
                if (xenbus_dev_is_online(dev))
                        break;
                fallthrough;    /* if not online */
        case XenbusStateUnknown:
                set_backend_state(be, XenbusStateClosed);
                device_unregister(&dev->dev);
                break;

        default:
                xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
                                 frontend_state);
                break;
        }
}
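/*
 * The "rate" node parsed below has the form "<bytes>,<usec>"; for example
 * (values chosen purely for illustration) "10000000,50000" would grant the
 * vif roughly 10 MB of credit per 50 ms period.
 */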
static void xen_net_read_rate(struct xenbus_device *dev,
                              unsigned long *bytes, unsigned long *usec)
{
        char *s, *e;
        unsigned long b, u;
        char *ratestr;

        /* Default to unlimited bandwidth. */
        *bytes = ~0UL;
        *usec = 0;

        ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
        if (IS_ERR(ratestr))
                return;

        s = ratestr;
        b = simple_strtoul(s, &e, 10);
        if ((s == e) || (*e != ','))
                goto fail;

        s = e + 1;
        u = simple_strtoul(s, &e, 10);
        if ((s == e) || (*e != '\0'))
                goto fail;

        *bytes = b;
        *usec = u;

        kfree(ratestr);
        return;

 fail:
        pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
        kfree(ratestr);
}
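/*
 * The "mac" node read below uses the usual colon-separated notation, e.g.
 * "00:16:3e:12:34:56" (example value only).
 */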
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
        char *s, *e, *macstr;
        int i;

        macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
        if (IS_ERR(macstr))
                return PTR_ERR(macstr);

        for (i = 0; i < ETH_ALEN; i++) {
                mac[i] = simple_strtoul(s, &e, 16);
                if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
                        kfree(macstr);
                        return -ENOENT;
                }
                s = e + 1;
        }

        kfree(macstr);
        return 0;
}
static void xen_net_rate_changed(struct xenbus_watch *watch,
                                 const char *path, const char *token)
{
        struct xenvif *vif = container_of(watch, struct xenvif, credit_watch);
        struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
        unsigned long credit_bytes;
        unsigned long credit_usec;
        unsigned int queue_index;

        xen_net_read_rate(dev, &credit_bytes, &credit_usec);
        for (queue_index = 0; queue_index < vif->num_queues; queue_index++) {
                struct xenvif_queue *queue = &vif->queues[queue_index];

                queue->credit_bytes = credit_bytes;
                queue->credit_usec = credit_usec;
                if (!mod_timer_pending(&queue->credit_timeout, jiffies) &&
                    queue->remaining_credit > queue->credit_bytes) {
                        queue->remaining_credit = queue->credit_bytes;
                }
        }
}
static int xen_register_credit_watch(struct xenbus_device *dev,
                                     struct xenvif *vif)
{
        int err = 0;
        char *node;
        unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");

        if (vif->credit_watch.node)
                return -EADDRINUSE;

        node = kmalloc(maxlen, GFP_KERNEL);
        if (!node)
                return -ENOMEM;
        snprintf(node, maxlen, "%s/rate", dev->nodename);
        vif->credit_watch.node = node;
        vif->credit_watch.will_handle = NULL;
        vif->credit_watch.callback = xen_net_rate_changed;
        err = register_xenbus_watch(&vif->credit_watch);
        if (err) {
                pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
                kfree(node);
                vif->credit_watch.node = NULL;
                vif->credit_watch.will_handle = NULL;
                vif->credit_watch.callback = NULL;
        }
        return err;
}
static void xen_unregister_credit_watch(struct xenvif *vif)
{
        if (vif->credit_watch.node) {
                unregister_xenbus_watch(&vif->credit_watch);
                kfree(vif->credit_watch.node);
                vif->credit_watch.node = NULL;
        }
}
static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
                                   const char *path, const char *token)
{
        struct xenvif *vif = container_of(watch, struct xenvif,
                                          mcast_ctrl_watch);
        struct xenbus_device *dev = xenvif_to_xenbus_device(vif);

        vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
                                        "request-multicast-control", 0);
}
static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
                                         struct xenvif *vif)
{
        int err = 0;
        char *node;
        unsigned maxlen = strlen(dev->otherend) +
                sizeof("/request-multicast-control");

        if (vif->mcast_ctrl_watch.node) {
                pr_err_ratelimited("Watch is already registered\n");
                return -EADDRINUSE;
        }

        node = kmalloc(maxlen, GFP_KERNEL);
        if (!node) {
                pr_err("Failed to allocate memory for watch\n");
                return -ENOMEM;
        }
        snprintf(node, maxlen, "%s/request-multicast-control",
                 dev->otherend);
        vif->mcast_ctrl_watch.node = node;
        vif->mcast_ctrl_watch.will_handle = NULL;
        vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
        err = register_xenbus_watch(&vif->mcast_ctrl_watch);
        if (err) {
                pr_err("Failed to set watcher %s\n",
                       vif->mcast_ctrl_watch.node);
                kfree(node);
                vif->mcast_ctrl_watch.node = NULL;
                vif->mcast_ctrl_watch.will_handle = NULL;
                vif->mcast_ctrl_watch.callback = NULL;
        }
        return err;
}
static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
{
        if (vif->mcast_ctrl_watch.node) {
                unregister_xenbus_watch(&vif->mcast_ctrl_watch);
                kfree(vif->mcast_ctrl_watch.node);
                vif->mcast_ctrl_watch.node = NULL;
        }
}
static void xen_register_watchers(struct xenbus_device *dev,
                                  struct xenvif *vif)
{
        xen_register_credit_watch(dev, vif);
        xen_register_mcast_ctrl_watch(dev, vif);
}
static void xen_unregister_watchers(struct xenvif *vif)
{
        xen_unregister_mcast_ctrl_watch(vif);
        xen_unregister_credit_watch(vif);
}
static void unregister_hotplug_status_watch(struct backend_info *be)
{
        if (be->have_hotplug_status_watch) {
                unregister_xenbus_watch(&be->hotplug_status_watch);
                kfree(be->hotplug_status_watch.node);
        }
        be->have_hotplug_status_watch = 0;
}
static void hotplug_status_changed(struct xenbus_watch *watch,
                                   const char *path,
                                   const char *token)
{
        struct backend_info *be = container_of(watch,
                                               struct backend_info,
                                               hotplug_status_watch);
        char *str;
        unsigned int len;

        str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
        if (IS_ERR(str))
                return;
        if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
                /* Complete any pending state change */
                xenbus_switch_state(be->dev, be->state);

                /* Not interested in this watch anymore. */
                unregister_hotplug_status_watch(be);
        }
        kfree(str);
}
static int connect_ctrl_ring(struct backend_info *be)
{
        struct xenbus_device *dev = be->dev;
        struct xenvif *vif = be->vif;
        unsigned int val;
        grant_ref_t ring_ref;
        unsigned int evtchn;
        int err;

        err = xenbus_scanf(XBT_NIL, dev->otherend,
                           "ctrl-ring-ref", "%u", &val);
        if (err < 0)
                goto done; /* The frontend does not have a control ring */

        ring_ref = val;

        err = xenbus_scanf(XBT_NIL, dev->otherend,
                           "event-channel-ctrl", "%u", &val);
        if (err < 0) {
                xenbus_dev_fatal(dev, err,
                                 "reading %s/event-channel-ctrl",
                                 dev->otherend);
                goto fail;
        }

        evtchn = val;

        err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err,
                                 "mapping shared-frame %u port %u",
                                 ring_ref, evtchn);
                goto fail;
        }

done:
        return 0;

fail:
        return err;
}
static void connect(struct backend_info *be)
{
        int err;
        struct xenbus_device *dev = be->dev;
        unsigned long credit_bytes, credit_usec;
        unsigned int queue_index;
        unsigned int requested_num_queues;
        struct xenvif_queue *queue;

        /* Check whether the frontend requested multiple queues
         * and read the number requested.
         */
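        /* (Derived from this file: netback_probe() advertises the backend's
         * limit in "multi-queue-max-queues"; a multi-queue aware frontend
         * replies with "multi-queue-num-queues", which is read here with a
         * default of 1.)
         */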
        requested_num_queues = xenbus_read_unsigned(dev->otherend,
                                        "multi-queue-num-queues", 1);
        if (requested_num_queues > xenvif_max_queues) {
                /* buggy or malicious guest */
                xenbus_dev_fatal(dev, -EINVAL,
                                 "guest requested %u queues, exceeding the maximum of %u.",
                                 requested_num_queues, xenvif_max_queues);
                return;
        }

        err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
                return;
        }

        xen_net_read_rate(dev, &credit_bytes, &credit_usec);
        xen_unregister_watchers(be->vif);
        xen_register_watchers(dev, be->vif);
        read_xenbus_vif_flags(be);

        err = connect_ctrl_ring(be);
        if (err) {
                xenbus_dev_fatal(dev, err, "connecting control ring");
                return;
        }

        /* Use the number of queues requested by the frontend */
        be->vif->queues = vzalloc(array_size(requested_num_queues,
                                             sizeof(struct xenvif_queue)));
        if (!be->vif->queues) {
                xenbus_dev_fatal(dev, -ENOMEM,
                                 "allocating queues");
                return;
        }

        be->vif->num_queues = requested_num_queues;
        be->vif->stalled_queues = requested_num_queues;

        for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
                queue = &be->vif->queues[queue_index];
                queue->vif = be->vif;
                queue->id = queue_index;
                snprintf(queue->name, sizeof(queue->name), "%s-q%u",
                         be->vif->dev->name, queue->id);

                err = xenvif_init_queue(queue);
                if (err) {
                        /* xenvif_init_queue() cleans up after itself on
                         * failure, but we need to clean up any previously
                         * initialised queues. Set num_queues to i so that
                         * earlier queues can be destroyed using the regular
                         * disconnect logic.
                         */
                        be->vif->num_queues = queue_index;
                        goto err;
                }

                queue->credit_bytes = credit_bytes;
                queue->remaining_credit = credit_bytes;
                queue->credit_usec = credit_usec;

                err = connect_data_rings(be, queue);
                if (err) {
                        /* connect_data_rings() cleans up after itself on
                         * failure, but we need to clean up after
                         * xenvif_init_queue() here, and also clean up any
                         * previously initialised queues.
                         */
                        xenvif_deinit_queue(queue);
                        be->vif->num_queues = queue_index;
                        goto err;
                }
        }

#ifdef CONFIG_DEBUG_FS
        xenvif_debugfs_addif(be->vif);
#endif /* CONFIG_DEBUG_FS */

        /* Initialisation completed, tell core driver the number of
         * active queues.
         */
        rtnl_lock();
        netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
        netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
        rtnl_unlock();

        xenvif_carrier_on(be->vif);

        unregister_hotplug_status_watch(be);
        err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
                                   hotplug_status_changed,
                                   "%s/%s", dev->nodename, "hotplug-status");
        if (!err)
                be->have_hotplug_status_watch = 1;

        netif_tx_wake_all_queues(be->vif->dev);

        return;

err:
        if (be->vif->num_queues > 0)
                xenvif_disconnect_data(be->vif); /* Clean up existing queues */
        for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
                xenvif_deinit_queue(&be->vif->queues[queue_index]);
        vfree(be->vif->queues);
        be->vif->queues = NULL;
        be->vif->num_queues = 0;
        xenvif_disconnect_ctrl(be->vif);
        return;
}
static int connect_data_rings(struct backend_info *be,
                              struct xenvif_queue *queue)
{
        struct xenbus_device *dev = be->dev;
        unsigned int num_queues = queue->vif->num_queues;
        unsigned long tx_ring_ref, rx_ring_ref;
        unsigned int tx_evtchn, rx_evtchn;
        int err;
        char *xspath;
        size_t xspathsize;
        const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
        /* If the frontend requested 1 queue, or we have fallen back
         * to single queue due to lack of frontend support for multi-
         * queue, expect the remaining XenStore keys in the toplevel
         * directory. Otherwise, expect them in a subdirectory called
         * queue-N.
         */
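        /* Illustration (paths are frontend-relative and assumed): with one
         * queue the backend reads e.g. <otherend>/tx-ring-ref, while with
         * N > 1 queues it reads <otherend>/queue-0/tx-ring-ref through
         * <otherend>/queue-(N-1)/tx-ring-ref, matching the "%s/queue-%u"
         * path built below.
         */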
        if (num_queues == 1) {
                xspath = kstrdup(dev->otherend, GFP_KERNEL);
                if (!xspath) {
                        xenbus_dev_fatal(dev, -ENOMEM,
                                         "reading ring references");
                        return -ENOMEM;
                }
        } else {
                xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
                xspath = kzalloc(xspathsize, GFP_KERNEL);
                if (!xspath) {
                        xenbus_dev_fatal(dev, -ENOMEM,
                                         "reading ring references");
                        return -ENOMEM;
                }
                snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
                         queue->id);
        }

        err = xenbus_gather(XBT_NIL, xspath,
                            "tx-ring-ref", "%lu", &tx_ring_ref,
                            "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
        if (err) {
                xenbus_dev_fatal(dev, err,
                                 "reading %s/ring-ref",
                                 xspath);
                goto err;
        }

        /* Try split event channels first, then single event channel. */
        err = xenbus_gather(XBT_NIL, xspath,
                            "event-channel-tx", "%u", &tx_evtchn,
                            "event-channel-rx", "%u", &rx_evtchn, NULL);
        if (err < 0) {
                err = xenbus_scanf(XBT_NIL, xspath,
                                   "event-channel", "%u", &tx_evtchn);
                if (err < 0) {
                        xenbus_dev_fatal(dev, err,
                                         "reading %s/event-channel(-tx/rx)",
                                         xspath);
                        goto err;
                }
                rx_evtchn = tx_evtchn;
        }

        /* Map the shared frame, irq etc. */
        err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
                                  tx_evtchn, rx_evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err,
                                 "mapping shared-frames %lu/%lu port tx %u rx %u",
                                 tx_ring_ref, rx_ring_ref,
                                 tx_evtchn, rx_evtchn);
                goto err;
        }

        err = 0;
err: /* Regular return falls through with err == 0 */
        kfree(xspath);
        return err;
}
static int read_xenbus_vif_flags(struct backend_info *be)
{
        struct xenvif *vif = be->vif;
        struct xenbus_device *dev = be->dev;
        unsigned int rx_copy;
        int err;

        err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
                           &rx_copy);
        if (err == -ENOENT) {
                err = 0;
                rx_copy = 0;
        }
        if (err < 0) {
                xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
                                 dev->otherend);
                return err;
        }
        if (!rx_copy)
                return -EOPNOTSUPP;

        if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
                /* - Reduce drain timeout to poll more frequently for
                 *   Rx requests.
                 * - Disable Rx stall detection.
                 */
                be->vif->drain_timeout = msecs_to_jiffies(30);
                be->vif->stall_timeout = 0;
        }

        vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);

        vif->gso_mask = 0;

        if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
                vif->gso_mask |= GSO_BIT(TCPV4);

        if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
                vif->gso_mask |= GSO_BIT(TCPV6);

        vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
                                             "feature-no-csum-offload", 0);

        vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
                                                "feature-ipv6-csum-offload", 0);

        read_xenbus_frontend_xdp(be, dev);

        return 0;
}
static void netback_remove(struct xenbus_device *dev)
{
        struct backend_info *be = dev_get_drvdata(&dev->dev);

        unregister_hotplug_status_watch(be);
        xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
        if (be->vif) {
                kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
                backend_disconnect(be);
                xenvif_free(be->vif);
                be->vif = NULL;
        }
        kfree(be->hotplug_script);
        kfree(be);
        dev_set_drvdata(&dev->dev, NULL);
}
/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and switch to InitWait.
 */
static int netback_probe(struct xenbus_device *dev,
                         const struct xenbus_device_id *id)
{
        const char *message;
        struct xenbus_transaction xbt;
        int err;
        int sg;
        const char *script;
        struct backend_info *be = kzalloc(sizeof(*be), GFP_KERNEL);

        if (!be) {
                xenbus_dev_fatal(dev, -ENOMEM,
                                 "allocating backend structure");
                return -ENOMEM;
        }

        be->dev = dev;
        dev_set_drvdata(&dev->dev, be);

        sg = 1;

        do {
                err = xenbus_transaction_start(&xbt);
                if (err) {
                        xenbus_dev_fatal(dev, err, "starting transaction");
                        goto fail;
                }

                err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
                if (err) {
                        message = "writing feature-sg";
                        goto abort_transaction;
                }

                err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
                                    "%d", sg);
                if (err) {
                        message = "writing feature-gso-tcpv4";
                        goto abort_transaction;
                }

                err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
                                    "%d", sg);
                if (err) {
                        message = "writing feature-gso-tcpv6";
                        goto abort_transaction;
                }

                /* We support partial checksum setup for IPv6 packets */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-ipv6-csum-offload",
                                    "%d", 1);
                if (err) {
                        message = "writing feature-ipv6-csum-offload";
                        goto abort_transaction;
                }

                /* We support rx-copy path. */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-rx-copy", "%d", 1);
                if (err) {
                        message = "writing feature-rx-copy";
                        goto abort_transaction;
                }

                /* we can adjust a headroom for netfront XDP processing */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-xdp-headroom", "%d",
                                    provides_xdp_headroom);
                if (err) {
                        message = "writing feature-xdp-headroom";
                        goto abort_transaction;
                }

                /* We don't support rx-flip path (except old guests who
                 * don't grok this feature flag).
                 */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-rx-flip", "%d", 0);
                if (err) {
                        message = "writing feature-rx-flip";
                        goto abort_transaction;
                }

                /* We support dynamic multicast-control. */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-multicast-control", "%d", 1);
                if (err) {
                        message = "writing feature-multicast-control";
                        goto abort_transaction;
                }

                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-dynamic-multicast-control",
                                    "%d", 1);
                if (err) {
                        message = "writing feature-dynamic-multicast-control";
                        goto abort_transaction;
                }

                err = xenbus_transaction_end(xbt, 0);
        } while (err == -EAGAIN);

        if (err) {
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto fail;
        }

        /* Split event channels support, this is optional so it is not
         * put inside the above loop.
         */
        err = xenbus_printf(XBT_NIL, dev->nodename,
                            "feature-split-event-channels",
                            "%u", separate_tx_rx_irq);
        if (err)
                pr_debug("Error writing feature-split-event-channels\n");

        /* Multi-queue support: This is an optional feature. */
        err = xenbus_printf(XBT_NIL, dev->nodename,
                            "multi-queue-max-queues", "%u", xenvif_max_queues);
        if (err)
                pr_debug("Error writing multi-queue-max-queues\n");

        err = xenbus_printf(XBT_NIL, dev->nodename,
                            "feature-ctrl-ring",
                            "%u", true);
        if (err)
                pr_debug("Error writing feature-ctrl-ring\n");

        backend_switch_state(be, XenbusStateInitWait);

        script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
        if (IS_ERR(script)) {
                err = PTR_ERR(script);
                xenbus_dev_fatal(dev, err, "reading script");
                goto fail;
        }

        be->hotplug_script = script;

        /* This kicks hotplug scripts, so do it immediately. */
        err = backend_create_xenvif(be);
        if (err)
                goto fail;

        return 0;

abort_transaction:
        xenbus_transaction_end(xbt, 1);
        xenbus_dev_fatal(dev, err, "%s", message);
fail:
        pr_debug("failed\n");
        netback_remove(dev);
        return err;
}
static const struct xenbus_device_id netback_ids[] = {
        { "vif" },
        { "" }
};
static struct xenbus_driver netback_driver = {
        .ids = netback_ids,
        .probe = netback_probe,
        .remove = netback_remove,
        .uevent = netback_uevent,
        .otherend_changed = frontend_changed,
        .allow_rebind = true,
};
int xenvif_xenbus_init(void)
{
        return xenbus_register_backend(&netback_driver);
}
void xenvif_xenbus_fini(void)
{
        return xenbus_unregister_driver(&netback_driver);
}