/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
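/* Worked example (assuming the common configuration): with 4 KiB pages
 * XEN_NETIF_RX_RING_SIZE is 256, so each queue buffers at most
 * 256/2 * 4096 bytes = 512 KiB of guest Rx traffic internally.
 */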
/* This function is used to set SKBFL_ZEROCOPY_ENABLE as well as to
 * increase the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback(),
 * which calls xenvif_skb_zerocopy_complete().
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
	atomic_inc(&queue->inflight_packets);
}
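
/* Counterpart of xenvif_skb_zerocopy_prepare(): runs once the core
 * network stack has released the zerocopy skb, so prepare/complete
 * calls must balance for inflight_packets to reach zero.
 */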
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}
static int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}
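
/* The interrupt handlers below use the lateeoi event channel model:
 * each handler records a pending-EOI bit in queue->eoi_pending before
 * looking for work. If no work is found, the bit is cleared again and
 * an immediate EOI with XEN_EOI_FLAG_SPURIOUS is issued; otherwise the
 * EOI stays pending and is delivered only after the deferred work (the
 * NAPI poll or the Rx kthread) has run, so that the hypervisor can
 * throttle event storms from a misbehaving frontend.
 */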
static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
	if (rc)
		napi_schedule(&queue->napi);
	return rc;
}
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_tx_interrupt(queue)) {
		atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}
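
/* NAPI poll callback for the Tx-from-guest direction. Standard NAPI
 * contract: process up to @budget requests and, if fewer were consumed,
 * complete NAPI and re-enable event notifications.
 */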
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue, we pretend there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* If the queue is rate-limited, it shall be
		 * rescheduled in the timer callback.
		 */
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}
static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = xenvif_have_rx_work(queue, false);
	if (rc)
		xenvif_kick_thread(queue);
	return rc;
}
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_rx_interrupt(queue)) {
		atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;
	bool has_rx, has_tx;

	old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
	WARN(old, "Interrupt while EOI pending\n");

	has_tx = xenvif_handle_tx_interrupt(queue);
	has_rx = xenvif_handle_rx_interrupt(queue);

	if (!has_rx && !has_tx) {
		atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}
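
/* Pick the transmit queue for a packet. If the frontend configured a
 * hash algorithm via the control ring, the negotiated hash and the
 * currently selected of the two hash mapping tables
 * (vif->hash.mapping[vif->hash.mapping_sel]) decide the queue;
 * otherwise fall back to the core stack's netdev_pick_tx().
 */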
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
	unsigned int num_queues;

	/* If queues are not set up internally - always return 0
	 * as the packet is going to be dropped anyway */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		return 0;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return netdev_pick_tx(dev, skb, NULL) %
		       dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[vif->hash.mapping_sel]
				[skb_get_hash_raw(skb) % size];
}
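
/* Note the direction naming: the backend's start_xmit path feeds the
 * guest's *receive* ring, so packets transmitted here are queued on the
 * internal Rx queue and handed to the per-queue guest-rx kthread.
 */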
static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	if (!xenvif_rx_queue_tail(queue, skb))
		goto drop;

	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
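
/* Sum the per-queue counters into the netdev stats. rcu_read_lock()
 * only keeps the queue array stable against resizing; the counters
 * themselves are read without further synchronisation, so the totals
 * are best-effort rather than an atomic snapshot.
 */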
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u64 rx_bytes = 0;
	u64 rx_packets = 0;
	u64 tx_bytes = 0;
	u64 tx_packets = 0;
	unsigned int index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	rcu_read_unlock();

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}
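
/* xenvif_up()/xenvif_down() enable or quiesce every queue's NAPI
 * instance and IRQs. Both run under the rtnl lock: they are reached
 * from ndo_open/ndo_stop, or from xenvif_carrier_on()/_off() which
 * take it explicitly.
 */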
static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}
static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}
static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}
static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	WRITE_ONCE(dev->mtu, mtu);
	return 0;
}
static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}
static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS value.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}
static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues;
	int i;
	unsigned int queue_index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;

		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;

			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}

	rcu_read_unlock();
}
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}
static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};
static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	static const u8 dummy_addr[ETH_ALEN] = {
		0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
	};
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	vif->xdp_headroom = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_hw_addr_set(dev, dummy_addr);

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}
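
/* Per-queue initialisation. The credit scheduler starts out effectively
 * unlimited (~0UL bytes per window) until the toolstack configures a
 * rate limit via xenbus.
 */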
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * better enable it. The long term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
			{ { .ops = &xenvif_ubuf_ops },
			  { { .ctx = NULL,
			      .desc = i } } };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}
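
/* Bring the carrier up once the frontend has connected. When the
 * frontend cannot do scatter-gather, the MTU is clamped back to
 * ETH_DATA_LEN before the feature set is recomputed.
 */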
void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}
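
/* Map the control ring and bind its event channel. The
 * (req_prod - rsp_prod) sanity check guards against a frontend that
 * publishes inconsistent ring indexes before the backend attaches.
 */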
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	struct xenbus_device *xendev = xenvif_to_xenbus_device(vif);
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	RING_IDX rsp_prod, req_prod;
	int err;

	err = xenbus_map_ring_valloc(xendev, &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	rsp_prod = READ_ONCE(shared->rsp_prod);
	req_prod = READ_ONCE(shared->req_prod);

	BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
		goto err_unmap;

	err = bind_interdomain_evtchn_to_irq_lateeoi(xendev, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not setup irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xendev, vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}
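
/* Tear down one queue in the reverse order of xenvif_connect_data():
 * stop the kthreads first so nothing re-arms the rings, remove NAPI,
 * unbind the IRQs and finally unmap the frontend data rings.
 */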
static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
	if (queue->task) {
		kthread_stop_put(queue->task);
		queue->task = NULL;
	}

	if (queue->dealloc_task) {
		kthread_stop(queue->dealloc_task);
		queue->dealloc_task = NULL;
	}

	if (queue->napi.poll) {
		netif_napi_del(&queue->napi);
		queue->napi.poll = NULL;
	}

	if (queue->tx_irq) {
		unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq == queue->rx_irq)
			queue->rx_irq = 0;
		queue->tx_irq = 0;
	}

	if (queue->rx_irq) {
		unbind_from_irqhandler(queue->rx_irq, queue);
		queue->rx_irq = 0;
	}

	xenvif_unmap_frontend_data_rings(queue);
}
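
/* Connect the data path of one queue: map the Tx/Rx rings, start the
 * two per-queue kthreads (guest-rx and dealloc) plus the NAPI instance,
 * and bind either one combined or two split event channels depending on
 * what the frontend negotiated.
 */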
int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif);
	struct task_struct *task;
	int err;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll);

	queue->stalled = true;

	task = kthread_run(xenvif_kthread_guest_rx, queue,
			   "%s-guest-rx", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->task = task;
	/*
	 * Take a reference to the task in order to prevent it from being freed
	 * if the thread function returns before kthread_stop is called.
	 */
	get_task_struct(task);

	task = kthread_run(xenvif_dealloc_kthread, queue,
			   "%s-dealloc", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->dealloc_task = task;

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	return 0;

kthread_err:
	pr_warn("Could not allocate kthread for %s\n", queue->name);
	err = PTR_ERR(task);
err:
	xenvif_disconnect_queue(queue);
	return err;
}
void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}
void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		xenvif_disconnect_queue(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}
void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}
/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}
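
/* Final teardown. The netdev is unregistered and freed first; the queue
 * array lives in a separate vmalloc'd allocation, so it can be
 * deinitialised and vfree'd afterwards.
 */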
void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);

	vfree(queues);

	module_put(THIS_MODULE);
}