/* drivers/net/xen-netback/interface.c (Linux 4.16.11) */
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
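/* With 4 KiB pages the shared Rx ring typically has 256 slots, so this
 * works out to roughly 512 KiB of internally queued packet data per
 * queue before further packets are dropped.
 */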
/* This function sets SKBTX_DEV_ZEROCOPY and increases the inflight
 * counter. The counter must be increased because the core driver calls
 * into xenvif_zerocopy_callback, which in turn calls
 * xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}
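/* A vif may only be scheduled while its netdev is up, the backend has
 * reached the connected state and the vif has not been flagged as
 * rogue (vif->disabled; see xenvif_poll()).
 */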
int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue, so we pretend there is nothing to do
	 * for it in order to deschedule it from NAPI. The interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* If the queue is rate-limited, it shall be
		 * rescheduled in the timer callback.
		 */
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	xenvif_kick_thread(queue);

	return IRQ_HANDLED;
}
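/* Combined handler, used when the frontend shares a single event
 * channel for Tx and Rx (feature-split-event-channels == 0); see
 * xenvif_connect_data().
 */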
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;

	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;

	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}
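/* Select the Tx queue for an skb. Three cases: no hash algorithm
 * configured (defer to the core's fallback selection), a hash but no
 * mapping table (hash modulo the real queue count), or a
 * frontend-supplied mapping table indexed by the packet hash.
 */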
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return fallback(dev, skb) % dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[skb_get_hash_raw(skb) % size];
}
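/* Backend transmit path: a packet leaving dom0 towards the guest is
 * placed on the vif's internal Rx queue and the per-queue Rx kthread
 * is kicked to push it across the shared ring.
 */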
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
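/* Sum the per-queue counters into the netdev stats. The counters are
 * read without locking under rcu_read_lock(), so the result is a
 * best-effort snapshot rather than an atomically consistent total.
 */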
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u64 rx_bytes = 0;
	u64 rx_packets = 0;
	u64 tx_bytes = 0;
	u64 tx_packets = 0;
	unsigned int index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	rcu_read_unlock();

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}
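/* xenvif_up() and xenvif_down() mirror each other: up enables NAPI and
 * then the per-queue event-channel IRQs, down disables them in the
 * reverse order and also stops the Tx credit timer.
 */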
static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}
static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}
static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}
static netdev_features_t xenvif_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}
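/* Per-queue counters exposed through "ethtool -S <vif>". Each entry
 * pairs a string name with an offset into struct xenvif_stats; the
 * values are summed across all queues in xenvif_get_ethtool_stats().
 */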
static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. Ideally the
	 * guest uses the same MAX_SKB_FRAGS value.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}
static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues;
	int i;
	unsigned int queue_index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;
			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}

	rcu_read_unlock();
}
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}
static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link = ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};
static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit = xenvif_start_xmit,
	.ndo_get_stats = xenvif_get_stats,
	.ndo_open = xenvif_open,
	.ndo_stop = xenvif_close,
	.ndo_change_mtu = xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
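/* Allocate and register the net device for a new vif. The device is
 * created carrier-off; a module reference is taken here and dropped
 * again in xenvif_free().
 */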
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops = &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_broadcast_addr(dev->dev_addr);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}
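/* Per-queue initialisation, run once the frontend has chosen its
 * number of queues: credit-based Tx scheduling state, the internal
 * skb queues, the pending ring, and the grant pages used for mapping
 * frontend Tx buffers.
 */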
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec = 0UL;
	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled these pages consume real memory, so
	 * ballooning should be enabled. The long-term solution would be
	 * to use a set of valid page descriptors, without depending on
	 * ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  { { .ctx = NULL,
			      .desc = i } } };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}
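/* Called when the frontend is ready. Without scatter-gather the vif
 * cannot handle frames larger than a standard Ethernet payload, so the
 * MTU is clamped to ETH_DATA_LEN before the carrier is raised.
 */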
void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not setup irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
				vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}
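/* Bring one data queue up: map the shared Tx/Rx rings, bind either a
 * single combined event channel or split Tx/Rx channels (depending on
 * feature-split-event-channels), and start the guest-Rx and dealloc
 * kthreads. The error labels unwind these steps in reverse order.
 */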
int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
		       XENVIF_NAPI_WEIGHT);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err_tx_unbind;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	queue->stalled = true;

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)queue, "%s-guest-rx", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->task = task;
	get_task_struct(task);

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)queue, "%s-dealloc", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->dealloc_task = task;

	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_data_rings(queue);
	netif_napi_del(&queue->napi);
err:
	module_put(THIS_MODULE);
	return err;
}
void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}
void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		netif_napi_del(&queue->napi);

		if (queue->task) {
			kthread_stop(queue->task);
			put_task_struct(queue->task);
			queue->task = NULL;
		}

		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
		}

		xenvif_unmap_frontend_data_rings(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}
void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}
/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}
void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}