staging: hv: Fix GARP not sent after Quick Migration
[zen-stable.git] / drivers/staging/hv/netvsc_drv.c
blob 33973568214fdb44c2547a09f7db2b09970c90e9
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hv_api.h"
#include "logging.h"
#include "version_info.h"
#include "vmbus.h"
#include "netvsc_api.h"

struct net_device_context {
        /* point back to our device context */
        struct hv_device *device_ctx;
        unsigned long avail;
        struct work_struct work;
};

#define PACKET_PAGES_LOWATER  8
/* Need this many pages to handle worst case fragmented packet */
#define PACKET_PAGES_HIWATER  (MAX_SKB_FRAGS + 2)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

/* The one and only one */
static struct netvsc_driver g_netvsc_drv;

/* No-op so the netdev core doesn't return -EINVAL when modifying the
 * multicast address list in SIOCADDMULTI. hv is setup to get all multicast
 * when it calls RndisFilterOnOpen() */
static void netvsc_set_multicast_list(struct net_device *net)
{
}

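/*
 * netvsc_open - Bring the interface up: open the RNDIS filter device and
 * start the transmit queue. If the link is down, nothing is opened and the
 * condition is only logged.
 */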
static int netvsc_open(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *device_obj = net_device_ctx->device_ctx;
        int ret = 0;

        if (netif_carrier_ok(net)) {
                /* Open up the device */
                ret = rndis_filter_open(device_obj);
                if (ret != 0) {
                        DPRINT_ERR(NETVSC_DRV,
                                   "unable to open device (ret %d).", ret);
                        return ret;
                }

                netif_start_queue(net);
        } else {
                DPRINT_ERR(NETVSC_DRV, "unable to open device...link is down.");
        }

        return ret;
}

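/*
 * netvsc_close - Bring the interface down: stop the transmit queue and close
 * the RNDIS filter device.
 */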
static int netvsc_close(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *device_obj = net_device_ctx->device_ctx;
        int ret;

        netif_stop_queue(net);

        ret = rndis_filter_close(device_obj);
        if (ret != 0)
                DPRINT_ERR(NETVSC_DRV, "unable to close device (ret %d).", ret);

        return ret;
}

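/*
 * netvsc_xmit_completion - Send-completion handler: free the netvsc packet,
 * release the originating skb, return its pages to the available pool and
 * wake the queue once the high watermark is reached again.
 */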
static void netvsc_xmit_completion(void *context)
{
        struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
        struct sk_buff *skb = (struct sk_buff *)
                (unsigned long)packet->completion.send.send_completion_tid;

        kfree(packet);

        if (skb) {
                struct net_device *net = skb->dev;
                struct net_device_context *net_device_ctx = netdev_priv(net);
                unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;

                dev_kfree_skb_any(skb);

                net_device_ctx->avail += num_pages;
                if (net_device_ctx->avail >= PACKET_PAGES_HIWATER)
                        netif_wake_queue(net);
        }
}

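/*
 * netvsc_start_xmit - Transmit path: describe the skb (linear data plus page
 * fragments) as a list of page buffers inside an hv_netvsc_packet and hand it
 * to the netvsc layer. netvsc_xmit_completion() runs when the host is done
 * with the packet. Returns NETDEV_TX_BUSY while too few page slots are free.
 */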
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_driver *drv =
                drv_to_hv_drv(net_device_ctx->device_ctx->device.driver);
        struct netvsc_driver *net_drv_obj = drv->priv;
        struct hv_netvsc_packet *packet;
        int ret;
        unsigned int i, num_pages;

        DPRINT_DBG(NETVSC_DRV, "xmit packet - len %d data_len %d",
                   skb->len, skb->data_len);

        /* Add 1 for skb->data and additional one for RNDIS */
        num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
        if (num_pages > net_device_ctx->avail)
                return NETDEV_TX_BUSY;

        /* Allocate a netvsc packet based on # of frags. */
        packet = kzalloc(sizeof(struct hv_netvsc_packet) +
                         (num_pages * sizeof(struct hv_page_buffer)) +
                         net_drv_obj->req_ext_size, GFP_ATOMIC);
        if (!packet) {
                /* out of memory, silently drop packet */
                DPRINT_ERR(NETVSC_DRV, "unable to allocate hv_netvsc_packet");

                dev_kfree_skb(skb);
                net->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        packet->extension = (void *)(unsigned long)packet +
                            sizeof(struct hv_netvsc_packet) +
                            (num_pages * sizeof(struct hv_page_buffer));

        /* Setup the rndis header */
        packet->page_buf_cnt = num_pages;

        /* TODO: Flush all write buffers/ memory fence ??? */
        /* wmb(); */

        /* Initialize it from the skb */
        packet->total_data_buflen = skb->len;

        /* Start filling in the page buffers starting after RNDIS buffer. */
        packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
        packet->page_buf[1].offset
                = (unsigned long)skb->data & (PAGE_SIZE - 1);
        packet->page_buf[1].len = skb_headlen(skb);

        /* Additional fragments are after SKB data */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];

                packet->page_buf[i+2].pfn = page_to_pfn(f->page);
                packet->page_buf[i+2].offset = f->page_offset;
                packet->page_buf[i+2].len = f->size;
        }

        /* Set the completion routine */
        packet->completion.send.send_completion = netvsc_xmit_completion;
        packet->completion.send.send_completion_ctx = packet;
        packet->completion.send.send_completion_tid = (unsigned long)skb;

        ret = net_drv_obj->send(net_device_ctx->device_ctx, packet);
        if (ret == 0) {
                net->stats.tx_bytes += skb->len;
                net->stats.tx_packets++;

                DPRINT_DBG(NETVSC_DRV, "# of xmits %lu total size %lu",
                           net->stats.tx_packets,
                           net->stats.tx_bytes);

                net_device_ctx->avail -= num_pages;
                if (net_device_ctx->avail < PACKET_PAGES_LOWATER)
                        netif_stop_queue(net);
        } else {
                /* we are shutting down or bus overloaded, just drop packet */
                net->stats.tx_dropped++;
                netvsc_xmit_completion(packet);
        }

        return NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
static void netvsc_linkstatus_callback(struct hv_device *device_obj,
                                       unsigned int status)
{
        struct net_device *net = dev_get_drvdata(&device_obj->device);
        struct net_device_context *ndev_ctx;

        if (!net) {
                DPRINT_ERR(NETVSC_DRV, "got link status but net device "
                           "not initialized yet");
                return;
        }

        if (status == 1) {
                netif_carrier_on(net);
                netif_wake_queue(net);
                netif_notify_peers(net);
                ndev_ctx = netdev_priv(net);
                schedule_work(&ndev_ctx->work);
        } else {
                netif_carrier_off(net);
                netif_stop_queue(net);
        }
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
static int netvsc_recv_callback(struct hv_device *device_obj,
                                struct hv_netvsc_packet *packet)
{
        struct net_device *net = dev_get_drvdata(&device_obj->device);
        struct sk_buff *skb;
        void *data;
        int i;
        unsigned long flags;

        if (!net) {
                DPRINT_ERR(NETVSC_DRV, "got receive callback but net device "
                           "not initialized yet");
                return 0;
        }

        /* Allocate a skb - TODO direct I/O to pages? */
        skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
        if (unlikely(!skb)) {
                ++net->stats.rx_dropped;
                return 0;
        }

        /* for kmap_atomic */
        local_irq_save(flags);

        /*
         * Copy to skb. This copy is needed here since the memory pointed by
         * hv_netvsc_packet cannot be deallocated
         */
        for (i = 0; i < packet->page_buf_cnt; i++) {
                data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
                                   KM_IRQ1);
                data = (void *)(unsigned long)data +
                       packet->page_buf[i].offset;

                memcpy(skb_put(skb, packet->page_buf[i].len), data,
                       packet->page_buf[i].len);

                kunmap_atomic((void *)((unsigned long)data -
                              packet->page_buf[i].offset), KM_IRQ1);
        }

        local_irq_restore(flags);

        skb->protocol = eth_type_trans(skb, net);
        skb->ip_summed = CHECKSUM_NONE;

        net->stats.rx_packets++;
        net->stats.rx_bytes += skb->len;

        /*
         * Pass the skb back up. Network stack will deallocate the skb when it
         * is done.
         * TODO - use NAPI?
         */
        netif_rx(skb);

        DPRINT_DBG(NETVSC_DRV, "# of recvs %lu total size %lu",
                   net->stats.rx_packets, net->stats.rx_bytes);

        return 0;
}

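/* ethtool: report the driver name and version; there is no firmware version. */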
static void netvsc_get_drvinfo(struct net_device *net,
                               struct ethtool_drvinfo *info)
{
        strcpy(info->driver, "hv_netvsc");
        strcpy(info->version, HV_DRV_VERSION);
        strcpy(info->fw_version, "N/A");
}

static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo    = netvsc_get_drvinfo,
        .get_sg         = ethtool_op_get_sg,
        .set_sg         = ethtool_op_set_sg,
        .get_link       = ethtool_op_get_link,
};

static const struct net_device_ops device_ops = {
        .ndo_open =               netvsc_open,
        .ndo_stop =               netvsc_close,
        .ndo_start_xmit =         netvsc_start_xmit,
        .ndo_set_multicast_list = netvsc_set_multicast_list,
        .ndo_change_mtu =         eth_change_mtu,
        .ndo_validate_addr =      eth_validate_addr,
        .ndo_set_mac_address =    eth_mac_addr,
};

/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving the RNDIS_STATUS_MEDIA_CONNECT event. So,
 * issue another netif_notify_peers() from a scheduled work item; otherwise
 * the GARP packet will not be sent after quick migration, causing network
 * disconnection.
 */
static void netvsc_send_garp(struct work_struct *w)
{
        struct net_device_context *ndev_ctx;
        struct net_device *net;

        msleep(20);
        ndev_ctx = container_of(w, struct net_device_context, work);
        net = dev_get_drvdata(&ndev_ctx->device_ctx->device);
        netif_notify_peers(net);
}

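/*
 * netvsc_probe - Called when vmbus offers a new network device: allocate and
 * initialize the net_device, set up the GARP work item, add the device to
 * the netvsc layer and register it with the network stack.
 */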
static int netvsc_probe(struct device *device)
{
        struct hv_driver *drv =
                drv_to_hv_drv(device->driver);
        struct netvsc_driver *net_drv_obj = drv->priv;
        struct hv_device *device_obj = device_to_hv_device(device);
        struct net_device *net = NULL;
        struct net_device_context *net_device_ctx;
        struct netvsc_device_info device_info;
        int ret;

        if (!net_drv_obj->base.dev_add)
                return -1;

        net = alloc_etherdev(sizeof(struct net_device_context));
        if (!net)
                return -1;

        /* Set initial state */
        netif_carrier_off(net);

        net_device_ctx = netdev_priv(net);
        net_device_ctx->device_ctx = device_obj;
        net_device_ctx->avail = ring_size;
        dev_set_drvdata(device, net);
        INIT_WORK(&net_device_ctx->work, netvsc_send_garp);

        /* Notify the netvsc driver of the new device */
        ret = net_drv_obj->base.dev_add(device_obj, &device_info);
        if (ret != 0) {
                free_netdev(net);
                dev_set_drvdata(device, NULL);

                DPRINT_ERR(NETVSC_DRV, "unable to add netvsc device (ret %d)",
                           ret);
                return ret;
        }

        /*
         * If carrier is still off, i.e. we did not get a link status
         * callback, update it if necessary.
         *
         * FIXME: We should use an atomic or test/set instead to avoid getting
         * out of sync with the device's link status.
         */
        if (!netif_carrier_ok(net))
                if (!device_info.link_state)
                        netif_carrier_on(net);

        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

        net->netdev_ops = &device_ops;

        /* TODO: Add GSO and Checksum offload */
        net->features = NETIF_F_SG;

        SET_ETHTOOL_OPS(net, &ethtool_ops);
        SET_NETDEV_DEV(net, device);

        ret = register_netdev(net);
        if (ret != 0) {
                /* Remove the device and release the resource */
                net_drv_obj->base.dev_rm(device_obj);
                free_netdev(net);
        }

        return ret;
}

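/*
 * netvsc_remove - Tear down in roughly the reverse order of netvsc_probe():
 * stop transmits, unregister the net_device, remove the device from the
 * netvsc layer and free the net_device.
 */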
static int netvsc_remove(struct device *device)
{
        struct hv_driver *drv =
                drv_to_hv_drv(device->driver);
        struct netvsc_driver *net_drv_obj = drv->priv;
        struct hv_device *device_obj = device_to_hv_device(device);
        struct net_device *net = dev_get_drvdata(&device_obj->device);
        int ret;

        if (net == NULL) {
                DPRINT_INFO(NETVSC, "no net device to remove");
                return 0;
        }

        if (!net_drv_obj->base.dev_rm)
                return -1;

        /* Stop outbound asap */
        netif_stop_queue(net);
        /* netif_carrier_off(net); */

        unregister_netdev(net);

        /*
         * Call to the vsc driver to let it know that the device is being
         * removed
         */
        ret = net_drv_obj->base.dev_rm(device_obj);
        if (ret != 0) {
                /* TODO: */
                DPRINT_ERR(NETVSC, "unable to remove vsc device (ret %d)", ret);
        }

        free_netdev(net);
        return ret;
}

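/*
 * Helper for netvsc_drv_exit(): record the device handed in and stop the
 * driver_for_each_device() iteration after the first match.
 */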
static int netvsc_drv_exit_cb(struct device *dev, void *data)
{
        struct device **curr = (struct device **)data;

        *curr = dev;
        /* stop iterating */
        return 1;
}

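/*
 * netvsc_drv_exit - Unregister every device still bound to this driver, one
 * at a time, then run the driver's cleanup callback and unregister the
 * driver from the vmbus.
 */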
static void netvsc_drv_exit(void)
{
        struct netvsc_driver *netvsc_drv_obj = &g_netvsc_drv;
        struct hv_driver *drv = &g_netvsc_drv.base;
        struct device *current_dev;
        int ret;

        while (1) {
                current_dev = NULL;

                /* Get the device */
                ret = driver_for_each_device(&drv->driver, NULL,
                                             &current_dev, netvsc_drv_exit_cb);
                if (ret)
                        DPRINT_WARN(NETVSC_DRV,
                                    "driver_for_each_device returned %d", ret);

                if (current_dev == NULL)
                        break;

                /* Initiate removal from the top-down */
                DPRINT_INFO(NETVSC_DRV, "unregistering device (%p)...",
                            current_dev);

                device_unregister(current_dev);
        }

        if (netvsc_drv_obj->base.cleanup)
                netvsc_drv_obj->base.cleanup(&netvsc_drv_obj->base);

        vmbus_child_driver_unregister(&drv->driver);

        return;
}

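/*
 * netvsc_drv_init - Fill in the netvsc driver object (ring buffer size,
 * receive and link-status callbacks), let the lower layer complete its
 * initialization through drv_init, then register the driver with the vmbus.
 */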
static int netvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
{
        struct netvsc_driver *net_drv_obj = &g_netvsc_drv;
        struct hv_driver *drv = &g_netvsc_drv.base;
        int ret;

        net_drv_obj->ring_buf_size = ring_size * PAGE_SIZE;
        net_drv_obj->recv_cb = netvsc_recv_callback;
        net_drv_obj->link_status_change = netvsc_linkstatus_callback;
        drv->priv = net_drv_obj;

        /* Callback to client driver to complete the initialization */
        drv_init(&net_drv_obj->base);

        drv->driver.name = net_drv_obj->base.name;

        drv->driver.probe = netvsc_probe;
        drv->driver.remove = netvsc_remove;

        /* The driver belongs to vmbus */
        ret = vmbus_child_driver_register(&drv->driver);

        return ret;
}

static const struct dmi_system_id __initconst
hv_netvsc_dmi_table[] __maybe_unused = {
        {
                .ident = "Hyper-V",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
                        DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
                },
        },
        { },
};
MODULE_DEVICE_TABLE(dmi, hv_netvsc_dmi_table);

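/* Module entry point: only load on Hyper-V guests, as identified by DMI. */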
static int __init netvsc_init(void)
{
        DPRINT_INFO(NETVSC_DRV, "Netvsc initializing....");

        if (!dmi_check_system(hv_netvsc_dmi_table))
                return -ENODEV;

        return netvsc_drv_init(netvsc_initialize);
}

static void __exit netvsc_exit(void)
{
        netvsc_drv_exit();
}

static const struct pci_device_id __initconst
hv_netvsc_pci_table[] __maybe_unused = {
        { PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
        { 0 }
};
MODULE_DEVICE_TABLE(pci, hv_netvsc_pci_table);

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_init);
module_exit(netvsc_exit);