/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv.h"
#include "hyperv_net.h"
struct net_device_context {
        /* point back to our device context */
        struct hv_device *device_ctx;
        atomic_t avail;
        struct delayed_work dwork;
};
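/*
 * avail tracks free page slots for outbound packets: it starts at
 * ring_size, is charged num_pages on every transmit, and is refunded by
 * the send-completion callback. The watermarks below stop and wake the
 * tx queue around it.
 */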
#define PACKET_PAGES_LOWATER  8
/* Need this many pages to handle worst case fragmented packet */
#define PACKET_PAGES_HIWATER  (MAX_SKB_FRAGS + 2)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
/* no-op so the netdev core doesn't return -EINVAL when modifying the
 * multicast address list in SIOCADDMULTI. hv is setup to get all multicast
 * when it calls RndisFilterOnOpen() */
static void netvsc_set_multicast_list(struct net_device *net)
{
}
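/* ndo_open: open the RNDIS filter and start the tx queue, but only if the
 * host has already signalled link-up (carrier on). */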
static int netvsc_open(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *device_obj = net_device_ctx->device_ctx;
        int ret = 0;

        if (netif_carrier_ok(net)) {
                /* Open up the device */
                ret = rndis_filter_open(device_obj);
                if (ret != 0) {
                        netdev_err(net, "unable to open device (ret %d).\n",
                                   ret);
                        return ret;
                }

                netif_start_queue(net);
        } else {
                netdev_err(net, "unable to open device...link is down.\n");
        }

        return ret;
}
static int netvsc_close(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *device_obj = net_device_ctx->device_ctx;
        int ret;

        netif_stop_queue(net);

        ret = rndis_filter_close(device_obj);
        if (ret != 0)
                netdev_err(net, "unable to close device (ret %d).\n", ret);

        return ret;
}
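/*
 * Send-completion callback: free the hv_netvsc_packet and the originating
 * skb, refund the page slots they held, and wake the tx queue once the
 * high watermark is reached again.
 */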
static void netvsc_xmit_completion(void *context)
{
        struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
        struct sk_buff *skb = (struct sk_buff *)
                (unsigned long)packet->completion.send.send_completion_tid;

        kfree(packet);

        if (skb) {
                struct net_device *net = skb->dev;
                struct net_device_context *net_device_ctx = netdev_priv(net);
                unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;

                dev_kfree_skb_any(skb);

                atomic_add(num_pages, &net_device_ctx->avail);
                if (atomic_read(&net_device_ctx->avail) >=
                                PACKET_PAGES_HIWATER)
                        netif_wake_queue(net);
        }
}
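/*
 * ndo_start_xmit: describe the skb to the host as a list of page buffers.
 * Slot 0 is left for the RNDIS message (filled in by the RNDIS filter),
 * slot 1 covers the linear skb data, and slots 2..n the page fragments.
 */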
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_netvsc_packet *packet;
        int ret;
        unsigned int i, num_pages;

        /* Add 1 for skb->data and additional one for RNDIS */
        num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
        if (num_pages > atomic_read(&net_device_ctx->avail))
                return NETDEV_TX_BUSY;
        /* Allocate a netvsc packet based on # of frags. */
        packet = kzalloc(sizeof(struct hv_netvsc_packet) +
                         (num_pages * sizeof(struct hv_page_buffer)) +
                         sizeof(struct rndis_filter_packet), GFP_ATOMIC);
        if (!packet) {
                /* out of memory, silently drop packet */
                netdev_err(net, "unable to allocate hv_netvsc_packet\n");

                dev_kfree_skb(skb);
                net->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        packet->extension = (void *)(unsigned long)packet +
                                sizeof(struct hv_netvsc_packet) +
                                (num_pages * sizeof(struct hv_page_buffer));
        /* Setup the rndis header */
        packet->page_buf_cnt = num_pages;

        /* Initialize it from the skb */
        packet->total_data_buflen = skb->len;

        /* Start filling in the page buffers starting after RNDIS buffer. */
        packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
        packet->page_buf[1].offset =
                (unsigned long)skb->data & (PAGE_SIZE - 1);
        packet->page_buf[1].len = skb_headlen(skb);

        /* Additional fragments are after SKB data */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];

                packet->page_buf[i+2].pfn = page_to_pfn(f->page);
                packet->page_buf[i+2].offset = f->page_offset;
                packet->page_buf[i+2].len = f->size;
        }
        /* Set the completion routine */
        packet->completion.send.send_completion = netvsc_xmit_completion;
        packet->completion.send.send_completion_ctx = packet;
        packet->completion.send.send_completion_tid = (unsigned long)skb;

        ret = rndis_filter_send(net_device_ctx->device_ctx,
                                packet);
        if (ret == 0) {
                net->stats.tx_bytes += skb->len;
                net->stats.tx_packets++;

                atomic_sub(num_pages, &net_device_ctx->avail);
                if (atomic_read(&net_device_ctx->avail) <
                                PACKET_PAGES_LOWATER)
                        netif_stop_queue(net);
        } else {
                /* we are shutting down or bus overloaded, just drop packet */
                net->stats.tx_dropped++;
                netvsc_xmit_completion(packet);
        }

        return NETDEV_TX_OK;
}
/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
                                unsigned int status)
{
        struct net_device *net = dev_get_drvdata(&device_obj->device);
        struct net_device_context *ndev_ctx;

        if (!net) {
                /* net is NULL here, so log against the hv_device instead */
                dev_err(&device_obj->device, "got link status but net device "
                        "not initialized yet\n");
                return;
        }

        if (status == 1) {
                netif_carrier_on(net);
                netif_wake_queue(net);
                netif_notify_peers(net);
                ndev_ctx = netdev_priv(net);
                schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
        } else {
                netif_carrier_off(net);
                netif_stop_queue(net);
        }
}
/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
                                struct hv_netvsc_packet *packet)
{
        struct net_device *net = dev_get_drvdata(&device_obj->device);
        struct sk_buff *skb;
        void *data;
        int i;
        unsigned long flags;

        if (!net) {
                /* net is NULL here, so log against the hv_device instead */
                dev_err(&device_obj->device, "got receive callback but net "
                        "device not initialized yet\n");
                return 0;
        }

        /* Allocate a skb - TODO direct I/O to pages? */
        skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
        if (unlikely(!skb)) {
                ++net->stats.rx_dropped;
                return 0;
        }
        /* for kmap_atomic */
        local_irq_save(flags);

        /*
         * Copy to skb. This copy is needed here since the memory pointed by
         * hv_netvsc_packet cannot be deallocated.
         */
        for (i = 0; i < packet->page_buf_cnt; i++) {
                data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
                                   KM_IRQ1);
                data = (void *)(unsigned long)data +
                                packet->page_buf[i].offset;

                memcpy(skb_put(skb, packet->page_buf[i].len), data,
                       packet->page_buf[i].len);

                kunmap_atomic((void *)((unsigned long)data -
                                       packet->page_buf[i].offset), KM_IRQ1);
        }

        local_irq_restore(flags);
        skb->protocol = eth_type_trans(skb, net);
        skb->ip_summed = CHECKSUM_NONE;

        net->stats.rx_packets++;
        net->stats.rx_bytes += skb->len;

        /*
         * Pass the skb back up. Network stack will deallocate the skb when it
         * is done.
         * TODO - use NAPI?
         */
        netif_rx(skb);

        return 0;
}
static void netvsc_get_drvinfo(struct net_device *net,
                               struct ethtool_drvinfo *info)
{
        strcpy(info->driver, "hv_netvsc");
        strcpy(info->version, HV_DRV_VERSION);
        strcpy(info->fw_version, "N/A");
}
static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo    = netvsc_get_drvinfo,
        .get_link       = ethtool_op_get_link,
};
static const struct net_device_ops device_ops = {
        .ndo_open =                     netvsc_open,
        .ndo_stop =                     netvsc_close,
        .ndo_start_xmit =               netvsc_start_xmit,
        .ndo_set_multicast_list =       netvsc_set_multicast_list,
        .ndo_change_mtu =               eth_change_mtu,
        .ndo_validate_addr =            eth_validate_addr,
        .ndo_set_mac_address =          eth_mac_addr,
};
/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
 * another netif_notify_peers() into a delayed work, otherwise GARP packet
 * will not be sent after quick migration, and cause network disconnection.
 */
static void netvsc_send_garp(struct work_struct *w)
{
        struct net_device_context *ndev_ctx;
        struct net_device *net;

        ndev_ctx = container_of(w, struct net_device_context, dwork.work);
        net = dev_get_drvdata(&ndev_ctx->device_ctx->device);
        netif_notify_peers(net);
}
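/*
 * Probe a new vmbus network device: allocate and initialize the
 * net_device, hand the device to the RNDIS filter (which also reports the
 * MAC address), and register with the network core.
 */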
static int netvsc_probe(struct hv_device *dev)
{
        struct net_device *net = NULL;
        struct net_device_context *net_device_ctx;
        struct netvsc_device_info device_info;
        int ret;

        net = alloc_etherdev(sizeof(struct net_device_context));
        if (!net)
                return -ENOMEM;

        /* Set initial state */
        netif_carrier_off(net);

        net_device_ctx = netdev_priv(net);
        net_device_ctx->device_ctx = dev;
        atomic_set(&net_device_ctx->avail, ring_size);
        dev_set_drvdata(&dev->device, net);
        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);

        /* Notify the netvsc driver of the new device */
        device_info.ring_size = ring_size;
        ret = rndis_filter_device_add(dev, &device_info);
        if (ret != 0) {
                /* log before freeing the net_device */
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
                dev_set_drvdata(&dev->device, NULL);
                free_netdev(net);
                return ret;
        }

        netif_carrier_on(net);

        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

        net->netdev_ops = &device_ops;

        /* TODO: Add GSO and Checksum offload */
        net->hw_features = NETIF_F_SG;
        net->features = NETIF_F_SG;

        SET_ETHTOOL_OPS(net, &ethtool_ops);
        SET_NETDEV_DEV(net, &dev->device);

        ret = register_netdev(net);
        if (ret != 0) {
                /* Remove the device and release the resource */
                rndis_filter_device_remove(dev);
                free_netdev(net);
        }

        return ret;
}
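/* Teardown: flush the delayed GARP work, stop outbound traffic, and
 * unregister from both the network core and the RNDIS filter. */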
static int netvsc_remove(struct hv_device *dev)
{
        struct net_device *net = dev_get_drvdata(&dev->device);
        struct net_device_context *ndev_ctx;

        if (net == NULL) {
                dev_err(&dev->device, "No net device to remove\n");
                return 0;
        }

        ndev_ctx = netdev_priv(net);
        cancel_delayed_work_sync(&ndev_ctx->dwork);

        /* Stop outbound asap */
        netif_stop_queue(net);

        unregister_netdev(net);

        /*
         * Call to the vsc driver to let it know that the device is being
         * removed
         */
        rndis_filter_device_remove(dev);

        free_netdev(net);
        return 0;
}
/* The one and only one */
static struct hv_driver netvsc_drv = {
        .probe = netvsc_probe,
        .remove = netvsc_remove,
};
static void __exit netvsc_drv_exit(void)
{
        vmbus_child_driver_unregister(&netvsc_drv.driver);
}
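/*
 * We use a DMI table to decide whether to autoload this driver. This is
 * needed by distro tooling to determine whether the hyperv drivers should
 * be installed and/or configured.
 */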
static const struct dmi_system_id __initconst
hv_netvsc_dmi_table[] __maybe_unused = {
        {
                .ident = "Hyper-V",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
                        DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
                },
        },
        { },
};
MODULE_DEVICE_TABLE(dmi, hv_netvsc_dmi_table);
static int __init netvsc_drv_init(void)
{
        struct hv_driver *drv = &netvsc_drv;
        int ret;

        pr_info("initializing....");

        if (!dmi_check_system(hv_netvsc_dmi_table))
                return -ENODEV;

        /* Callback to client driver to complete the initialization */
        netvsc_initialize(drv);

        drv->driver.name = drv->name;

        /* The driver belongs to vmbus */
        ret = vmbus_child_driver_register(&drv->driver);

        return ret;
}
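/*
 * Like the DMI table above, this PCI table exists only to generate module
 * aliases so distro tooling can autoload/configure the driver; the device
 * itself is never driven through PCI.
 */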
static const struct pci_device_id __initconst
hv_netvsc_pci_table[] __maybe_unused = {
        { PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
        { 0 }
};
MODULE_DEVICE_TABLE(pci, hv_netvsc_pci_table);
MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);