/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

#include <net/route.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN		64
#define LINKCHANGE_INT		(2 * HZ)
#define NETVSC_HW_FEATURES	(NETIF_F_RXCSUM | \
				 NETIF_F_SG | \
				 NETIF_F_TSO)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
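
/* Usage sketch (assumption, not taken from this file): the ring size is
 * fixed at module load time, e.g. "modprobe hv_netvsc ring_size=256";
 * netvsc_drv_init() below raises any value under RING_SIZE_MIN and logs
 * the adjustment.
 */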

static int max_num_vrss_chns = 8;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
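
/* Deferred rx-mode work: runs in process context so the blocking RNDIS
 * control message can be issued; scheduled from netvsc_set_multicast_list().
 */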
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct hv_device *device_obj = ndevctx->device_ctx;
	struct net_device *ndev = hv_get_drvdata(device_obj);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct rndis_device *rdev;

	if (!nvdev)
		return;
	rdev = nvdev->extension;
	if (!rdev)
		return;

	if (ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}
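
/* ndo_open: bring the RNDIS filter up, then start the tx queues; carrier
 * state follows the link state last reported by the host.
 */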
static int netvsc_open(struct net_device *net)
{
	struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}
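
/* ndo_stop: close the RNDIS filter, then poll each VMBus channel's ring
 * buffers until both directions drain, with bounded exponential backoff.
 */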
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);
			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);
			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);
		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   u32 pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}
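
/* Queue selection: hash the flow and map it through the host-supplied
 * VRSS send indirection table onto a real tx queue.
 */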
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
	u32 hash;
	u16 q_idx;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	hash = skb_get_hash(skb);
	q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
		ndev->real_num_tx_queues;

	if (!nvsc_dev->chn_table[q_idx])
		q_idx = 0;

	return q_idx;
}
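
/* Translate a (page, offset, len) region into hv_page_buffer entries,
 * one per PFN touched; returns the number of slots consumed.
 */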
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}
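
/* Build the scatter list for one send: RNDIS header first, then the skb
 * linear data, then each page fragment; returns total slots used.
 */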
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer **page_buf)
{
	struct hv_page_buffer *pb = *page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}
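
/* Worst-case page-buffer slots needed for an skb: linear area plus all
 * fragments, counting partial pages at both ends.
 */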
static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
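
/* Classify the packet as IPv4/IPv6 x TCP/UDP for offload setup and
 * report the transport header offset.
 */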
static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}
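
/* ndo_start_xmit: build the RNDIS message (hash, VLAN, checksum or LSO
 * per-packet info) in the skb headroom and hand it to netvsc_send();
 * -EAGAIN from a full ring maps to NETDEV_TX_BUSY so the stack requeues.
 */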
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	bool linear = false;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	int hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;
	struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

check_size:
	skb_length = skb->len;
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
		net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
				      num_data_pgs, skb->len);
		ret = -EFAULT;
		goto drop;
	} else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		if (skb_linearize(skb)) {
			net_alert_ratelimited("failed to linearize skb\n");
			ret = -ENOMEM;
			goto drop;
		}
		linear = true;
		goto check_size;
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret) {
		netdev_err(net, "unable to alloc hv_netvsc_packet\n");
		ret = -ENOMEM;
		goto drop;
	}
	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	isvlan = skb->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
							 ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

do_lso:
	rndis_msg_size += NDIS_LSO_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
			    TCP_LARGESEND_PKTINFO);

	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
						ppi->ppi_offset);

	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
	if (net_trans_info & (INFO_IPV4 << 16)) {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);
	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);

	if (likely(ret == 0)) {
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets++;
		tx_stats->bytes += skb_length;
		u64_stats_update_end(&tx_stats->syncp);
		return NETDEV_TX_OK;
	}

drop:
	if (ret != -EAGAIN) {
		dev_kfree_skb_any(skb);
		net->stats.tx_dropped++;
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Handle link change statuses only */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	net = hv_get_drvdata(device_obj);

	if (!net || net->reg_state != NETREG_REGISTERED)
		return;

	ndev_ctx = netdev_priv(net);

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}
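
/* Copy a received buffer into a freshly allocated skb; the host-owned
 * receive buffer cannot be handed up the stack directly.
 */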
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct hv_netvsc_packet *packet,
					     struct ndis_tcp_ip_checksum_info *csum_info,
					     void *data, u16 vlan_tci)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);

	return skb;
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
			 struct hv_netvsc_packet *packet,
			 void **data,
			 struct ndis_tcp_ip_checksum_info *csum_info,
			 struct vmbus_channel *channel,
			 u16 vlan_tci)
{
	struct net_device *net = hv_get_drvdata(device_obj);
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct sk_buff *skb;
	struct sk_buff *vf_skb;
	struct netvsc_stats *rx_stats;
	u32 bytes_recvd = packet->total_data_buflen;
	int ret = 0;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	if (READ_ONCE(net_device_ctx->vf_inject)) {
		atomic_inc(&net_device_ctx->vf_use_cnt);
		if (!READ_ONCE(net_device_ctx->vf_inject)) {
			/*
			 * We raced; just move on.
			 */
			atomic_dec(&net_device_ctx->vf_use_cnt);
			goto vf_injection_done;
		}

		/*
		 * Inject this packet into the VF interface.
		 * On Hyper-V, multicast and broadcast packets
		 * are only delivered on the synthetic interface
		 * (after subjecting these to policy filters on
		 * the host). Deliver these via the VF interface
		 * in the guest.
		 */
		vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
					       packet, csum_info, *data,
					       vlan_tci);
		if (vf_skb != NULL) {
			++net_device_ctx->vf_netdev->stats.rx_packets;
			net_device_ctx->vf_netdev->stats.rx_bytes +=
				bytes_recvd;
			netif_receive_skb(vf_skb);
		} else {
			++net->stats.rx_dropped;
			ret = NVSP_STAT_FAIL;
		}
		atomic_dec(&net_device_ctx->vf_use_cnt);
		return ret;
	}

vf_injection_done:
	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		return NVSP_STAT_FAIL;
	}
	skb_record_rx_queue(skb, channel->
			    offermsg.offer.sub_channel_index);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += packet->total_data_buflen;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}
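
/* ethtool -L: changing the channel count requires tearing down and
 * re-adding the RNDIS device; on failure, retry once with the original
 * channel count (the "recovering" path below).
 */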
static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	struct netvsc_device_info device_info;
	u32 num_chn;
	u32 max_chn;
	int ret = 0;
	bool recovering = false;

	if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	num_chn = nvdev->num_chn;
	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
		pr_info("vRSS unsupported before NVSP Version 5\n");
		return -EINVAL;
	}

	/* We do not support rx, tx, or other */
	if (!channels ||
	    channels->rx_count ||
	    channels->tx_count ||
	    channels->other_count ||
	    (channels->combined_count < 1))
		return -EINVAL;

	if (channels->combined_count > max_chn) {
		pr_info("combined channels too high, using %d\n", max_chn);
		channels->combined_count = max_chn;
	}

	ret = netvsc_close(net);
	if (ret)
		goto out;

do_set:
	net_device_ctx->start_remove = true;
	rndis_filter_device_remove(dev);

	nvdev->num_chn = channels->combined_count;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret) {
		if (recovering) {
			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	nvdev = net_device_ctx->nvdev;

	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

out:
	netvsc_open(net);
	net_device_ctx->start_remove = false;
	/* We may have missed link change notifications */
	schedule_delayed_work(&net_device_ctx->dwork, 0);

	return ret;

recover:
	/* If the above failed, we attempt to recover through the same
	 * process but with the original number of channels.
	 */
	netdev_err(net, "could not set channels, recovering\n");
	recovering = true;
	channels->combined_count = num_chn;
	goto do_set;
}

static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
{
	struct ethtool_cmd diff1 = *cmd;
	struct ethtool_cmd diff2 = {};

	ethtool_cmd_speed_set(&diff1, 0);
	diff1.duplex = 0;
	/* advertising and cmd are usually set */
	diff1.advertising = 0;
	diff1.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_UNKNOWN;
}

static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ethtool_cmd_speed_set(cmd, ndc->speed);
	cmd->duplex = ndc->duplex;
	cmd->port = PORT_OTHER;

	return 0;
}

static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = ethtool_cmd_speed(cmd);
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->duplex;

	return 0;
}
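
/* MTU changes also require a full RNDIS device remove/add cycle; the
 * upper bound depends on the negotiated NVSP protocol version.
 */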
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;
	u32 num_chn;
	int ret = 0;

	if (ndevctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	if (mtu < NETVSC_MTU_MIN || mtu > limit)
		return -EINVAL;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	num_chn = nvdev->num_chn;

	ndevctx->start_remove = true;
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = num_chn;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);
	ndevctx->start_remove = false;

	/* We may have missed link change notifications */
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
						    struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
							    cpu);
		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
							    cpu);
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		t->tx_bytes	+= tx_bytes;
		t->tx_packets	+= tx_packets;
		t->rx_bytes	+= rx_bytes;
		t->rx_packets	+= rx_packets;
	}

	t->tx_dropped	= net->stats.tx_dropped;
	t->tx_errors	= net->stats.tx_errors;

	t->rx_dropped	= net->stats.rx_dropped;
	t->rx_errors	= net->stats.rx_errors;

	return t;
}
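
/* Set the MAC locally first, then ask the host via RNDIS; roll the
 * local address back if the host rejects it.
 */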
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(ndev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_settings	= netvsc_get_settings,
	.set_settings	= netvsc_set_settings,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	rtnl_lock();
	if (ndev_ctx->start_remove)
		goto out_unlock;

	net_device = ndev_ctx->nvdev;
	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static void netvsc_free_netdev(struct net_device *netdev)
{
	struct net_device_context *net_device_ctx = netdev_priv(netdev);

	free_percpu(net_device_ctx->tx_stats);
	free_percpu(net_device_ctx->rx_stats);
	free_netdev(netdev);
}
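
/* Find the netvsc net_device with the given MAC; the netdev_ops pointer
 * comparison filters out non-netvsc devices carrying a cloned address.
 */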
static struct net_device *get_netvsc_net_device(char *mac)
{
	struct net_device *dev, *found = NULL;
	int rtnl_locked;

	rtnl_locked = rtnl_trylock();

	for_each_netdev(&init_net, dev) {
		if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
			if (dev->netdev_ops != &device_ops)
				continue;
			found = dev;
			break;
		}
	}
	if (rtnl_locked)
		rtnl_unlock();

	return found;
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;

	if (eth_ops == NULL || eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;
	if (!netvsc_dev || net_device_ctx->vf_netdev)
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
	/*
	 * Take a reference on the module.
	 */
	try_module_get(THIS_MODULE);
	net_device_ctx->vf_netdev = vf_netdev;

	return NOTIFY_OK;
}

static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
{
	net_device_ctx->vf_inject = true;
}

static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
{
	net_device_ctx->vf_inject = false;

	/* Wait for currently active users to drain out. */
	while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
		udelay(50);
}

static int netvsc_vf_up(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
	struct net_device_context *net_device_ctx;

	if (eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	if (!netvsc_dev || !net_device_ctx->vf_netdev)
		return NOTIFY_DONE;

	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
	netvsc_inject_enable(net_device_ctx);

	/*
	 * Open the device before switching data path.
	 */
	rndis_filter_open(netvsc_dev);

	/*
	 * notify the host to switch the data path.
	 */
	netvsc_switch_datapath(ndev, true);
	netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);

	netif_carrier_off(ndev);

	/* Now notify peers through VF device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);

	return NOTIFY_OK;
}

static int netvsc_vf_down(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;

	if (eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	if (!netvsc_dev || !net_device_ctx->vf_netdev)
		return NOTIFY_DONE;

	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
	netvsc_inject_disable(net_device_ctx);
	netvsc_switch_datapath(ndev, false);
	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
	rndis_filter_close(netvsc_dev);
	netif_carrier_on(ndev);

	/* Now notify peers through netvsc device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
	struct net_device_context *net_device_ctx;

	if (eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;
	if (!netvsc_dev || !net_device_ctx->vf_netdev)
		return NOTIFY_DONE;
	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
	netvsc_inject_disable(net_device_ctx);
	net_device_ctx->vf_netdev = NULL;
	module_put(THIS_MODULE);

	return NOTIFY_OK;
}
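
/* Probe: allocate the multi-queue net_device, set up per-cpu stats and
 * the reconfig machinery, add the RNDIS device to obtain the MAC and
 * channel count from the host, then register with the network stack.
 */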
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->tx_stats) {
		free_netdev(net);
		return -ENOMEM;
	}
	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->rx_stats) {
		free_percpu(net_device_ctx->tx_stats);
		free_netdev(net);
		return -ENOMEM;
	}

	hv_set_drvdata(dev, net);

	net_device_ctx->start_remove = false;

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);

	atomic_set(&net_device_ctx->vf_use_cnt, 0);
	net_device_ctx->vf_netdev = NULL;
	net_device_ctx->vf_inject = false;

	net->netdev_ops = &device_ops;

	net->hw_features = NETVSC_HW_FEATURES;
	net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		netvsc_free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = net_device_ctx->nvdev;
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	netvsc_init_settings(net);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		netvsc_free_netdev(net);
	}

	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net = hv_get_drvdata(dev);

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);
	net_device = ndev_ctx->nvdev;

	/* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
	 * removing the device.
	 */
	rtnl_lock();
	ndev_ctx->start_remove = true;
	rtnl_unlock();

	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed.
	 */
	rndis_filter_device_remove(dev);

	hv_set_drvdata(dev, NULL);

	netvsc_free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Avoid Vlan dev with same MAC registering as VF */
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if (event_dev->priv_flags & IFF_BONDING &&
	    event_dev->flags & IFF_MASTER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
		return netvsc_vf_up(event_dev);
	case NETDEV_DOWN:
		return netvsc_vf_down(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);