/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/route.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"
#define RING_SIZE_MIN		64
#define RETRY_US_LO		5000
#define RETRY_US_HI		10000
#define RETRY_MAX		2000	/* >10 sec */

#define LINKCHANGE_INT		(2 * HZ)
#define VF_TAKEOVER_INT		(HZ / 10)
static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);
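
/* Propagate promiscuous/allmulti flag changes on the synthetic device to the
 * accelerated (VF) slave device, if one is bound.
 */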
static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}
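
/* Sync the unicast/multicast address lists to the VF (under RCU) and ask the
 * RNDIS filter to update the host-side receive filter.
 */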
static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}
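
/* Re-enable transmit on all queues once the device is ready to send again. */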
static void netvsc_tx_enable(struct netvsc_device *nvscdev,
			     struct net_device *ndev)
{
	nvscdev->tx_disable = false;
	virt_wmb(); /* ensure queue wake up mechanism is on */

	netif_tx_wake_all_queues(ndev);
}
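
/* ndo_open: open the RNDIS filter, bring the carrier up if the host reports
 * link up, and transparently open the VF slave device when one is bound.
 */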
static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);
	}

	if (vf_netdev) {
		/* Setting synthetic device up transparently sets
		 * slave as up. If open fails, then slave will be
		 * still be offline (and not used).
		 */
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}
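
/* Poll the inbound and outbound ring buffers of every channel until they
 * drain, sleeping between retries; gives up after RETRY_MAX attempts.
 */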
static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}
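
/* Stop transmit on all queues; the counterpart of netvsc_tx_enable(). */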
static void netvsc_tx_disable(struct netvsc_device *nvscdev,
			      struct net_device *ndev)
{
	if (nvscdev) {
		nvscdev->tx_disable = true;
		virt_wmb(); /* ensure txq will not wake up after stop */
	}

	netif_tx_disable(ndev);
}
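
/* ndo_stop: disable transmit, close the RNDIS filter, wait for the ring
 * buffers to drain, and close the VF slave device if present.
 */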
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netvsc_tx_disable(nvdev, net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}
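
/* Append a per-packet-info (PPI) record of the given type to the RNDIS
 * message and return a pointer to its payload area.
 */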
static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}
256 /* Azure hosts don't support non-TCP port numbers in hashing for fragmented
257 * packets. We can use ethtool to change UDP hash level when necessary.
259 static inline u32
netvsc_get_hash(
261 const struct net_device_context
*ndc
)
263 struct flow_keys flow
;
264 u32 hash
, pkt_proto
= 0;
265 static u32 hashrnd __read_mostly
;
267 net_get_random_once(&hashrnd
, sizeof(hashrnd
));
269 if (!skb_flow_dissect_flow_keys(skb
, &flow
, 0))
272 switch (flow
.basic
.ip_proto
) {
274 if (flow
.basic
.n_proto
== htons(ETH_P_IP
))
275 pkt_proto
= HV_TCP4_L4HASH
;
276 else if (flow
.basic
.n_proto
== htons(ETH_P_IPV6
))
277 pkt_proto
= HV_TCP6_L4HASH
;
282 if (flow
.basic
.n_proto
== htons(ETH_P_IP
))
283 pkt_proto
= HV_UDP4_L4HASH
;
284 else if (flow
.basic
.n_proto
== htons(ETH_P_IPV6
))
285 pkt_proto
= HV_UDP6_L4HASH
;
290 if (pkt_proto
& ndc
->l4_hash
) {
291 return skb_get_hash(skb
);
293 if (flow
.basic
.n_proto
== htons(ETH_P_IP
))
294 hash
= jhash2((u32
*)&flow
.addrs
.v4addrs
, 2, hashrnd
);
295 else if (flow
.basic
.n_proto
== htons(ETH_P_IPV6
))
296 hash
= jhash2((u32
*)&flow
.addrs
.v6addrs
, 8, hashrnd
);
300 __skb_set_sw_hash(skb
, hash
, false);
306 static inline int netvsc_get_tx_queue(struct net_device
*ndev
,
307 struct sk_buff
*skb
, int old_idx
)
309 const struct net_device_context
*ndc
= netdev_priv(ndev
);
310 struct sock
*sk
= skb
->sk
;
313 q_idx
= ndc
->tx_table
[netvsc_get_hash(skb
, ndc
) &
314 (VRSS_SEND_TAB_SIZE
- 1)];
316 /* If queue index changed record the new value */
317 if (q_idx
!= old_idx
&&
318 sk
&& sk_fullsock(sk
) && rcu_access_pointer(sk
->sk_dst_cache
))
319 sk_tx_queue_set(sk
, q_idx
);
325 * Select queue for transmit.
327 * If a valid queue has already been assigned, then use that.
328 * Otherwise compute tx queue based on hash and the send table.
330 * This is basically similar to default (__netdev_pick_tx) with the added step
331 * of using the host send_table when no other queue has been assigned.
333 * TODO support XPS - but get_xps_queue not exported
335 static u16
netvsc_pick_tx(struct net_device
*ndev
, struct sk_buff
*skb
)
337 int q_idx
= sk_tx_queue_get(skb
->sk
);
339 if (q_idx
< 0 || skb
->ooo_okay
|| q_idx
>= ndev
->real_num_tx_queues
) {
340 /* If forwarding a packet, we use the recorded queue when
341 * available for better cache locality.
343 if (skb_rx_queue_recorded(skb
))
344 q_idx
= skb_get_rx_queue(skb
);
346 q_idx
= netvsc_get_tx_queue(ndev
, skb
, q_idx
);
352 static u16
netvsc_select_queue(struct net_device
*ndev
, struct sk_buff
*skb
,
353 struct net_device
*sb_dev
,
354 select_queue_fallback_t fallback
)
356 struct net_device_context
*ndc
= netdev_priv(ndev
);
357 struct net_device
*vf_netdev
;
361 vf_netdev
= rcu_dereference(ndc
->vf_netdev
);
363 const struct net_device_ops
*vf_ops
= vf_netdev
->netdev_ops
;
365 if (vf_ops
->ndo_select_queue
)
366 txq
= vf_ops
->ndo_select_queue(vf_netdev
, skb
,
369 txq
= fallback(vf_netdev
, skb
, NULL
);
371 /* Record the queue selected by VF so that it can be
372 * used for common case where VF has more queues than
373 * the synthetic device.
375 qdisc_skb_cb(skb
)->slave_dev_queue_mapping
= txq
;
377 txq
= netvsc_pick_tx(ndev
, skb
);
381 while (unlikely(txq
>= ndev
->real_num_tx_queues
))
382 txq
-= ndev
->real_num_tx_queues
;
387 static u32
fill_pg_buf(struct page
*page
, u32 offset
, u32 len
,
388 struct hv_page_buffer
*pb
)
392 /* Deal with compund pages by ignoring unused part
395 page
+= (offset
>> PAGE_SHIFT
);
396 offset
&= ~PAGE_MASK
;
401 bytes
= PAGE_SIZE
- offset
;
404 pb
[j
].pfn
= page_to_pfn(page
);
405 pb
[j
].offset
= offset
;
411 if (offset
== PAGE_SIZE
&& len
) {
421 static u32
init_page_array(void *hdr
, u32 len
, struct sk_buff
*skb
,
422 struct hv_netvsc_packet
*packet
,
423 struct hv_page_buffer
*pb
)
426 char *data
= skb
->data
;
427 int frags
= skb_shinfo(skb
)->nr_frags
;
430 /* The packet is laid out thus:
431 * 1. hdr: RNDIS header and PPI
433 * 3. skb fragment data
435 slots_used
+= fill_pg_buf(virt_to_page(hdr
),
437 len
, &pb
[slots_used
]);
439 packet
->rmsg_size
= len
;
440 packet
->rmsg_pgcnt
= slots_used
;
442 slots_used
+= fill_pg_buf(virt_to_page(data
),
443 offset_in_page(data
),
444 skb_headlen(skb
), &pb
[slots_used
]);
446 for (i
= 0; i
< frags
; i
++) {
447 skb_frag_t
*frag
= skb_shinfo(skb
)->frags
+ i
;
449 slots_used
+= fill_pg_buf(skb_frag_page(frag
),
451 skb_frag_size(frag
), &pb
[slots_used
]);
456 static int count_skb_frag_slots(struct sk_buff
*skb
)
458 int i
, frags
= skb_shinfo(skb
)->nr_frags
;
461 for (i
= 0; i
< frags
; i
++) {
462 skb_frag_t
*frag
= skb_shinfo(skb
)->frags
+ i
;
463 unsigned long size
= skb_frag_size(frag
);
464 unsigned long offset
= frag
->page_offset
;
466 /* Skip unused frames from start of page */
467 offset
&= ~PAGE_MASK
;
468 pages
+= PFN_UP(offset
+ size
);
473 static int netvsc_get_slots(struct sk_buff
*skb
)
475 char *data
= skb
->data
;
476 unsigned int offset
= offset_in_page(data
);
477 unsigned int len
= skb_headlen(skb
);
481 slots
= DIV_ROUND_UP(offset
+ len
, PAGE_SIZE
);
482 frag_slots
= count_skb_frag_slots(skb
);
483 return slots
+ frag_slots
;
486 static u32
net_checksum_info(struct sk_buff
*skb
)
488 if (skb
->protocol
== htons(ETH_P_IP
)) {
489 struct iphdr
*ip
= ip_hdr(skb
);
491 if (ip
->protocol
== IPPROTO_TCP
)
492 return TRANSPORT_INFO_IPV4_TCP
;
493 else if (ip
->protocol
== IPPROTO_UDP
)
494 return TRANSPORT_INFO_IPV4_UDP
;
496 struct ipv6hdr
*ip6
= ipv6_hdr(skb
);
498 if (ip6
->nexthdr
== IPPROTO_TCP
)
499 return TRANSPORT_INFO_IPV6_TCP
;
500 else if (ip6
->nexthdr
== IPPROTO_UDP
)
501 return TRANSPORT_INFO_IPV6_UDP
;
504 return TRANSPORT_INFO_NOT_IP
;
507 /* Send skb on the slave VF device. */
508 static int netvsc_vf_xmit(struct net_device
*net
, struct net_device
*vf_netdev
,
511 struct net_device_context
*ndev_ctx
= netdev_priv(net
);
512 unsigned int len
= skb
->len
;
515 skb
->dev
= vf_netdev
;
516 skb
->queue_mapping
= qdisc_skb_cb(skb
)->slave_dev_queue_mapping
;
518 rc
= dev_queue_xmit(skb
);
519 if (likely(rc
== NET_XMIT_SUCCESS
|| rc
== NET_XMIT_CN
)) {
520 struct netvsc_vf_pcpu_stats
*pcpu_stats
521 = this_cpu_ptr(ndev_ctx
->vf_stats
);
523 u64_stats_update_begin(&pcpu_stats
->syncp
);
524 pcpu_stats
->tx_packets
++;
525 pcpu_stats
->tx_bytes
+= len
;
526 u64_stats_update_end(&pcpu_stats
->syncp
);
528 this_cpu_inc(ndev_ctx
->vf_stats
->tx_dropped
);
534 static int netvsc_start_xmit(struct sk_buff
*skb
, struct net_device
*net
)
536 struct net_device_context
*net_device_ctx
= netdev_priv(net
);
537 struct hv_netvsc_packet
*packet
= NULL
;
539 unsigned int num_data_pgs
;
540 struct rndis_message
*rndis_msg
;
541 struct net_device
*vf_netdev
;
544 struct hv_page_buffer pb
[MAX_PAGE_BUFFER_COUNT
];
546 /* if VF is present and up then redirect packets
547 * already called with rcu_read_lock_bh
549 vf_netdev
= rcu_dereference_bh(net_device_ctx
->vf_netdev
);
550 if (vf_netdev
&& netif_running(vf_netdev
) &&
551 !netpoll_tx_running(net
))
552 return netvsc_vf_xmit(net
, vf_netdev
, skb
);
554 /* We will atmost need two pages to describe the rndis
555 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
556 * of pages in a single packet. If skb is scattered around
557 * more pages we try linearizing it.
560 num_data_pgs
= netvsc_get_slots(skb
) + 2;
562 if (unlikely(num_data_pgs
> MAX_PAGE_BUFFER_COUNT
)) {
563 ++net_device_ctx
->eth_stats
.tx_scattered
;
565 if (skb_linearize(skb
))
568 num_data_pgs
= netvsc_get_slots(skb
) + 2;
569 if (num_data_pgs
> MAX_PAGE_BUFFER_COUNT
) {
570 ++net_device_ctx
->eth_stats
.tx_too_big
;
576 * Place the rndis header in the skb head room and
577 * the skb->cb will be used for hv_netvsc_packet
580 ret
= skb_cow_head(skb
, RNDIS_AND_PPI_SIZE
);
584 /* Use the skb control buffer for building up the packet */
585 BUILD_BUG_ON(sizeof(struct hv_netvsc_packet
) >
586 FIELD_SIZEOF(struct sk_buff
, cb
));
587 packet
= (struct hv_netvsc_packet
*)skb
->cb
;
589 packet
->q_idx
= skb_get_queue_mapping(skb
);
591 packet
->total_data_buflen
= skb
->len
;
592 packet
->total_bytes
= skb
->len
;
593 packet
->total_packets
= 1;
595 rndis_msg
= (struct rndis_message
*)skb
->head
;
597 /* Add the rndis header */
598 rndis_msg
->ndis_msg_type
= RNDIS_MSG_PACKET
;
599 rndis_msg
->msg_len
= packet
->total_data_buflen
;
601 rndis_msg
->msg
.pkt
= (struct rndis_packet
) {
602 .data_offset
= sizeof(struct rndis_packet
),
603 .data_len
= packet
->total_data_buflen
,
604 .per_pkt_info_offset
= sizeof(struct rndis_packet
),
607 rndis_msg_size
= RNDIS_MESSAGE_SIZE(struct rndis_packet
);
609 hash
= skb_get_hash_raw(skb
);
610 if (hash
!= 0 && net
->real_num_tx_queues
> 1) {
613 rndis_msg_size
+= NDIS_HASH_PPI_SIZE
;
614 hash_info
= init_ppi_data(rndis_msg
, NDIS_HASH_PPI_SIZE
,
619 if (skb_vlan_tag_present(skb
)) {
620 struct ndis_pkt_8021q_info
*vlan
;
622 rndis_msg_size
+= NDIS_VLAN_PPI_SIZE
;
623 vlan
= init_ppi_data(rndis_msg
, NDIS_VLAN_PPI_SIZE
,
627 vlan
->vlanid
= skb
->vlan_tci
& VLAN_VID_MASK
;
628 vlan
->pri
= (skb
->vlan_tci
& VLAN_PRIO_MASK
) >>
632 if (skb_is_gso(skb
)) {
633 struct ndis_tcp_lso_info
*lso_info
;
635 rndis_msg_size
+= NDIS_LSO_PPI_SIZE
;
636 lso_info
= init_ppi_data(rndis_msg
, NDIS_LSO_PPI_SIZE
,
637 TCP_LARGESEND_PKTINFO
);
640 lso_info
->lso_v2_transmit
.type
= NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE
;
641 if (skb
->protocol
== htons(ETH_P_IP
)) {
642 lso_info
->lso_v2_transmit
.ip_version
=
643 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4
;
644 ip_hdr(skb
)->tot_len
= 0;
645 ip_hdr(skb
)->check
= 0;
646 tcp_hdr(skb
)->check
=
647 ~csum_tcpudp_magic(ip_hdr(skb
)->saddr
,
648 ip_hdr(skb
)->daddr
, 0, IPPROTO_TCP
, 0);
650 lso_info
->lso_v2_transmit
.ip_version
=
651 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6
;
652 ipv6_hdr(skb
)->payload_len
= 0;
653 tcp_hdr(skb
)->check
=
654 ~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
655 &ipv6_hdr(skb
)->daddr
, 0, IPPROTO_TCP
, 0);
657 lso_info
->lso_v2_transmit
.tcp_header_offset
= skb_transport_offset(skb
);
658 lso_info
->lso_v2_transmit
.mss
= skb_shinfo(skb
)->gso_size
;
659 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
660 if (net_checksum_info(skb
) & net_device_ctx
->tx_checksum_mask
) {
661 struct ndis_tcp_ip_checksum_info
*csum_info
;
663 rndis_msg_size
+= NDIS_CSUM_PPI_SIZE
;
664 csum_info
= init_ppi_data(rndis_msg
, NDIS_CSUM_PPI_SIZE
,
665 TCPIP_CHKSUM_PKTINFO
);
667 csum_info
->value
= 0;
668 csum_info
->transmit
.tcp_header_offset
= skb_transport_offset(skb
);
670 if (skb
->protocol
== htons(ETH_P_IP
)) {
671 csum_info
->transmit
.is_ipv4
= 1;
673 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
674 csum_info
->transmit
.tcp_checksum
= 1;
676 csum_info
->transmit
.udp_checksum
= 1;
678 csum_info
->transmit
.is_ipv6
= 1;
680 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
681 csum_info
->transmit
.tcp_checksum
= 1;
683 csum_info
->transmit
.udp_checksum
= 1;
686 /* Can't do offload of this type of checksum */
687 if (skb_checksum_help(skb
))
692 /* Start filling in the page buffers with the rndis hdr */
693 rndis_msg
->msg_len
+= rndis_msg_size
;
694 packet
->total_data_buflen
= rndis_msg
->msg_len
;
695 packet
->page_buf_cnt
= init_page_array(rndis_msg
, rndis_msg_size
,
698 /* timestamp packet in software */
699 skb_tx_timestamp(skb
);
701 ret
= netvsc_send(net
, packet
, rndis_msg
, pb
, skb
);
702 if (likely(ret
== 0))
705 if (ret
== -EAGAIN
) {
706 ++net_device_ctx
->eth_stats
.tx_busy
;
707 return NETDEV_TX_BUSY
;
711 ++net_device_ctx
->eth_stats
.tx_no_space
;
714 dev_kfree_skb_any(skb
);
715 net
->stats
.tx_dropped
++;
720 ++net_device_ctx
->eth_stats
.tx_no_memory
;
725 * netvsc_linkstatus_callback - Link up/down notification
727 void netvsc_linkstatus_callback(struct net_device
*net
,
728 struct rndis_message
*resp
)
730 struct rndis_indicate_status
*indicate
= &resp
->msg
.indicate_status
;
731 struct net_device_context
*ndev_ctx
= netdev_priv(net
);
732 struct netvsc_reconfig
*event
;
735 /* Update the physical link speed when changing to another vSwitch */
736 if (indicate
->status
== RNDIS_STATUS_LINK_SPEED_CHANGE
) {
739 speed
= *(u32
*)((void *)indicate
740 + indicate
->status_buf_offset
) / 10000;
741 ndev_ctx
->speed
= speed
;
745 /* Handle these link change statuses below */
746 if (indicate
->status
!= RNDIS_STATUS_NETWORK_CHANGE
&&
747 indicate
->status
!= RNDIS_STATUS_MEDIA_CONNECT
&&
748 indicate
->status
!= RNDIS_STATUS_MEDIA_DISCONNECT
)
751 if (net
->reg_state
!= NETREG_REGISTERED
)
754 event
= kzalloc(sizeof(*event
), GFP_ATOMIC
);
757 event
->event
= indicate
->status
;
759 spin_lock_irqsave(&ndev_ctx
->lock
, flags
);
760 list_add_tail(&event
->list
, &ndev_ctx
->reconfig_events
);
761 spin_unlock_irqrestore(&ndev_ctx
->lock
, flags
);
763 schedule_delayed_work(&ndev_ctx
->dwork
, 0);
766 static void netvsc_comp_ipcsum(struct sk_buff
*skb
)
768 struct iphdr
*iph
= (struct iphdr
*)skb
->data
;
771 iph
->check
= ip_fast_csum(iph
, iph
->ihl
);
774 static struct sk_buff
*netvsc_alloc_recv_skb(struct net_device
*net
,
775 struct napi_struct
*napi
,
776 const struct ndis_tcp_ip_checksum_info
*csum_info
,
777 const struct ndis_pkt_8021q_info
*vlan
,
778 void *data
, u32 buflen
)
782 skb
= napi_alloc_skb(napi
, buflen
);
787 * Copy to skb. This copy is needed here since the memory pointed by
788 * hv_netvsc_packet cannot be deallocated
790 skb_put_data(skb
, data
, buflen
);
792 skb
->protocol
= eth_type_trans(skb
, net
);
794 /* skb is already created with CHECKSUM_NONE */
795 skb_checksum_none_assert(skb
);
797 /* Incoming packets may have IP header checksum verified by the host.
798 * They may not have IP header checksum computed after coalescing.
799 * We compute it here if the flags are set, because on Linux, the IP
800 * checksum is always checked.
802 if (csum_info
&& csum_info
->receive
.ip_checksum_value_invalid
&&
803 csum_info
->receive
.ip_checksum_succeeded
&&
804 skb
->protocol
== htons(ETH_P_IP
))
805 netvsc_comp_ipcsum(skb
);
807 /* Do L4 checksum offload if enabled and present. */
808 if (csum_info
&& (net
->features
& NETIF_F_RXCSUM
)) {
809 if (csum_info
->receive
.tcp_checksum_succeeded
||
810 csum_info
->receive
.udp_checksum_succeeded
)
811 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
815 u16 vlan_tci
= vlan
->vlanid
| (vlan
->pri
<< VLAN_PRIO_SHIFT
);
817 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
825 * netvsc_recv_callback - Callback when we receive a packet from the
826 * "wire" on the specified device.
828 int netvsc_recv_callback(struct net_device
*net
,
829 struct netvsc_device
*net_device
,
830 struct vmbus_channel
*channel
,
832 const struct ndis_tcp_ip_checksum_info
*csum_info
,
833 const struct ndis_pkt_8021q_info
*vlan
)
835 struct net_device_context
*net_device_ctx
= netdev_priv(net
);
836 u16 q_idx
= channel
->offermsg
.offer
.sub_channel_index
;
837 struct netvsc_channel
*nvchan
= &net_device
->chan_table
[q_idx
];
839 struct netvsc_stats
*rx_stats
;
841 if (net
->reg_state
!= NETREG_REGISTERED
)
842 return NVSP_STAT_FAIL
;
844 /* Allocate a skb - TODO direct I/O to pages? */
845 skb
= netvsc_alloc_recv_skb(net
, &nvchan
->napi
,
846 csum_info
, vlan
, data
, len
);
847 if (unlikely(!skb
)) {
848 ++net_device_ctx
->eth_stats
.rx_no_memory
;
849 return NVSP_STAT_FAIL
;
852 skb_record_rx_queue(skb
, q_idx
);
855 * Even if injecting the packet, record the statistics
856 * on the synthetic device because modifying the VF device
857 * statistics will not work correctly.
859 rx_stats
= &nvchan
->rx_stats
;
860 u64_stats_update_begin(&rx_stats
->syncp
);
862 rx_stats
->bytes
+= len
;
864 if (skb
->pkt_type
== PACKET_BROADCAST
)
865 ++rx_stats
->broadcast
;
866 else if (skb
->pkt_type
== PACKET_MULTICAST
)
867 ++rx_stats
->multicast
;
868 u64_stats_update_end(&rx_stats
->syncp
);
870 napi_gro_receive(&nvchan
->napi
, skb
);
871 return NVSP_STAT_SUCCESS
;
static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
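
/* Report the maximum and currently configured number of combined channels. */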
static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}
893 /* Alloc struct netvsc_device_info, and initialize it from either existing
894 * struct netvsc_device, or from default values.
896 static struct netvsc_device_info
*netvsc_devinfo_get
897 (struct netvsc_device
*nvdev
)
899 struct netvsc_device_info
*dev_info
;
901 dev_info
= kzalloc(sizeof(*dev_info
), GFP_ATOMIC
);
907 dev_info
->num_chn
= nvdev
->num_chn
;
908 dev_info
->send_sections
= nvdev
->send_section_cnt
;
909 dev_info
->send_section_size
= nvdev
->send_section_size
;
910 dev_info
->recv_sections
= nvdev
->recv_section_cnt
;
911 dev_info
->recv_section_size
= nvdev
->recv_section_size
;
913 memcpy(dev_info
->rss_key
, nvdev
->extension
->rss_key
,
916 dev_info
->num_chn
= VRSS_CHANNEL_DEFAULT
;
917 dev_info
->send_sections
= NETVSC_DEFAULT_TX
;
918 dev_info
->send_section_size
= NETVSC_SEND_SECTION_SIZE
;
919 dev_info
->recv_sections
= NETVSC_DEFAULT_RX
;
920 dev_info
->recv_section_size
= NETVSC_RECV_SECTION_SIZE
;
926 static int netvsc_detach(struct net_device
*ndev
,
927 struct netvsc_device
*nvdev
)
929 struct net_device_context
*ndev_ctx
= netdev_priv(ndev
);
930 struct hv_device
*hdev
= ndev_ctx
->device_ctx
;
933 /* Don't try continuing to try and setup sub channels */
934 if (cancel_work_sync(&nvdev
->subchan_work
))
937 /* If device was up (receiving) then shutdown */
938 if (netif_running(ndev
)) {
939 netvsc_tx_disable(nvdev
, ndev
);
941 ret
= rndis_filter_close(nvdev
);
944 "unable to close device (ret %d).\n", ret
);
948 ret
= netvsc_wait_until_empty(nvdev
);
951 "Ring buffer not empty after closing rndis\n");
956 netif_device_detach(ndev
);
958 rndis_filter_device_remove(hdev
, nvdev
);
963 static int netvsc_attach(struct net_device
*ndev
,
964 struct netvsc_device_info
*dev_info
)
966 struct net_device_context
*ndev_ctx
= netdev_priv(ndev
);
967 struct hv_device
*hdev
= ndev_ctx
->device_ctx
;
968 struct netvsc_device
*nvdev
;
969 struct rndis_device
*rdev
;
972 nvdev
= rndis_filter_device_add(hdev
, dev_info
);
974 return PTR_ERR(nvdev
);
976 if (nvdev
->num_chn
> 1) {
977 ret
= rndis_set_subchannel(ndev
, nvdev
, dev_info
);
979 /* if unavailable, just proceed with one queue */
986 /* In any case device is now ready */
987 nvdev
->tx_disable
= false;
988 netif_device_attach(ndev
);
990 /* Note: enable and attach happen when sub-channels setup */
991 netif_carrier_off(ndev
);
993 if (netif_running(ndev
)) {
994 ret
= rndis_filter_open(nvdev
);
998 rdev
= nvdev
->extension
;
999 if (!rdev
->link_state
)
1000 netif_carrier_on(ndev
);
1006 netif_device_detach(ndev
);
1008 rndis_filter_device_remove(hdev
, nvdev
);
1013 static int netvsc_set_channels(struct net_device
*net
,
1014 struct ethtool_channels
*channels
)
1016 struct net_device_context
*net_device_ctx
= netdev_priv(net
);
1017 struct netvsc_device
*nvdev
= rtnl_dereference(net_device_ctx
->nvdev
);
1018 unsigned int orig
, count
= channels
->combined_count
;
1019 struct netvsc_device_info
*device_info
;
1022 /* We do not support separate count for rx, tx, or other */
1024 channels
->rx_count
|| channels
->tx_count
|| channels
->other_count
)
1027 if (!nvdev
|| nvdev
->destroy
)
1030 if (nvdev
->nvsp_version
< NVSP_PROTOCOL_VERSION_5
)
1033 if (count
> nvdev
->max_chn
)
1036 orig
= nvdev
->num_chn
;
1038 device_info
= netvsc_devinfo_get(nvdev
);
1043 device_info
->num_chn
= count
;
1045 ret
= netvsc_detach(net
, nvdev
);
1049 ret
= netvsc_attach(net
, device_info
);
1051 device_info
->num_chn
= orig
;
1052 if (netvsc_attach(net
, device_info
))
1053 netdev_err(net
, "restoring channel setting failed\n");
1062 netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings
*cmd
)
1064 struct ethtool_link_ksettings diff1
= *cmd
;
1065 struct ethtool_link_ksettings diff2
= {};
1067 diff1
.base
.speed
= 0;
1068 diff1
.base
.duplex
= 0;
1069 /* advertising and cmd are usually set */
1070 ethtool_link_ksettings_zero_link_mode(&diff1
, advertising
);
1072 /* We set port to PORT_OTHER */
1073 diff2
.base
.port
= PORT_OTHER
;
1075 return !memcmp(&diff1
, &diff2
, sizeof(diff1
));
1078 static void netvsc_init_settings(struct net_device
*dev
)
1080 struct net_device_context
*ndc
= netdev_priv(dev
);
1082 ndc
->l4_hash
= HV_DEFAULT_L4HASH
;
1084 ndc
->speed
= SPEED_UNKNOWN
;
1085 ndc
->duplex
= DUPLEX_FULL
;
1088 static int netvsc_get_link_ksettings(struct net_device
*dev
,
1089 struct ethtool_link_ksettings
*cmd
)
1091 struct net_device_context
*ndc
= netdev_priv(dev
);
1093 cmd
->base
.speed
= ndc
->speed
;
1094 cmd
->base
.duplex
= ndc
->duplex
;
1095 cmd
->base
.port
= PORT_OTHER
;
1100 static int netvsc_set_link_ksettings(struct net_device
*dev
,
1101 const struct ethtool_link_ksettings
*cmd
)
1103 struct net_device_context
*ndc
= netdev_priv(dev
);
1106 speed
= cmd
->base
.speed
;
1107 if (!ethtool_validate_speed(speed
) ||
1108 !ethtool_validate_duplex(cmd
->base
.duplex
) ||
1109 !netvsc_validate_ethtool_ss_cmd(cmd
))
1113 ndc
->duplex
= cmd
->base
.duplex
;
1118 static int netvsc_change_mtu(struct net_device
*ndev
, int mtu
)
1120 struct net_device_context
*ndevctx
= netdev_priv(ndev
);
1121 struct net_device
*vf_netdev
= rtnl_dereference(ndevctx
->vf_netdev
);
1122 struct netvsc_device
*nvdev
= rtnl_dereference(ndevctx
->nvdev
);
1123 int orig_mtu
= ndev
->mtu
;
1124 struct netvsc_device_info
*device_info
;
1127 if (!nvdev
|| nvdev
->destroy
)
1130 device_info
= netvsc_devinfo_get(nvdev
);
1135 /* Change MTU of underlying VF netdev first. */
1137 ret
= dev_set_mtu(vf_netdev
, mtu
);
1142 ret
= netvsc_detach(ndev
, nvdev
);
1148 ret
= netvsc_attach(ndev
, device_info
);
1152 /* Attempt rollback to original MTU */
1153 ndev
->mtu
= orig_mtu
;
1155 if (netvsc_attach(ndev
, device_info
))
1156 netdev_err(ndev
, "restoring mtu failed\n");
1159 dev_set_mtu(vf_netdev
, orig_mtu
);
1166 static void netvsc_get_vf_stats(struct net_device
*net
,
1167 struct netvsc_vf_pcpu_stats
*tot
)
1169 struct net_device_context
*ndev_ctx
= netdev_priv(net
);
1172 memset(tot
, 0, sizeof(*tot
));
1174 for_each_possible_cpu(i
) {
1175 const struct netvsc_vf_pcpu_stats
*stats
1176 = per_cpu_ptr(ndev_ctx
->vf_stats
, i
);
1177 u64 rx_packets
, rx_bytes
, tx_packets
, tx_bytes
;
1181 start
= u64_stats_fetch_begin_irq(&stats
->syncp
);
1182 rx_packets
= stats
->rx_packets
;
1183 tx_packets
= stats
->tx_packets
;
1184 rx_bytes
= stats
->rx_bytes
;
1185 tx_bytes
= stats
->tx_bytes
;
1186 } while (u64_stats_fetch_retry_irq(&stats
->syncp
, start
));
1188 tot
->rx_packets
+= rx_packets
;
1189 tot
->tx_packets
+= tx_packets
;
1190 tot
->rx_bytes
+= rx_bytes
;
1191 tot
->tx_bytes
+= tx_bytes
;
1192 tot
->tx_dropped
+= stats
->tx_dropped
;
1196 static void netvsc_get_pcpu_stats(struct net_device
*net
,
1197 struct netvsc_ethtool_pcpu_stats
*pcpu_tot
)
1199 struct net_device_context
*ndev_ctx
= netdev_priv(net
);
1200 struct netvsc_device
*nvdev
= rcu_dereference_rtnl(ndev_ctx
->nvdev
);
1203 /* fetch percpu stats of vf */
1204 for_each_possible_cpu(i
) {
1205 const struct netvsc_vf_pcpu_stats
*stats
=
1206 per_cpu_ptr(ndev_ctx
->vf_stats
, i
);
1207 struct netvsc_ethtool_pcpu_stats
*this_tot
= &pcpu_tot
[i
];
1211 start
= u64_stats_fetch_begin_irq(&stats
->syncp
);
1212 this_tot
->vf_rx_packets
= stats
->rx_packets
;
1213 this_tot
->vf_tx_packets
= stats
->tx_packets
;
1214 this_tot
->vf_rx_bytes
= stats
->rx_bytes
;
1215 this_tot
->vf_tx_bytes
= stats
->tx_bytes
;
1216 } while (u64_stats_fetch_retry_irq(&stats
->syncp
, start
));
1217 this_tot
->rx_packets
= this_tot
->vf_rx_packets
;
1218 this_tot
->tx_packets
= this_tot
->vf_tx_packets
;
1219 this_tot
->rx_bytes
= this_tot
->vf_rx_bytes
;
1220 this_tot
->tx_bytes
= this_tot
->vf_tx_bytes
;
1223 /* fetch percpu stats of netvsc */
1224 for (i
= 0; i
< nvdev
->num_chn
; i
++) {
1225 const struct netvsc_channel
*nvchan
= &nvdev
->chan_table
[i
];
1226 const struct netvsc_stats
*stats
;
1227 struct netvsc_ethtool_pcpu_stats
*this_tot
=
1228 &pcpu_tot
[nvchan
->channel
->target_cpu
];
1232 stats
= &nvchan
->tx_stats
;
1234 start
= u64_stats_fetch_begin_irq(&stats
->syncp
);
1235 packets
= stats
->packets
;
1236 bytes
= stats
->bytes
;
1237 } while (u64_stats_fetch_retry_irq(&stats
->syncp
, start
));
1239 this_tot
->tx_bytes
+= bytes
;
1240 this_tot
->tx_packets
+= packets
;
1242 stats
= &nvchan
->rx_stats
;
1244 start
= u64_stats_fetch_begin_irq(&stats
->syncp
);
1245 packets
= stats
->packets
;
1246 bytes
= stats
->bytes
;
1247 } while (u64_stats_fetch_retry_irq(&stats
->syncp
, start
));
1249 this_tot
->rx_bytes
+= bytes
;
1250 this_tot
->rx_packets
+= packets
;
1254 static void netvsc_get_stats64(struct net_device
*net
,
1255 struct rtnl_link_stats64
*t
)
1257 struct net_device_context
*ndev_ctx
= netdev_priv(net
);
1258 struct netvsc_device
*nvdev
;
1259 struct netvsc_vf_pcpu_stats vf_tot
;
1264 nvdev
= rcu_dereference(ndev_ctx
->nvdev
);
1268 netdev_stats_to_stats64(t
, &net
->stats
);
1270 netvsc_get_vf_stats(net
, &vf_tot
);
1271 t
->rx_packets
+= vf_tot
.rx_packets
;
1272 t
->tx_packets
+= vf_tot
.tx_packets
;
1273 t
->rx_bytes
+= vf_tot
.rx_bytes
;
1274 t
->tx_bytes
+= vf_tot
.tx_bytes
;
1275 t
->tx_dropped
+= vf_tot
.tx_dropped
;
1277 for (i
= 0; i
< nvdev
->num_chn
; i
++) {
1278 const struct netvsc_channel
*nvchan
= &nvdev
->chan_table
[i
];
1279 const struct netvsc_stats
*stats
;
1280 u64 packets
, bytes
, multicast
;
1283 stats
= &nvchan
->tx_stats
;
1285 start
= u64_stats_fetch_begin_irq(&stats
->syncp
);
1286 packets
= stats
->packets
;
1287 bytes
= stats
->bytes
;
1288 } while (u64_stats_fetch_retry_irq(&stats
->syncp
, start
));
1290 t
->tx_bytes
+= bytes
;
1291 t
->tx_packets
+= packets
;
1293 stats
= &nvchan
->rx_stats
;
1295 start
= u64_stats_fetch_begin_irq(&stats
->syncp
);
1296 packets
= stats
->packets
;
1297 bytes
= stats
->bytes
;
1298 multicast
= stats
->multicast
+ stats
->broadcast
;
1299 } while (u64_stats_fetch_retry_irq(&stats
->syncp
, start
));
1301 t
->rx_bytes
+= bytes
;
1302 t
->rx_packets
+= packets
;
1303 t
->multicast
+= multicast
;
1309 static int netvsc_set_mac_addr(struct net_device
*ndev
, void *p
)
1311 struct net_device_context
*ndc
= netdev_priv(ndev
);
1312 struct net_device
*vf_netdev
= rtnl_dereference(ndc
->vf_netdev
);
1313 struct netvsc_device
*nvdev
= rtnl_dereference(ndc
->nvdev
);
1314 struct sockaddr
*addr
= p
;
1317 err
= eth_prepare_mac_addr_change(ndev
, p
);
1325 err
= dev_set_mac_address(vf_netdev
, addr
);
1330 err
= rndis_filter_set_device_mac(nvdev
, addr
->sa_data
);
1332 eth_commit_mac_addr_change(ndev
, p
);
1333 } else if (vf_netdev
) {
1334 /* rollback change on VF */
1335 memcpy(addr
->sa_data
, ndev
->dev_addr
, ETH_ALEN
);
1336 dev_set_mac_address(vf_netdev
, addr
);
1342 static const struct {
1343 char name
[ETH_GSTRING_LEN
];
1345 } netvsc_stats
[] = {
1346 { "tx_scattered", offsetof(struct netvsc_ethtool_stats
, tx_scattered
) },
1347 { "tx_no_memory", offsetof(struct netvsc_ethtool_stats
, tx_no_memory
) },
1348 { "tx_no_space", offsetof(struct netvsc_ethtool_stats
, tx_no_space
) },
1349 { "tx_too_big", offsetof(struct netvsc_ethtool_stats
, tx_too_big
) },
1350 { "tx_busy", offsetof(struct netvsc_ethtool_stats
, tx_busy
) },
1351 { "tx_send_full", offsetof(struct netvsc_ethtool_stats
, tx_send_full
) },
1352 { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats
, rx_comp_busy
) },
1353 { "rx_no_memory", offsetof(struct netvsc_ethtool_stats
, rx_no_memory
) },
1354 { "stop_queue", offsetof(struct netvsc_ethtool_stats
, stop_queue
) },
1355 { "wake_queue", offsetof(struct netvsc_ethtool_stats
, wake_queue
) },
1357 { "cpu%u_rx_packets",
1358 offsetof(struct netvsc_ethtool_pcpu_stats
, rx_packets
) },
1360 offsetof(struct netvsc_ethtool_pcpu_stats
, rx_bytes
) },
1361 { "cpu%u_tx_packets",
1362 offsetof(struct netvsc_ethtool_pcpu_stats
, tx_packets
) },
1364 offsetof(struct netvsc_ethtool_pcpu_stats
, tx_bytes
) },
1365 { "cpu%u_vf_rx_packets",
1366 offsetof(struct netvsc_ethtool_pcpu_stats
, vf_rx_packets
) },
1367 { "cpu%u_vf_rx_bytes",
1368 offsetof(struct netvsc_ethtool_pcpu_stats
, vf_rx_bytes
) },
1369 { "cpu%u_vf_tx_packets",
1370 offsetof(struct netvsc_ethtool_pcpu_stats
, vf_tx_packets
) },
1371 { "cpu%u_vf_tx_bytes",
1372 offsetof(struct netvsc_ethtool_pcpu_stats
, vf_tx_bytes
) },
1374 { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats
, rx_packets
) },
1375 { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats
, rx_bytes
) },
1376 { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats
, tx_packets
) },
1377 { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats
, tx_bytes
) },
1378 { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats
, tx_dropped
) },
#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
1390 static int netvsc_get_sset_count(struct net_device
*dev
, int string_set
)
1392 struct net_device_context
*ndc
= netdev_priv(dev
);
1393 struct netvsc_device
*nvdev
= rtnl_dereference(ndc
->nvdev
);
1398 switch (string_set
) {
1400 return NETVSC_GLOBAL_STATS_LEN
1401 + NETVSC_VF_STATS_LEN
1402 + NETVSC_QUEUE_STATS_LEN(nvdev
)
1403 + NETVSC_PCPU_STATS_LEN
;
1409 static void netvsc_get_ethtool_stats(struct net_device
*dev
,
1410 struct ethtool_stats
*stats
, u64
*data
)
1412 struct net_device_context
*ndc
= netdev_priv(dev
);
1413 struct netvsc_device
*nvdev
= rtnl_dereference(ndc
->nvdev
);
1414 const void *nds
= &ndc
->eth_stats
;
1415 const struct netvsc_stats
*qstats
;
1416 struct netvsc_vf_pcpu_stats sum
;
1417 struct netvsc_ethtool_pcpu_stats
*pcpu_sum
;
1425 for (i
= 0; i
< NETVSC_GLOBAL_STATS_LEN
; i
++)
1426 data
[i
] = *(unsigned long *)(nds
+ netvsc_stats
[i
].offset
);
1428 netvsc_get_vf_stats(dev
, &sum
);
1429 for (j
= 0; j
< NETVSC_VF_STATS_LEN
; j
++)
1430 data
[i
++] = *(u64
*)((void *)&sum
+ vf_stats
[j
].offset
);
1432 for (j
= 0; j
< nvdev
->num_chn
; j
++) {
1433 qstats
= &nvdev
->chan_table
[j
].tx_stats
;
1436 start
= u64_stats_fetch_begin_irq(&qstats
->syncp
);
1437 packets
= qstats
->packets
;
1438 bytes
= qstats
->bytes
;
1439 } while (u64_stats_fetch_retry_irq(&qstats
->syncp
, start
));
1440 data
[i
++] = packets
;
1443 qstats
= &nvdev
->chan_table
[j
].rx_stats
;
1445 start
= u64_stats_fetch_begin_irq(&qstats
->syncp
);
1446 packets
= qstats
->packets
;
1447 bytes
= qstats
->bytes
;
1448 } while (u64_stats_fetch_retry_irq(&qstats
->syncp
, start
));
1449 data
[i
++] = packets
;
1453 pcpu_sum
= kvmalloc_array(num_possible_cpus(),
1454 sizeof(struct netvsc_ethtool_pcpu_stats
),
1456 netvsc_get_pcpu_stats(dev
, pcpu_sum
);
1457 for_each_present_cpu(cpu
) {
1458 struct netvsc_ethtool_pcpu_stats
*this_sum
= &pcpu_sum
[cpu
];
1460 for (j
= 0; j
< ARRAY_SIZE(pcpu_stats
); j
++)
1461 data
[i
++] = *(u64
*)((void *)this_sum
1462 + pcpu_stats
[j
].offset
);
1467 static void netvsc_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
1469 struct net_device_context
*ndc
= netdev_priv(dev
);
1470 struct netvsc_device
*nvdev
= rtnl_dereference(ndc
->nvdev
);
1477 switch (stringset
) {
1479 for (i
= 0; i
< ARRAY_SIZE(netvsc_stats
); i
++) {
1480 memcpy(p
, netvsc_stats
[i
].name
, ETH_GSTRING_LEN
);
1481 p
+= ETH_GSTRING_LEN
;
1484 for (i
= 0; i
< ARRAY_SIZE(vf_stats
); i
++) {
1485 memcpy(p
, vf_stats
[i
].name
, ETH_GSTRING_LEN
);
1486 p
+= ETH_GSTRING_LEN
;
1489 for (i
= 0; i
< nvdev
->num_chn
; i
++) {
1490 sprintf(p
, "tx_queue_%u_packets", i
);
1491 p
+= ETH_GSTRING_LEN
;
1492 sprintf(p
, "tx_queue_%u_bytes", i
);
1493 p
+= ETH_GSTRING_LEN
;
1494 sprintf(p
, "rx_queue_%u_packets", i
);
1495 p
+= ETH_GSTRING_LEN
;
1496 sprintf(p
, "rx_queue_%u_bytes", i
);
1497 p
+= ETH_GSTRING_LEN
;
1500 for_each_present_cpu(cpu
) {
1501 for (i
= 0; i
< ARRAY_SIZE(pcpu_stats
); i
++) {
1502 sprintf(p
, pcpu_stats
[i
].name
, cpu
);
1503 p
+= ETH_GSTRING_LEN
;
1512 netvsc_get_rss_hash_opts(struct net_device_context
*ndc
,
1513 struct ethtool_rxnfc
*info
)
1515 const u32 l4_flag
= RXH_L4_B_0_1
| RXH_L4_B_2_3
;
1517 info
->data
= RXH_IP_SRC
| RXH_IP_DST
;
1519 switch (info
->flow_type
) {
1521 if (ndc
->l4_hash
& HV_TCP4_L4HASH
)
1522 info
->data
|= l4_flag
;
1527 if (ndc
->l4_hash
& HV_TCP6_L4HASH
)
1528 info
->data
|= l4_flag
;
1533 if (ndc
->l4_hash
& HV_UDP4_L4HASH
)
1534 info
->data
|= l4_flag
;
1539 if (ndc
->l4_hash
& HV_UDP6_L4HASH
)
1540 info
->data
|= l4_flag
;
1556 netvsc_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*info
,
1559 struct net_device_context
*ndc
= netdev_priv(dev
);
1560 struct netvsc_device
*nvdev
= rtnl_dereference(ndc
->nvdev
);
1565 switch (info
->cmd
) {
1566 case ETHTOOL_GRXRINGS
:
1567 info
->data
= nvdev
->num_chn
;
1571 return netvsc_get_rss_hash_opts(ndc
, info
);
1576 static int netvsc_set_rss_hash_opts(struct net_device_context
*ndc
,
1577 struct ethtool_rxnfc
*info
)
1579 if (info
->data
== (RXH_IP_SRC
| RXH_IP_DST
|
1580 RXH_L4_B_0_1
| RXH_L4_B_2_3
)) {
1581 switch (info
->flow_type
) {
1583 ndc
->l4_hash
|= HV_TCP4_L4HASH
;
1587 ndc
->l4_hash
|= HV_TCP6_L4HASH
;
1591 ndc
->l4_hash
|= HV_UDP4_L4HASH
;
1595 ndc
->l4_hash
|= HV_UDP6_L4HASH
;
1605 if (info
->data
== (RXH_IP_SRC
| RXH_IP_DST
)) {
1606 switch (info
->flow_type
) {
1608 ndc
->l4_hash
&= ~HV_TCP4_L4HASH
;
1612 ndc
->l4_hash
&= ~HV_TCP6_L4HASH
;
1616 ndc
->l4_hash
&= ~HV_UDP4_L4HASH
;
1620 ndc
->l4_hash
&= ~HV_UDP6_L4HASH
;
1634 netvsc_set_rxnfc(struct net_device
*ndev
, struct ethtool_rxnfc
*info
)
1636 struct net_device_context
*ndc
= netdev_priv(ndev
);
1638 if (info
->cmd
== ETHTOOL_SRXFH
)
1639 return netvsc_set_rss_hash_opts(ndc
, info
);
1644 #ifdef CONFIG_NET_POLL_CONTROLLER
1645 static void netvsc_poll_controller(struct net_device
*dev
)
1647 struct net_device_context
*ndc
= netdev_priv(dev
);
1648 struct netvsc_device
*ndev
;
1652 ndev
= rcu_dereference(ndc
->nvdev
);
1654 for (i
= 0; i
< ndev
->num_chn
; i
++) {
1655 struct netvsc_channel
*nvchan
= &ndev
->chan_table
[i
];
1657 napi_schedule(&nvchan
->napi
);
1664 static u32
netvsc_get_rxfh_key_size(struct net_device
*dev
)
1666 return NETVSC_HASH_KEYLEN
;
1669 static u32
netvsc_rss_indir_size(struct net_device
*dev
)
1674 static int netvsc_get_rxfh(struct net_device
*dev
, u32
*indir
, u8
*key
,
1677 struct net_device_context
*ndc
= netdev_priv(dev
);
1678 struct netvsc_device
*ndev
= rtnl_dereference(ndc
->nvdev
);
1679 struct rndis_device
*rndis_dev
;
1686 *hfunc
= ETH_RSS_HASH_TOP
; /* Toeplitz */
1688 rndis_dev
= ndev
->extension
;
1690 for (i
= 0; i
< ITAB_NUM
; i
++)
1691 indir
[i
] = ndc
->rx_table
[i
];
1695 memcpy(key
, rndis_dev
->rss_key
, NETVSC_HASH_KEYLEN
);
1700 static int netvsc_set_rxfh(struct net_device
*dev
, const u32
*indir
,
1701 const u8
*key
, const u8 hfunc
)
1703 struct net_device_context
*ndc
= netdev_priv(dev
);
1704 struct netvsc_device
*ndev
= rtnl_dereference(ndc
->nvdev
);
1705 struct rndis_device
*rndis_dev
;
1711 if (hfunc
!= ETH_RSS_HASH_NO_CHANGE
&& hfunc
!= ETH_RSS_HASH_TOP
)
1714 rndis_dev
= ndev
->extension
;
1716 for (i
= 0; i
< ITAB_NUM
; i
++)
1717 if (indir
[i
] >= ndev
->num_chn
)
1720 for (i
= 0; i
< ITAB_NUM
; i
++)
1721 ndc
->rx_table
[i
] = indir
[i
];
1728 key
= rndis_dev
->rss_key
;
1731 return rndis_filter_set_rss_param(rndis_dev
, key
);
1734 /* Hyper-V RNDIS protocol does not have ring in the HW sense.
1735 * It does have pre-allocated receive area which is divided into sections.
1737 static void __netvsc_get_ringparam(struct netvsc_device
*nvdev
,
1738 struct ethtool_ringparam
*ring
)
1742 ring
->rx_pending
= nvdev
->recv_section_cnt
;
1743 ring
->tx_pending
= nvdev
->send_section_cnt
;
1745 if (nvdev
->nvsp_version
<= NVSP_PROTOCOL_VERSION_2
)
1746 max_buf_size
= NETVSC_RECEIVE_BUFFER_SIZE_LEGACY
;
1748 max_buf_size
= NETVSC_RECEIVE_BUFFER_SIZE
;
1750 ring
->rx_max_pending
= max_buf_size
/ nvdev
->recv_section_size
;
1751 ring
->tx_max_pending
= NETVSC_SEND_BUFFER_SIZE
1752 / nvdev
->send_section_size
;
1755 static void netvsc_get_ringparam(struct net_device
*ndev
,
1756 struct ethtool_ringparam
*ring
)
1758 struct net_device_context
*ndevctx
= netdev_priv(ndev
);
1759 struct netvsc_device
*nvdev
= rtnl_dereference(ndevctx
->nvdev
);
1764 __netvsc_get_ringparam(nvdev
, ring
);
1767 static int netvsc_set_ringparam(struct net_device
*ndev
,
1768 struct ethtool_ringparam
*ring
)
1770 struct net_device_context
*ndevctx
= netdev_priv(ndev
);
1771 struct netvsc_device
*nvdev
= rtnl_dereference(ndevctx
->nvdev
);
1772 struct netvsc_device_info
*device_info
;
1773 struct ethtool_ringparam orig
;
1777 if (!nvdev
|| nvdev
->destroy
)
1780 memset(&orig
, 0, sizeof(orig
));
1781 __netvsc_get_ringparam(nvdev
, &orig
);
1783 new_tx
= clamp_t(u32
, ring
->tx_pending
,
1784 NETVSC_MIN_TX_SECTIONS
, orig
.tx_max_pending
);
1785 new_rx
= clamp_t(u32
, ring
->rx_pending
,
1786 NETVSC_MIN_RX_SECTIONS
, orig
.rx_max_pending
);
1788 if (new_tx
== orig
.tx_pending
&&
1789 new_rx
== orig
.rx_pending
)
1790 return 0; /* no change */
1792 device_info
= netvsc_devinfo_get(nvdev
);
1797 device_info
->send_sections
= new_tx
;
1798 device_info
->recv_sections
= new_rx
;
1800 ret
= netvsc_detach(ndev
, nvdev
);
1804 ret
= netvsc_attach(ndev
, device_info
);
1806 device_info
->send_sections
= orig
.tx_pending
;
1807 device_info
->recv_sections
= orig
.rx_pending
;
1809 if (netvsc_attach(ndev
, device_info
))
1810 netdev_err(ndev
, "restoring ringparam failed");
static u32 netvsc_get_msglevel(struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	return ndev_ctx->msg_enable;
}

static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	ndev_ctx->msg_enable = val;
}
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_msglevel	= netvsc_get_msglevel,
	.set_msglevel	= netvsc_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_rxnfc	= netvsc_get_rxnfc,
	.set_rxnfc	= netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam	= netvsc_get_ringparam,
	.set_ringparam	= netvsc_set_ringparam,
};
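
/* Standard net_device callbacks exported by the synthetic NIC. */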
static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_change_rx_flags =		netvsc_change_rx_flags,
	.ndo_set_rx_mode =		netvsc_set_rx_mode,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};
1872 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
1873 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
1874 * present send GARP packet to network peers with netif_notify_peers().
1876 static void netvsc_link_change(struct work_struct
*w
)
1878 struct net_device_context
*ndev_ctx
=
1879 container_of(w
, struct net_device_context
, dwork
.work
);
1880 struct hv_device
*device_obj
= ndev_ctx
->device_ctx
;
1881 struct net_device
*net
= hv_get_drvdata(device_obj
);
1882 struct netvsc_device
*net_device
;
1883 struct rndis_device
*rdev
;
1884 struct netvsc_reconfig
*event
= NULL
;
1885 bool notify
= false, reschedule
= false;
1886 unsigned long flags
, next_reconfig
, delay
;
1888 /* if changes are happening, comeback later */
1889 if (!rtnl_trylock()) {
1890 schedule_delayed_work(&ndev_ctx
->dwork
, LINKCHANGE_INT
);
1894 net_device
= rtnl_dereference(ndev_ctx
->nvdev
);
1898 rdev
= net_device
->extension
;
1900 next_reconfig
= ndev_ctx
->last_reconfig
+ LINKCHANGE_INT
;
1901 if (time_is_after_jiffies(next_reconfig
)) {
1902 /* link_watch only sends one notification with current state
1903 * per second, avoid doing reconfig more frequently. Handle
1906 delay
= next_reconfig
- jiffies
;
1907 delay
= delay
< LINKCHANGE_INT
? delay
: LINKCHANGE_INT
;
1908 schedule_delayed_work(&ndev_ctx
->dwork
, delay
);
1911 ndev_ctx
->last_reconfig
= jiffies
;
1913 spin_lock_irqsave(&ndev_ctx
->lock
, flags
);
1914 if (!list_empty(&ndev_ctx
->reconfig_events
)) {
1915 event
= list_first_entry(&ndev_ctx
->reconfig_events
,
1916 struct netvsc_reconfig
, list
);
1917 list_del(&event
->list
);
1918 reschedule
= !list_empty(&ndev_ctx
->reconfig_events
);
1920 spin_unlock_irqrestore(&ndev_ctx
->lock
, flags
);
1925 switch (event
->event
) {
1926 /* Only the following events are possible due to the check in
1927 * netvsc_linkstatus_callback()
1929 case RNDIS_STATUS_MEDIA_CONNECT
:
1930 if (rdev
->link_state
) {
1931 rdev
->link_state
= false;
1932 netif_carrier_on(net
);
1933 netvsc_tx_enable(net_device
, net
);
1939 case RNDIS_STATUS_MEDIA_DISCONNECT
:
1940 if (!rdev
->link_state
) {
1941 rdev
->link_state
= true;
1942 netif_carrier_off(net
);
1943 netvsc_tx_disable(net_device
, net
);
1947 case RNDIS_STATUS_NETWORK_CHANGE
:
1948 /* Only makes sense if carrier is present */
1949 if (!rdev
->link_state
) {
1950 rdev
->link_state
= true;
1951 netif_carrier_off(net
);
1952 netvsc_tx_disable(net_device
, net
);
1953 event
->event
= RNDIS_STATUS_MEDIA_CONNECT
;
1954 spin_lock_irqsave(&ndev_ctx
->lock
, flags
);
1955 list_add(&event
->list
, &ndev_ctx
->reconfig_events
);
1956 spin_unlock_irqrestore(&ndev_ctx
->lock
, flags
);
1965 netdev_notify_peers(net
);
1967 /* link_watch only sends one notification with current state per
1968 * second, handle next reconfig event in 2 seconds.
1971 schedule_delayed_work(&ndev_ctx
->dwork
, LINKCHANGE_INT
);
1979 static struct net_device
*get_netvsc_byref(struct net_device
*vf_netdev
)
1981 struct net_device_context
*net_device_ctx
;
1982 struct net_device
*dev
;
1984 dev
= netdev_master_upper_dev_get(vf_netdev
);
1985 if (!dev
|| dev
->netdev_ops
!= &device_ops
)
1986 return NULL
; /* not a netvsc device */
1988 net_device_ctx
= netdev_priv(dev
);
1989 if (!rtnl_dereference(net_device_ctx
->nvdev
))
1990 return NULL
; /* device is removed */
1995 /* Called when VF is injecting data into network stack.
1996 * Change the associated network device from VF to netvsc.
1997 * note: already called with rcu_read_lock
1999 static rx_handler_result_t
netvsc_vf_handle_frame(struct sk_buff
**pskb
)
2001 struct sk_buff
*skb
= *pskb
;
2002 struct net_device
*ndev
= rcu_dereference(skb
->dev
->rx_handler_data
);
2003 struct net_device_context
*ndev_ctx
= netdev_priv(ndev
);
2004 struct netvsc_vf_pcpu_stats
*pcpu_stats
2005 = this_cpu_ptr(ndev_ctx
->vf_stats
);
2007 skb
= skb_share_check(skb
, GFP_ATOMIC
);
2009 return RX_HANDLER_CONSUMED
;
2015 u64_stats_update_begin(&pcpu_stats
->syncp
);
2016 pcpu_stats
->rx_packets
++;
2017 pcpu_stats
->rx_bytes
+= skb
->len
;
2018 u64_stats_update_end(&pcpu_stats
->syncp
);
2020 return RX_HANDLER_ANOTHER
;
2023 static int netvsc_vf_join(struct net_device
*vf_netdev
,
2024 struct net_device
*ndev
)
2026 struct net_device_context
*ndev_ctx
= netdev_priv(ndev
);
2029 ret
= netdev_rx_handler_register(vf_netdev
,
2030 netvsc_vf_handle_frame
, ndev
);
2032 netdev_err(vf_netdev
,
2033 "can not register netvsc VF receive handler (err = %d)\n",
2035 goto rx_handler_failed
;
2038 ret
= netdev_master_upper_dev_link(vf_netdev
, ndev
,
2041 netdev_err(vf_netdev
,
2042 "can not set master device %s (err = %d)\n",
2044 goto upper_link_failed
;
2047 /* set slave flag before open to prevent IPv6 addrconf */
2048 vf_netdev
->flags
|= IFF_SLAVE
;
2050 schedule_delayed_work(&ndev_ctx
->vf_takeover
, VF_TAKEOVER_INT
);
2052 call_netdevice_notifiers(NETDEV_JOIN
, vf_netdev
);
2054 netdev_info(vf_netdev
, "joined to %s\n", ndev
->name
);
2058 netdev_rx_handler_unregister(vf_netdev
);
2063 static void __netvsc_vf_setup(struct net_device
*ndev
,
2064 struct net_device
*vf_netdev
)
2068 /* Align MTU of VF with master */
2069 ret
= dev_set_mtu(vf_netdev
, ndev
->mtu
);
2071 netdev_warn(vf_netdev
,
2072 "unable to change mtu to %u\n", ndev
->mtu
);
2074 /* set multicast etc flags on VF */
2075 dev_change_flags(vf_netdev
, ndev
->flags
| IFF_SLAVE
);
2077 /* sync address list from ndev to VF */
2078 netif_addr_lock_bh(ndev
);
2079 dev_uc_sync(vf_netdev
, ndev
);
2080 dev_mc_sync(vf_netdev
, ndev
);
2081 netif_addr_unlock_bh(ndev
);
2083 if (netif_running(ndev
)) {
2084 ret
= dev_open(vf_netdev
);
2086 netdev_warn(vf_netdev
,
2087 "unable to open: %d\n", ret
);
2091 /* Setup VF as slave of the synthetic device.
2092 * Runs in workqueue to avoid recursion in netlink callbacks.
2094 static void netvsc_vf_setup(struct work_struct
*w
)
2096 struct net_device_context
*ndev_ctx
2097 = container_of(w
, struct net_device_context
, vf_takeover
.work
);
2098 struct net_device
*ndev
= hv_get_drvdata(ndev_ctx
->device_ctx
);
2099 struct net_device
*vf_netdev
;
2101 if (!rtnl_trylock()) {
2102 schedule_delayed_work(&ndev_ctx
->vf_takeover
, 0);
2106 vf_netdev
= rtnl_dereference(ndev_ctx
->vf_netdev
);
2108 __netvsc_vf_setup(ndev
, vf_netdev
);
2113 /* Find netvsc by VF serial number.
2114 * The PCI hyperv controller records the serial number as the slot kobj name.
2116 static struct net_device
*get_netvsc_byslot(const struct net_device
*vf_netdev
)
2118 struct device
*parent
= vf_netdev
->dev
.parent
;
2119 struct net_device_context
*ndev_ctx
;
2120 struct pci_dev
*pdev
;
2123 if (!parent
|| !dev_is_pci(parent
))
2124 return NULL
; /* not a PCI device */
2126 pdev
= to_pci_dev(parent
);
2128 netdev_notice(vf_netdev
, "no PCI slot information\n");
2132 if (kstrtou32(pci_slot_name(pdev
->slot
), 10, &serial
)) {
2133 netdev_notice(vf_netdev
, "Invalid vf serial:%s\n",
2134 pci_slot_name(pdev
->slot
));
2138 list_for_each_entry(ndev_ctx
, &netvsc_dev_list
, list
) {
2139 if (!ndev_ctx
->vf_alloc
)
2142 if (ndev_ctx
->vf_serial
== serial
)
2143 return hv_get_drvdata(ndev_ctx
->device_ctx
);
2146 netdev_notice(vf_netdev
,
2147 "no netdev found for vf serial:%u\n", serial
);
2151 static int netvsc_register_vf(struct net_device
*vf_netdev
)
2153 struct net_device_context
*net_device_ctx
;
2154 struct netvsc_device
*netvsc_dev
;
2155 struct net_device
*ndev
;
2158 if (vf_netdev
->addr_len
!= ETH_ALEN
)
2161 ndev
= get_netvsc_byslot(vf_netdev
);
2165 net_device_ctx
= netdev_priv(ndev
);
2166 netvsc_dev
= rtnl_dereference(net_device_ctx
->nvdev
);
2167 if (!netvsc_dev
|| rtnl_dereference(net_device_ctx
->vf_netdev
))
2170 /* if syntihetic interface is a different namespace,
2171 * then move the VF to that namespace; join will be
2172 * done again in that context.
2174 if (!net_eq(dev_net(ndev
), dev_net(vf_netdev
))) {
2175 ret
= dev_change_net_namespace(vf_netdev
,
2176 dev_net(ndev
), "eth%d");
2178 netdev_err(vf_netdev
,
2179 "could not move to same namespace as %s: %d\n",
2182 netdev_info(vf_netdev
,
2183 "VF moved to namespace with: %s\n",
2188 netdev_info(ndev
, "VF registering: %s\n", vf_netdev
->name
);
2190 if (netvsc_vf_join(vf_netdev
, ndev
) != 0)
2193 dev_hold(vf_netdev
);
2194 rcu_assign_pointer(net_device_ctx
->vf_netdev
, vf_netdev
);
2198 /* VF up/down change detected, schedule to change data path */
2199 static int netvsc_vf_changed(struct net_device
*vf_netdev
)
2201 struct net_device_context
*net_device_ctx
;
2202 struct netvsc_device
*netvsc_dev
;
2203 struct net_device
*ndev
;
2204 bool vf_is_up
= netif_running(vf_netdev
);
2206 ndev
= get_netvsc_byref(vf_netdev
);
2210 net_device_ctx
= netdev_priv(ndev
);
2211 netvsc_dev
= rtnl_dereference(net_device_ctx
->nvdev
);
2215 netvsc_switch_datapath(ndev
, vf_is_up
);
2216 netdev_info(ndev
, "Data path switched %s VF: %s\n",
2217 vf_is_up
? "to" : "from", vf_netdev
->name
);
2222 static int netvsc_unregister_vf(struct net_device
*vf_netdev
)
2224 struct net_device
*ndev
;
2225 struct net_device_context
*net_device_ctx
;
2227 ndev
= get_netvsc_byref(vf_netdev
);
2231 net_device_ctx
= netdev_priv(ndev
);
2232 cancel_delayed_work_sync(&net_device_ctx
->vf_takeover
);
2234 netdev_info(ndev
, "VF unregistering: %s\n", vf_netdev
->name
);
2236 netdev_rx_handler_unregister(vf_netdev
);
2237 netdev_upper_dev_unlink(vf_netdev
, ndev
);
2238 RCU_INIT_POINTER(net_device_ctx
->vf_netdev
, NULL
);
2244 static int netvsc_probe(struct hv_device
*dev
,
2245 const struct hv_vmbus_device_id
*dev_id
)
2247 struct net_device
*net
= NULL
;
2248 struct net_device_context
*net_device_ctx
;
2249 struct netvsc_device_info
*device_info
= NULL
;
2250 struct netvsc_device
*nvdev
;
2253 net
= alloc_etherdev_mq(sizeof(struct net_device_context
),
2258 netif_carrier_off(net
);
2260 netvsc_init_settings(net
);
2262 net_device_ctx
= netdev_priv(net
);
2263 net_device_ctx
->device_ctx
= dev
;
2264 net_device_ctx
->msg_enable
= netif_msg_init(debug
, default_msg
);
2265 if (netif_msg_probe(net_device_ctx
))
2266 netdev_dbg(net
, "netvsc msg_enable: %d\n",
2267 net_device_ctx
->msg_enable
);
2269 hv_set_drvdata(dev
, net
);
2271 INIT_DELAYED_WORK(&net_device_ctx
->dwork
, netvsc_link_change
);
2273 spin_lock_init(&net_device_ctx
->lock
);
2274 INIT_LIST_HEAD(&net_device_ctx
->reconfig_events
);
2275 INIT_DELAYED_WORK(&net_device_ctx
->vf_takeover
, netvsc_vf_setup
);
2277 net_device_ctx
->vf_stats
2278 = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats
);
2279 if (!net_device_ctx
->vf_stats
)
2282 net
->netdev_ops
= &device_ops
;
2283 net
->ethtool_ops
= ðtool_ops
;
2284 SET_NETDEV_DEV(net
, &dev
->device
);
2286 /* We always need headroom for rndis header */
2287 net
->needed_headroom
= RNDIS_AND_PPI_SIZE
;
2289 /* Initialize the number of queues to be 1, we may change it if more
2290 * channels are offered later.
2292 netif_set_real_num_tx_queues(net
, 1);
2293 netif_set_real_num_rx_queues(net
, 1);
2295 /* Notify the netvsc driver of the new device */
2296 device_info
= netvsc_devinfo_get(NULL
);
2300 goto devinfo_failed
;
2303 nvdev
= rndis_filter_device_add(dev
, device_info
);
2304 if (IS_ERR(nvdev
)) {
2305 ret
= PTR_ERR(nvdev
);
2306 netdev_err(net
, "unable to add netvsc device (ret %d)\n", ret
);
2310 memcpy(net
->dev_addr
, device_info
->mac_adr
, ETH_ALEN
);
2312 /* We must get rtnl lock before scheduling nvdev->subchan_work,
2313 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
2314 * all subchannels to show up, but that may not happen because
2315 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
2316 * -> ... -> device_add() -> ... -> __device_attach() can't get
2317 * the device lock, so all the subchannels can't be processed --
2318 * finally netvsc_subchan_work() hangs for ever.
2322 if (nvdev
->num_chn
> 1)
2323 schedule_work(&nvdev
->subchan_work
);
2325 /* hw_features computed in rndis_netdev_set_hwcaps() */
2326 net
->features
= net
->hw_features
|
2327 NETIF_F_HIGHDMA
| NETIF_F_SG
|
2328 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
;
2329 net
->vlan_features
= net
->features
;
2331 netdev_lockdep_set_classes(net
);
2333 /* MTU range: 68 - 1500 or 65521 */
2334 net
->min_mtu
= NETVSC_MTU_MIN
;
2335 if (nvdev
->nvsp_version
>= NVSP_PROTOCOL_VERSION_2
)
2336 net
->max_mtu
= NETVSC_MTU
- ETH_HLEN
;
2338 net
->max_mtu
= ETH_DATA_LEN
;
2340 nvdev
->tx_disable
= false;
2342 ret
= register_netdevice(net
);
2344 pr_err("Unable to register netdev.\n");
2345 goto register_failed
;
2348 list_add(&net_device_ctx
->list
, &netvsc_dev_list
);
2356 rndis_filter_device_remove(dev
, nvdev
);
2360 free_percpu(net_device_ctx
->vf_stats
);
2362 hv_set_drvdata(dev
, NULL
);
2368 static int netvsc_remove(struct hv_device
*dev
)
2370 struct net_device_context
*ndev_ctx
;
2371 struct net_device
*vf_netdev
, *net
;
2372 struct netvsc_device
*nvdev
;
2374 net
= hv_get_drvdata(dev
);
2376 dev_err(&dev
->device
, "No net device to remove\n");
2380 ndev_ctx
= netdev_priv(net
);
2382 cancel_delayed_work_sync(&ndev_ctx
->dwork
);
2385 nvdev
= rtnl_dereference(ndev_ctx
->nvdev
);
2387 cancel_work_sync(&nvdev
->subchan_work
);
2390 * Call to the vsc driver to let it know that the device is being
2391 * removed. Also blocks mtu and channel changes.
2393 vf_netdev
= rtnl_dereference(ndev_ctx
->vf_netdev
);
2395 netvsc_unregister_vf(vf_netdev
);
2398 rndis_filter_device_remove(dev
, nvdev
);
2400 unregister_netdevice(net
);
2401 list_del(&ndev_ctx
->list
);
2405 hv_set_drvdata(dev
, NULL
);
2407 free_percpu(ndev_ctx
->vf_stats
);
static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);
/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
	.driver = {
		.probe_type = PROBE_FORCE_SYNCHRONOUS,
	},
};
2432 * On Hyper-V, every VF interface is matched with a corresponding
2433 * synthetic interface. The synthetic interface is presented first
2434 * to the guest. When the corresponding VF instance is registered,
2435 * we will take care of switching the data path.
2437 static int netvsc_netdev_event(struct notifier_block
*this,
2438 unsigned long event
, void *ptr
)
2440 struct net_device
*event_dev
= netdev_notifier_info_to_dev(ptr
);
2442 /* Skip our own events */
2443 if (event_dev
->netdev_ops
== &device_ops
)
2446 /* Avoid non-Ethernet type devices */
2447 if (event_dev
->type
!= ARPHRD_ETHER
)
2450 /* Avoid Vlan dev with same MAC registering as VF */
2451 if (is_vlan_dev(event_dev
))
2454 /* Avoid Bonding master dev with same MAC registering as VF */
2455 if ((event_dev
->priv_flags
& IFF_BONDING
) &&
2456 (event_dev
->flags
& IFF_MASTER
))
2460 case NETDEV_REGISTER
:
2461 return netvsc_register_vf(event_dev
);
2462 case NETDEV_UNREGISTER
:
2463 return netvsc_unregister_vf(event_dev
);
2466 return netvsc_vf_changed(event_dev
);
2472 static struct notifier_block netvsc_netdev_notifier
= {
2473 .notifier_call
= netvsc_netdev_event
,
static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}
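
/* Validate the ring_size module parameter, register the vmbus driver, and
 * install the netdev notifier used to detect companion VF interfaces.
 */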
static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %u (min allowed)\n",
			ring_size);
	}
	netvsc_ring_bytes = ring_size * PAGE_SIZE;

	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);

	return 0;
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);