// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/route.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"
#define RING_SIZE_MIN		64
#define RETRY_US_LO		5000
#define RETRY_US_HI		10000
#define RETRY_MAX		2000	/* >10 sec */

#define LINKCHANGE_INT		(2 * HZ)
#define VF_TAKEOVER_INT		(HZ / 10)
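/* RETRY_MAX retries of RETRY_US_LO..RETRY_US_HI (5-10 ms each) give the
 * ring-drain wait in netvsc_wait_until_empty() a budget of roughly 10-20
 * seconds, hence the ">10 sec" note above. LINKCHANGE_INT is 2 seconds and
 * VF_TAKEOVER_INT is HZ/10, i.e. 100 ms.
 */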
static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
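/* A minimal usage sketch (assumed command line, not part of this file):
 * loading the module with "modprobe hv_netvsc ring_size=256" requests a
 * 256-page VMBus ring buffer per channel; netvsc_drv_init() converts this
 * page count into bytes in netvsc_ring_bytes.
 */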
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;
static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);
static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}
static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}
static void netvsc_tx_enable(struct netvsc_device *nvscdev,
			     struct net_device *ndev)
{
	nvscdev->tx_disable = false;
	virt_wmb(); /* ensure queue wake up mechanism is on */

	netif_tx_wake_all_queues(ndev);
}
static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);
	}

	if (vf_netdev) {
		/* Setting the synthetic device up transparently sets the
		 * slave as up. If open fails, the slave will still be
		 * offline (and not used).
		 */
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}
static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}
static void netvsc_tx_disable(struct netvsc_device *nvscdev,
			      struct net_device *ndev)
{
	if (nvscdev) {
		nvscdev->tx_disable = true;
		virt_wmb(); /* ensure txq will not wake up after stop */
	}

	netif_tx_disable(ndev);
}
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netvsc_tx_disable(nvdev, net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}
static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}
/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change the UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(struct sk_buff *skb,
				  const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;
		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;
		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}
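/* Example (assumed ethtool usage, not taken from this file): UDP 4-tuple
 * hashing can typically be enabled from userspace with
 * "ethtool -N eth0 rx-flow-hash udp4 sdfn", which sets HV_UDP4_L4HASH in
 * ndc->l4_hash via netvsc_set_rss_hash_opts() later in this file.
 */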
static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}
/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
		else
			txq = netdev_pick_tx(vf_netdev, skb, NULL);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}
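/* Note on the wrap-around at the end of netvsc_select_queue(): when a VF with
 * more queues than the synthetic device picked the queue, txq can exceed
 * real_num_tx_queues, so it is folded back into range before being returned
 * to the stack.
 */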
375 static u32
fill_pg_buf(struct page
*page
, u32 offset
, u32 len
,
376 struct hv_page_buffer
*pb
)
380 /* Deal with compound pages by ignoring unused part
383 page
+= (offset
>> PAGE_SHIFT
);
384 offset
&= ~PAGE_MASK
;
389 bytes
= PAGE_SIZE
- offset
;
392 pb
[j
].pfn
= page_to_pfn(page
);
393 pb
[j
].offset
= offset
;
399 if (offset
== PAGE_SIZE
&& len
) {
409 static u32
init_page_array(void *hdr
, u32 len
, struct sk_buff
*skb
,
410 struct hv_netvsc_packet
*packet
,
411 struct hv_page_buffer
*pb
)
414 char *data
= skb
->data
;
415 int frags
= skb_shinfo(skb
)->nr_frags
;
418 /* The packet is laid out thus:
419 * 1. hdr: RNDIS header and PPI
421 * 3. skb fragment data
423 slots_used
+= fill_pg_buf(virt_to_page(hdr
),
425 len
, &pb
[slots_used
]);
427 packet
->rmsg_size
= len
;
428 packet
->rmsg_pgcnt
= slots_used
;
430 slots_used
+= fill_pg_buf(virt_to_page(data
),
431 offset_in_page(data
),
432 skb_headlen(skb
), &pb
[slots_used
]);
434 for (i
= 0; i
< frags
; i
++) {
435 skb_frag_t
*frag
= skb_shinfo(skb
)->frags
+ i
;
437 slots_used
+= fill_pg_buf(skb_frag_page(frag
),
439 skb_frag_size(frag
), &pb
[slots_used
]);
444 static int count_skb_frag_slots(struct sk_buff
*skb
)
446 int i
, frags
= skb_shinfo(skb
)->nr_frags
;
449 for (i
= 0; i
< frags
; i
++) {
450 skb_frag_t
*frag
= skb_shinfo(skb
)->frags
+ i
;
451 unsigned long size
= skb_frag_size(frag
);
452 unsigned long offset
= frag
->page_offset
;
454 /* Skip unused frames from start of page */
455 offset
&= ~PAGE_MASK
;
456 pages
+= PFN_UP(offset
+ size
);
461 static int netvsc_get_slots(struct sk_buff
*skb
)
463 char *data
= skb
->data
;
464 unsigned int offset
= offset_in_page(data
);
465 unsigned int len
= skb_headlen(skb
);
469 slots
= DIV_ROUND_UP(offset
+ len
, PAGE_SIZE
);
470 frag_slots
= count_skb_frag_slots(skb
);
471 return slots
+ frag_slots
;
474 static u32
net_checksum_info(struct sk_buff
*skb
)
476 if (skb
->protocol
== htons(ETH_P_IP
)) {
477 struct iphdr
*ip
= ip_hdr(skb
);
479 if (ip
->protocol
== IPPROTO_TCP
)
480 return TRANSPORT_INFO_IPV4_TCP
;
481 else if (ip
->protocol
== IPPROTO_UDP
)
482 return TRANSPORT_INFO_IPV4_UDP
;
484 struct ipv6hdr
*ip6
= ipv6_hdr(skb
);
486 if (ip6
->nexthdr
== IPPROTO_TCP
)
487 return TRANSPORT_INFO_IPV6_TCP
;
488 else if (ip6
->nexthdr
== IPPROTO_UDP
)
489 return TRANSPORT_INFO_IPV6_UDP
;
492 return TRANSPORT_INFO_NOT_IP
;
495 /* Send skb on the slave VF device. */
496 static int netvsc_vf_xmit(struct net_device
*net
, struct net_device
*vf_netdev
,
499 struct net_device_context
*ndev_ctx
= netdev_priv(net
);
500 unsigned int len
= skb
->len
;
503 skb
->dev
= vf_netdev
;
504 skb
->queue_mapping
= qdisc_skb_cb(skb
)->slave_dev_queue_mapping
;
506 rc
= dev_queue_xmit(skb
);
507 if (likely(rc
== NET_XMIT_SUCCESS
|| rc
== NET_XMIT_CN
)) {
508 struct netvsc_vf_pcpu_stats
*pcpu_stats
509 = this_cpu_ptr(ndev_ctx
->vf_stats
);
511 u64_stats_update_begin(&pcpu_stats
->syncp
);
512 pcpu_stats
->tx_packets
++;
513 pcpu_stats
->tx_bytes
+= len
;
514 u64_stats_update_end(&pcpu_stats
->syncp
);
516 this_cpu_inc(ndev_ctx
->vf_stats
->tx_dropped
);
522 static int netvsc_start_xmit(struct sk_buff
*skb
, struct net_device
*net
)
524 struct net_device_context
*net_device_ctx
= netdev_priv(net
);
525 struct hv_netvsc_packet
*packet
= NULL
;
527 unsigned int num_data_pgs
;
528 struct rndis_message
*rndis_msg
;
529 struct net_device
*vf_netdev
;
532 struct hv_page_buffer pb
[MAX_PAGE_BUFFER_COUNT
];
534 /* if VF is present and up then redirect packets
535 * already called with rcu_read_lock_bh
537 vf_netdev
= rcu_dereference_bh(net_device_ctx
->vf_netdev
);
538 if (vf_netdev
&& netif_running(vf_netdev
) &&
539 !netpoll_tx_running(net
))
540 return netvsc_vf_xmit(net
, vf_netdev
, skb
);
542 /* We will atmost need two pages to describe the rndis
543 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
544 * of pages in a single packet. If skb is scattered around
545 * more pages we try linearizing it.
548 num_data_pgs
= netvsc_get_slots(skb
) + 2;
550 if (unlikely(num_data_pgs
> MAX_PAGE_BUFFER_COUNT
)) {
551 ++net_device_ctx
->eth_stats
.tx_scattered
;
553 if (skb_linearize(skb
))
556 num_data_pgs
= netvsc_get_slots(skb
) + 2;
557 if (num_data_pgs
> MAX_PAGE_BUFFER_COUNT
) {
558 ++net_device_ctx
->eth_stats
.tx_too_big
;
564 * Place the rndis header in the skb head room and
565 * the skb->cb will be used for hv_netvsc_packet
568 ret
= skb_cow_head(skb
, RNDIS_AND_PPI_SIZE
);
572 /* Use the skb control buffer for building up the packet */
573 BUILD_BUG_ON(sizeof(struct hv_netvsc_packet
) >
574 FIELD_SIZEOF(struct sk_buff
, cb
));
575 packet
= (struct hv_netvsc_packet
*)skb
->cb
;
577 packet
->q_idx
= skb_get_queue_mapping(skb
);
579 packet
->total_data_buflen
= skb
->len
;
580 packet
->total_bytes
= skb
->len
;
581 packet
->total_packets
= 1;
583 rndis_msg
= (struct rndis_message
*)skb
->head
;
585 /* Add the rndis header */
586 rndis_msg
->ndis_msg_type
= RNDIS_MSG_PACKET
;
587 rndis_msg
->msg_len
= packet
->total_data_buflen
;
589 rndis_msg
->msg
.pkt
= (struct rndis_packet
) {
590 .data_offset
= sizeof(struct rndis_packet
),
591 .data_len
= packet
->total_data_buflen
,
592 .per_pkt_info_offset
= sizeof(struct rndis_packet
),
595 rndis_msg_size
= RNDIS_MESSAGE_SIZE(struct rndis_packet
);
597 hash
= skb_get_hash_raw(skb
);
598 if (hash
!= 0 && net
->real_num_tx_queues
> 1) {
601 rndis_msg_size
+= NDIS_HASH_PPI_SIZE
;
602 hash_info
= init_ppi_data(rndis_msg
, NDIS_HASH_PPI_SIZE
,
607 if (skb_vlan_tag_present(skb
)) {
608 struct ndis_pkt_8021q_info
*vlan
;
610 rndis_msg_size
+= NDIS_VLAN_PPI_SIZE
;
611 vlan
= init_ppi_data(rndis_msg
, NDIS_VLAN_PPI_SIZE
,
615 vlan
->vlanid
= skb_vlan_tag_get_id(skb
);
616 vlan
->cfi
= skb_vlan_tag_get_cfi(skb
);
617 vlan
->pri
= skb_vlan_tag_get_prio(skb
);
620 if (skb_is_gso(skb
)) {
621 struct ndis_tcp_lso_info
*lso_info
;
623 rndis_msg_size
+= NDIS_LSO_PPI_SIZE
;
624 lso_info
= init_ppi_data(rndis_msg
, NDIS_LSO_PPI_SIZE
,
625 TCP_LARGESEND_PKTINFO
);
628 lso_info
->lso_v2_transmit
.type
= NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE
;
629 if (skb
->protocol
== htons(ETH_P_IP
)) {
630 lso_info
->lso_v2_transmit
.ip_version
=
631 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4
;
632 ip_hdr(skb
)->tot_len
= 0;
633 ip_hdr(skb
)->check
= 0;
634 tcp_hdr(skb
)->check
=
635 ~csum_tcpudp_magic(ip_hdr(skb
)->saddr
,
636 ip_hdr(skb
)->daddr
, 0, IPPROTO_TCP
, 0);
638 lso_info
->lso_v2_transmit
.ip_version
=
639 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6
;
640 ipv6_hdr(skb
)->payload_len
= 0;
641 tcp_hdr(skb
)->check
=
642 ~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
643 &ipv6_hdr(skb
)->daddr
, 0, IPPROTO_TCP
, 0);
645 lso_info
->lso_v2_transmit
.tcp_header_offset
= skb_transport_offset(skb
);
646 lso_info
->lso_v2_transmit
.mss
= skb_shinfo(skb
)->gso_size
;
647 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
648 if (net_checksum_info(skb
) & net_device_ctx
->tx_checksum_mask
) {
649 struct ndis_tcp_ip_checksum_info
*csum_info
;
651 rndis_msg_size
+= NDIS_CSUM_PPI_SIZE
;
652 csum_info
= init_ppi_data(rndis_msg
, NDIS_CSUM_PPI_SIZE
,
653 TCPIP_CHKSUM_PKTINFO
);
655 csum_info
->value
= 0;
656 csum_info
->transmit
.tcp_header_offset
= skb_transport_offset(skb
);
658 if (skb
->protocol
== htons(ETH_P_IP
)) {
659 csum_info
->transmit
.is_ipv4
= 1;
661 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
662 csum_info
->transmit
.tcp_checksum
= 1;
664 csum_info
->transmit
.udp_checksum
= 1;
666 csum_info
->transmit
.is_ipv6
= 1;
668 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
669 csum_info
->transmit
.tcp_checksum
= 1;
671 csum_info
->transmit
.udp_checksum
= 1;
674 /* Can't do offload of this type of checksum */
675 if (skb_checksum_help(skb
))
680 /* Start filling in the page buffers with the rndis hdr */
681 rndis_msg
->msg_len
+= rndis_msg_size
;
682 packet
->total_data_buflen
= rndis_msg
->msg_len
;
683 packet
->page_buf_cnt
= init_page_array(rndis_msg
, rndis_msg_size
,
686 /* timestamp packet in software */
687 skb_tx_timestamp(skb
);
689 ret
= netvsc_send(net
, packet
, rndis_msg
, pb
, skb
);
690 if (likely(ret
== 0))
693 if (ret
== -EAGAIN
) {
694 ++net_device_ctx
->eth_stats
.tx_busy
;
695 return NETDEV_TX_BUSY
;
699 ++net_device_ctx
->eth_stats
.tx_no_space
;
702 dev_kfree_skb_any(skb
);
703 net
->stats
.tx_dropped
++;
708 ++net_device_ctx
->eth_stats
.tx_no_memory
;
713 * netvsc_linkstatus_callback - Link up/down notification
715 void netvsc_linkstatus_callback(struct net_device
*net
,
716 struct rndis_message
*resp
)
718 struct rndis_indicate_status
*indicate
= &resp
->msg
.indicate_status
;
719 struct net_device_context
*ndev_ctx
= netdev_priv(net
);
720 struct netvsc_reconfig
*event
;
723 /* Update the physical link speed when changing to another vSwitch */
724 if (indicate
->status
== RNDIS_STATUS_LINK_SPEED_CHANGE
) {
727 speed
= *(u32
*)((void *)indicate
728 + indicate
->status_buf_offset
) / 10000;
729 ndev_ctx
->speed
= speed
;
733 /* Handle these link change statuses below */
734 if (indicate
->status
!= RNDIS_STATUS_NETWORK_CHANGE
&&
735 indicate
->status
!= RNDIS_STATUS_MEDIA_CONNECT
&&
736 indicate
->status
!= RNDIS_STATUS_MEDIA_DISCONNECT
)
739 if (net
->reg_state
!= NETREG_REGISTERED
)
742 event
= kzalloc(sizeof(*event
), GFP_ATOMIC
);
745 event
->event
= indicate
->status
;
747 spin_lock_irqsave(&ndev_ctx
->lock
, flags
);
748 list_add_tail(&event
->list
, &ndev_ctx
->reconfig_events
);
749 spin_unlock_irqrestore(&ndev_ctx
->lock
, flags
);
751 schedule_delayed_work(&ndev_ctx
->dwork
, 0);
754 static void netvsc_comp_ipcsum(struct sk_buff
*skb
)
756 struct iphdr
*iph
= (struct iphdr
*)skb
->data
;
759 iph
->check
= ip_fast_csum(iph
, iph
->ihl
);
762 static struct sk_buff
*netvsc_alloc_recv_skb(struct net_device
*net
,
763 struct netvsc_channel
*nvchan
)
765 struct napi_struct
*napi
= &nvchan
->napi
;
766 const struct ndis_pkt_8021q_info
*vlan
= nvchan
->rsc
.vlan
;
767 const struct ndis_tcp_ip_checksum_info
*csum_info
=
768 nvchan
->rsc
.csum_info
;
772 skb
= napi_alloc_skb(napi
, nvchan
->rsc
.pktlen
);
777 * Copy to skb. This copy is needed here since the memory pointed by
778 * hv_netvsc_packet cannot be deallocated
780 for (i
= 0; i
< nvchan
->rsc
.cnt
; i
++)
781 skb_put_data(skb
, nvchan
->rsc
.data
[i
], nvchan
->rsc
.len
[i
]);
783 skb
->protocol
= eth_type_trans(skb
, net
);
785 /* skb is already created with CHECKSUM_NONE */
786 skb_checksum_none_assert(skb
);
788 /* Incoming packets may have IP header checksum verified by the host.
789 * They may not have IP header checksum computed after coalescing.
790 * We compute it here if the flags are set, because on Linux, the IP
791 * checksum is always checked.
793 if (csum_info
&& csum_info
->receive
.ip_checksum_value_invalid
&&
794 csum_info
->receive
.ip_checksum_succeeded
&&
795 skb
->protocol
== htons(ETH_P_IP
))
796 netvsc_comp_ipcsum(skb
);
798 /* Do L4 checksum offload if enabled and present.
800 if (csum_info
&& (net
->features
& NETIF_F_RXCSUM
)) {
801 if (csum_info
->receive
.tcp_checksum_succeeded
||
802 csum_info
->receive
.udp_checksum_succeeded
)
803 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
807 u16 vlan_tci
= vlan
->vlanid
| (vlan
->pri
<< VLAN_PRIO_SHIFT
) |
808 (vlan
->cfi
? VLAN_CFI_MASK
: 0);
810 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
818 * netvsc_recv_callback - Callback when we receive a packet from the
819 * "wire" on the specified device.
821 int netvsc_recv_callback(struct net_device
*net
,
822 struct netvsc_device
*net_device
,
823 struct netvsc_channel
*nvchan
)
825 struct net_device_context
*net_device_ctx
= netdev_priv(net
);
826 struct vmbus_channel
*channel
= nvchan
->channel
;
827 u16 q_idx
= channel
->offermsg
.offer
.sub_channel_index
;
829 struct netvsc_stats
*rx_stats
;
831 if (net
->reg_state
!= NETREG_REGISTERED
)
832 return NVSP_STAT_FAIL
;
834 /* Allocate a skb - TODO direct I/O to pages? */
835 skb
= netvsc_alloc_recv_skb(net
, nvchan
);
837 if (unlikely(!skb
)) {
838 ++net_device_ctx
->eth_stats
.rx_no_memory
;
840 return NVSP_STAT_FAIL
;
843 skb_record_rx_queue(skb
, q_idx
);
846 * Even if injecting the packet, record the statistics
847 * on the synthetic device because modifying the VF device
848 * statistics will not work correctly.
850 rx_stats
= &nvchan
->rx_stats
;
851 u64_stats_update_begin(&rx_stats
->syncp
);
853 rx_stats
->bytes
+= nvchan
->rsc
.pktlen
;
855 if (skb
->pkt_type
== PACKET_BROADCAST
)
856 ++rx_stats
->broadcast
;
857 else if (skb
->pkt_type
== PACKET_MULTICAST
)
858 ++rx_stats
->multicast
;
859 u64_stats_update_end(&rx_stats
->syncp
);
861 napi_gro_receive(&nvchan
->napi
, skb
);
862 return NVSP_STAT_SUCCESS
;
865 static void netvsc_get_drvinfo(struct net_device
*net
,
866 struct ethtool_drvinfo
*info
)
868 strlcpy(info
->driver
, KBUILD_MODNAME
, sizeof(info
->driver
));
869 strlcpy(info
->fw_version
, "N/A", sizeof(info
->fw_version
));
872 static void netvsc_get_channels(struct net_device
*net
,
873 struct ethtool_channels
*channel
)
875 struct net_device_context
*net_device_ctx
= netdev_priv(net
);
876 struct netvsc_device
*nvdev
= rtnl_dereference(net_device_ctx
->nvdev
);
879 channel
->max_combined
= nvdev
->max_chn
;
880 channel
->combined_count
= nvdev
->num_chn
;
884 /* Alloc struct netvsc_device_info, and initialize it from either existing
885 * struct netvsc_device, or from default values.
887 static struct netvsc_device_info
*netvsc_devinfo_get
888 (struct netvsc_device
*nvdev
)
890 struct netvsc_device_info
*dev_info
;
892 dev_info
= kzalloc(sizeof(*dev_info
), GFP_ATOMIC
);
898 dev_info
->num_chn
= nvdev
->num_chn
;
899 dev_info
->send_sections
= nvdev
->send_section_cnt
;
900 dev_info
->send_section_size
= nvdev
->send_section_size
;
901 dev_info
->recv_sections
= nvdev
->recv_section_cnt
;
902 dev_info
->recv_section_size
= nvdev
->recv_section_size
;
904 memcpy(dev_info
->rss_key
, nvdev
->extension
->rss_key
,
907 dev_info
->num_chn
= VRSS_CHANNEL_DEFAULT
;
908 dev_info
->send_sections
= NETVSC_DEFAULT_TX
;
909 dev_info
->send_section_size
= NETVSC_SEND_SECTION_SIZE
;
910 dev_info
->recv_sections
= NETVSC_DEFAULT_RX
;
911 dev_info
->recv_section_size
= NETVSC_RECV_SECTION_SIZE
;
917 static int netvsc_detach(struct net_device
*ndev
,
918 struct netvsc_device
*nvdev
)
920 struct net_device_context
*ndev_ctx
= netdev_priv(ndev
);
921 struct hv_device
*hdev
= ndev_ctx
->device_ctx
;
924 /* Don't try continuing to try and setup sub channels */
925 if (cancel_work_sync(&nvdev
->subchan_work
))
928 /* If device was up (receiving) then shutdown */
929 if (netif_running(ndev
)) {
930 netvsc_tx_disable(nvdev
, ndev
);
932 ret
= rndis_filter_close(nvdev
);
935 "unable to close device (ret %d).\n", ret
);
939 ret
= netvsc_wait_until_empty(nvdev
);
942 "Ring buffer not empty after closing rndis\n");
947 netif_device_detach(ndev
);
949 rndis_filter_device_remove(hdev
, nvdev
);
954 static int netvsc_attach(struct net_device
*ndev
,
955 struct netvsc_device_info
*dev_info
)
957 struct net_device_context
*ndev_ctx
= netdev_priv(ndev
);
958 struct hv_device
*hdev
= ndev_ctx
->device_ctx
;
959 struct netvsc_device
*nvdev
;
960 struct rndis_device
*rdev
;
963 nvdev
= rndis_filter_device_add(hdev
, dev_info
);
965 return PTR_ERR(nvdev
);
967 if (nvdev
->num_chn
> 1) {
968 ret
= rndis_set_subchannel(ndev
, nvdev
, dev_info
);
970 /* if unavailable, just proceed with one queue */
977 /* In any case device is now ready */
978 netif_device_attach(ndev
);
980 /* Note: enable and attach happen when sub-channels setup */
981 netif_carrier_off(ndev
);
983 if (netif_running(ndev
)) {
984 ret
= rndis_filter_open(nvdev
);
988 rdev
= nvdev
->extension
;
989 if (!rdev
->link_state
)
990 netif_carrier_on(ndev
);
996 static int netvsc_set_channels(struct net_device
*net
,
997 struct ethtool_channels
*channels
)
999 struct net_device_context
*net_device_ctx
= netdev_priv(net
);
1000 struct netvsc_device
*nvdev
= rtnl_dereference(net_device_ctx
->nvdev
);
1001 unsigned int orig
, count
= channels
->combined_count
;
1002 struct netvsc_device_info
*device_info
;
1005 /* We do not support separate count for rx, tx, or other */
1007 channels
->rx_count
|| channels
->tx_count
|| channels
->other_count
)
1010 if (!nvdev
|| nvdev
->destroy
)
1013 if (nvdev
->nvsp_version
< NVSP_PROTOCOL_VERSION_5
)
1016 if (count
> nvdev
->max_chn
)
1019 orig
= nvdev
->num_chn
;
1021 device_info
= netvsc_devinfo_get(nvdev
);
1026 device_info
->num_chn
= count
;
1028 ret
= netvsc_detach(net
, nvdev
);
1032 ret
= netvsc_attach(net
, device_info
);
1034 device_info
->num_chn
= orig
;
1035 if (netvsc_attach(net
, device_info
))
1036 netdev_err(net
, "restoring channel setting failed\n");
1045 netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings
*cmd
)
1047 struct ethtool_link_ksettings diff1
= *cmd
;
1048 struct ethtool_link_ksettings diff2
= {};
1050 diff1
.base
.speed
= 0;
1051 diff1
.base
.duplex
= 0;
1052 /* advertising and cmd are usually set */
1053 ethtool_link_ksettings_zero_link_mode(&diff1
, advertising
);
1055 /* We set port to PORT_OTHER */
1056 diff2
.base
.port
= PORT_OTHER
;
1058 return !memcmp(&diff1
, &diff2
, sizeof(diff1
));
1061 static void netvsc_init_settings(struct net_device
*dev
)
1063 struct net_device_context
*ndc
= netdev_priv(dev
);
1065 ndc
->l4_hash
= HV_DEFAULT_L4HASH
;
1067 ndc
->speed
= SPEED_UNKNOWN
;
1068 ndc
->duplex
= DUPLEX_FULL
;
1070 dev
->features
= NETIF_F_LRO
;
1073 static int netvsc_get_link_ksettings(struct net_device
*dev
,
1074 struct ethtool_link_ksettings
*cmd
)
1076 struct net_device_context
*ndc
= netdev_priv(dev
);
1078 cmd
->base
.speed
= ndc
->speed
;
1079 cmd
->base
.duplex
= ndc
->duplex
;
1080 cmd
->base
.port
= PORT_OTHER
;
1085 static int netvsc_set_link_ksettings(struct net_device
*dev
,
1086 const struct ethtool_link_ksettings
*cmd
)
1088 struct net_device_context
*ndc
= netdev_priv(dev
);
1091 speed
= cmd
->base
.speed
;
1092 if (!ethtool_validate_speed(speed
) ||
1093 !ethtool_validate_duplex(cmd
->base
.duplex
) ||
1094 !netvsc_validate_ethtool_ss_cmd(cmd
))
1098 ndc
->duplex
= cmd
->base
.duplex
;
1103 static int netvsc_change_mtu(struct net_device
*ndev
, int mtu
)
1105 struct net_device_context
*ndevctx
= netdev_priv(ndev
);
1106 struct net_device
*vf_netdev
= rtnl_dereference(ndevctx
->vf_netdev
);
1107 struct netvsc_device
*nvdev
= rtnl_dereference(ndevctx
->nvdev
);
1108 int orig_mtu
= ndev
->mtu
;
1109 struct netvsc_device_info
*device_info
;
1112 if (!nvdev
|| nvdev
->destroy
)
1115 device_info
= netvsc_devinfo_get(nvdev
);
1120 /* Change MTU of underlying VF netdev first. */
1122 ret
= dev_set_mtu(vf_netdev
, mtu
);
1127 ret
= netvsc_detach(ndev
, nvdev
);
1133 ret
= netvsc_attach(ndev
, device_info
);
1137 /* Attempt rollback to original MTU */
1138 ndev
->mtu
= orig_mtu
;
1140 if (netvsc_attach(ndev
, device_info
))
1141 netdev_err(ndev
, "restoring mtu failed\n");
1144 dev_set_mtu(vf_netdev
, orig_mtu
);
1151 static void netvsc_get_vf_stats(struct net_device
*net
,
1152 struct netvsc_vf_pcpu_stats
*tot
)
1154 struct net_device_context
*ndev_ctx
= netdev_priv(net
);
1157 memset(tot
, 0, sizeof(*tot
));
1159 for_each_possible_cpu(i
) {
1160 const struct netvsc_vf_pcpu_stats
*stats
1161 = per_cpu_ptr(ndev_ctx
->vf_stats
, i
);
1162 u64 rx_packets
, rx_bytes
, tx_packets
, tx_bytes
;
1166 start
= u64_stats_fetch_begin_irq(&stats
->syncp
);
1167 rx_packets
= stats
->rx_packets
;
1168 tx_packets
= stats
->tx_packets
;
1169 rx_bytes
= stats
->rx_bytes
;
1170 tx_bytes
= stats
->tx_bytes
;
1171 } while (u64_stats_fetch_retry_irq(&stats
->syncp
, start
));
1173 tot
->rx_packets
+= rx_packets
;
1174 tot
->tx_packets
+= tx_packets
;
1175 tot
->rx_bytes
+= rx_bytes
;
1176 tot
->tx_bytes
+= tx_bytes
;
1177 tot
->tx_dropped
+= stats
->tx_dropped
;
1181 static void netvsc_get_pcpu_stats(struct net_device
*net
,
1182 struct netvsc_ethtool_pcpu_stats
*pcpu_tot
)
1184 struct net_device_context
*ndev_ctx
= netdev_priv(net
);
1185 struct netvsc_device
*nvdev
= rcu_dereference_rtnl(ndev_ctx
->nvdev
);
1188 /* fetch percpu stats of vf */
1189 for_each_possible_cpu(i
) {
1190 const struct netvsc_vf_pcpu_stats
*stats
=
1191 per_cpu_ptr(ndev_ctx
->vf_stats
, i
);
1192 struct netvsc_ethtool_pcpu_stats
*this_tot
= &pcpu_tot
[i
];
1196 start
= u64_stats_fetch_begin_irq(&stats
->syncp
);
1197 this_tot
->vf_rx_packets
= stats
->rx_packets
;
1198 this_tot
->vf_tx_packets
= stats
->tx_packets
;
1199 this_tot
->vf_rx_bytes
= stats
->rx_bytes
;
1200 this_tot
->vf_tx_bytes
= stats
->tx_bytes
;
1201 } while (u64_stats_fetch_retry_irq(&stats
->syncp
, start
));
1202 this_tot
->rx_packets
= this_tot
->vf_rx_packets
;
1203 this_tot
->tx_packets
= this_tot
->vf_tx_packets
;
1204 this_tot
->rx_bytes
= this_tot
->vf_rx_bytes
;
1205 this_tot
->tx_bytes
= this_tot
->vf_tx_bytes
;
1208 /* fetch percpu stats of netvsc */
1209 for (i
= 0; i
< nvdev
->num_chn
; i
++) {
1210 const struct netvsc_channel
*nvchan
= &nvdev
->chan_table
[i
];
1211 const struct netvsc_stats
*stats
;
1212 struct netvsc_ethtool_pcpu_stats
*this_tot
=
1213 &pcpu_tot
[nvchan
->channel
->target_cpu
];
1217 stats
= &nvchan
->tx_stats
;
1219 start
= u64_stats_fetch_begin_irq(&stats
->syncp
);
1220 packets
= stats
->packets
;
1221 bytes
= stats
->bytes
;
1222 } while (u64_stats_fetch_retry_irq(&stats
->syncp
, start
));
1224 this_tot
->tx_bytes
+= bytes
;
1225 this_tot
->tx_packets
+= packets
;
1227 stats
= &nvchan
->rx_stats
;
1229 start
= u64_stats_fetch_begin_irq(&stats
->syncp
);
1230 packets
= stats
->packets
;
1231 bytes
= stats
->bytes
;
1232 } while (u64_stats_fetch_retry_irq(&stats
->syncp
, start
));
1234 this_tot
->rx_bytes
+= bytes
;
1235 this_tot
->rx_packets
+= packets
;
1239 static void netvsc_get_stats64(struct net_device
*net
,
1240 struct rtnl_link_stats64
*t
)
1242 struct net_device_context
*ndev_ctx
= netdev_priv(net
);
1243 struct netvsc_device
*nvdev
= rcu_dereference_rtnl(ndev_ctx
->nvdev
);
1244 struct netvsc_vf_pcpu_stats vf_tot
;
1250 netdev_stats_to_stats64(t
, &net
->stats
);
1252 netvsc_get_vf_stats(net
, &vf_tot
);
1253 t
->rx_packets
+= vf_tot
.rx_packets
;
1254 t
->tx_packets
+= vf_tot
.tx_packets
;
1255 t
->rx_bytes
+= vf_tot
.rx_bytes
;
1256 t
->tx_bytes
+= vf_tot
.tx_bytes
;
1257 t
->tx_dropped
+= vf_tot
.tx_dropped
;
1259 for (i
= 0; i
< nvdev
->num_chn
; i
++) {
1260 const struct netvsc_channel
*nvchan
= &nvdev
->chan_table
[i
];
1261 const struct netvsc_stats
*stats
;
1262 u64 packets
, bytes
, multicast
;
1265 stats
= &nvchan
->tx_stats
;
1267 start
= u64_stats_fetch_begin_irq(&stats
->syncp
);
1268 packets
= stats
->packets
;
1269 bytes
= stats
->bytes
;
1270 } while (u64_stats_fetch_retry_irq(&stats
->syncp
, start
));
1272 t
->tx_bytes
+= bytes
;
1273 t
->tx_packets
+= packets
;
1275 stats
= &nvchan
->rx_stats
;
1277 start
= u64_stats_fetch_begin_irq(&stats
->syncp
);
1278 packets
= stats
->packets
;
1279 bytes
= stats
->bytes
;
1280 multicast
= stats
->multicast
+ stats
->broadcast
;
1281 } while (u64_stats_fetch_retry_irq(&stats
->syncp
, start
));
1283 t
->rx_bytes
+= bytes
;
1284 t
->rx_packets
+= packets
;
1285 t
->multicast
+= multicast
;
1289 static int netvsc_set_mac_addr(struct net_device
*ndev
, void *p
)
1291 struct net_device_context
*ndc
= netdev_priv(ndev
);
1292 struct net_device
*vf_netdev
= rtnl_dereference(ndc
->vf_netdev
);
1293 struct netvsc_device
*nvdev
= rtnl_dereference(ndc
->nvdev
);
1294 struct sockaddr
*addr
= p
;
1297 err
= eth_prepare_mac_addr_change(ndev
, p
);
1305 err
= dev_set_mac_address(vf_netdev
, addr
, NULL
);
1310 err
= rndis_filter_set_device_mac(nvdev
, addr
->sa_data
);
1312 eth_commit_mac_addr_change(ndev
, p
);
1313 } else if (vf_netdev
) {
1314 /* rollback change on VF */
1315 memcpy(addr
->sa_data
, ndev
->dev_addr
, ETH_ALEN
);
1316 dev_set_mac_address(vf_netdev
, addr
, NULL
);
1322 static const struct {
1323 char name
[ETH_GSTRING_LEN
];
1325 } netvsc_stats
[] = {
1326 { "tx_scattered", offsetof(struct netvsc_ethtool_stats
, tx_scattered
) },
1327 { "tx_no_memory", offsetof(struct netvsc_ethtool_stats
, tx_no_memory
) },
1328 { "tx_no_space", offsetof(struct netvsc_ethtool_stats
, tx_no_space
) },
1329 { "tx_too_big", offsetof(struct netvsc_ethtool_stats
, tx_too_big
) },
1330 { "tx_busy", offsetof(struct netvsc_ethtool_stats
, tx_busy
) },
1331 { "tx_send_full", offsetof(struct netvsc_ethtool_stats
, tx_send_full
) },
1332 { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats
, rx_comp_busy
) },
1333 { "rx_no_memory", offsetof(struct netvsc_ethtool_stats
, rx_no_memory
) },
1334 { "stop_queue", offsetof(struct netvsc_ethtool_stats
, stop_queue
) },
1335 { "wake_queue", offsetof(struct netvsc_ethtool_stats
, wake_queue
) },
1337 { "cpu%u_rx_packets",
1338 offsetof(struct netvsc_ethtool_pcpu_stats
, rx_packets
) },
1340 offsetof(struct netvsc_ethtool_pcpu_stats
, rx_bytes
) },
1341 { "cpu%u_tx_packets",
1342 offsetof(struct netvsc_ethtool_pcpu_stats
, tx_packets
) },
1344 offsetof(struct netvsc_ethtool_pcpu_stats
, tx_bytes
) },
1345 { "cpu%u_vf_rx_packets",
1346 offsetof(struct netvsc_ethtool_pcpu_stats
, vf_rx_packets
) },
1347 { "cpu%u_vf_rx_bytes",
1348 offsetof(struct netvsc_ethtool_pcpu_stats
, vf_rx_bytes
) },
1349 { "cpu%u_vf_tx_packets",
1350 offsetof(struct netvsc_ethtool_pcpu_stats
, vf_tx_packets
) },
1351 { "cpu%u_vf_tx_bytes",
1352 offsetof(struct netvsc_ethtool_pcpu_stats
, vf_tx_bytes
) },
1354 { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats
, rx_packets
) },
1355 { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats
, rx_bytes
) },
1356 { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats
, tx_packets
) },
1357 { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats
, tx_bytes
) },
1358 { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats
, tx_dropped
) },
#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
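/* Worked example (illustrative only): with 8 present CPUs and a 4-channel
 * device, netvsc_get_sset_count() reports NETVSC_GLOBAL_STATS_LEN +
 * NETVSC_VF_STATS_LEN + 4 * 4 per-queue counters + 8 * ARRAY_SIZE(pcpu_stats)
 * per-cpu counters for ETH_SS_STATS.
 */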
1370 static int netvsc_get_sset_count(struct net_device
*dev
, int string_set
)
1372 struct net_device_context
*ndc
= netdev_priv(dev
);
1373 struct netvsc_device
*nvdev
= rtnl_dereference(ndc
->nvdev
);
1378 switch (string_set
) {
1380 return NETVSC_GLOBAL_STATS_LEN
1381 + NETVSC_VF_STATS_LEN
1382 + NETVSC_QUEUE_STATS_LEN(nvdev
)
1383 + NETVSC_PCPU_STATS_LEN
;
1389 static void netvsc_get_ethtool_stats(struct net_device
*dev
,
1390 struct ethtool_stats
*stats
, u64
*data
)
1392 struct net_device_context
*ndc
= netdev_priv(dev
);
1393 struct netvsc_device
*nvdev
= rtnl_dereference(ndc
->nvdev
);
1394 const void *nds
= &ndc
->eth_stats
;
1395 const struct netvsc_stats
*qstats
;
1396 struct netvsc_vf_pcpu_stats sum
;
1397 struct netvsc_ethtool_pcpu_stats
*pcpu_sum
;
1405 for (i
= 0; i
< NETVSC_GLOBAL_STATS_LEN
; i
++)
1406 data
[i
] = *(unsigned long *)(nds
+ netvsc_stats
[i
].offset
);
1408 netvsc_get_vf_stats(dev
, &sum
);
1409 for (j
= 0; j
< NETVSC_VF_STATS_LEN
; j
++)
1410 data
[i
++] = *(u64
*)((void *)&sum
+ vf_stats
[j
].offset
);
1412 for (j
= 0; j
< nvdev
->num_chn
; j
++) {
1413 qstats
= &nvdev
->chan_table
[j
].tx_stats
;
1416 start
= u64_stats_fetch_begin_irq(&qstats
->syncp
);
1417 packets
= qstats
->packets
;
1418 bytes
= qstats
->bytes
;
1419 } while (u64_stats_fetch_retry_irq(&qstats
->syncp
, start
));
1420 data
[i
++] = packets
;
1423 qstats
= &nvdev
->chan_table
[j
].rx_stats
;
1425 start
= u64_stats_fetch_begin_irq(&qstats
->syncp
);
1426 packets
= qstats
->packets
;
1427 bytes
= qstats
->bytes
;
1428 } while (u64_stats_fetch_retry_irq(&qstats
->syncp
, start
));
1429 data
[i
++] = packets
;
1433 pcpu_sum
= kvmalloc_array(num_possible_cpus(),
1434 sizeof(struct netvsc_ethtool_pcpu_stats
),
1436 netvsc_get_pcpu_stats(dev
, pcpu_sum
);
1437 for_each_present_cpu(cpu
) {
1438 struct netvsc_ethtool_pcpu_stats
*this_sum
= &pcpu_sum
[cpu
];
1440 for (j
= 0; j
< ARRAY_SIZE(pcpu_stats
); j
++)
1441 data
[i
++] = *(u64
*)((void *)this_sum
1442 + pcpu_stats
[j
].offset
);
1447 static void netvsc_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
1449 struct net_device_context
*ndc
= netdev_priv(dev
);
1450 struct netvsc_device
*nvdev
= rtnl_dereference(ndc
->nvdev
);
1457 switch (stringset
) {
1459 for (i
= 0; i
< ARRAY_SIZE(netvsc_stats
); i
++) {
1460 memcpy(p
, netvsc_stats
[i
].name
, ETH_GSTRING_LEN
);
1461 p
+= ETH_GSTRING_LEN
;
1464 for (i
= 0; i
< ARRAY_SIZE(vf_stats
); i
++) {
1465 memcpy(p
, vf_stats
[i
].name
, ETH_GSTRING_LEN
);
1466 p
+= ETH_GSTRING_LEN
;
1469 for (i
= 0; i
< nvdev
->num_chn
; i
++) {
1470 sprintf(p
, "tx_queue_%u_packets", i
);
1471 p
+= ETH_GSTRING_LEN
;
1472 sprintf(p
, "tx_queue_%u_bytes", i
);
1473 p
+= ETH_GSTRING_LEN
;
1474 sprintf(p
, "rx_queue_%u_packets", i
);
1475 p
+= ETH_GSTRING_LEN
;
1476 sprintf(p
, "rx_queue_%u_bytes", i
);
1477 p
+= ETH_GSTRING_LEN
;
1480 for_each_present_cpu(cpu
) {
1481 for (i
= 0; i
< ARRAY_SIZE(pcpu_stats
); i
++) {
1482 sprintf(p
, pcpu_stats
[i
].name
, cpu
);
1483 p
+= ETH_GSTRING_LEN
;
1492 netvsc_get_rss_hash_opts(struct net_device_context
*ndc
,
1493 struct ethtool_rxnfc
*info
)
1495 const u32 l4_flag
= RXH_L4_B_0_1
| RXH_L4_B_2_3
;
1497 info
->data
= RXH_IP_SRC
| RXH_IP_DST
;
1499 switch (info
->flow_type
) {
1501 if (ndc
->l4_hash
& HV_TCP4_L4HASH
)
1502 info
->data
|= l4_flag
;
1507 if (ndc
->l4_hash
& HV_TCP6_L4HASH
)
1508 info
->data
|= l4_flag
;
1513 if (ndc
->l4_hash
& HV_UDP4_L4HASH
)
1514 info
->data
|= l4_flag
;
1519 if (ndc
->l4_hash
& HV_UDP6_L4HASH
)
1520 info
->data
|= l4_flag
;
1536 netvsc_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*info
,
1539 struct net_device_context
*ndc
= netdev_priv(dev
);
1540 struct netvsc_device
*nvdev
= rtnl_dereference(ndc
->nvdev
);
1545 switch (info
->cmd
) {
1546 case ETHTOOL_GRXRINGS
:
1547 info
->data
= nvdev
->num_chn
;
1551 return netvsc_get_rss_hash_opts(ndc
, info
);
1556 static int netvsc_set_rss_hash_opts(struct net_device_context
*ndc
,
1557 struct ethtool_rxnfc
*info
)
1559 if (info
->data
== (RXH_IP_SRC
| RXH_IP_DST
|
1560 RXH_L4_B_0_1
| RXH_L4_B_2_3
)) {
1561 switch (info
->flow_type
) {
1563 ndc
->l4_hash
|= HV_TCP4_L4HASH
;
1567 ndc
->l4_hash
|= HV_TCP6_L4HASH
;
1571 ndc
->l4_hash
|= HV_UDP4_L4HASH
;
1575 ndc
->l4_hash
|= HV_UDP6_L4HASH
;
1585 if (info
->data
== (RXH_IP_SRC
| RXH_IP_DST
)) {
1586 switch (info
->flow_type
) {
1588 ndc
->l4_hash
&= ~HV_TCP4_L4HASH
;
1592 ndc
->l4_hash
&= ~HV_TCP6_L4HASH
;
1596 ndc
->l4_hash
&= ~HV_UDP4_L4HASH
;
1600 ndc
->l4_hash
&= ~HV_UDP6_L4HASH
;
1614 netvsc_set_rxnfc(struct net_device
*ndev
, struct ethtool_rxnfc
*info
)
1616 struct net_device_context
*ndc
= netdev_priv(ndev
);
1618 if (info
->cmd
== ETHTOOL_SRXFH
)
1619 return netvsc_set_rss_hash_opts(ndc
, info
);
1624 static u32
netvsc_get_rxfh_key_size(struct net_device
*dev
)
1626 return NETVSC_HASH_KEYLEN
;
1629 static u32
netvsc_rss_indir_size(struct net_device
*dev
)
1634 static int netvsc_get_rxfh(struct net_device
*dev
, u32
*indir
, u8
*key
,
1637 struct net_device_context
*ndc
= netdev_priv(dev
);
1638 struct netvsc_device
*ndev
= rtnl_dereference(ndc
->nvdev
);
1639 struct rndis_device
*rndis_dev
;
1646 *hfunc
= ETH_RSS_HASH_TOP
; /* Toeplitz */
1648 rndis_dev
= ndev
->extension
;
1650 for (i
= 0; i
< ITAB_NUM
; i
++)
1651 indir
[i
] = rndis_dev
->rx_table
[i
];
1655 memcpy(key
, rndis_dev
->rss_key
, NETVSC_HASH_KEYLEN
);
1660 static int netvsc_set_rxfh(struct net_device
*dev
, const u32
*indir
,
1661 const u8
*key
, const u8 hfunc
)
1663 struct net_device_context
*ndc
= netdev_priv(dev
);
1664 struct netvsc_device
*ndev
= rtnl_dereference(ndc
->nvdev
);
1665 struct rndis_device
*rndis_dev
;
1671 if (hfunc
!= ETH_RSS_HASH_NO_CHANGE
&& hfunc
!= ETH_RSS_HASH_TOP
)
1674 rndis_dev
= ndev
->extension
;
1676 for (i
= 0; i
< ITAB_NUM
; i
++)
1677 if (indir
[i
] >= ndev
->num_chn
)
1680 for (i
= 0; i
< ITAB_NUM
; i
++)
1681 rndis_dev
->rx_table
[i
] = indir
[i
];
1688 key
= rndis_dev
->rss_key
;
1691 return rndis_filter_set_rss_param(rndis_dev
, key
);
1694 /* Hyper-V RNDIS protocol does not have ring in the HW sense.
1695 * It does have pre-allocated receive area which is divided into sections.
1697 static void __netvsc_get_ringparam(struct netvsc_device
*nvdev
,
1698 struct ethtool_ringparam
*ring
)
1702 ring
->rx_pending
= nvdev
->recv_section_cnt
;
1703 ring
->tx_pending
= nvdev
->send_section_cnt
;
1705 if (nvdev
->nvsp_version
<= NVSP_PROTOCOL_VERSION_2
)
1706 max_buf_size
= NETVSC_RECEIVE_BUFFER_SIZE_LEGACY
;
1708 max_buf_size
= NETVSC_RECEIVE_BUFFER_SIZE
;
1710 ring
->rx_max_pending
= max_buf_size
/ nvdev
->recv_section_size
;
1711 ring
->tx_max_pending
= NETVSC_SEND_BUFFER_SIZE
1712 / nvdev
->send_section_size
;
1715 static void netvsc_get_ringparam(struct net_device
*ndev
,
1716 struct ethtool_ringparam
*ring
)
1718 struct net_device_context
*ndevctx
= netdev_priv(ndev
);
1719 struct netvsc_device
*nvdev
= rtnl_dereference(ndevctx
->nvdev
);
1724 __netvsc_get_ringparam(nvdev
, ring
);
1727 static int netvsc_set_ringparam(struct net_device
*ndev
,
1728 struct ethtool_ringparam
*ring
)
1730 struct net_device_context
*ndevctx
= netdev_priv(ndev
);
1731 struct netvsc_device
*nvdev
= rtnl_dereference(ndevctx
->nvdev
);
1732 struct netvsc_device_info
*device_info
;
1733 struct ethtool_ringparam orig
;
1737 if (!nvdev
|| nvdev
->destroy
)
1740 memset(&orig
, 0, sizeof(orig
));
1741 __netvsc_get_ringparam(nvdev
, &orig
);
1743 new_tx
= clamp_t(u32
, ring
->tx_pending
,
1744 NETVSC_MIN_TX_SECTIONS
, orig
.tx_max_pending
);
1745 new_rx
= clamp_t(u32
, ring
->rx_pending
,
1746 NETVSC_MIN_RX_SECTIONS
, orig
.rx_max_pending
);
1748 if (new_tx
== orig
.tx_pending
&&
1749 new_rx
== orig
.rx_pending
)
1750 return 0; /* no change */
1752 device_info
= netvsc_devinfo_get(nvdev
);
1757 device_info
->send_sections
= new_tx
;
1758 device_info
->recv_sections
= new_rx
;
1760 ret
= netvsc_detach(ndev
, nvdev
);
1764 ret
= netvsc_attach(ndev
, device_info
);
1766 device_info
->send_sections
= orig
.tx_pending
;
1767 device_info
->recv_sections
= orig
.rx_pending
;
1769 if (netvsc_attach(ndev
, device_info
))
1770 netdev_err(ndev
, "restoring ringparam failed");
1778 static int netvsc_set_features(struct net_device
*ndev
,
1779 netdev_features_t features
)
1781 netdev_features_t change
= features
^ ndev
->features
;
1782 struct net_device_context
*ndevctx
= netdev_priv(ndev
);
1783 struct netvsc_device
*nvdev
= rtnl_dereference(ndevctx
->nvdev
);
1784 struct ndis_offload_params offloads
;
1786 if (!nvdev
|| nvdev
->destroy
)
1789 if (!(change
& NETIF_F_LRO
))
1792 memset(&offloads
, 0, sizeof(struct ndis_offload_params
));
1794 if (features
& NETIF_F_LRO
) {
1795 offloads
.rsc_ip_v4
= NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED
;
1796 offloads
.rsc_ip_v6
= NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED
;
1798 offloads
.rsc_ip_v4
= NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED
;
1799 offloads
.rsc_ip_v6
= NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED
;
1802 return rndis_filter_set_offload_params(ndev
, nvdev
, &offloads
);
1805 static u32
netvsc_get_msglevel(struct net_device
*ndev
)
1807 struct net_device_context
*ndev_ctx
= netdev_priv(ndev
);
1809 return ndev_ctx
->msg_enable
;
1812 static void netvsc_set_msglevel(struct net_device
*ndev
, u32 val
)
1814 struct net_device_context
*ndev_ctx
= netdev_priv(ndev
);
1816 ndev_ctx
->msg_enable
= val
;
1819 static const struct ethtool_ops ethtool_ops
= {
1820 .get_drvinfo
= netvsc_get_drvinfo
,
1821 .get_msglevel
= netvsc_get_msglevel
,
1822 .set_msglevel
= netvsc_set_msglevel
,
1823 .get_link
= ethtool_op_get_link
,
1824 .get_ethtool_stats
= netvsc_get_ethtool_stats
,
1825 .get_sset_count
= netvsc_get_sset_count
,
1826 .get_strings
= netvsc_get_strings
,
1827 .get_channels
= netvsc_get_channels
,
1828 .set_channels
= netvsc_set_channels
,
1829 .get_ts_info
= ethtool_op_get_ts_info
,
1830 .get_rxnfc
= netvsc_get_rxnfc
,
1831 .set_rxnfc
= netvsc_set_rxnfc
,
1832 .get_rxfh_key_size
= netvsc_get_rxfh_key_size
,
1833 .get_rxfh_indir_size
= netvsc_rss_indir_size
,
1834 .get_rxfh
= netvsc_get_rxfh
,
1835 .set_rxfh
= netvsc_set_rxfh
,
1836 .get_link_ksettings
= netvsc_get_link_ksettings
,
1837 .set_link_ksettings
= netvsc_set_link_ksettings
,
1838 .get_ringparam
= netvsc_get_ringparam
,
1839 .set_ringparam
= netvsc_set_ringparam
,
1842 static const struct net_device_ops device_ops
= {
1843 .ndo_open
= netvsc_open
,
1844 .ndo_stop
= netvsc_close
,
1845 .ndo_start_xmit
= netvsc_start_xmit
,
1846 .ndo_change_rx_flags
= netvsc_change_rx_flags
,
1847 .ndo_set_rx_mode
= netvsc_set_rx_mode
,
1848 .ndo_set_features
= netvsc_set_features
,
1849 .ndo_change_mtu
= netvsc_change_mtu
,
1850 .ndo_validate_addr
= eth_validate_addr
,
1851 .ndo_set_mac_address
= netvsc_set_mac_addr
,
1852 .ndo_select_queue
= netvsc_select_queue
,
1853 .ndo_get_stats64
= netvsc_get_stats64
,
1857 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
1858 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
1859 * present send GARP packet to network peers with netif_notify_peers().
1861 static void netvsc_link_change(struct work_struct
*w
)
1863 struct net_device_context
*ndev_ctx
=
1864 container_of(w
, struct net_device_context
, dwork
.work
);
1865 struct hv_device
*device_obj
= ndev_ctx
->device_ctx
;
1866 struct net_device
*net
= hv_get_drvdata(device_obj
);
1867 struct netvsc_device
*net_device
;
1868 struct rndis_device
*rdev
;
1869 struct netvsc_reconfig
*event
= NULL
;
1870 bool notify
= false, reschedule
= false;
1871 unsigned long flags
, next_reconfig
, delay
;
1873 /* if changes are happening, comeback later */
1874 if (!rtnl_trylock()) {
1875 schedule_delayed_work(&ndev_ctx
->dwork
, LINKCHANGE_INT
);
1879 net_device
= rtnl_dereference(ndev_ctx
->nvdev
);
1883 rdev
= net_device
->extension
;
1885 next_reconfig
= ndev_ctx
->last_reconfig
+ LINKCHANGE_INT
;
1886 if (time_is_after_jiffies(next_reconfig
)) {
1887 /* link_watch only sends one notification with current state
1888 * per second, avoid doing reconfig more frequently. Handle
1891 delay
= next_reconfig
- jiffies
;
1892 delay
= delay
< LINKCHANGE_INT
? delay
: LINKCHANGE_INT
;
1893 schedule_delayed_work(&ndev_ctx
->dwork
, delay
);
1896 ndev_ctx
->last_reconfig
= jiffies
;
1898 spin_lock_irqsave(&ndev_ctx
->lock
, flags
);
1899 if (!list_empty(&ndev_ctx
->reconfig_events
)) {
1900 event
= list_first_entry(&ndev_ctx
->reconfig_events
,
1901 struct netvsc_reconfig
, list
);
1902 list_del(&event
->list
);
1903 reschedule
= !list_empty(&ndev_ctx
->reconfig_events
);
1905 spin_unlock_irqrestore(&ndev_ctx
->lock
, flags
);
1910 switch (event
->event
) {
1911 /* Only the following events are possible due to the check in
1912 * netvsc_linkstatus_callback()
1914 case RNDIS_STATUS_MEDIA_CONNECT
:
1915 if (rdev
->link_state
) {
1916 rdev
->link_state
= false;
1917 netif_carrier_on(net
);
1918 netvsc_tx_enable(net_device
, net
);
1924 case RNDIS_STATUS_MEDIA_DISCONNECT
:
1925 if (!rdev
->link_state
) {
1926 rdev
->link_state
= true;
1927 netif_carrier_off(net
);
1928 netvsc_tx_disable(net_device
, net
);
1932 case RNDIS_STATUS_NETWORK_CHANGE
:
1933 /* Only makes sense if carrier is present */
1934 if (!rdev
->link_state
) {
1935 rdev
->link_state
= true;
1936 netif_carrier_off(net
);
1937 netvsc_tx_disable(net_device
, net
);
1938 event
->event
= RNDIS_STATUS_MEDIA_CONNECT
;
1939 spin_lock_irqsave(&ndev_ctx
->lock
, flags
);
1940 list_add(&event
->list
, &ndev_ctx
->reconfig_events
);
1941 spin_unlock_irqrestore(&ndev_ctx
->lock
, flags
);
1950 netdev_notify_peers(net
);
1952 /* link_watch only sends one notification with current state per
1953 * second, handle next reconfig event in 2 seconds.
1956 schedule_delayed_work(&ndev_ctx
->dwork
, LINKCHANGE_INT
);
1964 static struct net_device
*get_netvsc_byref(struct net_device
*vf_netdev
)
1966 struct net_device_context
*net_device_ctx
;
1967 struct net_device
*dev
;
1969 dev
= netdev_master_upper_dev_get(vf_netdev
);
1970 if (!dev
|| dev
->netdev_ops
!= &device_ops
)
1971 return NULL
; /* not a netvsc device */
1973 net_device_ctx
= netdev_priv(dev
);
1974 if (!rtnl_dereference(net_device_ctx
->nvdev
))
1975 return NULL
; /* device is removed */
1980 /* Called when VF is injecting data into network stack.
1981 * Change the associated network device from VF to netvsc.
1982 * note: already called with rcu_read_lock
1984 static rx_handler_result_t
netvsc_vf_handle_frame(struct sk_buff
**pskb
)
1986 struct sk_buff
*skb
= *pskb
;
1987 struct net_device
*ndev
= rcu_dereference(skb
->dev
->rx_handler_data
);
1988 struct net_device_context
*ndev_ctx
= netdev_priv(ndev
);
1989 struct netvsc_vf_pcpu_stats
*pcpu_stats
1990 = this_cpu_ptr(ndev_ctx
->vf_stats
);
1992 skb
= skb_share_check(skb
, GFP_ATOMIC
);
1994 return RX_HANDLER_CONSUMED
;
2000 u64_stats_update_begin(&pcpu_stats
->syncp
);
2001 pcpu_stats
->rx_packets
++;
2002 pcpu_stats
->rx_bytes
+= skb
->len
;
2003 u64_stats_update_end(&pcpu_stats
->syncp
);
2005 return RX_HANDLER_ANOTHER
;
2008 static int netvsc_vf_join(struct net_device
*vf_netdev
,
2009 struct net_device
*ndev
)
2011 struct net_device_context
*ndev_ctx
= netdev_priv(ndev
);
2014 ret
= netdev_rx_handler_register(vf_netdev
,
2015 netvsc_vf_handle_frame
, ndev
);
2017 netdev_err(vf_netdev
,
2018 "can not register netvsc VF receive handler (err = %d)\n",
2020 goto rx_handler_failed
;
2023 ret
= netdev_master_upper_dev_link(vf_netdev
, ndev
,
2026 netdev_err(vf_netdev
,
2027 "can not set master device %s (err = %d)\n",
2029 goto upper_link_failed
;
2032 /* set slave flag before open to prevent IPv6 addrconf */
2033 vf_netdev
->flags
|= IFF_SLAVE
;
2035 schedule_delayed_work(&ndev_ctx
->vf_takeover
, VF_TAKEOVER_INT
);
2037 call_netdevice_notifiers(NETDEV_JOIN
, vf_netdev
);
2039 netdev_info(vf_netdev
, "joined to %s\n", ndev
->name
);
2043 netdev_rx_handler_unregister(vf_netdev
);
2048 static void __netvsc_vf_setup(struct net_device
*ndev
,
2049 struct net_device
*vf_netdev
)
2053 /* Align MTU of VF with master */
2054 ret
= dev_set_mtu(vf_netdev
, ndev
->mtu
);
2056 netdev_warn(vf_netdev
,
2057 "unable to change mtu to %u\n", ndev
->mtu
);
2059 /* set multicast etc flags on VF */
2060 dev_change_flags(vf_netdev
, ndev
->flags
| IFF_SLAVE
, NULL
);
2062 /* sync address list from ndev to VF */
2063 netif_addr_lock_bh(ndev
);
2064 dev_uc_sync(vf_netdev
, ndev
);
2065 dev_mc_sync(vf_netdev
, ndev
);
2066 netif_addr_unlock_bh(ndev
);
2068 if (netif_running(ndev
)) {
2069 ret
= dev_open(vf_netdev
, NULL
);
2071 netdev_warn(vf_netdev
,
2072 "unable to open: %d\n", ret
);
2076 /* Setup VF as slave of the synthetic device.
2077 * Runs in workqueue to avoid recursion in netlink callbacks.
2079 static void netvsc_vf_setup(struct work_struct
*w
)
2081 struct net_device_context
*ndev_ctx
2082 = container_of(w
, struct net_device_context
, vf_takeover
.work
);
2083 struct net_device
*ndev
= hv_get_drvdata(ndev_ctx
->device_ctx
);
2084 struct net_device
*vf_netdev
;
2086 if (!rtnl_trylock()) {
2087 schedule_delayed_work(&ndev_ctx
->vf_takeover
, 0);
2091 vf_netdev
= rtnl_dereference(ndev_ctx
->vf_netdev
);
2093 __netvsc_vf_setup(ndev
, vf_netdev
);
2098 /* Find netvsc by VF serial number.
2099 * The PCI hyperv controller records the serial number as the slot kobj name.
2101 static struct net_device
*get_netvsc_byslot(const struct net_device
*vf_netdev
)
2103 struct device
*parent
= vf_netdev
->dev
.parent
;
2104 struct net_device_context
*ndev_ctx
;
2105 struct pci_dev
*pdev
;
2108 if (!parent
|| !dev_is_pci(parent
))
2109 return NULL
; /* not a PCI device */
2111 pdev
= to_pci_dev(parent
);
2113 netdev_notice(vf_netdev
, "no PCI slot information\n");
2117 if (kstrtou32(pci_slot_name(pdev
->slot
), 10, &serial
)) {
2118 netdev_notice(vf_netdev
, "Invalid vf serial:%s\n",
2119 pci_slot_name(pdev
->slot
));
2123 list_for_each_entry(ndev_ctx
, &netvsc_dev_list
, list
) {
2124 if (!ndev_ctx
->vf_alloc
)
2127 if (ndev_ctx
->vf_serial
== serial
)
2128 return hv_get_drvdata(ndev_ctx
->device_ctx
);
2131 netdev_notice(vf_netdev
,
2132 "no netdev found for vf serial:%u\n", serial
);
2136 static int netvsc_register_vf(struct net_device
*vf_netdev
)
2138 struct net_device_context
*net_device_ctx
;
2139 struct netvsc_device
*netvsc_dev
;
2140 struct net_device
*ndev
;
2143 if (vf_netdev
->addr_len
!= ETH_ALEN
)
2146 ndev
= get_netvsc_byslot(vf_netdev
);
2150 net_device_ctx
= netdev_priv(ndev
);
2151 netvsc_dev
= rtnl_dereference(net_device_ctx
->nvdev
);
2152 if (!netvsc_dev
|| rtnl_dereference(net_device_ctx
->vf_netdev
))
2155 /* if synthetic interface is a different namespace,
2156 * then move the VF to that namespace; join will be
2157 * done again in that context.
2159 if (!net_eq(dev_net(ndev
), dev_net(vf_netdev
))) {
2160 ret
= dev_change_net_namespace(vf_netdev
,
2161 dev_net(ndev
), "eth%d");
2163 netdev_err(vf_netdev
,
2164 "could not move to same namespace as %s: %d\n",
2167 netdev_info(vf_netdev
,
2168 "VF moved to namespace with: %s\n",
2173 netdev_info(ndev
, "VF registering: %s\n", vf_netdev
->name
);
2175 if (netvsc_vf_join(vf_netdev
, ndev
) != 0)
2178 dev_hold(vf_netdev
);
2179 rcu_assign_pointer(net_device_ctx
->vf_netdev
, vf_netdev
);
2183 /* VF up/down change detected, schedule to change data path */
2184 static int netvsc_vf_changed(struct net_device
*vf_netdev
)
2186 struct net_device_context
*net_device_ctx
;
2187 struct netvsc_device
*netvsc_dev
;
2188 struct net_device
*ndev
;
2189 bool vf_is_up
= netif_running(vf_netdev
);
2191 ndev
= get_netvsc_byref(vf_netdev
);
2195 net_device_ctx
= netdev_priv(ndev
);
2196 netvsc_dev
= rtnl_dereference(net_device_ctx
->nvdev
);
2200 netvsc_switch_datapath(ndev
, vf_is_up
);
2201 netdev_info(ndev
, "Data path switched %s VF: %s\n",
2202 vf_is_up
? "to" : "from", vf_netdev
->name
);
2207 static int netvsc_unregister_vf(struct net_device
*vf_netdev
)
2209 struct net_device
*ndev
;
2210 struct net_device_context
*net_device_ctx
;
2212 ndev
= get_netvsc_byref(vf_netdev
);
2216 net_device_ctx
= netdev_priv(ndev
);
2217 cancel_delayed_work_sync(&net_device_ctx
->vf_takeover
);
2219 netdev_info(ndev
, "VF unregistering: %s\n", vf_netdev
->name
);
2221 netdev_rx_handler_unregister(vf_netdev
);
2222 netdev_upper_dev_unlink(vf_netdev
, ndev
);
2223 RCU_INIT_POINTER(net_device_ctx
->vf_netdev
, NULL
);
2229 static int netvsc_probe(struct hv_device
*dev
,
2230 const struct hv_vmbus_device_id
*dev_id
)
2232 struct net_device
*net
= NULL
;
2233 struct net_device_context
*net_device_ctx
;
2234 struct netvsc_device_info
*device_info
= NULL
;
2235 struct netvsc_device
*nvdev
;
2238 net
= alloc_etherdev_mq(sizeof(struct net_device_context
),
2243 netif_carrier_off(net
);
2245 netvsc_init_settings(net
);
2247 net_device_ctx
= netdev_priv(net
);
2248 net_device_ctx
->device_ctx
= dev
;
2249 net_device_ctx
->msg_enable
= netif_msg_init(debug
, default_msg
);
2250 if (netif_msg_probe(net_device_ctx
))
2251 netdev_dbg(net
, "netvsc msg_enable: %d\n",
2252 net_device_ctx
->msg_enable
);
2254 hv_set_drvdata(dev
, net
);
2256 INIT_DELAYED_WORK(&net_device_ctx
->dwork
, netvsc_link_change
);
2258 spin_lock_init(&net_device_ctx
->lock
);
2259 INIT_LIST_HEAD(&net_device_ctx
->reconfig_events
);
2260 INIT_DELAYED_WORK(&net_device_ctx
->vf_takeover
, netvsc_vf_setup
);
2262 net_device_ctx
->vf_stats
2263 = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats
);
2264 if (!net_device_ctx
->vf_stats
)
2267 net
->netdev_ops
= &device_ops
;
2268 net
->ethtool_ops
= ðtool_ops
;
2269 SET_NETDEV_DEV(net
, &dev
->device
);
2271 /* We always need headroom for rndis header */
2272 net
->needed_headroom
= RNDIS_AND_PPI_SIZE
;
2274 /* Initialize the number of queues to be 1, we may change it if more
2275 * channels are offered later.
2277 netif_set_real_num_tx_queues(net
, 1);
2278 netif_set_real_num_rx_queues(net
, 1);
2280 /* Notify the netvsc driver of the new device */
2281 device_info
= netvsc_devinfo_get(NULL
);
2285 goto devinfo_failed
;
2288 nvdev
= rndis_filter_device_add(dev
, device_info
);
2289 if (IS_ERR(nvdev
)) {
2290 ret
= PTR_ERR(nvdev
);
2291 netdev_err(net
, "unable to add netvsc device (ret %d)\n", ret
);
2295 memcpy(net
->dev_addr
, device_info
->mac_adr
, ETH_ALEN
);
2297 /* We must get rtnl lock before scheduling nvdev->subchan_work,
2298 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
2299 * all subchannels to show up, but that may not happen because
2300 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
2301 * -> ... -> device_add() -> ... -> __device_attach() can't get
2302 * the device lock, so all the subchannels can't be processed --
2303 * finally netvsc_subchan_work() hangs forever.
2307 if (nvdev
->num_chn
> 1)
2308 schedule_work(&nvdev
->subchan_work
);
2310 /* hw_features computed in rndis_netdev_set_hwcaps() */
2311 net
->features
= net
->hw_features
|
2312 NETIF_F_HIGHDMA
| NETIF_F_SG
|
2313 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
;
2314 net
->vlan_features
= net
->features
;
2316 netdev_lockdep_set_classes(net
);
2318 /* MTU range: 68 - 1500 or 65521 */
2319 net
->min_mtu
= NETVSC_MTU_MIN
;
2320 if (nvdev
->nvsp_version
>= NVSP_PROTOCOL_VERSION_2
)
2321 net
->max_mtu
= NETVSC_MTU
- ETH_HLEN
;
2323 net
->max_mtu
= ETH_DATA_LEN
;
2325 ret
= register_netdevice(net
);
2327 pr_err("Unable to register netdev.\n");
2328 goto register_failed
;
2331 list_add(&net_device_ctx
->list
, &netvsc_dev_list
);
2339 rndis_filter_device_remove(dev
, nvdev
);
2343 free_percpu(net_device_ctx
->vf_stats
);
2345 hv_set_drvdata(dev
, NULL
);
2351 static int netvsc_remove(struct hv_device
*dev
)
2353 struct net_device_context
*ndev_ctx
;
2354 struct net_device
*vf_netdev
, *net
;
2355 struct netvsc_device
*nvdev
;
2357 net
= hv_get_drvdata(dev
);
2359 dev_err(&dev
->device
, "No net device to remove\n");
2363 ndev_ctx
= netdev_priv(net
);
2365 cancel_delayed_work_sync(&ndev_ctx
->dwork
);
2368 nvdev
= rtnl_dereference(ndev_ctx
->nvdev
);
2370 cancel_work_sync(&nvdev
->subchan_work
);
2373 * Call to the vsc driver to let it know that the device is being
2374 * removed. Also blocks mtu and channel changes.
2376 vf_netdev
= rtnl_dereference(ndev_ctx
->vf_netdev
);
2378 netvsc_unregister_vf(vf_netdev
);
2381 rndis_filter_device_remove(dev
, nvdev
);
2383 unregister_netdevice(net
);
2384 list_del(&ndev_ctx
->list
);
2388 hv_set_drvdata(dev
, NULL
);
2390 free_percpu(ndev_ctx
->vf_stats
);
2395 static const struct hv_vmbus_device_id id_table
[] = {
2401 MODULE_DEVICE_TABLE(vmbus
, id_table
);
2403 /* The one and only one */
2404 static struct hv_driver netvsc_drv
= {
2405 .name
= KBUILD_MODNAME
,
2406 .id_table
= id_table
,
2407 .probe
= netvsc_probe
,
2408 .remove
= netvsc_remove
,
2410 .probe_type
= PROBE_FORCE_SYNCHRONOUS
,
2415 * On Hyper-V, every VF interface is matched with a corresponding
2416 * synthetic interface. The synthetic interface is presented first
2417 * to the guest. When the corresponding VF instance is registered,
2418 * we will take care of switching the data path.
2420 static int netvsc_netdev_event(struct notifier_block
*this,
2421 unsigned long event
, void *ptr
)
2423 struct net_device
*event_dev
= netdev_notifier_info_to_dev(ptr
);
2425 /* Skip our own events */
2426 if (event_dev
->netdev_ops
== &device_ops
)
2429 /* Avoid non-Ethernet type devices */
2430 if (event_dev
->type
!= ARPHRD_ETHER
)
2433 /* Avoid Vlan dev with same MAC registering as VF */
2434 if (is_vlan_dev(event_dev
))
2437 /* Avoid Bonding master dev with same MAC registering as VF */
2438 if ((event_dev
->priv_flags
& IFF_BONDING
) &&
2439 (event_dev
->flags
& IFF_MASTER
))
2443 case NETDEV_REGISTER
:
2444 return netvsc_register_vf(event_dev
);
2445 case NETDEV_UNREGISTER
:
2446 return netvsc_unregister_vf(event_dev
);
2449 return netvsc_vf_changed(event_dev
);
2455 static struct notifier_block netvsc_netdev_notifier
= {
2456 .notifier_call
= netvsc_netdev_event
,
static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %u (min allowed)\n",
			ring_size);
	}
	netvsc_ring_bytes = ring_size * PAGE_SIZE;

	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);