/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	trace_nvsp_send(ndev, init_pkt);

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}

/* Worker to setup sub channels on initial setup
 * Initial hotplug event occurs in softirq context
 * and can't wait for channels.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		schedule_work(w);
		return;
	}

	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fallback to only primary channel */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}

static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;

	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

	return net_device;
}

static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	kfree(nvdev->extension);
	vfree(nvdev->recv_buf);
	vfree(nvdev->send_buf);
	kfree(nvdev->send_section_map);

	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		vfree(nvdev->chan_table[i].mrc.slots);

	kfree(nvdev);
}

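/* Defer freeing until all RCU readers of the nvdev pointer have finished */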
static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}
}

static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}

static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}
}

static void netvsc_teardown_send_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
}

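/* Allocate the per-channel receive completion ring, preferring memory local
 * to the channel's target CPU and falling back to any node.
 */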
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}

static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	size_t map_words;
	int ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Setup receive completion ring */
	net_device->recv_completion_cnt
		= round_up(net_device->recv_section_cnt + 1,
			   PAGE_SIZE / sizeof(u64));
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
				v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	struct net_device *ndev = hv_get_drvdata(device);
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	/*
	 * Revoke receive buffer. If host is pre-Win2016 then tear down
	 * receive buffer GPADL. Do the same for send buffer.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/* And disassociate NAPI context from device */
	for (i = 0; i < net_device->num_chn; i++)
		netif_napi_del(&net_device->chan_table[i].napi);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/*
	 * If host is Win2016 or higher then we do the GPADL tear down
	 * here after VMBus is closed.
	 */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}

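/* Percentage of free ring buffer space used for transmit flow control: the
 * queue is stopped when free space falls below the low watermark and woken
 * again once it rises above the high watermark.
 */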
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

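/* Return a send buffer section to the free-section bitmap */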
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		if (netif_tx_queue_stopped(txq) &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}

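/* Handle a completion packet from the host: channel-init responses wake the
 * waiter in negotiate_nvsp_ver()/netvsc_init_buf(); RNDIS packet completions
 * release the send slot and update transmit statistics.
 */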
static void netvsc_send_completion(struct net_device *ndev,
				   struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
	case NVSP_MSG5_TYPE_SUBCHANNEL:
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
					desc, budget);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

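/* Find and atomically claim a free section in the send buffer bitmap */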
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}

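/* Copy the RNDIS message and its page fragments into the claimed send buffer
 * section, padding the tail so a batched follow-on packet starts aligned.
 */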
static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				    unsigned int section_index,
				    u32 pend_size,
				    struct hv_netvsc_packet *packet,
				    struct rndis_message *rndis_msg,
				    struct hv_page_buffer *pb,
				    bool xmit_more)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 padding = 0;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;
	u32 remain;

	/* Add padding */
	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
	if (xmit_more && remain) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
		u32 offset = pb[i].offset;
		u32 len = pb[i].len;

		memcpy(dest, (src + offset), len);
		dest += len;
	}

	if (padding)
		memset(dest, 0, padding);
}

static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet *rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
	else
		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	trace_nvsp_send_pkt(ndev, out_channel, rpkt);

	if (packet->page_buf_cnt) {
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
		if (atomic_read(&nvchan->queue_sends) < 1) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

/* RCU already held by caller */
int netvsc_send(struct net_device *ndev,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch, xmit_more;

	/* If device is rescinded, return error and packet will get dropped. */
	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send control message directly without accessing msd (Multi-Send
	 * Data) field which may be changed during data packet processing.
	 */
	if (!skb)
		return netvsc_send_pkt(device, packet, net_device, pb, skb);

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	/* Keep aggregating only if stack says more data is coming
	 * and not doing mixed modes send and not flow blocked
	 */
	xmit_more = skb->xmit_more &&
		!packet->cp_partial &&
		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, xmit_more);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

/* Send pending recv completions */
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	} __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret)) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	/* receive completion ring has been emptied */
	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}

/* Count how many receive completions are outstanding */
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
				 const struct multi_recv_comp *mrc,
				 u32 *filled, u32 *avail)
{
	u32 count = nvdev->recv_completion_cnt;

	if (mrc->next >= mrc->first)
		*filled = mrc->next - mrc->first;
	else
		*filled = (count - mrc->first) + mrc->next;

	*avail = count - *filled - 1;
}

/* Add receive complete to ring to send to host. */
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}

static int netvsc_receive(struct net_device *ndev,
			  struct netvsc_device *net_device,
			  struct netvsc_channel *nvchan,
			  const struct vmpacket_descriptor *desc,
			  const struct nvsp_message *nvsp)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = nvchan->channel;
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
		void *data;
		int ret;

		if (unlikely(offset + buflen > net_device->recv_buf_size)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet offset:%u + len:%u too big\n",
				  offset, buflen);

			continue;
		}

		data = recv_buf + offset;

		nvchan->rsc.is_last = (i == count - 1);

		trace_rndis_recv(ndev, q_idx, data);

		/* Pass it to the upper layer */
		ret = rndis_filter_receive(ndev, net_device,
					   nvchan, data, buflen);

		if (unlikely(ret != NVSP_STAT_SUCCESS))
			status = NVSP_STAT_FAIL;
	}

	enq_receive_complete(ndev, net_device, q_idx,
			     vmxferpage_packet->d.trans_id, status);

	return count;
}

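/* Update the transmit indirection table (hash -> queue) supplied by the host */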
static void netvsc_send_table(struct net_device *ndev,
			      const struct nvsp_message *nvmsg)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 count, *tab;
	int i;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}

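/* Record the VF association (slot and serial number) announced by the host */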
static void netvsc_send_vf(struct net_device *ndev,
			   const struct nvsp_message *nvmsg)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
	netdev_info(ndev, "VF slot %u %s\n",
		    net_device_ctx->vf_serial,
		    net_device_ctx->vf_alloc ? "added" : "removed");
}

static void netvsc_receive_inband(struct net_device *ndev,
				  const struct nvsp_message *nvmsg)
{
	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(ndev, nvmsg);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(ndev, nvmsg);
		break;
	}
}

static int netvsc_process_raw_pkt(struct hv_device *device,
				  struct netvsc_channel *nvchan,
				  struct netvsc_device *net_device,
				  struct net_device *ndev,
				  const struct vmpacket_descriptor *desc,
				  int budget)
{
	struct vmbus_channel *channel = nvchan->channel;
	const struct nvsp_message *nvmsg = hv_pkt_data(desc);

	trace_nvsp_recv(ndev, channel, nvmsg);

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(ndev, net_device, channel,
				       desc, budget);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, nvchan,
				      desc, nvmsg);

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(ndev, nvmsg);
		break;

	default:
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, desc->trans_id);
		break;
	}

	return 0;
}

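/* Both primary and sub-channels deliver packets here; map a channel back to
 * its hv_device (sub-channels point to the primary channel's device object).
 */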
static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
	struct vmbus_channel *primary = channel->primary_channel;

	return primary ? primary->device_obj : channel->device_obj;
}

/* Network processing softirq
 * Process data in incoming ring buffer from host
 * Stops when ring is empty or budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;
	int ret;

	/* If starting a new interval */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	/* Send any pending receive completions */
	ret = send_recv_completions(ndev, net_device, nvchan);

	/* If it did not exhaust NAPI budget this time
	 *  and not doing busy poll
	 * then re-enable host interrupts
	 *  and reschedule if ring is not empty
	 *  or sending receive completion failed.
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    (ret || hv_end_read(&channel->inbound)) &&
	    napi_schedule_prep(napi)) {
		hv_begin_read(&channel->inbound);
		__napi_schedule(napi);
	}

	/* Driver may overshoot since multiple packets per descriptor */
	return min(work_done, budget);
}

/* Call back when data is available in host ring buffer.
 * Processing is deferred until network softirq (NAPI)
 */
void netvsc_channel_cb(void *context)
{
	struct netvsc_channel *nvchan = context;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* preload first vmpacket descriptor */
	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	if (napi_schedule_prep(&nvchan->napi)) {
		/* disable interrupts from host */
		hv_begin_read(rbi);

		__napi_schedule_irqoff(&nvchan->napi);
	}
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
struct netvsc_device *netvsc_device_add(struct hv_device *device,
				const struct netvsc_device_info *device_info)
{
	int i, ret = 0;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		net_device_ctx->tx_table[i] = 0;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(device->channel, HV_CALL_ISR);

	/* If we're reopening the device we may have multiple queues, fill the
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 * Initialize the channel state before we open;
	 * we can be interrupted as soon as we open the channel.
	 */

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		nvchan->net_device = net_device;
		u64_stats_init(&nvchan->tx_stats.syncp);
		u64_stats_init(&nvchan->rx_stats.syncp);
	}

	/* Enable NAPI handler before init callbacks */
	netif_napi_add(ndev, &net_device->chan_table[0].napi,
		       netvsc_poll, NAPI_POLL_WEIGHT);

	/* Open the channel */
	ret = vmbus_open(device->channel, netvsc_ring_bytes,
			 netvsc_ring_bytes, NULL, 0,
			 netvsc_channel_cb, net_device->chan_table);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	napi_enable(&net_device->chan_table[0].napi);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device, net_device, device_info);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
	 * populated.
	 */
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	return net_device;

close:
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	netif_napi_del(&net_device->chan_table[0].napi);
	free_netvsc_device(&net_device->rcu);

	return ERR_PTR(ret);
}