2 * Copyright (c) 2009, Microsoft Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/wait.h>
27 #include <linux/delay.h>
29 #include <linux/slab.h>
33 #include "rndis_filter.h"
38 static const char *driver_name
= "netvsc";
40 /* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
41 static const struct hv_guid netvsc_device_type
= {
43 0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
44 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
49 static struct netvsc_device
*alloc_net_device(struct hv_device
*device
)
51 struct netvsc_device
*net_device
;
53 net_device
= kzalloc(sizeof(struct netvsc_device
), GFP_KERNEL
);
57 /* Set to 2 to allow both inbound and outbound traffic */
58 atomic_cmpxchg(&net_device
->refcnt
, 0, 2);
60 net_device
->dev
= device
;
61 device
->ext
= net_device
;
66 static void free_net_device(struct netvsc_device
*device
)
68 WARN_ON(atomic_read(&device
->refcnt
) != 0);
69 device
->dev
->ext
= NULL
;
74 /* Get the net device object iff exists and its refcount > 1 */
75 static struct netvsc_device
*get_outbound_net_device(struct hv_device
*device
)
77 struct netvsc_device
*net_device
;
79 net_device
= device
->ext
;
80 if (net_device
&& atomic_read(&net_device
->refcnt
) > 1)
81 atomic_inc(&net_device
->refcnt
);
88 /* Get the net device object iff exists and its refcount > 0 */
89 static struct netvsc_device
*get_inbound_net_device(struct hv_device
*device
)
91 struct netvsc_device
*net_device
;
93 net_device
= device
->ext
;
94 if (net_device
&& atomic_read(&net_device
->refcnt
))
95 atomic_inc(&net_device
->refcnt
);
102 static void put_net_device(struct hv_device
*device
)
104 struct netvsc_device
*net_device
;
106 net_device
= device
->ext
;
108 atomic_dec(&net_device
->refcnt
);
111 static struct netvsc_device
*release_outbound_net_device(
112 struct hv_device
*device
)
114 struct netvsc_device
*net_device
;
116 net_device
= device
->ext
;
117 if (net_device
== NULL
)
120 /* Busy wait until the ref drop to 2, then set it to 1 */
121 while (atomic_cmpxchg(&net_device
->refcnt
, 2, 1) != 2)
127 static struct netvsc_device
*release_inbound_net_device(
128 struct hv_device
*device
)
130 struct netvsc_device
*net_device
;
132 net_device
= device
->ext
;
133 if (net_device
== NULL
)
136 /* Busy wait until the ref drop to 1, then set it to 0 */
137 while (atomic_cmpxchg(&net_device
->refcnt
, 1, 0) != 1)
144 static int netvsc_destroy_recv_buf(struct netvsc_device
*net_device
)
146 struct nvsp_message
*revoke_packet
;
150 * If we got a section count, it means we received a
151 * SendReceiveBufferComplete msg (ie sent
152 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
153 * to send a revoke msg here
155 if (net_device
->recv_section_cnt
) {
156 /* Send the revoke receive buffer */
157 revoke_packet
= &net_device
->revoke_packet
;
158 memset(revoke_packet
, 0, sizeof(struct nvsp_message
));
160 revoke_packet
->hdr
.msg_type
=
161 NVSP_MSG1_TYPE_REVOKE_RECV_BUF
;
162 revoke_packet
->msg
.v1_msg
.
163 revoke_recv_buf
.id
= NETVSC_RECEIVE_BUFFER_ID
;
165 ret
= vmbus_sendpacket(net_device
->dev
->channel
,
167 sizeof(struct nvsp_message
),
168 (unsigned long)revoke_packet
,
169 VM_PKT_DATA_INBAND
, 0);
171 * If we failed here, we might as well return and
172 * have a leak rather than continue and a bugchk
175 dev_err(&net_device
->dev
->device
, "unable to send "
176 "revoke receive buffer to netvsp");
181 /* Teardown the gpadl on the vsp end */
182 if (net_device
->recv_buf_gpadl_handle
) {
183 ret
= vmbus_teardown_gpadl(net_device
->dev
->channel
,
184 net_device
->recv_buf_gpadl_handle
);
186 /* If we failed here, we might as well return and have a leak
187 * rather than continue and a bugchk
190 dev_err(&net_device
->dev
->device
,
191 "unable to teardown receive buffer's gpadl");
194 net_device
->recv_buf_gpadl_handle
= 0;
197 if (net_device
->recv_buf
) {
198 /* Free up the receive buffer */
199 free_pages((unsigned long)net_device
->recv_buf
,
200 get_order(net_device
->recv_buf_size
));
201 net_device
->recv_buf
= NULL
;
204 if (net_device
->recv_section
) {
205 net_device
->recv_section_cnt
= 0;
206 kfree(net_device
->recv_section
);
207 net_device
->recv_section
= NULL
;
213 static int netvsc_init_recv_buf(struct hv_device
*device
)
216 struct netvsc_device
*net_device
;
217 struct nvsp_message
*init_packet
;
219 net_device
= get_outbound_net_device(device
);
221 dev_err(&device
->device
, "unable to get net device..."
222 "device being destroyed?");
226 net_device
->recv_buf
=
227 (void *)__get_free_pages(GFP_KERNEL
|__GFP_ZERO
,
228 get_order(net_device
->recv_buf_size
));
229 if (!net_device
->recv_buf
) {
230 dev_err(&device
->device
, "unable to allocate receive "
231 "buffer of size %d", net_device
->recv_buf_size
);
237 * Establish the gpadl handle for this buffer on this
238 * channel. Note: This call uses the vmbus connection rather
239 * than the channel to establish the gpadl handle.
241 ret
= vmbus_establish_gpadl(device
->channel
, net_device
->recv_buf
,
242 net_device
->recv_buf_size
,
243 &net_device
->recv_buf_gpadl_handle
);
245 dev_err(&device
->device
,
246 "unable to establish receive buffer's gpadl");
251 /* Notify the NetVsp of the gpadl handle */
252 init_packet
= &net_device
->channel_init_pkt
;
254 memset(init_packet
, 0, sizeof(struct nvsp_message
));
256 init_packet
->hdr
.msg_type
= NVSP_MSG1_TYPE_SEND_RECV_BUF
;
257 init_packet
->msg
.v1_msg
.send_recv_buf
.
258 gpadl_handle
= net_device
->recv_buf_gpadl_handle
;
259 init_packet
->msg
.v1_msg
.
260 send_recv_buf
.id
= NETVSC_RECEIVE_BUFFER_ID
;
262 /* Send the gpadl notification request */
263 net_device
->wait_condition
= 0;
264 ret
= vmbus_sendpacket(device
->channel
, init_packet
,
265 sizeof(struct nvsp_message
),
266 (unsigned long)init_packet
,
268 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
270 dev_err(&device
->device
,
271 "unable to send receive buffer's gpadl to netvsp");
275 wait_event_timeout(net_device
->channel_init_wait
,
276 net_device
->wait_condition
,
277 msecs_to_jiffies(1000));
278 BUG_ON(net_device
->wait_condition
== 0);
281 /* Check the response */
282 if (init_packet
->msg
.v1_msg
.
283 send_recv_buf_complete
.status
!= NVSP_STAT_SUCCESS
) {
284 dev_err(&device
->device
, "Unable to complete receive buffer "
285 "initialzation with NetVsp - status %d",
286 init_packet
->msg
.v1_msg
.
287 send_recv_buf_complete
.status
);
292 /* Parse the response */
294 net_device
->recv_section_cnt
= init_packet
->msg
.
295 v1_msg
.send_recv_buf_complete
.num_sections
;
297 net_device
->recv_section
= kmalloc(net_device
->recv_section_cnt
298 * sizeof(struct nvsp_1_receive_buffer_section
), GFP_KERNEL
);
299 if (net_device
->recv_section
== NULL
) {
304 memcpy(net_device
->recv_section
,
305 init_packet
->msg
.v1_msg
.
306 send_recv_buf_complete
.sections
,
307 net_device
->recv_section_cnt
*
308 sizeof(struct nvsp_1_receive_buffer_section
));
311 * For 1st release, there should only be 1 section that represents the
312 * entire receive buffer
314 if (net_device
->recv_section_cnt
!= 1 ||
315 net_device
->recv_section
->offset
!= 0) {
323 netvsc_destroy_recv_buf(net_device
);
326 put_net_device(device
);
330 static int netvsc_destroy_send_buf(struct netvsc_device
*net_device
)
332 struct nvsp_message
*revoke_packet
;
336 * If we got a section count, it means we received a
337 * SendReceiveBufferComplete msg (ie sent
338 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
339 * to send a revoke msg here
341 if (net_device
->send_section_size
) {
342 /* Send the revoke send buffer */
343 revoke_packet
= &net_device
->revoke_packet
;
344 memset(revoke_packet
, 0, sizeof(struct nvsp_message
));
346 revoke_packet
->hdr
.msg_type
=
347 NVSP_MSG1_TYPE_REVOKE_SEND_BUF
;
348 revoke_packet
->msg
.v1_msg
.
349 revoke_send_buf
.id
= NETVSC_SEND_BUFFER_ID
;
351 ret
= vmbus_sendpacket(net_device
->dev
->channel
,
353 sizeof(struct nvsp_message
),
354 (unsigned long)revoke_packet
,
355 VM_PKT_DATA_INBAND
, 0);
357 * If we failed here, we might as well return and have a leak
358 * rather than continue and a bugchk
361 dev_err(&net_device
->dev
->device
, "unable to send "
362 "revoke send buffer to netvsp");
367 /* Teardown the gpadl on the vsp end */
368 if (net_device
->send_buf_gpadl_handle
) {
369 ret
= vmbus_teardown_gpadl(net_device
->dev
->channel
,
370 net_device
->send_buf_gpadl_handle
);
373 * If we failed here, we might as well return and have a leak
374 * rather than continue and a bugchk
377 dev_err(&net_device
->dev
->device
,
378 "unable to teardown send buffer's gpadl");
381 net_device
->send_buf_gpadl_handle
= 0;
384 if (net_device
->send_buf
) {
385 /* Free up the receive buffer */
386 free_pages((unsigned long)net_device
->send_buf
,
387 get_order(net_device
->send_buf_size
));
388 net_device
->send_buf
= NULL
;
394 static int netvsc_init_send_buf(struct hv_device
*device
)
397 struct netvsc_device
*net_device
;
398 struct nvsp_message
*init_packet
;
400 net_device
= get_outbound_net_device(device
);
402 dev_err(&device
->device
, "unable to get net device..."
403 "device being destroyed?");
406 if (net_device
->send_buf_size
<= 0) {
411 net_device
->send_buf
=
412 (void *)__get_free_pages(GFP_KERNEL
|__GFP_ZERO
,
413 get_order(net_device
->send_buf_size
));
414 if (!net_device
->send_buf
) {
415 dev_err(&device
->device
, "unable to allocate send "
416 "buffer of size %d", net_device
->send_buf_size
);
422 * Establish the gpadl handle for this buffer on this
423 * channel. Note: This call uses the vmbus connection rather
424 * than the channel to establish the gpadl handle.
426 ret
= vmbus_establish_gpadl(device
->channel
, net_device
->send_buf
,
427 net_device
->send_buf_size
,
428 &net_device
->send_buf_gpadl_handle
);
430 dev_err(&device
->device
, "unable to establish send buffer's gpadl");
434 /* Notify the NetVsp of the gpadl handle */
435 init_packet
= &net_device
->channel_init_pkt
;
437 memset(init_packet
, 0, sizeof(struct nvsp_message
));
439 init_packet
->hdr
.msg_type
= NVSP_MSG1_TYPE_SEND_SEND_BUF
;
440 init_packet
->msg
.v1_msg
.send_recv_buf
.
441 gpadl_handle
= net_device
->send_buf_gpadl_handle
;
442 init_packet
->msg
.v1_msg
.send_recv_buf
.id
=
443 NETVSC_SEND_BUFFER_ID
;
445 /* Send the gpadl notification request */
446 net_device
->wait_condition
= 0;
447 ret
= vmbus_sendpacket(device
->channel
, init_packet
,
448 sizeof(struct nvsp_message
),
449 (unsigned long)init_packet
,
451 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
453 dev_err(&device
->device
,
454 "unable to send receive buffer's gpadl to netvsp");
458 wait_event_timeout(net_device
->channel_init_wait
,
459 net_device
->wait_condition
,
460 msecs_to_jiffies(1000));
461 BUG_ON(net_device
->wait_condition
== 0);
463 /* Check the response */
464 if (init_packet
->msg
.v1_msg
.
465 send_send_buf_complete
.status
!= NVSP_STAT_SUCCESS
) {
466 dev_err(&device
->device
, "Unable to complete send buffer "
467 "initialzation with NetVsp - status %d",
468 init_packet
->msg
.v1_msg
.
469 send_send_buf_complete
.status
);
474 net_device
->send_section_size
= init_packet
->
475 msg
.v1_msg
.send_send_buf_complete
.section_size
;
480 netvsc_destroy_send_buf(net_device
);
483 put_net_device(device
);
488 static int netvsc_connect_vsp(struct hv_device
*device
)
491 struct netvsc_device
*net_device
;
492 struct nvsp_message
*init_packet
;
495 net_device
= get_outbound_net_device(device
);
497 dev_err(&device
->device
, "unable to get net device..."
498 "device being destroyed?");
502 init_packet
= &net_device
->channel_init_pkt
;
504 memset(init_packet
, 0, sizeof(struct nvsp_message
));
505 init_packet
->hdr
.msg_type
= NVSP_MSG_TYPE_INIT
;
506 init_packet
->msg
.init_msg
.init
.min_protocol_ver
=
507 NVSP_MIN_PROTOCOL_VERSION
;
508 init_packet
->msg
.init_msg
.init
.max_protocol_ver
=
509 NVSP_MAX_PROTOCOL_VERSION
;
511 /* Send the init request */
512 net_device
->wait_condition
= 0;
513 ret
= vmbus_sendpacket(device
->channel
, init_packet
,
514 sizeof(struct nvsp_message
),
515 (unsigned long)init_packet
,
517 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
522 wait_event_timeout(net_device
->channel_init_wait
,
523 net_device
->wait_condition
,
524 msecs_to_jiffies(1000));
525 if (net_device
->wait_condition
== 0) {
530 if (init_packet
->msg
.init_msg
.init_complete
.status
!=
536 if (init_packet
->msg
.init_msg
.init_complete
.
537 negotiated_protocol_ver
!= NVSP_PROTOCOL_VERSION_1
) {
541 /* Send the ndis version */
542 memset(init_packet
, 0, sizeof(struct nvsp_message
));
544 ndis_version
= 0x00050000;
546 init_packet
->hdr
.msg_type
= NVSP_MSG1_TYPE_SEND_NDIS_VER
;
547 init_packet
->msg
.v1_msg
.
548 send_ndis_ver
.ndis_major_ver
=
549 (ndis_version
& 0xFFFF0000) >> 16;
550 init_packet
->msg
.v1_msg
.
551 send_ndis_ver
.ndis_minor_ver
=
552 ndis_version
& 0xFFFF;
554 /* Send the init request */
555 ret
= vmbus_sendpacket(device
->channel
, init_packet
,
556 sizeof(struct nvsp_message
),
557 (unsigned long)init_packet
,
558 VM_PKT_DATA_INBAND
, 0);
564 /* Post the big receive buffer to NetVSP */
565 ret
= netvsc_init_recv_buf(device
);
567 ret
= netvsc_init_send_buf(device
);
570 put_net_device(device
);
/* Undo netvsc_connect_vsp(): release both shared buffers from the
 * NetVSP. */
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_recv_buf(net_device);
	netvsc_destroy_send_buf(net_device);
}
581 * netvsc_device_remove - Callback when the root bus device is removed
583 static int netvsc_device_remove(struct hv_device
*device
)
585 struct netvsc_device
*net_device
;
586 struct hv_netvsc_packet
*netvsc_packet
, *pos
;
588 /* Stop outbound traffic ie sends and receives completions */
589 net_device
= release_outbound_net_device(device
);
591 dev_err(&device
->device
, "No net device present!!");
595 /* Wait for all send completions */
596 while (atomic_read(&net_device
->num_outstanding_sends
)) {
597 dev_err(&device
->device
,
598 "waiting for %d requests to complete...",
599 atomic_read(&net_device
->num_outstanding_sends
));
603 netvsc_disconnect_vsp(net_device
);
605 /* Stop inbound traffic ie receives and sends completions */
606 net_device
= release_inbound_net_device(device
);
608 /* At this point, no one should be accessing netDevice except in here */
609 dev_notice(&device
->device
, "net device safe to remove");
611 /* Now, we can close the channel safely */
612 vmbus_close(device
->channel
);
614 /* Release all resources */
615 list_for_each_entry_safe(netvsc_packet
, pos
,
616 &net_device
->recv_pkt_list
, list_ent
) {
617 list_del(&netvsc_packet
->list_ent
);
618 kfree(netvsc_packet
);
621 free_net_device(net_device
);
/*
 * netvsc_cleanup - Perform any cleanup when the driver is removed.
 * Intentionally empty: per-device teardown happens in
 * netvsc_device_remove().
 */
static void netvsc_cleanup(struct hv_driver *drv)
{
}
632 static void netvsc_send_completion(struct hv_device
*device
,
633 struct vmpacket_descriptor
*packet
)
635 struct netvsc_device
*net_device
;
636 struct nvsp_message
*nvsp_packet
;
637 struct hv_netvsc_packet
*nvsc_packet
;
639 net_device
= get_inbound_net_device(device
);
641 dev_err(&device
->device
, "unable to get net device..."
642 "device being destroyed?");
646 nvsp_packet
= (struct nvsp_message
*)((unsigned long)packet
+
647 (packet
->offset8
<< 3));
649 if ((nvsp_packet
->hdr
.msg_type
== NVSP_MSG_TYPE_INIT_COMPLETE
) ||
650 (nvsp_packet
->hdr
.msg_type
==
651 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE
) ||
652 (nvsp_packet
->hdr
.msg_type
==
653 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE
)) {
654 /* Copy the response back */
655 memcpy(&net_device
->channel_init_pkt
, nvsp_packet
,
656 sizeof(struct nvsp_message
));
657 net_device
->wait_condition
= 1;
658 wake_up(&net_device
->channel_init_wait
);
659 } else if (nvsp_packet
->hdr
.msg_type
==
660 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE
) {
661 /* Get the send context */
662 nvsc_packet
= (struct hv_netvsc_packet
*)(unsigned long)
665 /* Notify the layer above us */
666 nvsc_packet
->completion
.send
.send_completion(
667 nvsc_packet
->completion
.send
.send_completion_ctx
);
669 atomic_dec(&net_device
->num_outstanding_sends
);
671 dev_err(&device
->device
, "Unknown send completion packet type- "
672 "%d received!!", nvsp_packet
->hdr
.msg_type
);
675 put_net_device(device
);
678 static int netvsc_send(struct hv_device
*device
,
679 struct hv_netvsc_packet
*packet
)
681 struct netvsc_device
*net_device
;
684 struct nvsp_message sendMessage
;
686 net_device
= get_outbound_net_device(device
);
688 dev_err(&device
->device
, "net device (%p) shutting down..."
689 "ignoring outbound packets", net_device
);
693 sendMessage
.hdr
.msg_type
= NVSP_MSG1_TYPE_SEND_RNDIS_PKT
;
694 if (packet
->is_data_pkt
) {
696 sendMessage
.msg
.v1_msg
.send_rndis_pkt
.channel_type
= 0;
698 /* 1 is RMC_CONTROL; */
699 sendMessage
.msg
.v1_msg
.send_rndis_pkt
.channel_type
= 1;
702 /* Not using send buffer section */
703 sendMessage
.msg
.v1_msg
.send_rndis_pkt
.send_buf_section_index
=
705 sendMessage
.msg
.v1_msg
.send_rndis_pkt
.send_buf_section_size
= 0;
707 if (packet
->page_buf_cnt
) {
708 ret
= vmbus_sendpacket_pagebuffer(device
->channel
,
710 packet
->page_buf_cnt
,
712 sizeof(struct nvsp_message
),
713 (unsigned long)packet
);
715 ret
= vmbus_sendpacket(device
->channel
, &sendMessage
,
716 sizeof(struct nvsp_message
),
717 (unsigned long)packet
,
719 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
724 dev_err(&device
->device
, "Unable to send packet %p ret %d",
727 atomic_inc(&net_device
->num_outstanding_sends
);
728 put_net_device(device
);
732 static void netvsc_send_recv_completion(struct hv_device
*device
,
735 struct nvsp_message recvcompMessage
;
739 recvcompMessage
.hdr
.msg_type
=
740 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE
;
742 /* FIXME: Pass in the status */
743 recvcompMessage
.msg
.v1_msg
.send_rndis_pkt_complete
.status
=
747 /* Send the completion */
748 ret
= vmbus_sendpacket(device
->channel
, &recvcompMessage
,
749 sizeof(struct nvsp_message
), transaction_id
,
754 } else if (ret
== -1) {
755 /* no more room...wait a bit and attempt to retry 3 times */
757 dev_err(&device
->device
, "unable to send receive completion pkt"
758 " (tid %llx)...retrying %d", transaction_id
, retries
);
762 goto retry_send_cmplt
;
764 dev_err(&device
->device
, "unable to send receive "
765 "completion pkt (tid %llx)...give up retrying",
769 dev_err(&device
->device
, "unable to send receive "
770 "completion pkt - %llx", transaction_id
);
774 /* Send a receive completion packet to RNDIS device (ie NetVsp) */
775 static void netvsc_receive_completion(void *context
)
777 struct hv_netvsc_packet
*packet
= context
;
778 struct hv_device
*device
= (struct hv_device
*)packet
->device
;
779 struct netvsc_device
*net_device
;
780 u64 transaction_id
= 0;
781 bool fsend_receive_comp
= false;
785 * Even though it seems logical to do a GetOutboundNetDevice() here to
786 * send out receive completion, we are using GetInboundNetDevice()
787 * since we may have disable outbound traffic already.
789 net_device
= get_inbound_net_device(device
);
791 dev_err(&device
->device
, "unable to get net device..."
792 "device being destroyed?");
796 /* Overloading use of the lock. */
797 spin_lock_irqsave(&net_device
->recv_pkt_list_lock
, flags
);
799 packet
->xfer_page_pkt
->count
--;
802 * Last one in the line that represent 1 xfer page packet.
803 * Return the xfer page packet itself to the freelist
805 if (packet
->xfer_page_pkt
->count
== 0) {
806 fsend_receive_comp
= true;
807 transaction_id
= packet
->completion
.recv
.recv_completion_tid
;
808 list_add_tail(&packet
->xfer_page_pkt
->list_ent
,
809 &net_device
->recv_pkt_list
);
813 /* Put the packet back */
814 list_add_tail(&packet
->list_ent
, &net_device
->recv_pkt_list
);
815 spin_unlock_irqrestore(&net_device
->recv_pkt_list_lock
, flags
);
817 /* Send a receive completion for the xfer page packet */
818 if (fsend_receive_comp
)
819 netvsc_send_recv_completion(device
, transaction_id
);
821 put_net_device(device
);
824 static void netvsc_receive(struct hv_device
*device
,
825 struct vmpacket_descriptor
*packet
)
827 struct netvsc_device
*net_device
;
828 struct vmtransfer_page_packet_header
*vmxferpage_packet
;
829 struct nvsp_message
*nvsp_packet
;
830 struct hv_netvsc_packet
*netvsc_packet
= NULL
;
832 unsigned long end
, end_virtual
;
833 /* struct netvsc_driver *netvscDriver; */
834 struct xferpage_packet
*xferpage_packet
= NULL
;
836 int count
= 0, bytes_remain
= 0;
838 struct netvsc_driver
*netvsc_drv
=
839 drv_to_netvscdrv(device
->device
.driver
);
843 net_device
= get_inbound_net_device(device
);
845 dev_err(&device
->device
, "unable to get net device..."
846 "device being destroyed?");
851 * All inbound packets other than send completion should be xfer page
854 if (packet
->type
!= VM_PKT_DATA_USING_XFER_PAGES
) {
855 dev_err(&device
->device
, "Unknown packet type received - %d",
857 put_net_device(device
);
861 nvsp_packet
= (struct nvsp_message
*)((unsigned long)packet
+
862 (packet
->offset8
<< 3));
864 /* Make sure this is a valid nvsp packet */
865 if (nvsp_packet
->hdr
.msg_type
!=
866 NVSP_MSG1_TYPE_SEND_RNDIS_PKT
) {
867 dev_err(&device
->device
, "Unknown nvsp packet type received-"
868 " %d", nvsp_packet
->hdr
.msg_type
);
869 put_net_device(device
);
873 vmxferpage_packet
= (struct vmtransfer_page_packet_header
*)packet
;
875 if (vmxferpage_packet
->xfer_pageset_id
!= NETVSC_RECEIVE_BUFFER_ID
) {
876 dev_err(&device
->device
, "Invalid xfer page set id - "
877 "expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID
,
878 vmxferpage_packet
->xfer_pageset_id
);
879 put_net_device(device
);
884 * Grab free packets (range count + 1) to represent this xfer
885 * page packet. +1 to represent the xfer page packet itself.
886 * We grab it here so that we know exactly how many we can
889 spin_lock_irqsave(&net_device
->recv_pkt_list_lock
, flags
);
890 while (!list_empty(&net_device
->recv_pkt_list
)) {
891 list_move_tail(net_device
->recv_pkt_list
.next
, &listHead
);
892 if (++count
== vmxferpage_packet
->range_cnt
+ 1)
895 spin_unlock_irqrestore(&net_device
->recv_pkt_list_lock
, flags
);
898 * We need at least 2 netvsc pkts (1 to represent the xfer
899 * page and at least 1 for the range) i.e. we can handled
900 * some of the xfer page packet ranges...
903 dev_err(&device
->device
, "Got only %d netvsc pkt...needed "
904 "%d pkts. Dropping this xfer page packet completely!",
905 count
, vmxferpage_packet
->range_cnt
+ 1);
907 /* Return it to the freelist */
908 spin_lock_irqsave(&net_device
->recv_pkt_list_lock
, flags
);
909 for (i
= count
; i
!= 0; i
--) {
910 list_move_tail(listHead
.next
,
911 &net_device
->recv_pkt_list
);
913 spin_unlock_irqrestore(&net_device
->recv_pkt_list_lock
,
916 netvsc_send_recv_completion(device
,
917 vmxferpage_packet
->d
.trans_id
);
919 put_net_device(device
);
923 /* Remove the 1st packet to represent the xfer page packet itself */
924 xferpage_packet
= (struct xferpage_packet
*)listHead
.next
;
925 list_del(&xferpage_packet
->list_ent
);
927 /* This is how much we can satisfy */
928 xferpage_packet
->count
= count
- 1;
930 if (xferpage_packet
->count
!= vmxferpage_packet
->range_cnt
) {
931 dev_err(&device
->device
, "Needed %d netvsc pkts to satisy "
932 "this xfer page...got %d",
933 vmxferpage_packet
->range_cnt
, xferpage_packet
->count
);
936 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
937 for (i
= 0; i
< (count
- 1); i
++) {
938 netvsc_packet
= (struct hv_netvsc_packet
*)listHead
.next
;
939 list_del(&netvsc_packet
->list_ent
);
941 /* Initialize the netvsc packet */
942 netvsc_packet
->xfer_page_pkt
= xferpage_packet
;
943 netvsc_packet
->completion
.recv
.recv_completion
=
944 netvsc_receive_completion
;
945 netvsc_packet
->completion
.recv
.recv_completion_ctx
=
947 netvsc_packet
->device
= device
;
948 /* Save this so that we can send it back */
949 netvsc_packet
->completion
.recv
.recv_completion_tid
=
950 vmxferpage_packet
->d
.trans_id
;
952 netvsc_packet
->total_data_buflen
=
953 vmxferpage_packet
->ranges
[i
].byte_count
;
954 netvsc_packet
->page_buf_cnt
= 1;
956 netvsc_packet
->page_buf
[0].len
=
957 vmxferpage_packet
->ranges
[i
].byte_count
;
959 start
= virt_to_phys((void *)((unsigned long)net_device
->
960 recv_buf
+ vmxferpage_packet
->ranges
[i
].byte_offset
));
962 netvsc_packet
->page_buf
[0].pfn
= start
>> PAGE_SHIFT
;
963 end_virtual
= (unsigned long)net_device
->recv_buf
964 + vmxferpage_packet
->ranges
[i
].byte_offset
965 + vmxferpage_packet
->ranges
[i
].byte_count
- 1;
966 end
= virt_to_phys((void *)end_virtual
);
968 /* Calculate the page relative offset */
969 netvsc_packet
->page_buf
[0].offset
=
970 vmxferpage_packet
->ranges
[i
].byte_offset
&
972 if ((end
>> PAGE_SHIFT
) != (start
>> PAGE_SHIFT
)) {
973 /* Handle frame across multiple pages: */
974 netvsc_packet
->page_buf
[0].len
=
975 (netvsc_packet
->page_buf
[0].pfn
<<
978 bytes_remain
= netvsc_packet
->total_data_buflen
-
979 netvsc_packet
->page_buf
[0].len
;
980 for (j
= 1; j
< NETVSC_PACKET_MAXPAGE
; j
++) {
981 netvsc_packet
->page_buf
[j
].offset
= 0;
982 if (bytes_remain
<= PAGE_SIZE
) {
983 netvsc_packet
->page_buf
[j
].len
=
987 netvsc_packet
->page_buf
[j
].len
=
989 bytes_remain
-= PAGE_SIZE
;
991 netvsc_packet
->page_buf
[j
].pfn
=
992 virt_to_phys((void *)(end_virtual
-
993 bytes_remain
)) >> PAGE_SHIFT
;
994 netvsc_packet
->page_buf_cnt
++;
995 if (bytes_remain
== 0)
1000 /* Pass it to the upper layer */
1001 netvsc_drv
->recv_cb(device
, netvsc_packet
);
1003 netvsc_receive_completion(netvsc_packet
->
1004 completion
.recv
.recv_completion_ctx
);
1007 put_net_device(device
);
1010 static void netvsc_channel_cb(void *context
)
1013 struct hv_device
*device
= context
;
1014 struct netvsc_device
*net_device
;
1017 unsigned char *packet
;
1018 struct vmpacket_descriptor
*desc
;
1019 unsigned char *buffer
;
1020 int bufferlen
= NETVSC_PACKET_SIZE
;
1022 packet
= kzalloc(NETVSC_PACKET_SIZE
* sizeof(unsigned char),
1028 net_device
= get_inbound_net_device(device
);
1030 dev_err(&device
->device
, "net device (%p) shutting down..."
1031 "ignoring inbound packets", net_device
);
1036 ret
= vmbus_recvpacket_raw(device
->channel
, buffer
, bufferlen
,
1037 &bytes_recvd
, &request_id
);
1039 if (bytes_recvd
> 0) {
1040 desc
= (struct vmpacket_descriptor
*)buffer
;
1041 switch (desc
->type
) {
1043 netvsc_send_completion(device
, desc
);
1046 case VM_PKT_DATA_USING_XFER_PAGES
:
1047 netvsc_receive(device
, desc
);
1051 dev_err(&device
->device
,
1052 "unhandled packet type %d, "
1053 "tid %llx len %d\n",
1054 desc
->type
, request_id
,
1060 if (bufferlen
> NETVSC_PACKET_SIZE
) {
1063 bufferlen
= NETVSC_PACKET_SIZE
;
1067 if (bufferlen
> NETVSC_PACKET_SIZE
) {
1070 bufferlen
= NETVSC_PACKET_SIZE
;
1075 } else if (ret
== -2) {
1076 /* Handle large packet */
1077 buffer
= kmalloc(bytes_recvd
, GFP_ATOMIC
);
1078 if (buffer
== NULL
) {
1079 /* Try again next time around */
1080 dev_err(&device
->device
,
1081 "unable to allocate buffer of size "
1082 "(%d)!!", bytes_recvd
);
1086 bufferlen
= bytes_recvd
;
1090 put_net_device(device
);
1097 * netvsc_device_add - Callback when the device belonging to this
1100 static int netvsc_device_add(struct hv_device
*device
, void *additional_info
)
1104 struct netvsc_device
*net_device
;
1105 struct hv_netvsc_packet
*packet
, *pos
;
1106 struct netvsc_driver
*net_driver
=
1107 drv_to_netvscdrv(device
->device
.driver
);
1109 net_device
= alloc_net_device(device
);
1115 /* Initialize the NetVSC channel extension */
1116 net_device
->recv_buf_size
= NETVSC_RECEIVE_BUFFER_SIZE
;
1117 spin_lock_init(&net_device
->recv_pkt_list_lock
);
1119 net_device
->send_buf_size
= NETVSC_SEND_BUFFER_SIZE
;
1121 INIT_LIST_HEAD(&net_device
->recv_pkt_list
);
1123 for (i
= 0; i
< NETVSC_RECEIVE_PACKETLIST_COUNT
; i
++) {
1124 packet
= kzalloc(sizeof(struct hv_netvsc_packet
) +
1125 (NETVSC_RECEIVE_SG_COUNT
*
1126 sizeof(struct hv_page_buffer
)), GFP_KERNEL
);
1130 list_add_tail(&packet
->list_ent
,
1131 &net_device
->recv_pkt_list
);
1133 init_waitqueue_head(&net_device
->channel_init_wait
);
1135 /* Open the channel */
1136 ret
= vmbus_open(device
->channel
, net_driver
->ring_buf_size
,
1137 net_driver
->ring_buf_size
, NULL
, 0,
1138 netvsc_channel_cb
, device
);
1141 dev_err(&device
->device
, "unable to open channel: %d", ret
);
1146 /* Channel is opened */
1147 pr_info("hv_netvsc channel opened successfully");
1149 /* Connect with the NetVsp */
1150 ret
= netvsc_connect_vsp(device
);
1152 dev_err(&device
->device
,
1153 "unable to connect to NetVSP - %d", ret
);
1161 /* Now, we can close the channel safely */
1162 vmbus_close(device
->channel
);
1167 list_for_each_entry_safe(packet
, pos
,
1168 &net_device
->recv_pkt_list
,
1170 list_del(&packet
->list_ent
);
1174 release_outbound_net_device(device
);
1175 release_inbound_net_device(device
);
1177 free_net_device(net_device
);
1184 * netvsc_initialize - Main entry point
1186 int netvsc_initialize(struct hv_driver
*drv
)
1188 struct netvsc_driver
*driver
=
1189 drv_to_netvscdrv(&drv
->driver
);
1191 drv
->name
= driver_name
;
1192 memcpy(&drv
->dev_type
, &netvsc_device_type
, sizeof(struct hv_guid
));
1194 /* Setup the dispatch table */
1195 driver
->base
.dev_add
= netvsc_device_add
;
1196 driver
->base
.dev_rm
= netvsc_device_remove
;
1197 driver
->base
.cleanup
= netvsc_cleanup
;
1199 driver
->send
= netvsc_send
;
1201 rndis_filter_init(driver
);