drivers/staging/hv/netvsc.c

/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "hv_api.h"
#include "logging.h"
#include "netvsc.h"
#include "rndis_filter.h"
#include "channel.h"

/* Globals */
static const char *driver_name = "netvsc";

/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
static const struct hv_guid netvsc_device_type = {
	.data = {
		0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
		0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
	}
};

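/*
 * Allocate the per-device netvsc state and hang it off the hv_device.
 * The refcount starts at 2: a value above 1 permits outbound traffic
 * (see get_outbound_net_device()) and a value above 0 permits inbound
 * traffic (see get_inbound_net_device()).
 */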
static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	/* Set to 2 to allow both inbound and outbound traffic */
	atomic_cmpxchg(&net_device->refcnt, 0, 2);

	net_device->dev = device;
	device->ext = net_device;

	return net_device;
}

static void free_net_device(struct netvsc_device *device)
{
	WARN_ON(atomic_read(&device->refcnt) != 0);
	device->dev->ext = NULL;
	kfree(device);
}

/* Get the net device object only if it exists and its refcount > 1 */
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = device->ext;
	if (net_device && atomic_read(&net_device->refcnt) > 1)
		atomic_inc(&net_device->refcnt);
	else
		net_device = NULL;

	return net_device;
}

/* Get the net device object only if it exists and its refcount > 0 */
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = device->ext;
	if (net_device && atomic_read(&net_device->refcnt))
		atomic_inc(&net_device->refcnt);
	else
		net_device = NULL;

	return net_device;
}

static void put_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = device->ext;

	atomic_dec(&net_device->refcnt);
}

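/*
 * Wait for all outbound users to drop their references (refcount back
 * at 2), then lower the count to 1 so get_outbound_net_device() stops
 * handing out the device.
 */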
static struct netvsc_device *release_outbound_net_device(
		struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = device->ext;
	if (net_device == NULL)
		return NULL;

	/* Busy wait until the ref count drops to 2, then set it to 1 */
	while (atomic_cmpxchg(&net_device->refcnt, 2, 1) != 2)
		udelay(100);

	return net_device;
}

static struct netvsc_device *release_inbound_net_device(
		struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = device->ext;
	if (net_device == NULL)
		return NULL;

	/* Busy wait until the ref count drops to 1, then set it to 0 */
	while (atomic_cmpxchg(&net_device->refcnt, 1, 0) != 1)
		udelay(100);

	device->ext = NULL;
	return net_device;
}

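/*
 * Tear down the receive buffer shared with the NetVSP: revoke it if it
 * was previously advertised, release its GPADL handle, free the backing
 * pages and drop the cached section table.
 */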
static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
			revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			dev_err(&net_device->dev->device, "unable to send "
				"revoke receive buffer to netvsp");
			return -1;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->recv_buf_gpadl_handle);

		/*
		 * If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			dev_err(&net_device->dev->device,
				"unable to teardown receive buffer's gpadl");
			return -1;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		free_pages((unsigned long)net_device->recv_buf,
			   get_order(net_device->recv_buf_size));
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	return ret;
}

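/*
 * Allocate the receive buffer, describe it to the host through a GPADL
 * handle and advertise it with NVSP_MSG1_TYPE_SEND_RECV_BUF. The
 * completion carries the section layout, which is cached in
 * net_device->recv_section.
 */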
static int netvsc_init_recv_buf(struct hv_device *device)
{
	int ret = 0;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return -1;
	}

	net_device->recv_buf =
		(void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
				get_order(net_device->recv_buf_size));
	if (!net_device->recv_buf) {
		dev_err(&device->device, "unable to allocate receive "
			"buffer of size %d", net_device->recv_buf_size);
		ret = -1;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		dev_err(&device->device,
			"unable to establish receive buffer's gpadl");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	net_device->wait_condition = 0;
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		dev_err(&device->device,
			"unable to send receive buffer's gpadl to netvsp");
		goto cleanup;
	}

	wait_event_timeout(net_device->channel_init_wait,
			   net_device->wait_condition,
			   msecs_to_jiffies(1000));
	BUG_ON(net_device->wait_condition == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		dev_err(&device->device, "Unable to complete receive buffer "
			"initialization with NetVsp - status %d",
			init_packet->msg.v1_msg.
			send_recv_buf_complete.status);
		ret = -1;
		goto cleanup;
	}

	/* Parse the response */
	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmalloc(net_device->recv_section_cnt
		* sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -1;
		goto cleanup;
	}

	memcpy(net_device->recv_section,
	       init_packet->msg.v1_msg.
	       send_recv_buf_complete.sections,
	       net_device->recv_section_cnt *
	       sizeof(struct nvsp_1_receive_buffer_section));

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -1;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_recv_buf(net_device);

exit:
	put_net_device(device);
	return ret;
}

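/*
 * Counterpart of netvsc_destroy_recv_buf() for the send buffer: revoke
 * it from the NetVSP if it was advertised, release its GPADL handle and
 * free the backing pages.
 */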
static int netvsc_destroy_send_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;

	/*
	 * If we got a section size, it means we received a
	 * SendSendBufferComplete msg (ie sent
	 * NvspMessage1TypeSendSendBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.
			revoke_send_buf.id = NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			dev_err(&net_device->dev->device, "unable to send "
				"revoke send buffer to netvsp");
			return -1;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->send_buf_gpadl_handle);

		/*
		 * If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			dev_err(&net_device->dev->device,
				"unable to teardown send buffer's gpadl");
			return -1;
		}
		net_device->send_buf_gpadl_handle = 0;
	}

	if (net_device->send_buf) {
		/* Free up the send buffer */
		free_pages((unsigned long)net_device->send_buf,
			   get_order(net_device->send_buf_size));
		net_device->send_buf = NULL;
	}

	return ret;
}

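/*
 * Allocate the send buffer, establish a GPADL handle for it and
 * advertise it with NVSP_MSG1_TYPE_SEND_SEND_BUF. The completion
 * reports the section size the host expects us to use.
 */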
static int netvsc_init_send_buf(struct hv_device *device)
{
	int ret = 0;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return -1;
	}
	if (net_device->send_buf_size <= 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->send_buf =
		(void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
				get_order(net_device->send_buf_size));
	if (!net_device->send_buf) {
		dev_err(&device->device, "unable to allocate send "
			"buffer of size %d", net_device->send_buf_size);
		ret = -1;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		dev_err(&device->device,
			"unable to establish send buffer's gpadl");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id =
		NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	net_device->wait_condition = 0;
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		dev_err(&device->device,
			"unable to send send buffer's gpadl to netvsp");
		goto cleanup;
	}

	wait_event_timeout(net_device->channel_init_wait,
			   net_device->wait_condition,
			   msecs_to_jiffies(1000));
	BUG_ON(net_device->wait_condition == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		dev_err(&device->device, "Unable to complete send buffer "
			"initialization with NetVsp - status %d",
			init_packet->msg.v1_msg.
			send_send_buf_complete.status);
		ret = -1;
		goto cleanup;
	}

	net_device->send_section_size = init_packet->
		msg.v1_msg.send_send_buf_complete.section_size;

	goto exit;

cleanup:
	netvsc_destroy_send_buf(net_device);

exit:
	put_net_device(device);
	return ret;
}

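/*
 * Negotiate the NVSP protocol version with the NetVSP, report the NDIS
 * version in use and then post the receive and send buffers.
 */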
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return -1;
	}

	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver =
		NVSP_MIN_PROTOCOL_VERSION;
	init_packet->msg.init_msg.init.max_protocol_ver =
		NVSP_MAX_PROTOCOL_VERSION;

	/* Send the init request */
	net_device->wait_condition = 0;
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	wait_event_timeout(net_device->channel_init_wait,
			   net_device->wait_condition,
			   msecs_to_jiffies(1000));
	if (net_device->wait_condition == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS) {
		ret = -1;
		goto cleanup;
	}

	if (init_packet->msg.init_msg.init_complete.
	    negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
		ret = -1;
		goto cleanup;
	}
	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	ndis_version = 0x00050000;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0) {
		ret = -1;
		goto cleanup;
	}

	/* Post the big receive buffer to NetVSP */
	ret = netvsc_init_recv_buf(device);
	if (ret == 0)
		ret = netvsc_init_send_buf(device);

cleanup:
	put_net_device(device);
	return ret;
}

static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_recv_buf(net_device);
	netvsc_destroy_send_buf(net_device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
static int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct hv_netvsc_packet *netvsc_packet, *pos;

	/* Stop outbound traffic ie sends and receives completions */
	net_device = release_outbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "No net device present!!");
		return -1;
	}

	/* Wait for all send completions */
	while (atomic_read(&net_device->num_outstanding_sends)) {
		dev_err(&device->device,
			"waiting for %d requests to complete...",
			atomic_read(&net_device->num_outstanding_sends));
		udelay(100);
	}

	netvsc_disconnect_vsp(net_device);

	/* Stop inbound traffic ie receives and sends completions */
	net_device = release_inbound_net_device(device);

	/* At this point, no one should be accessing net_device except in here */
	dev_notice(&device->device, "net device safe to remove");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	list_for_each_entry_safe(netvsc_packet, pos,
				 &net_device->recv_pkt_list, list_ent) {
		list_del(&netvsc_packet->list_ent);
		kfree(netvsc_packet);
	}

	free_net_device(net_device);
	return 0;
}

/*
 * netvsc_cleanup - Perform any cleanup when the driver is removed
 */
static void netvsc_cleanup(struct hv_driver *drv)
{
}

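/*
 * Handle a completion packet from the host: channel-setup completions
 * wake up the waiter in the init path, while RNDIS packet completions
 * are forwarded to the sender's completion callback.
 */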
static void netvsc_send_completion(struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		net_device->wait_condition = 1;
		wake_up(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		/* Get the send context */
		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
			packet->trans_id;

		/* Notify the layer above us */
		nvsc_packet->completion.send.send_completion(
			nvsc_packet->completion.send.send_completion_ctx);

		atomic_dec(&net_device->num_outstanding_sends);
	} else {
		dev_err(&device->device, "Unknown send completion packet type- "
			"%d received!!", nvsp_packet->hdr.msg_type);
	}

	put_net_device(device);
}

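/*
 * Wrap an outgoing RNDIS packet in an NVSP_MSG1_TYPE_SEND_RNDIS_PKT
 * message and hand it to the vmbus channel, using the page buffer send
 * path when the packet carries page buffers.
 */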
static int netvsc_send(struct hv_device *device,
		       struct hv_netvsc_packet *packet)
{
	struct netvsc_device *net_device;
	int ret = 0;

	struct nvsp_message sendMessage;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "net device (%p) shutting down..."
			"ignoring outbound packets", net_device);
		return -2;
	}

	sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (packet->is_data_pkt) {
		/* 0 is RMC_DATA; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	/* Not using send buffer section */
	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		0xFFFFFFFF;
	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;

	if (packet->page_buf_cnt) {
		ret = vmbus_sendpacket_pagebuffer(device->channel,
						  packet->page_buf,
						  packet->page_buf_cnt,
						  &sendMessage,
						  sizeof(struct nvsp_message),
						  (unsigned long)packet);
	} else {
		ret = vmbus_sendpacket(device->channel, &sendMessage,
				       sizeof(struct nvsp_message),
				       (unsigned long)packet,
				       VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret != 0)
		dev_err(&device->device, "Unable to send packet %p ret %d",
			packet, ret);

	atomic_inc(&net_device->num_outstanding_sends);
	put_net_device(device);
	return ret;
}

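/*
 * Send a receive completion back to the host for a transfer-page
 * packet, retrying a few times if the ring buffer is temporarily full.
 */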
static void netvsc_send_recv_completion(struct hv_device *device,
					u64 transaction_id)
{
	struct nvsp_message recvcompMessage;
	int retries = 0;
	int ret;

	recvcompMessage.hdr.msg_type =
		NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	/* FIXME: Pass in the status */
	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
		NVSP_STAT_SUCCESS;

retry_send_cmplt:
	/* Send the completion */
	ret = vmbus_sendpacket(device->channel, &recvcompMessage,
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success */
		/* no-op */
	} else if (ret == -1) {
		/* no more room...wait a bit and attempt to retry 3 times */
		retries++;
		dev_err(&device->device, "unable to send receive completion pkt"
			" (tid %llx)...retrying %d", transaction_id, retries);

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
			dev_err(&device->device, "unable to send receive "
				"completion pkt (tid %llx)...give up retrying",
				transaction_id);
		}
	} else {
		dev_err(&device->device, "unable to send receive "
			"completion pkt - %llx", transaction_id);
	}
}

/* Send a receive completion packet to RNDIS device (ie NetVsp) */
static void netvsc_receive_completion(void *context)
{
	struct hv_netvsc_packet *packet = context;
	struct hv_device *device = (struct hv_device *)packet->device;
	struct netvsc_device *net_device;
	u64 transaction_id = 0;
	bool fsend_receive_comp = false;
	unsigned long flags;

	/*
	 * Even though it seems logical to do a GetOutboundNetDevice() here to
	 * send out receive completion, we are using GetInboundNetDevice()
	 * since we may have disabled outbound traffic already.
	 */
	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return;
	}

	/* Overloading use of the lock. */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);

	packet->xfer_page_pkt->count--;

	/*
	 * Last one in line for this xfer page packet.
	 * Return the xfer page packet itself to the freelist
	 */
	if (packet->xfer_page_pkt->count == 0) {
		fsend_receive_comp = true;
		transaction_id = packet->completion.recv.recv_completion_tid;
		list_add_tail(&packet->xfer_page_pkt->list_ent,
			      &net_device->recv_pkt_list);
	}

	/* Put the packet back */
	list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/* Send a receive completion for the xfer page packet */
	if (fsend_receive_comp)
		netvsc_send_recv_completion(device, transaction_id);

	put_net_device(device);
}

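/*
 * Handle an incoming transfer-page packet: grab enough packets from the
 * free list to describe every range, translate each range of the
 * receive buffer into page buffers and hand the result to the upper
 * layer via the driver's receive callback.
 */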
static void netvsc_receive(struct hv_device *device,
			   struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *netvsc_packet = NULL;
	unsigned long start;
	unsigned long end, end_virtual;
	/* struct netvsc_driver *netvscDriver; */
	struct xferpage_packet *xferpage_packet = NULL;
	int i, j;
	int count = 0, bytes_remain = 0;
	unsigned long flags;
	struct netvsc_driver *netvsc_drv =
		drv_to_netvscdrv(device->device.driver);

	LIST_HEAD(listHead);

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return;
	}

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		dev_err(&device->device, "Unknown packet type received - %d",
			packet->type);
		put_net_device(device);
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		dev_err(&device->device, "Unknown nvsp packet type received-"
			" %d", nvsp_packet->hdr.msg_type);
		put_net_device(device);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		dev_err(&device->device, "Invalid xfer page set id - "
			"expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID,
			vmxferpage_packet->xfer_pageset_id);
		put_net_device(device);
		return;
	}

	/*
	 * Grab free packets (range count + 1) to represent this xfer
	 * page packet. +1 to represent the xfer page packet itself.
	 * We grab it here so that we know exactly how many we can
	 * fulfil
	 */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
	while (!list_empty(&net_device->recv_pkt_list)) {
		list_move_tail(net_device->recv_pkt_list.next, &listHead);
		if (++count == vmxferpage_packet->range_cnt + 1)
			break;
	}
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/*
	 * We need at least 2 netvsc pkts (1 to represent the xfer
	 * page and at least 1 for the range) i.e. we can handle
	 * some of the xfer page packet ranges...
	 */
	if (count < 2) {
		dev_err(&device->device, "Got only %d netvsc pkt...needed "
			"%d pkts. Dropping this xfer page packet completely!",
			count, vmxferpage_packet->range_cnt + 1);

		/* Return it to the freelist */
		spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
		for (i = count; i != 0; i--) {
			list_move_tail(listHead.next,
				       &net_device->recv_pkt_list);
		}
		spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
				       flags);

		netvsc_send_recv_completion(device,
					    vmxferpage_packet->d.trans_id);

		put_net_device(device);
		return;
	}

	/* Remove the 1st packet to represent the xfer page packet itself */
	xferpage_packet = (struct xferpage_packet *)listHead.next;
	list_del(&xferpage_packet->list_ent);

	/* This is how much we can satisfy */
	xferpage_packet->count = count - 1;

	if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
		dev_err(&device->device, "Needed %d netvsc pkts to satisfy "
			"this xfer page...got %d",
			vmxferpage_packet->range_cnt, xferpage_packet->count);
	}

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < (count - 1); i++) {
		netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
		list_del(&netvsc_packet->list_ent);

		/* Initialize the netvsc packet */
		netvsc_packet->xfer_page_pkt = xferpage_packet;
		netvsc_packet->completion.recv.recv_completion =
			netvsc_receive_completion;
		netvsc_packet->completion.recv.recv_completion_ctx =
			netvsc_packet;
		netvsc_packet->device = device;
		/* Save this so that we can send it back */
		netvsc_packet->completion.recv.recv_completion_tid =
			vmxferpage_packet->d.trans_id;

		netvsc_packet->total_data_buflen =
			vmxferpage_packet->ranges[i].byte_count;
		netvsc_packet->page_buf_cnt = 1;

		netvsc_packet->page_buf[0].len =
			vmxferpage_packet->ranges[i].byte_count;

		start = virt_to_phys((void *)((unsigned long)net_device->
			recv_buf + vmxferpage_packet->ranges[i].byte_offset));

		netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
		end_virtual = (unsigned long)net_device->recv_buf
			+ vmxferpage_packet->ranges[i].byte_offset
			+ vmxferpage_packet->ranges[i].byte_count - 1;
		end = virt_to_phys((void *)end_virtual);

		/* Calculate the page relative offset */
		netvsc_packet->page_buf[0].offset =
			vmxferpage_packet->ranges[i].byte_offset &
			(PAGE_SIZE - 1);
		if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
			/* Handle frame across multiple pages: */
			netvsc_packet->page_buf[0].len =
				(netvsc_packet->page_buf[0].pfn <<
				 PAGE_SHIFT)
				+ PAGE_SIZE - start;
			bytes_remain = netvsc_packet->total_data_buflen -
				netvsc_packet->page_buf[0].len;
			for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
				netvsc_packet->page_buf[j].offset = 0;
				if (bytes_remain <= PAGE_SIZE) {
					netvsc_packet->page_buf[j].len =
						bytes_remain;
					bytes_remain = 0;
				} else {
					netvsc_packet->page_buf[j].len =
						PAGE_SIZE;
					bytes_remain -= PAGE_SIZE;
				}
				netvsc_packet->page_buf[j].pfn =
					virt_to_phys((void *)(end_virtual -
						bytes_remain)) >> PAGE_SHIFT;
				netvsc_packet->page_buf_cnt++;
				if (bytes_remain == 0)
					break;
			}
		}

		/* Pass it to the upper layer */
		netvsc_drv->recv_cb(device, netvsc_packet);

		netvsc_receive_completion(netvsc_packet->
				completion.recv.recv_completion_ctx);
	}

	put_net_device(device);
}

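/*
 * Channel callback invoked by the vmbus when data arrives on the ring.
 * Drain the ring buffer and dispatch each packet to the completion or
 * receive handler, switching to a larger temporary buffer when a packet
 * does not fit in the default one.
 */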
static void netvsc_channel_cb(void *context)
{
	int ret;
	struct hv_device *device = context;
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
	unsigned char *packet;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;

	packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
			 GFP_ATOMIC);
	if (!packet)
		return;
	buffer = packet;

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "net device (%p) shutting down..."
			"ignoring inbound packets", net_device);
		goto out;
	}

	do {
		ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
					   &bytes_recvd, &request_id);
		if (ret == 0) {
			if (bytes_recvd > 0) {
				desc = (struct vmpacket_descriptor *)buffer;
				switch (desc->type) {
				case VM_PKT_COMP:
					netvsc_send_completion(device, desc);
					break;

				case VM_PKT_DATA_USING_XFER_PAGES:
					netvsc_receive(device, desc);
					break;

				default:
					dev_err(&device->device,
						"unhandled packet type %d, "
						"tid %llx len %d\n",
						desc->type, request_id,
						bytes_recvd);
					break;
				}

				/* reset */
				if (bufferlen > NETVSC_PACKET_SIZE) {
					kfree(buffer);
					buffer = packet;
					bufferlen = NETVSC_PACKET_SIZE;
				}
			} else {
				/* reset */
				if (bufferlen > NETVSC_PACKET_SIZE) {
					kfree(buffer);
					buffer = packet;
					bufferlen = NETVSC_PACKET_SIZE;
				}

				break;
			}
		} else if (ret == -2) {
			/* Handle large packet */
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (buffer == NULL) {
				/* Try again next time around */
				dev_err(&device->device,
					"unable to allocate buffer of size "
					"(%d)!!", bytes_recvd);
				break;
			}

			bufferlen = bytes_recvd;
		}
	} while (1);

	put_net_device(device);
out:
	kfree(buffer);
	return;
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
static int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int i;
	struct netvsc_device *net_device;
	struct hv_netvsc_packet *packet, *pos;
	struct netvsc_driver *net_driver =
		drv_to_netvscdrv(device->device.driver);

	net_device = alloc_net_device(device);
	if (!net_device) {
		ret = -1;
		goto cleanup;
	}

	/* Initialize the NetVSC channel extension */
	net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	spin_lock_init(&net_device->recv_pkt_list_lock);

	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	INIT_LIST_HEAD(&net_device->recv_pkt_list);

	for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
		packet = kzalloc(sizeof(struct hv_netvsc_packet) +
				 (NETVSC_RECEIVE_SG_COUNT *
				  sizeof(struct hv_page_buffer)), GFP_KERNEL);
		if (!packet)
			break;

		list_add_tail(&packet->list_ent,
			      &net_device->recv_pkt_list);
	}
	init_waitqueue_head(&net_device->channel_init_wait);

	/* Open the channel */
	ret = vmbus_open(device->channel, net_driver->ring_buf_size,
			 net_driver->ring_buf_size, NULL, 0,
			 netvsc_channel_cb, device);

	if (ret != 0) {
		dev_err(&device->device, "unable to open channel: %d", ret);
		ret = -1;
		goto cleanup;
	}

	/* Channel is opened */
	pr_info("hv_netvsc channel opened successfully");

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		dev_err(&device->device,
			"unable to connect to NetVSP - %d", ret);
		ret = -1;
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:

	if (net_device) {
		list_for_each_entry_safe(packet, pos,
					 &net_device->recv_pkt_list,
					 list_ent) {
			list_del(&packet->list_ent);
			kfree(packet);
		}

		release_outbound_net_device(device);
		release_inbound_net_device(device);

		free_net_device(net_device);
	}

	return ret;
}

/*
 * netvsc_initialize - Main entry point
 */
int netvsc_initialize(struct hv_driver *drv)
{
	struct netvsc_driver *driver =
		drv_to_netvscdrv(&drv->driver);

	drv->name = driver_name;
	memcpy(&drv->dev_type, &netvsc_device_type, sizeof(struct hv_guid));

	/* Setup the dispatch table */
	driver->base.dev_add = netvsc_device_add;
	driver->base.dev_rm = netvsc_device_remove;
	driver->base.cleanup = netvsc_cleanup;

	driver->send = netvsc_send;

	rndis_filter_init(driver);
	return 0;
}