/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
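
/*
 * NUM_PAGES_SPANNED() counts every page a buffer touches, including
 * partially used first and last pages.  For example, with 4K pages,
 * addr = 0x1ff0 and len = 0x20 give
 * (PAGE_ALIGN(0x2010) >> PAGE_SHIFT) - (0x1ff0 >> PAGE_SHIFT) = 3 - 1 = 2:
 * the 32-byte buffer straddles two pages.
 */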

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
static void vmbus_setevent(struct vmbus_channel *channel)
{
	struct hv_monitor_page *monitorpage;

	if (channel->offermsg.monitor_allocated) {
		/* Each u32 represents 32 channels */
		sync_set_bit(channel->offermsg.child_relid & 31,
			(unsigned long *) vmbus_connection.send_int_page +
			(channel->offermsg.child_relid >> 5));

		/* Get the child to parent monitor page */
		monitorpage = vmbus_connection.monitor_pages[1];

		sync_set_bit(channel->monitor_bit,
			(unsigned long *)&monitorpage->trigger_group
					[channel->monitor_grp].pending);

	} else {
		vmbus_set_event(channel);
	}
}

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
	       u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
	       void (*onchannelcallback)(void *context), void *context)
{
	struct vmbus_channel_open_channel *open_msg;
	struct vmbus_channel_msginfo *open_info = NULL;
	struct page *page;
	void *in, *out;
	unsigned long flags;
	unsigned long t;
	int ret, err = 0;

	spin_lock_irqsave(&newchannel->lock, flags);
	if (newchannel->state == CHANNEL_OPEN_STATE) {
		newchannel->state = CHANNEL_OPENING_STATE;
	} else {
		spin_unlock_irqrestore(&newchannel->lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&newchannel->lock, flags);

	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;

	/* Allocate the ring buffer */
	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
				GFP_KERNEL|__GFP_ZERO,
				get_order(send_ringbuffer_size +
					  recv_ringbuffer_size));
	if (!page)
		out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
					       get_order(send_ringbuffer_size +
							 recv_ringbuffer_size));
	else
		out = (void *)page_address(page);

	if (!out) {
		err = -ENOMEM;
		goto error0;
	}

	in = (void *)((unsigned long)out + send_ringbuffer_size);

	newchannel->ringbuffer_pages = out;
	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
					    recv_ringbuffer_size) >> PAGE_SHIFT;

	ret = hv_ringbuffer_init(
		&newchannel->outbound, out, send_ringbuffer_size);
	if (ret != 0) {
		err = ret;
		goto error0;
	}

	ret = hv_ringbuffer_init(
		&newchannel->inbound, in, recv_ringbuffer_size);
	if (ret != 0) {
		err = ret;
		goto error0;
	}

	/* Establish the gpadl for the ring buffer */
	newchannel->ringbuffer_gpadlhandle = 0;

	ret = vmbus_establish_gpadl(newchannel,
				    newchannel->outbound.ring_buffer,
				    send_ringbuffer_size +
				    recv_ringbuffer_size,
				    &newchannel->ringbuffer_gpadlhandle);
	if (ret != 0) {
		err = ret;
		goto error0;
	}

	/* Create and init the channel open message */
	open_info = kmalloc(sizeof(*open_info) +
			    sizeof(struct vmbus_channel_open_channel),
			    GFP_KERNEL);
	if (!open_info) {
		err = -ENOMEM;
		goto error_gpadl;
	}

	init_completion(&open_info->waitevent);

	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	open_msg->openid = newchannel->offermsg.child_relid;
	open_msg->child_relid = newchannel->offermsg.child_relid;
	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
						     PAGE_SHIFT;
	open_msg->target_vp = newchannel->target_vp;

	if (userdatalen > MAX_USER_DEFINED_BYTES) {
		err = -EINVAL;
		goto error_gpadl;
	}

	if (userdatalen)
		memcpy(open_msg->userdata, userdata, userdatalen);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&open_info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(open_msg,
			     sizeof(struct vmbus_channel_open_channel));
	if (ret != 0) {
		err = ret;
		goto error1;
	}

	t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
	if (t == 0) {
		err = -ETIMEDOUT;
		goto error1;
	}

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (open_info->response.open_result.status) {
		err = -EAGAIN;
		goto error_gpadl;
	}

	newchannel->state = CHANNEL_OPENED_STATE;
	kfree(open_info);
	return 0;

error1:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

error_gpadl:
	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);

error0:
	free_pages((unsigned long)out,
		   get_order(send_ringbuffer_size + recv_ringbuffer_size));
	kfree(open_info);
	newchannel->state = CHANNEL_OPEN_STATE;
	return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
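
/*
 * A minimal usage sketch (not part of this file): a VMBus device driver
 * typically calls vmbus_open() from its probe() callback.  The ring sizes,
 * "my_onchannel_cb" and "my_probe" below are hypothetical and shown only
 * for illustration.
 *
 *	static void my_onchannel_cb(void *context)
 *	{
 *		struct hv_device *dev = context;
 *		// read packets with vmbus_recvpacket() here
 *	}
 *
 *	static int my_probe(struct hv_device *dev,
 *			    const struct hv_vmbus_device_id *dev_id)
 *	{
 *		int ret;
 *
 *		ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *				 NULL, 0, my_onchannel_cb, dev);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */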

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
			       struct vmbus_channel_msginfo **msginfo,
			       u32 *messagecount)
{
	int i;
	int pagecount;
	struct vmbus_channel_gpadl_header *gpadl_header;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msgheader;
	struct vmbus_channel_msginfo *msgbody = NULL;
	u32 msgsize;

	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

	pagecount = size >> PAGE_SHIFT;

	/* do we need a gpadl body msg */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_header) -
		  sizeof(struct gpa_range);
	pfncount = pfnsize / sizeof(u64);

	if (pagecount > pfncount) {
		/* we need a gpadl body */
		/* fill in the header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pfncount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (!msgheader)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					     pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pfncount; i++)
			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
		*msginfo = msgheader;
		*messagecount = 1;

		pfnsum = pfncount;
		pfnleft = pagecount - pfncount;

		/* how many pfns can we fit */
		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
			  sizeof(struct vmbus_channel_gpadl_body);
		pfncount = pfnsize / sizeof(u64);

		/* fill in the body */
		while (pfnleft) {
			if (pfnleft > pfncount)
				pfncurr = pfncount;
			else
				pfncurr = pfnleft;

			msgsize = sizeof(struct vmbus_channel_msginfo) +
				  sizeof(struct vmbus_channel_gpadl_body) +
				  pfncurr * sizeof(u64);
			msgbody = kzalloc(msgsize, GFP_KERNEL);

			if (!msgbody) {
				struct vmbus_channel_msginfo *pos = NULL;
				struct vmbus_channel_msginfo *tmp = NULL;
				/*
				 * Free up all the allocated messages.
				 */
				list_for_each_entry_safe(pos, tmp,
					&msgheader->submsglist,
					msglistentry) {

					list_del(&pos->msglistentry);
					kfree(pos);
				}

				goto nomem;
			}

			msgbody->msgsize = msgsize;
			(*messagecount)++;
			gpadl_body =
				(struct vmbus_channel_gpadl_body *)msgbody->msg;

			/*
			 * Gpadl is u32 and we are using a pointer which could
			 * be 64-bit.
			 * This is governed by the guest/host protocol and
			 * so the hypervisor guarantees that this is ok.
			 */
			for (i = 0; i < pfncurr; i++)
				gpadl_body->pfn[i] = slow_virt_to_phys(
					kbuffer + PAGE_SIZE * (pfnsum + i)) >>
					PAGE_SHIFT;

			/* add to msg header */
			list_add_tail(&msgbody->msglistentry,
				      &msgheader->submsglist);
			pfnsum += pfncurr;
			pfnleft -= pfncurr;
		}
	} else {
		/* everything fits in a header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pagecount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (msgheader == NULL)
			goto nomem;
		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					     pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pagecount; i++)
			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;

		*msginfo = msgheader;
		*messagecount = 1;
	}

	return 0;
nomem:
	kfree(msgheader);
	kfree(msgbody);
	return -ENOMEM;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @gpadl_handle: the handle of the GPADL that was created
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
			  u32 size, u32 *gpadl_handle)
{
	struct vmbus_channel_gpadl_header *gpadlmsg;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msginfo = NULL;
	struct vmbus_channel_msginfo *submsginfo;
	u32 msgcount;
	struct list_head *curr;
	u32 next_gpadl_handle;
	unsigned long flags;
	int ret = 0;

	next_gpadl_handle =
		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

	ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
	if (ret)
		return ret;

	init_completion(&msginfo->waitevent);

	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
	gpadlmsg->child_relid = channel->offermsg.child_relid;
	gpadlmsg->gpadl = next_gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&msginfo->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
			     sizeof(*msginfo));
	if (ret != 0)
		goto cleanup;

	if (msgcount > 1) {
		list_for_each(curr, &msginfo->submsglist) {

			submsginfo = (struct vmbus_channel_msginfo *)curr;
			gpadl_body =
			     (struct vmbus_channel_gpadl_body *)submsginfo->msg;

			gpadl_body->header.msgtype =
				CHANNELMSG_GPADL_BODY;
			gpadl_body->gpadl = next_gpadl_handle;

			ret = vmbus_post_msg(gpadl_body,
					     submsginfo->msgsize -
					     sizeof(*submsginfo));
			if (ret != 0)
				goto cleanup;
		}
	}
	wait_for_completion(&msginfo->waitevent);

	/* At this point, we received the gpadl created msg */
	*gpadl_handle = gpadlmsg->gpadl;

cleanup:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&msginfo->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(msginfo);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
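
/*
 * A minimal usage sketch (illustration only; "buf" and the 16K size are
 * hypothetical): a driver shares a guest buffer with the host by
 * establishing a GPADL for it, and releases it with vmbus_teardown_gpadl()
 * before freeing the memory.
 *
 *	u32 gpadl = 0;
 *	void *buf = kzalloc(16 * 1024, GFP_KERNEL);
 *
 *	ret = vmbus_establish_gpadl(channel, buf, 16 * 1024, &gpadl);
 *	...
 *	vmbus_teardown_gpadl(channel, gpadl);
 *	kfree(buf);
 */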

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
	struct vmbus_channel_gpadl_teardown *msg;
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret;

	info = kmalloc(sizeof(*info) +
		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	init_completion(&info->waitevent);

	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
	msg->child_relid = channel->offermsg.child_relid;
	msg->gpadl = gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	ret = vmbus_post_msg(msg,
			     sizeof(struct vmbus_channel_gpadl_teardown));
	if (ret)
		goto post_msg_err;

	wait_for_completion(&info->waitevent);

post_msg_err:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(info);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

static void reset_channel_cb(void *arg)
{
	struct vmbus_channel *channel = arg;

	channel->onchannel_callback = NULL;
}

static int vmbus_close_internal(struct vmbus_channel *channel)
{
	struct vmbus_channel_close_channel *msg;
	struct tasklet_struct *tasklet;
	int ret;

	/*
	 * process_chn_event(), running in the tasklet, can race
	 * with vmbus_close_internal() in the case of SMP guest, e.g., when
	 * the former is accessing channel->inbound.ring_buffer, the latter
	 * could be freeing the ring_buffer pages.
	 *
	 * To resolve the race, we can serialize them by disabling the
	 * tasklet when the latter is running here.
	 */
	tasklet = hv_context.event_dpc[channel->target_cpu];
	tasklet_disable(tasklet);

	/*
	 * In case a device driver's probe() fails (e.g.,
	 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
	 * rescinded later (e.g., we dynamically disable an Integrated Service
	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
	 * here we should skip most of the below cleanup work.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EINVAL;
		goto out;
	}

	channel->state = CHANNEL_OPEN_STATE;
	channel->sc_creation_callback = NULL;
	/* Stop callback and cancel the timer asap */
	if (channel->target_cpu != get_cpu()) {
		/* wait for all to finish */
		smp_call_function_single(channel->target_cpu, reset_channel_cb,
					 channel, true);
		put_cpu();
	} else {
		reset_channel_cb(channel);
		put_cpu();
	}

	/* Send a closing message */

	msg = &channel->close_msg.msg;

	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
	msg->child_relid = channel->offermsg.child_relid;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
	if (ret) {
		pr_err("Close failed: close post msg return is %d\n", ret);
		/*
		 * If we failed to post the close msg,
		 * it is perhaps better to leak memory.
		 */
		goto out;
	}

	/* Tear down the gpadl for the channel's ring buffer */
	if (channel->ringbuffer_gpadlhandle) {
		ret = vmbus_teardown_gpadl(channel,
					   channel->ringbuffer_gpadlhandle);
		if (ret) {
			pr_err("Close failed: teardown gpadl return %d\n", ret);
			/*
			 * If we failed to teardown gpadl,
			 * it is perhaps better to leak memory.
			 */
			goto out;
		}
	}

	/* Cleanup the ring buffers for this channel */
	hv_ringbuffer_cleanup(&channel->outbound);
	hv_ringbuffer_cleanup(&channel->inbound);

	free_pages((unsigned long)channel->ringbuffer_pages,
		   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

out:
	tasklet_enable(tasklet);

	return ret;
}

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (channel->primary_channel != NULL) {
		/*
		 * We will only close sub-channels when
		 * the primary is closed.
		 */
		return;
	}
	/*
	 * Close all the sub-channels first and then close the
	 * primary channel.
	 */
	list_for_each_safe(cur, tmp, &channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;
		vmbus_close_internal(cur_channel);
	}
	/*
	 * Now close the primary.
	 */
	vmbus_close_internal(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);

int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u64 requestid,
			 enum vmbus_packet_type type, u32 flags, bool kick_q)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	int ret;
	bool signal = false;
	int num_vecs = ((bufferlen != 0) ? 3 : 1);


	/* Setup the descriptor */
	desc.type = type; /* VmbusPacketTypeDataInBand; */
	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
	/* in 8-bytes granularity */
	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
	desc.len8 = (u16)(packetlen_aligned >> 3);
	desc.trans_id = requestid;

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
				  &signal);

	/*
	 * Signalling the host is conditional on many factors:
	 * 1. The ring state changed from being empty to non-empty.
	 *	This is tracked by the variable "signal".
	 * 2. The variable kick_q tracks if more data will be placed
	 *	on the ring. We will not signal if more data is
	 *	to be placed.
	 *
	 * Based on the channel signal state, we will decide
	 * which signaling policy will be applied.
	 *
	 * If we cannot write to the ring-buffer; signal the host
	 * even if we may not have written anything. This is a rare
	 * enough condition that it should not matter.
	 */
	if (channel->signal_policy)
		signal = true;
	else
		kick_q = true;

	if (((ret == 0) && kick_q && signal) || (ret))
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Length of the data in @buffer
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 * packet etc.
 *
 * Sends data in @buffer directly to Hyper-V via the vmbus.
 * This will send the data unparsed to Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u64 requestid,
		     enum vmbus_packet_type type, u32 flags)
{
	return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
				    type, flags, true);
}
EXPORT_SYMBOL(vmbus_sendpacket);
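
/*
 * A minimal usage sketch (illustration only; "request" is a hypothetical
 * driver-defined structure): send an in-band packet and request a
 * completion, so the host answers with a completion packet carrying the
 * same request id.
 *
 *	ret = vmbus_sendpacket(channel, &request, sizeof(request),
 *			       (unsigned long)&request,
 *			       VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */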

/*
 * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows you
 * to control notifying the host. This will be useful for sending
 * batched data. Also the sender can control the send flags
 * explicitly.
 */
int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
				    struct hv_page_buffer pagebuffers[],
				    u32 pagecount, void *buffer, u32 bufferlen,
				    u64 requestid,
				    u32 flags,
				    bool kick_q)
{
	int ret;
	int i;
	struct vmbus_channel_packet_page_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool signal = false;

	if (pagecount > MAX_PAGE_BUFFER_COUNT)
		return -EINVAL;


	/*
	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
	 * largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
			  ((MAX_PAGE_BUFFER_COUNT - pagecount) *
			  sizeof(struct hv_page_buffer));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = flags;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = pagecount;

	for (i = 0; i < pagecount; i++) {
		desc.range[i].len = pagebuffers[i].len;
		desc.range[i].offset = pagebuffers[i].offset;
		desc.range[i].pfn = pagebuffers[i].pfn;
	}

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = descsize;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	/*
	 * Signalling the host is conditional on many factors:
	 * 1. The ring state changed from being empty to non-empty.
	 *	This is tracked by the variable "signal".
	 * 2. The variable kick_q tracks if more data will be placed
	 *	on the ring. We will not signal if more data is
	 *	to be placed.
	 *
	 * Based on the channel signal state, we will decide
	 * which signaling policy will be applied.
	 *
	 * If we cannot write to the ring-buffer; signal the host
	 * even if we may not have written anything. This is a rare
	 * enough condition that it should not matter.
	 */
	if (channel->signal_policy)
		signal = true;
	else
		kick_q = true;

	if (((ret == 0) && kick_q && signal) || (ret))
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				struct hv_page_buffer pagebuffers[],
				u32 pagecount, void *buffer, u32 bufferlen,
				u64 requestid)
{
	u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
					       buffer, bufferlen, requestid,
					       flags, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
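
/*
 * A minimal usage sketch (illustration only; "data", "len", "hdr" and
 * "req_id" are hypothetical): describe a guest page to the host by
 * PFN/offset/length and send it with a GPADL-direct page-buffer packet.
 *
 *	struct hv_page_buffer pb;
 *
 *	pb.pfn = virt_to_phys(data) >> PAGE_SHIFT;
 *	pb.offset = offset_in_page(data);
 *	pb.len = len;
 *
 *	ret = vmbus_sendpacket_pagebuffer(channel, &pb, 1,
 *					  &hdr, sizeof(hdr), req_id);
 */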

/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
			      struct vmbus_packet_mpb_array *desc,
			      u32 desc_size,
			      void *buffer, u32 bufferlen, u64 requestid)
{
	int ret;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool signal = false;

	packetlen = desc_size + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
	desc->length8 = (u16)(packetlen_aligned >> 3);
	desc->transactionid = requestid;
	desc->rangecount = 1;

	bufferlist[0].iov_base = desc;
	bufferlist[0].iov_len = desc_size;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	if (ret == 0 && signal)
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 */
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
				     struct hv_multipage_buffer *multi_pagebuffer,
				     void *buffer, u32 bufferlen, u64 requestid)
{
	int ret;
	struct vmbus_channel_packet_multipage_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool signal = false;
	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
					 multi_pagebuffer->len);

	if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_multipage_buffer is
	 * the largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
			  ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
			  sizeof(u64));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));


	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = 1;

	desc.range.len = multi_pagebuffer->len;
	desc.range.offset = multi_pagebuffer->offset;

	memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
	       pfncount * sizeof(u64));

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = descsize;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	if (ret == 0 && signal)
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer will hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the Hyper-V vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
static inline int
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
		   bool raw)
{
	int ret;
	bool signal = false;

	ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen,
				 buffer_actual_len, requestid, &signal, raw);

	if (signal)
		vmbus_setevent(channel);

	return ret;
}

int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u32 *buffer_actual_len,
		     u64 *requestid)
{
	return __vmbus_recvpacket(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, false);
}
EXPORT_SYMBOL(vmbus_recvpacket);
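
/*
 * A minimal usage sketch (illustration only; "recv_buf" is a hypothetical
 * page-sized buffer): a channel callback typically reads the inbound ring
 * with vmbus_recvpacket() and processes the data only if something was
 * actually received.
 *
 *	u32 actual_len = 0;
 *	u64 req_id;
 *
 *	vmbus_recvpacket(channel, recv_buf, PAGE_SIZE,
 *			 &actual_len, &req_id);
 *	if (actual_len > 0) {
 *		// process actual_len bytes from recv_buf
 *	}
 */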

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u32 *buffer_actual_len,
			 u64 *requestid)
{
	return __vmbus_recvpacket(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);