/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "vmbus_private.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
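
/*
 * Example (hypothetical values): NUM_PAGES_SPANNED counts whole pages
 * touched by a buffer, not bytes / PAGE_SIZE.  With 4 KiB pages, a buffer
 * at address 0x1f00 with length 0x300 ends at byte 0x21ff, so
 * PAGE_ALIGN(0x1f00 + 0x300) >> PAGE_SHIFT = 3 and 0x1f00 >> PAGE_SHIFT = 1,
 * i.e. the buffer spans 2 pages even though it is shorter than one page.
 */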

/* Internal routines */
static int create_gpadl_header(
	void *kbuffer,	/* must be phys and virt contiguous */
	u32 size,	/* page-size multiple */
	struct vmbus_channel_msginfo **msginfo,
	u32 *messagecount);
static void dump_vmbus_channel(struct vmbus_channel *channel);
static void vmbus_setevent(struct vmbus_channel *channel);

static void DumpMonitorPage(struct hv_monitor_page *MonitorPage)
{
	int i = 0;
	int j = 0;

	DPRINT_DBG(VMBUS, "monitorPage - %p, trigger state - %d",
		   MonitorPage, MonitorPage->trigger_state);

	for (i = 0; i < 4; i++)
		DPRINT_DBG(VMBUS, "trigger group (%d) - %llx", i,
			   MonitorPage->trigger_group[i].as_uint64);

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 32; j++) {
			DPRINT_DBG(VMBUS, "latency (%d)(%d) - %llx", i, j,
				   MonitorPage->latency[i][j]);
		}
	}

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 32; j++) {
			DPRINT_DBG(VMBUS, "param-conn id (%d)(%d) - %d", i, j,
			       MonitorPage->parameter[i][j].connectionid.asu32);
			DPRINT_DBG(VMBUS, "param-flag (%d)(%d) - %d", i, j,
			       MonitorPage->parameter[i][j].flag_number);
		}
	}
}

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
static void vmbus_setevent(struct vmbus_channel *channel)
{
	struct hv_monitor_page *monitorpage;

	if (channel->offermsg.monitor_allocated) {
		/* Each u32 represents 32 channels */
		sync_set_bit(channel->offermsg.child_relid & 31,
			(unsigned long *) vmbus_connection.send_int_page +
			(channel->offermsg.child_relid >> 5));

		monitorpage = vmbus_connection.monitor_pages;
		monitorpage++; /* Get the child to parent monitor page */

		sync_set_bit(channel->monitor_bit,
			(unsigned long *)&monitorpage->trigger_group
					[channel->monitor_grp].pending);
	} else {
		vmbus_set_event(channel->offermsg.child_relid);
	}
}
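
/*
 * Illustration (hypothetical values): the send interrupt page is treated
 * as one bit per child relid, so for relid 70 the code above sets bit
 * 70 & 31 = 6 in the u32 at index 70 >> 5 = 2.  For monitored channels the
 * additional bit set in the child-to-parent monitor page lets the
 * hypervisor batch the actual signalling.
 */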

static void VmbusChannelClearEvent(struct vmbus_channel *channel)
{
	struct hv_monitor_page *monitorPage;

	if (channel->offermsg.monitor_allocated) {
		/* Each u32 represents 32 channels */
		sync_clear_bit(channel->offermsg.child_relid & 31,
			  (unsigned long *)vmbus_connection.send_int_page +
			  (channel->offermsg.child_relid >> 5));

		monitorPage = (struct hv_monitor_page *)
			vmbus_connection.monitor_pages;
		monitorPage++; /* Get the child to parent monitor page */

		sync_clear_bit(channel->monitor_bit,
			  (unsigned long *)&monitorPage->trigger_group
					[channel->monitor_grp].pending);
	}
}

/*
 * vmbus_get_debug_info - Retrieve various channel debug info
 */
void vmbus_get_debug_info(struct vmbus_channel *channel,
			  struct vmbus_channel_debug_info *debuginfo)
{
	struct hv_monitor_page *monitorpage;
	u8 monitor_group = (u8)channel->offermsg.monitorid / 32;
	u8 monitor_offset = (u8)channel->offermsg.monitorid % 32;
	/* u32 monitorBit = 1 << monitorOffset; */

	debuginfo->relid = channel->offermsg.child_relid;
	debuginfo->state = channel->state;
	memcpy(&debuginfo->interfacetype,
	       &channel->offermsg.offer.if_type, sizeof(struct hv_guid));
	memcpy(&debuginfo->interface_instance,
	       &channel->offermsg.offer.if_instance,
	       sizeof(struct hv_guid));

	monitorpage = (struct hv_monitor_page *)vmbus_connection.monitor_pages;

	debuginfo->monitorid = channel->offermsg.monitorid;

	debuginfo->servermonitor_pending =
		monitorpage->trigger_group[monitor_group].pending;
	debuginfo->servermonitor_latency =
		monitorpage->latency[monitor_group][monitor_offset];
	debuginfo->servermonitor_connectionid =
		monitorpage->parameter[monitor_group]
			[monitor_offset].connectionid.u.id;

	monitorpage++; /* Get the child to parent monitor page */

	debuginfo->clientmonitor_pending =
		monitorpage->trigger_group[monitor_group].pending;
	debuginfo->clientmonitor_latency =
		monitorpage->latency[monitor_group][monitor_offset];
	debuginfo->clientmonitor_connectionid =
		monitorpage->parameter[monitor_group]
			[monitor_offset].connectionid.u.id;

	ringbuffer_get_debuginfo(&channel->inbound, &debuginfo->inbound);
	ringbuffer_get_debuginfo(&channel->outbound, &debuginfo->outbound);
}

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
	       u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
	       void (*onchannelcallback)(void *context), void *context)
{
	struct vmbus_channel_open_channel *openMsg;
	struct vmbus_channel_msginfo *openInfo = NULL;
	void *in, *out;
	unsigned long flags;
	int ret, err = 0;

	/* Aligned to page size */
	/* ASSERT(!(SendRingBufferSize & (PAGE_SIZE - 1))); */
	/* ASSERT(!(RecvRingBufferSize & (PAGE_SIZE - 1))); */

	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;

	/* Allocate the ring buffer */
	out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
		get_order(send_ringbuffer_size + recv_ringbuffer_size));

	if (!out)
		return -ENOMEM;

	/* ASSERT(((unsigned long)out & (PAGE_SIZE-1)) == 0); */

	in = (void *)((unsigned long)out + send_ringbuffer_size);

	newchannel->ringbuffer_pages = out;
	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
					   recv_ringbuffer_size) >> PAGE_SHIFT;

	ret = ringbuffer_init(&newchannel->outbound, out, send_ringbuffer_size);
	if (ret != 0) {
		err = ret;
		goto errorout;
	}

	ret = ringbuffer_init(&newchannel->inbound, in, recv_ringbuffer_size);
	if (ret != 0) {
		err = ret;
		goto errorout;
	}

	/* Establish the gpadl for the ring buffer */
	newchannel->ringbuffer_gpadlhandle = 0;

	ret = vmbus_establish_gpadl(newchannel,
				    newchannel->outbound.ring_buffer,
				    send_ringbuffer_size +
				    recv_ringbuffer_size,
				    &newchannel->ringbuffer_gpadlhandle);
	if (ret != 0) {
		err = ret;
		goto errorout;
	}

	/* Create and init the channel open message */
	openInfo = kmalloc(sizeof(*openInfo) +
			   sizeof(struct vmbus_channel_open_channel),
			   GFP_KERNEL);
	if (!openInfo) {
		err = -ENOMEM;
		goto errorout;
	}

	init_waitqueue_head(&openInfo->waitevent);

	openMsg = (struct vmbus_channel_open_channel *)openInfo->msg;
	openMsg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	openMsg->openid = newchannel->offermsg.child_relid; /* FIXME */
	openMsg->child_relid = newchannel->offermsg.child_relid;
	openMsg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
	openMsg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
						    PAGE_SHIFT;
	openMsg->server_contextarea_gpadlhandle = 0; /* TODO */

	if (userdatalen > MAX_USER_DEFINED_BYTES) {
		err = -EINVAL;
		goto errorout;
	}

	if (userdatalen)
		memcpy(openMsg->userdata, userdata, userdatalen);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&openInfo->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(openMsg,
			     sizeof(struct vmbus_channel_open_channel));
	if (ret != 0)
		goto cleanup;

	openInfo->wait_condition = 0;
	wait_event_timeout(openInfo->waitevent,
			   openInfo->wait_condition,
			   msecs_to_jiffies(1000));
	if (openInfo->wait_condition == 0) {
		err = -ETIMEDOUT;
		goto errorout;
	}

	if (openInfo->response.open_result.status)
		err = openInfo->response.open_result.status;

cleanup:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&openInfo->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(openInfo);
	return err;

errorout:
	ringbuffer_cleanup(&newchannel->outbound);
	ringbuffer_cleanup(&newchannel->inbound);
	free_pages((unsigned long)out,
		   get_order(send_ringbuffer_size + recv_ringbuffer_size));
	kfree(openInfo);
	return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
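
/*
 * Usage sketch (hypothetical caller, not part of this file): a VMBus
 * device driver typically opens its offered channel from probe(), passing
 * page-aligned ring sizes and the callback that will drain the inbound
 * ring:
 *
 *	static void my_channel_cb(void *context)
 *	{
 *		struct my_device *dev = context;
 *
 *		poll the channel here with vmbus_recvpacket(dev->channel, ...)
 *	}
 *
 *	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *			 NULL, 0, my_channel_cb, dev);
 *	if (ret)
 *		return ret;
 *
 * "my_device", "my_channel_cb" and the 4-page ring sizes are illustrative;
 * the requirements visible above are page-multiple ring sizes and
 * userdatalen <= MAX_USER_DEFINED_BYTES.
 */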

/*
 * dump_gpadl_body - Dump the gpadl body message to the console for
 * debugging purposes.
 */
static void dump_gpadl_body(struct vmbus_channel_gpadl_body *gpadl, u32 len)
{
	int i;
	int pfncount;

	pfncount = (len - sizeof(struct vmbus_channel_gpadl_body)) /
		   sizeof(u64);
	DPRINT_DBG(VMBUS, "gpadl body - len %d pfn count %d", len, pfncount);

	for (i = 0; i < pfncount; i++)
		DPRINT_DBG(VMBUS, "gpadl body - %d) pfn %llu",
			   i, gpadl->pfn[i]);
}

/*
 * dump_gpadl_header - Dump the gpadl header message to the console for
 * debugging purposes.
 */
static void dump_gpadl_header(struct vmbus_channel_gpadl_header *gpadl)
{
	int i, j;
	int pagecount;

	DPRINT_DBG(VMBUS,
		   "gpadl header - relid %d, range count %d, range buflen %d",
		   gpadl->child_relid, gpadl->rangecount, gpadl->range_buflen);
	for (i = 0; i < gpadl->rangecount; i++) {
		pagecount = gpadl->range[i].byte_count >> PAGE_SHIFT;
		pagecount = (pagecount > 26) ? 26 : pagecount;

		DPRINT_DBG(VMBUS, "gpadl range %d - len %d offset %d "
			   "page count %d", i, gpadl->range[i].byte_count,
			   gpadl->range[i].byte_offset, pagecount);

		for (j = 0; j < pagecount; j++)
			DPRINT_DBG(VMBUS, "%d) pfn %llu", j,
				   gpadl->range[i].pfn_array[j]);
	}
}

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
			       struct vmbus_channel_msginfo **msginfo,
			       u32 *messagecount)
{
	int i;
	int pagecount;
	unsigned long long pfn;
	struct vmbus_channel_gpadl_header *gpadl_header;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msgheader;
	struct vmbus_channel_msginfo *msgbody = NULL;
	u32 msgsize;

	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

	/* ASSERT((kbuffer & (PAGE_SIZE-1)) == 0); */
	/* ASSERT((Size & (PAGE_SIZE-1)) == 0); */

	pagecount = size >> PAGE_SHIFT;
	pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;

	/* do we need a gpadl body msg */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_header) -
		  sizeof(struct gpa_range);
	pfncount = pfnsize / sizeof(u64);

	if (pagecount > pfncount) {
		/* we need a gpadl body */
		/* fill in the header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pfncount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (!msgheader)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					 pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pfncount; i++)
			gpadl_header->range[0].pfn_array[i] = pfn+i;
		*msginfo = msgheader;
		*messagecount = 1;

		pfnsum = pfncount;
		pfnleft = pagecount - pfncount;

		/* how many pfns can we fit */
		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
			  sizeof(struct vmbus_channel_gpadl_body);
		pfncount = pfnsize / sizeof(u64);

		/* fill in the body */
		while (pfnleft) {
			if (pfnleft > pfncount)
				pfncurr = pfncount;
			else
				pfncurr = pfnleft;

			msgsize = sizeof(struct vmbus_channel_msginfo) +
				  sizeof(struct vmbus_channel_gpadl_body) +
				  pfncurr * sizeof(u64);
			msgbody = kzalloc(msgsize, GFP_KERNEL);
			/* FIXME: we probably need to do more if this fails */
			if (!msgbody)
				goto nomem;
			msgbody->msgsize = msgsize;
			(*messagecount)++;
			gpadl_body =
				(struct vmbus_channel_gpadl_body *)msgbody->msg;

			/*
			 * FIXME:
			 * Gpadl is u32 and we are using a pointer which could
			 * be 64-bit
			 */
			/* gpadl_body->Gpadl = kbuffer; */
			for (i = 0; i < pfncurr; i++)
				gpadl_body->pfn[i] = pfn + pfnsum + i;

			/* add to msg header */
			list_add_tail(&msgbody->msglistentry,
				      &msgheader->submsglist);
			pfnsum += pfncurr;
			pfnleft -= pfncurr;
		}
	} else {
		/* everything fits in a header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pagecount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (msgheader == NULL)
			goto nomem;
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					 pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pagecount; i++)
			gpadl_header->range[0].pfn_array[i] = pfn+i;

		*msginfo = msgheader;
		*messagecount = 1;
	}

	return 0;
nomem:
	kfree(msgheader);
	kfree(msgbody);
	return -ENOMEM;
}
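
/*
 * Sizing note (illustrative, exact values depend on the message-size
 * limits in the headers): a single channel message only has room for
 * pfnsize / sizeof(u64) PFNs, so a buffer whose pagecount exceeds that
 * first pfncount is described by one gpadl header carrying the initial
 * pfncount PFNs plus a chain of gpadl body sub-messages queued on
 * msgheader->submsglist, each holding at most the recomputed pfncount;
 * *messagecount tells the caller how many messages must be posted in
 * total.
 */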

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc
 * @size: page-size multiple
 * @gpadl_handle: on return, the handle of the newly created GPADL
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
			  u32 size, u32 *gpadl_handle)
{
	struct vmbus_channel_gpadl_header *gpadlmsg;
	struct vmbus_channel_gpadl_body *gpadl_body;
	/* struct vmbus_channel_gpadl_created *gpadlCreated; */
	struct vmbus_channel_msginfo *msginfo = NULL;
	struct vmbus_channel_msginfo *submsginfo;
	u32 msgcount;
	struct list_head *curr;
	u32 next_gpadl_handle;
	unsigned long flags;
	int ret = 0;

	next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
	atomic_inc(&vmbus_connection.next_gpadl_handle);

	ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
	if (ret)
		return ret;

	init_waitqueue_head(&msginfo->waitevent);

	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
	gpadlmsg->child_relid = channel->offermsg.child_relid;
	gpadlmsg->gpadl = next_gpadl_handle;

	dump_gpadl_header(gpadlmsg);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&msginfo->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	msginfo->wait_condition = 0;
	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
			     sizeof(*msginfo));
	if (ret != 0)
		goto cleanup;

	if (msgcount > 1) {
		list_for_each(curr, &msginfo->submsglist) {

			/* FIXME: should this use list_entry() instead ? */
			submsginfo = (struct vmbus_channel_msginfo *)curr;
			gpadl_body =
			     (struct vmbus_channel_gpadl_body *)submsginfo->msg;

			gpadl_body->header.msgtype =
				CHANNELMSG_GPADL_BODY;
			gpadl_body->gpadl = next_gpadl_handle;

			dump_gpadl_body(gpadl_body, submsginfo->msgsize -
					sizeof(*submsginfo));
			ret = vmbus_post_msg(gpadl_body,
					     submsginfo->msgsize -
					     sizeof(*submsginfo));
			if (ret != 0)
				goto cleanup;
		}
	}
	wait_event_timeout(msginfo->waitevent,
			   msginfo->wait_condition,
			   msecs_to_jiffies(1000));
	BUG_ON(msginfo->wait_condition == 0);

	/* At this point, we received the gpadl created msg */
	*gpadl_handle = gpadlmsg->gpadl;

cleanup:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&msginfo->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(msginfo);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
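
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver that
 * wants the host to refer to a guest buffer by handle establishes the
 * GPADL once and then quotes the handle in its own protocol messages:
 *
 *	u32 gpadl = 0;
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *					     get_order(buf_size));
 *
 *	ret = vmbus_establish_gpadl(channel, buf, buf_size, &gpadl);
 *	if (ret == 0)
 *		pass gpadl to the host in a device-specific message, and
 *		call vmbus_teardown_gpadl(channel, gpadl) when done
 *
 * buf_size is illustrative; it must be a multiple of PAGE_SIZE and the
 * buffer physically and virtually contiguous, matching the assumptions
 * noted on create_gpadl_header() above.
 */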

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
	struct vmbus_channel_gpadl_teardown *msg;
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret;

	/* ASSERT(gpadl_handle != 0); */

	info = kmalloc(sizeof(*info) +
		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	init_waitqueue_head(&info->waitevent);

	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
	msg->child_relid = channel->offermsg.child_relid;
	msg->gpadl = gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	info->wait_condition = 0;
	ret = vmbus_post_msg(msg,
			     sizeof(struct vmbus_channel_gpadl_teardown));

	wait_event_timeout(info->waitevent,
			   info->wait_condition, msecs_to_jiffies(1000));
	BUG_ON(info->wait_condition == 0);

	/* Received a torndown response */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(info);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
	struct vmbus_channel_close_channel *msg;
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret;

	/* Stop callback and cancel the timer asap */
	channel->onchannel_callback = NULL;
	del_timer_sync(&channel->poll_timer);

	/* Send a closing message */
	info = kmalloc(sizeof(*info) +
		       sizeof(struct vmbus_channel_close_channel), GFP_KERNEL);
	/* FIXME: can't do anything other than return here because the
	 * function is void */
	if (!info)
		return;

	msg = (struct vmbus_channel_close_channel *)info->msg;
	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
	msg->child_relid = channel->offermsg.child_relid;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));

	/* Tear down the gpadl for the channel's ring buffer */
	if (channel->ringbuffer_gpadlhandle)
		vmbus_teardown_gpadl(channel,
				     channel->ringbuffer_gpadlhandle);

	/* TODO: Send a msg to release the childRelId */

	/* Cleanup the ring buffers for this channel */
	ringbuffer_cleanup(&channel->outbound);
	ringbuffer_cleanup(&channel->inbound);

	free_pages((unsigned long)channel->ringbuffer_pages,
		   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

	kfree(info);

	/*
	 * If we are closing the channel during an error path in
	 * opening the channel, don't free the channel since the
	 * caller will free the channel
	 */
	if (channel->state == CHANNEL_OPEN_STATE) {
		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
		list_del(&channel->listentry);
		spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

		free_channel(channel);
	}
}
EXPORT_SYMBOL_GPL(vmbus_close);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Length of the data in @buffer
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 * packet etc.
 *
 * Sends data in @buffer directly to hyper-v via the vmbus
 * This will send the data unparsed to hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
		     u32 bufferlen, u64 requestid,
		     enum vmbus_packet_type type, u32 flags)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct scatterlist bufferlist[3];
	u64 aligned_data = 0;
	int ret;

	dump_vmbus_channel(channel);

	/* ASSERT((packetLenAligned - packetLen) < sizeof(u64)); */

	/* Setup the descriptor */
	desc.type = type; /* VmbusPacketTypeDataInBand; */
	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
	/* in 8-bytes granularity */
	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
	desc.len8 = (u16)(packetlen_aligned >> 3);
	desc.trans_id = requestid;

	sg_init_table(bufferlist, 3);
	sg_set_buf(&bufferlist[0], &desc, sizeof(struct vmpacket_descriptor));
	sg_set_buf(&bufferlist[1], buffer, bufferlen);
	sg_set_buf(&bufferlist[2], &aligned_data,
		   packetlen_aligned - packetlen);

	ret = ringbuffer_write(&channel->outbound, bufferlist, 3);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && !get_ringbuffer_interrupt_mask(&channel->outbound))
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket);
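
/*
 * Usage sketch (hypothetical caller, not part of this file): for small
 * in-band payloads a driver hands vmbus_sendpacket() its request struct
 * and a transaction id it can later match against the completion:
 *
 *	ret = vmbus_sendpacket(channel, &request, sizeof(request),
 *			       (unsigned long)&request,
 *			       VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 *
 * "request" is an illustrative driver-defined structure; VM_PKT_DATA_INBAND
 * is the in-band packet type referred to by the VmbusPacketTypeDataInBand
 * note above, and the completion flag is the one already used by the
 * GPA-direct senders below.
 */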

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				struct hv_page_buffer pagebuffers[],
				u32 pagecount, void *buffer, u32 bufferlen,
				u64 requestid)
{
	int ret;
	int i;
	struct vmbus_channel_packet_page_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct scatterlist bufferlist[3];
	u64 aligned_data = 0;

	if (pagecount > MAX_PAGE_BUFFER_COUNT)
		return -EINVAL;

	dump_vmbus_channel(channel);

	/*
	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
	 * largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
			  ((MAX_PAGE_BUFFER_COUNT - pagecount) *
			  sizeof(struct hv_page_buffer));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* ASSERT((packetLenAligned - packetLen) < sizeof(u64)); */

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = pagecount;

	for (i = 0; i < pagecount; i++) {
		desc.range[i].len = pagebuffers[i].len;
		desc.range[i].offset = pagebuffers[i].offset;
		desc.range[i].pfn = pagebuffers[i].pfn;
	}

	sg_init_table(bufferlist, 3);
	sg_set_buf(&bufferlist[0], &desc, descsize);
	sg_set_buf(&bufferlist[1], buffer, bufferlen);
	sg_set_buf(&bufferlist[2], &aligned_data,
		   packetlen_aligned - packetlen);

	ret = ringbuffer_write(&channel->outbound, bufferlist, 3);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && !get_ringbuffer_interrupt_mask(&channel->outbound))
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
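
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * sending scattered data describes each fragment as one hv_page_buffer and
 * sends its protocol header in-band alongside it:
 *
 *	struct hv_page_buffer pb[2];
 *
 *	pb[0].pfn = virt_to_phys(hdr) >> PAGE_SHIFT;
 *	pb[0].offset = offset_in_page(hdr);
 *	pb[0].len = hdr_len;
 *	pb[1].pfn = virt_to_phys(data) >> PAGE_SHIFT;
 *	pb[1].offset = offset_in_page(data);
 *	pb[1].len = data_len;
 *
 *	ret = vmbus_sendpacket_pagebuffer(channel, pb, 2,
 *					  &request, sizeof(request), req_id);
 *
 * hdr, data, request and req_id are illustrative.  Each hv_page_buffer
 * must describe a range within a single page; larger ranges belong in
 * vmbus_sendpacket_multipagebuffer() below.
 */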

/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 */
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
				struct hv_multipage_buffer *multi_pagebuffer,
				void *buffer, u32 bufferlen, u64 requestid)
{
	int ret;
	struct vmbus_channel_packet_multipage_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct scatterlist bufferlist[3];
	u64 aligned_data = 0;
	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
					 multi_pagebuffer->len);

	dump_vmbus_channel(channel);

	if ((pfncount < 0) || (pfncount > MAX_MULTIPAGE_BUFFER_COUNT))
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_multipage_buffer is
	 * the largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
			  ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
			  sizeof(u64));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* ASSERT((packetLenAligned - packetLen) < sizeof(u64)); */

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = 1;

	desc.range.len = multi_pagebuffer->len;
	desc.range.offset = multi_pagebuffer->offset;

	memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
	       pfncount * sizeof(u64));

	sg_init_table(bufferlist, 3);
	sg_set_buf(&bufferlist[0], &desc, descsize);
	sg_set_buf(&bufferlist[1], buffer, bufferlen);
	sg_set_buf(&bufferlist[2], &aligned_data,
		   packetlen_aligned - packetlen);

	ret = ringbuffer_write(&channel->outbound, bufferlist, 3);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && !get_ringbuffer_interrupt_mask(&channel->outbound))
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer can hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the hyper-v vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u32 *buffer_actual_len, u64 *requestid)
{
	struct vmpacket_descriptor desc;
	u32 packetlen;
	u32 userlen;
	int ret;
	unsigned long flags;

	*buffer_actual_len = 0;
	*requestid = 0;

	spin_lock_irqsave(&channel->inbound_lock, flags);

	ret = ringbuffer_peek(&channel->inbound, &desc,
			      sizeof(struct vmpacket_descriptor));
	if (ret != 0) {
		spin_unlock_irqrestore(&channel->inbound_lock, flags);
		return 0;
	}

	/* VmbusChannelClearEvent(Channel); */

	packetlen = desc.len8 << 3;
	userlen = packetlen - (desc.offset8 << 3);
	/* ASSERT(userLen > 0); */

	*buffer_actual_len = userlen;

	if (userlen > bufferlen) {
		spin_unlock_irqrestore(&channel->inbound_lock, flags);

		pr_err("Buffer too small - got %d needs %d\n",
		       bufferlen, userlen);
		return -ETOOSMALL;
	}

	*requestid = desc.trans_id;

	/* Copy over the packet to the user buffer */
	ret = ringbuffer_read(&channel->inbound, buffer, userlen,
			      (desc.offset8 << 3));

	spin_unlock_irqrestore(&channel->inbound_lock, flags);

	return 0;
}
EXPORT_SYMBOL(vmbus_recvpacket);
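
/*
 * Usage sketch (hypothetical caller, not part of this file): drivers
 * normally call this from their onchannel_callback and loop until the
 * inbound ring is drained:
 *
 *	u32 bytes = 0;
 *	u64 req = 0;
 *	u8 pkt[PACKET_SIZE];
 *
 *	do {
 *		ret = vmbus_recvpacket(channel, pkt, sizeof(pkt),
 *				       &bytes, &req);
 *		if (ret == 0 && bytes)
 *			handle_packet(pkt, bytes, req);
 *	} while (ret == 0 && bytes);
 *
 * PACKET_SIZE and handle_packet() are illustrative.  Note that a
 * too-small buffer is reported through the return value while
 * *buffer_actual_len still tells the caller how much space was needed.
 */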

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u32 *buffer_actual_len,
			 u64 *requestid)
{
	struct vmpacket_descriptor desc;
	u32 packetlen;
	u32 userlen;
	int ret;
	unsigned long flags;

	*buffer_actual_len = 0;
	*requestid = 0;

	spin_lock_irqsave(&channel->inbound_lock, flags);

	ret = ringbuffer_peek(&channel->inbound, &desc,
			      sizeof(struct vmpacket_descriptor));
	if (ret != 0) {
		spin_unlock_irqrestore(&channel->inbound_lock, flags);
		return 0;
	}

	/* VmbusChannelClearEvent(Channel); */

	packetlen = desc.len8 << 3;
	userlen = packetlen - (desc.offset8 << 3);

	*buffer_actual_len = packetlen;

	if (packetlen > bufferlen) {
		spin_unlock_irqrestore(&channel->inbound_lock, flags);

		pr_err("Buffer too small - needed %d bytes but "
		       "got space for only %d bytes\n",
		       packetlen, bufferlen);
		return -ENOBUFS;
	}

	*requestid = desc.trans_id;

	/* Copy over the entire packet to the user buffer */
	ret = ringbuffer_read(&channel->inbound, buffer, packetlen, 0);

	spin_unlock_irqrestore(&channel->inbound_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);

/*
 * vmbus_onchannel_event - Channel event callback
 */
void vmbus_onchannel_event(struct vmbus_channel *channel)
{
	dump_vmbus_channel(channel);
	/* ASSERT(Channel->OnChannelCallback); */

	channel->onchannel_callback(channel->channel_callback_context);

	mod_timer(&channel->poll_timer, jiffies + usecs_to_jiffies(100));
}

/*
 * vmbus_ontimer - Timer event callback
 */
void vmbus_ontimer(unsigned long data)
{
	struct vmbus_channel *channel = (struct vmbus_channel *)data;

	if (channel->onchannel_callback)
		channel->onchannel_callback(channel->channel_callback_context);
}

/*
 * dump_vmbus_channel - Dump vmbus channel info to the console
 */
static void dump_vmbus_channel(struct vmbus_channel *channel)
{
	DPRINT_DBG(VMBUS, "Channel (%d)", channel->offermsg.child_relid);
	dump_ring_info(&channel->outbound, "Outbound ");
	dump_ring_info(&channel->inbound, "Inbound ");
}