/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
static const struct vmbus_device vmbus_devs[] = {
	/* IDE */
	{ .dev_type = HV_IDE, HV_IDE_GUID, .perf_device = true },
	/* SCSI */
	{ .dev_type = HV_SCSI, HV_SCSI_GUID, .perf_device = true },
	/* Fibre Channel */
	{ .dev_type = HV_FC, HV_SYNTHFC_GUID, .perf_device = true },
	/* Synthetic NIC */
	{ .dev_type = HV_NIC, HV_NIC_GUID, .perf_device = true },
	/* Network Direct */
	{ .dev_type = HV_ND, HV_ND_GUID, .perf_device = true },
	/* PCIe pass-through */
	{ .dev_type = HV_PCIE, HV_PCIE_GUID, .perf_device = true },
	/* Synthetic Frame Buffer */
	{ .dev_type = HV_FB, HV_SYNTHVID_GUID, .perf_device = false },
	/* Synthetic Keyboard */
	{ .dev_type = HV_KBD, HV_KBD_GUID, .perf_device = false },
	/* Synthetic Mouse */
	{ .dev_type = HV_MOUSE, HV_MOUSE_GUID, .perf_device = false },
	/* KVP */
	{ .dev_type = HV_KVP, HV_KVP_GUID, .perf_device = false },
	/* Time Synch */
	{ .dev_type = HV_TS, HV_TS_GUID, .perf_device = false },
	/* Heartbeat */
	{ .dev_type = HV_HB, HV_HEART_BEAT_GUID, .perf_device = false },
	/* Shutdown */
	{ .dev_type = HV_SHUTDOWN, HV_SHUTDOWN_GUID, .perf_device = false },
	/* File copy */
	{ .dev_type = HV_FCOPY, HV_FCOPY_GUID, .perf_device = false },
	/* Backup */
	{ .dev_type = HV_BACKUP, HV_VSS_GUID, .perf_device = false },
	/* Dynamic Memory */
	{ .dev_type = HV_DM, HV_DM_GUID, .perf_device = false },
	/* Unknown GUID */
	{ .dev_type = HV_UNKNOWN, .perf_device = false },
};
static const struct {
	uuid_le guid;
} vmbus_unsupported_devs[] = {
	{ HV_AVMA1_GUID },
	{ HV_AVMA2_GUID },
	{ HV_RDV_GUID	},
};
static bool is_unsupported_vmbus_devs(const uuid_le *guid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
		if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid))
			return true;
	return false;
}
static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
	const uuid_le *guid = &channel->offermsg.offer.if_type;
	u16 i;

	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
		return HV_UNKNOWN;

	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
		if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
			return i;
	}
	pr_info("Unknown GUID: %pUl\n", guid);
	return i;
}
/**
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @negop: Pointer to negotiate message structure
 * @buf: Raw buffer channel data
 * @fw_version: The framework version we can support
 * @srv_version: The service version we can support
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * @negop is of type &struct icmsg_negotiate.
 * Set up and fill in default negotiate response message.
 *
 * The fw_version specifies the framework version that
 * we can support and srv_version specifies the service
 * version we can support.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
			       struct icmsg_negotiate *negop, u8 *buf,
			       int fw_version, int srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i;
	bool found_match = false;

	icmsghdrp->icmsgsize = 0x10;
	fw_major = (fw_version >> 16);
	fw_minor = (fw_version & 0xFFFF);

	srv_major = (srv_version >> 16);
	srv_minor = (srv_version & 0xFFFF);

	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */
	for (i = 0; i < negop->icframe_vercnt; i++) {
		if ((negop->icversion_data[i].major == fw_major) &&
		    (negop->icversion_data[i].minor == fw_minor)) {
			icframe_major = negop->icversion_data[i].major;
			icframe_minor = negop->icversion_data[i].minor;
			found_match = true;
		}
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = negop->icframe_vercnt;
	     (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
		if ((negop->icversion_data[i].major == srv_major) &&
		    (negop->icversion_data[i].minor == srv_minor)) {
			icmsg_major = negop->icversion_data[i].major;
			icmsg_minor = negop->icversion_data[i].minor;
			found_match = true;
		}
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */
fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;

	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
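
/*
 * Usage sketch (illustrative, not part of this file): an IC driver such
 * as hv_kvp calls this from its channel callback when the host opens a
 * version negotiation. The buffer layout matches the one assumed above
 * (a vmbuspipe_hdr followed by an icmsg_hdr); "recv_buffer",
 * "fw_version" and "srv_version" are the caller's own values:
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
 *		vmbus_prep_negotiate_resp(icmsghdrp, negop, recv_buffer,
 *					  fw_version, srv_version);
 *	}
 *
 * The return value tells the caller whether a mutually supported
 * framework/service version pair was found.
 */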
/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	channel->acquire_ring_lock = true;
	spin_lock_init(&channel->inbound_lock);
	spin_lock_init(&channel->lock);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	return channel;
}
/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	kfree(channel);
}
static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	int cpu = smp_processor_id();

	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
}

static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del(&channel->percpu_list);
}
static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
}
void hv_event_tasklet_disable(struct vmbus_channel *channel)
{
	struct tasklet_struct *tasklet;

	tasklet = hv_context.event_dpc[channel->target_cpu];
	tasklet_disable(tasklet);
}

void hv_event_tasklet_enable(struct vmbus_channel *channel)
{
	struct tasklet_struct *tasklet;

	tasklet = hv_context.event_dpc[channel->target_cpu];
	tasklet_enable(tasklet);

	/* In case there is any pending event */
	tasklet_schedule(tasklet);
}
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
	unsigned long flags;
	struct vmbus_channel *primary_channel;

	BUG_ON(!channel->rescind);
	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));

	hv_event_tasklet_disable(channel);
	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu,
					 percpu_channel_deq, channel, true);
	} else {
		percpu_channel_deq(channel);
		put_cpu();
	}
	hv_event_tasklet_enable(channel);

	if (channel->primary_channel == NULL) {
		list_del(&channel->listentry);

		primary_channel = channel;
	} else {
		primary_channel = channel->primary_channel;
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&channel->sc_list);
		primary_channel->num_sc--;
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}

	/*
	 * We need to free the bit for init_vp_index() to work in the case
	 * of sub-channel, when we reload drivers like hv_netvsc.
	 */
	if (channel->affinity_policy == HV_LOCALIZED)
		cpumask_clear_cpu(channel->target_cpu,
				  &primary_channel->alloced_cpus_in_node);

	vmbus_release_relid(relid);

	free_channel(channel);
}
void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
				 listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
	mutex_unlock(&vmbus_connection.channel_mutex);
}
/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	bool fnew = true;
	unsigned long flags;
	u16 dev_type;
	int ret;

	/* Make sure this is a new offer */
	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
				 newchannel->offermsg.offer.if_type) &&
		    !uuid_le_cmp(channel->offermsg.offer.if_instance,
				 newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);

	mutex_unlock(&vmbus_connection.channel_mutex);

	if (!fnew) {
		/*
		 * Check to see if this is a sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index != 0) {
			/*
			 * Process the sub-channel.
			 */
			newchannel->primary_channel = channel;
			spin_lock_irqsave(&channel->lock, flags);
			list_add_tail(&newchannel->sc_list, &channel->sc_list);
			channel->num_sc++;
			spin_unlock_irqrestore(&channel->lock, flags);
		} else {
			goto err_free_chan;
		}
	}

	dev_type = hv_get_dev_type(newchannel);

	init_vp_index(newchannel, dev_type);

	hv_event_tasklet_disable(newchannel);
	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_enq,
					 newchannel, true);
	} else {
		percpu_channel_enq(newchannel);
		put_cpu();
	}
	hv_event_tasklet_enable(newchannel);

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (!fnew) {
		if (channel->sc_creation_callback != NULL)
			channel->sc_creation_callback(newchannel);
		return;
	}

	/*
	 * Start the process of binding this offer to the driver
	 * We need to set the DeviceObject field before calling
	 * vmbus_child_dev_add()
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = dev_type;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	ret = vmbus_device_register(newchannel->device_obj);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
		       newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}
	return;

err_deq_chan:
	mutex_lock(&vmbus_connection.channel_mutex);
	list_del(&newchannel->listentry);
	mutex_unlock(&vmbus_connection.channel_mutex);

	hv_event_tasklet_disable(newchannel);
	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_deq, newchannel, true);
	} else {
		percpu_channel_deq(newchannel);
		put_cpu();
	}
	hv_event_tasklet_enable(newchannel);

	vmbus_release_relid(newchannel->offermsg.child_relid);

err_free_chan:
	free_channel(newchannel);
}
/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to VCPU.
 * We do this in a hierarchical fashion:
 * First distribute the primary channels across available NUMA nodes
 * and then distribute the subchannels amongst the CPUs in the NUMA
 * node assigned to the primary channel.
 *
 * For pre-win8 hosts or non-performance critical channels we assign the
 * first CPU in the first NUMA node.
 */
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
{
	u32 cur_cpu;
	bool perf_chn = vmbus_devs[dev_type].perf_device;
	struct vmbus_channel *primary = channel->primary_channel;
	int next_node;
	struct cpumask available_mask;
	struct cpumask *alloced_mask;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
		channel->numa_node = 0;
		channel->target_cpu = 0;
		channel->target_vp = hv_context.vp_index[0];
		return;
	}

	/*
	 * Based on the channel affinity policy, we will assign the NUMA
	 * nodes.
	 */
	if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
		while (true) {
			next_node = next_numa_node_id++;
			if (next_node == nr_node_ids) {
				next_node = next_numa_node_id = 0;
				continue;
			}
			if (cpumask_empty(cpumask_of_node(next_node)))
				continue;
			break;
		}
		channel->numa_node = next_node;
		primary = channel;
	}
	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(&available_mask, alloced_mask,
		    cpumask_of_node(primary->numa_node));

	cur_cpu = -1;

	if (primary->affinity_policy == HV_LOCALIZED) {
		/*
		 * Normally Hyper-V host doesn't create more subchannels
		 * than there are VCPUs on the node but it is possible when not
		 * all present VCPUs on the node are initialized by guest.
		 * Clear the alloced_cpus_in_node to start over.
		 */
		if (cpumask_equal(&primary->alloced_cpus_in_node,
				  cpumask_of_node(primary->numa_node)))
			cpumask_clear(&primary->alloced_cpus_in_node);
	}

	while (true) {
		cur_cpu = cpumask_next(cur_cpu, &available_mask);
		if (cur_cpu >= nr_cpu_ids) {
			cur_cpu = -1;
			cpumask_copy(&available_mask,
				     cpumask_of_node(primary->numa_node));
			continue;
		}

		if (primary->affinity_policy == HV_LOCALIZED) {
			/*
			 * NOTE: in the case of sub-channel, we clear the
			 * sub-channel related bit(s) in
			 * primary->alloced_cpus_in_node in
			 * hv_process_channel_removal(), so when we
			 * reload drivers like hv_netvsc in SMP guest, here
			 * we're able to re-allocate
			 * bit from primary->alloced_cpus_in_node.
			 */
			if (!cpumask_test_cpu(cur_cpu,
					      &primary->alloced_cpus_in_node)) {
				cpumask_set_cpu(cur_cpu,
						&primary->alloced_cpus_in_node);
				cpumask_set_cpu(cur_cpu, alloced_mask);
				break;
			}
		} else {
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_context.vp_index[cur_cpu];
}
static void vmbus_wait_for_unload(void)
{
	int cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type;

	/*
	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
	 * used for initial contact or to CPU0 depending on host version. When
	 * we're crashing on a different CPU let's hope that IRQ handler on
	 * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
	 * functional and vmbus_unload_response() will complete
	 * vmbus_connection.unload_event. If not, the last thing we can do is
	 * read message pages for all CPUs directly.
	 */
	while (1) {
		if (completion_done(&vmbus_connection.unload_event))
			break;

		for_each_online_cpu(cpu) {
			page_addr = hv_context.synic_message_page[cpu];
			msg = (struct hv_message *)page_addr +
				VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
				continue;

			hdr = (struct vmbus_channel_message_header *)
				msg->u.payload;

			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
				complete(&vmbus_connection.unload_event);

			vmbus_signal_eom(msg, message_type);
		}

		mdelay(10);
	}

	/*
	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
	 * maybe-pending messages on all CPUs to be able to receive new
	 * messages after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		page_addr = hv_context.synic_message_page[cpu];
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
}
/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wakeup the waiting thread.
	 * Once we successfully unload, we can cleanup the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}
void vmbus_initiate_unload(bool crash)
{
	struct vmbus_channel_message_header hdr;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));

	/*
	 * vmbus_initiate_unload() is also called on crash and the crash can be
	 * happening in an interrupt context, where scheduling is impossible.
	 */
	if (!crash)
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}
/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * By default we setup state to enable batched
	 * reading. A specific service can choose to
	 * disable this prior to opening the channel.
	 */
	newchannel->batched_reading = true;

	/*
	 * Setup state for signalling the host.
	 */
	newchannel->sig_event = (struct hv_input_signal_event *)
				(ALIGN((unsigned long)
				&newchannel->sig_buf,
				HV_HYPERCALL_PARAM_ALIGN));

	newchannel->sig_event->connectionid.asu32 = 0;
	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
	newchannel->sig_event->flag_number = 0;
	newchannel->sig_event->rsvdz = 0;

	if (vmbus_proto_version != VERSION_WS2008) {
		newchannel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event->connectionid.u.id =
				offer->connection_id;
	}

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}
/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	unsigned long flags;
	struct device *dev;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);

	if (channel == NULL) {
		/*
		 * This should be impossible, because in
		 * vmbus_process_offer(), we have already invoked
		 * vmbus_release_relid() on error.
		 */
		goto out;
	}

	spin_lock_irqsave(&channel->lock, flags);
	channel->rescind = true;
	spin_unlock_irqrestore(&channel->lock, flags);

	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);
			goto out;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	} else {
		hv_process_channel_removal(channel,
					   channel->offermsg.child_relid);
	}

out:
	mutex_unlock(&vmbus_connection.channel_mutex);
}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
	mutex_lock(&vmbus_connection.channel_mutex);

	BUG_ON(!is_hvsock_channel(channel));

	channel->rescind = true;
	vmbus_device_unregister(channel->device_obj);

	mutex_unlock(&vmbus_connection.channel_mutex);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}
/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we received a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we received a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we received a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_onversion_response - Version response handler
 *
 * This is invoked when we received a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/* Channel message dispatch table */
struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT] = {
	{CHANNELMSG_INVALID,			0, NULL},
	{CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer},
	{CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind},
	{CHANNELMSG_REQUESTOFFERS,		0, NULL},
	{CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered},
	{CHANNELMSG_OPENCHANNEL,		0, NULL},
	{CHANNELMSG_OPENCHANNEL_RESULT,		1, vmbus_onopen_result},
	{CHANNELMSG_CLOSECHANNEL,		0, NULL},
	{CHANNELMSG_GPADL_HEADER,		0, NULL},
	{CHANNELMSG_GPADL_BODY,			0, NULL},
	{CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created},
	{CHANNELMSG_GPADL_TEARDOWN,		0, NULL},
	{CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown},
	{CHANNELMSG_RELID_RELEASED,		0, NULL},
	{CHANNELMSG_INITIATE_CONTACT,		0, NULL},
	{CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response},
	{CHANNELMSG_UNLOAD,			0, NULL},
	{CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response},
	{CHANNELMSG_18,				0, NULL},
	{CHANNELMSG_19,				0, NULL},
	{CHANNELMSG_20,				0, NULL},
	{CHANNELMSG_TL_CONNECT_REQUEST,		0, NULL},
};
/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;
	int size;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
	size = msg->header.payload_size;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		pr_err("Received invalid channel message type %d size %d\n",
		       hdr->msgtype, size);
		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
				     (unsigned char *)msg->u.payload, size);
		return;
	}

	if (channel_message_table[hdr->msgtype].message_handler)
		channel_message_table[hdr->msgtype].message_handler(hdr);
	else
		pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}
/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;

	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg,
			     sizeof(struct vmbus_channel_message_header));
	if (ret != 0)
		pr_err("Unable to request offers - %d\n", ret);

	kfree(msginfo);

	return ret;
}
/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally amongst all available channels.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	cur_cpu = hv_context.vp_index[get_cpu()];
	put_cpu();
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				  void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
				    void (*chn_rescind_cb)(struct vmbus_channel *))
{
	channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);