/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25 #include <linux/init.h>
26 #include <linux/module.h>
27 #include <linux/device.h>
28 #include <linux/interrupt.h>
29 #include <linux/sysctl.h>
30 #include <linux/slab.h>
31 #include <linux/acpi.h>
32 #include <linux/completion.h>
33 #include <linux/hyperv.h>
34 #include <linux/kernel_stat.h>
35 #include <linux/clockchips.h>
36 #include <linux/cpu.h>
37 #include <asm/hyperv.h>
38 #include <asm/hypervisor.h>
39 #include <asm/mshyperv.h>
40 #include <linux/notifier.h>
41 #include <linux/ptrace.h>
42 #include "hyperv_vmbus.h"
44 static struct acpi_device
*hv_acpi_dev
;
46 static struct tasklet_struct msg_dpc
;
47 static struct completion probe_event
;
51 static int hyperv_panic_event(struct notifier_block
*nb
,
52 unsigned long event
, void *ptr
)
56 regs
= current_pt_regs();
58 wrmsrl(HV_X64_MSR_CRASH_P0
, regs
->ip
);
59 wrmsrl(HV_X64_MSR_CRASH_P1
, regs
->ax
);
60 wrmsrl(HV_X64_MSR_CRASH_P2
, regs
->bx
);
61 wrmsrl(HV_X64_MSR_CRASH_P3
, regs
->cx
);
62 wrmsrl(HV_X64_MSR_CRASH_P4
, regs
->dx
);
65 * Let Hyper-V know there is crash data available
67 wrmsrl(HV_X64_MSR_CRASH_CTL
, HV_CRASH_CTL_CRASH_NOTIFY
);
71 static struct notifier_block hyperv_panic_block
= {
72 .notifier_call
= hyperv_panic_event
,
75 struct resource hyperv_mmio
= {
76 .name
= "hyperv mmio",
77 .flags
= IORESOURCE_MEM
,
79 EXPORT_SYMBOL_GPL(hyperv_mmio
);
81 static int vmbus_exists(void)
83 if (hv_acpi_dev
== NULL
)
89 #define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
90 static void print_alias_name(struct hv_device
*hv_dev
, char *alias_name
)
93 for (i
= 0; i
< VMBUS_ALIAS_LEN
; i
+= 2)
94 sprintf(&alias_name
[i
], "%02x", hv_dev
->dev_type
.b
[i
/2]);
97 static u8
channel_monitor_group(struct vmbus_channel
*channel
)
99 return (u8
)channel
->offermsg
.monitorid
/ 32;
102 static u8
channel_monitor_offset(struct vmbus_channel
*channel
)
104 return (u8
)channel
->offermsg
.monitorid
% 32;
107 static u32
channel_pending(struct vmbus_channel
*channel
,
108 struct hv_monitor_page
*monitor_page
)
110 u8 monitor_group
= channel_monitor_group(channel
);
111 return monitor_page
->trigger_group
[monitor_group
].pending
;
114 static u32
channel_latency(struct vmbus_channel
*channel
,
115 struct hv_monitor_page
*monitor_page
)
117 u8 monitor_group
= channel_monitor_group(channel
);
118 u8 monitor_offset
= channel_monitor_offset(channel
);
119 return monitor_page
->latency
[monitor_group
][monitor_offset
];
122 static u32
channel_conn_id(struct vmbus_channel
*channel
,
123 struct hv_monitor_page
*monitor_page
)
125 u8 monitor_group
= channel_monitor_group(channel
);
126 u8 monitor_offset
= channel_monitor_offset(channel
);
127 return monitor_page
->parameter
[monitor_group
][monitor_offset
].connectionid
.u
.id
;
130 static ssize_t
id_show(struct device
*dev
, struct device_attribute
*dev_attr
,
133 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
135 if (!hv_dev
->channel
)
137 return sprintf(buf
, "%d\n", hv_dev
->channel
->offermsg
.child_relid
);
139 static DEVICE_ATTR_RO(id
);
141 static ssize_t
state_show(struct device
*dev
, struct device_attribute
*dev_attr
,
144 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
146 if (!hv_dev
->channel
)
148 return sprintf(buf
, "%d\n", hv_dev
->channel
->state
);
150 static DEVICE_ATTR_RO(state
);
152 static ssize_t
monitor_id_show(struct device
*dev
,
153 struct device_attribute
*dev_attr
, char *buf
)
155 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
157 if (!hv_dev
->channel
)
159 return sprintf(buf
, "%d\n", hv_dev
->channel
->offermsg
.monitorid
);
161 static DEVICE_ATTR_RO(monitor_id
);
163 static ssize_t
class_id_show(struct device
*dev
,
164 struct device_attribute
*dev_attr
, char *buf
)
166 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
168 if (!hv_dev
->channel
)
170 return sprintf(buf
, "{%pUl}\n",
171 hv_dev
->channel
->offermsg
.offer
.if_type
.b
);
173 static DEVICE_ATTR_RO(class_id
);
175 static ssize_t
device_id_show(struct device
*dev
,
176 struct device_attribute
*dev_attr
, char *buf
)
178 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
180 if (!hv_dev
->channel
)
182 return sprintf(buf
, "{%pUl}\n",
183 hv_dev
->channel
->offermsg
.offer
.if_instance
.b
);
185 static DEVICE_ATTR_RO(device_id
);
187 static ssize_t
modalias_show(struct device
*dev
,
188 struct device_attribute
*dev_attr
, char *buf
)
190 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
191 char alias_name
[VMBUS_ALIAS_LEN
+ 1];
193 print_alias_name(hv_dev
, alias_name
);
194 return sprintf(buf
, "vmbus:%s\n", alias_name
);
196 static DEVICE_ATTR_RO(modalias
);
198 static ssize_t
server_monitor_pending_show(struct device
*dev
,
199 struct device_attribute
*dev_attr
,
202 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
204 if (!hv_dev
->channel
)
206 return sprintf(buf
, "%d\n",
207 channel_pending(hv_dev
->channel
,
208 vmbus_connection
.monitor_pages
[1]));
210 static DEVICE_ATTR_RO(server_monitor_pending
);
212 static ssize_t
client_monitor_pending_show(struct device
*dev
,
213 struct device_attribute
*dev_attr
,
216 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
218 if (!hv_dev
->channel
)
220 return sprintf(buf
, "%d\n",
221 channel_pending(hv_dev
->channel
,
222 vmbus_connection
.monitor_pages
[1]));
224 static DEVICE_ATTR_RO(client_monitor_pending
);
226 static ssize_t
server_monitor_latency_show(struct device
*dev
,
227 struct device_attribute
*dev_attr
,
230 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
232 if (!hv_dev
->channel
)
234 return sprintf(buf
, "%d\n",
235 channel_latency(hv_dev
->channel
,
236 vmbus_connection
.monitor_pages
[0]));
238 static DEVICE_ATTR_RO(server_monitor_latency
);
240 static ssize_t
client_monitor_latency_show(struct device
*dev
,
241 struct device_attribute
*dev_attr
,
244 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
246 if (!hv_dev
->channel
)
248 return sprintf(buf
, "%d\n",
249 channel_latency(hv_dev
->channel
,
250 vmbus_connection
.monitor_pages
[1]));
252 static DEVICE_ATTR_RO(client_monitor_latency
);
254 static ssize_t
server_monitor_conn_id_show(struct device
*dev
,
255 struct device_attribute
*dev_attr
,
258 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
260 if (!hv_dev
->channel
)
262 return sprintf(buf
, "%d\n",
263 channel_conn_id(hv_dev
->channel
,
264 vmbus_connection
.monitor_pages
[0]));
266 static DEVICE_ATTR_RO(server_monitor_conn_id
);
268 static ssize_t
client_monitor_conn_id_show(struct device
*dev
,
269 struct device_attribute
*dev_attr
,
272 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
274 if (!hv_dev
->channel
)
276 return sprintf(buf
, "%d\n",
277 channel_conn_id(hv_dev
->channel
,
278 vmbus_connection
.monitor_pages
[1]));
280 static DEVICE_ATTR_RO(client_monitor_conn_id
);
282 static ssize_t
out_intr_mask_show(struct device
*dev
,
283 struct device_attribute
*dev_attr
, char *buf
)
285 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
286 struct hv_ring_buffer_debug_info outbound
;
288 if (!hv_dev
->channel
)
290 hv_ringbuffer_get_debuginfo(&hv_dev
->channel
->outbound
, &outbound
);
291 return sprintf(buf
, "%d\n", outbound
.current_interrupt_mask
);
293 static DEVICE_ATTR_RO(out_intr_mask
);
295 static ssize_t
out_read_index_show(struct device
*dev
,
296 struct device_attribute
*dev_attr
, char *buf
)
298 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
299 struct hv_ring_buffer_debug_info outbound
;
301 if (!hv_dev
->channel
)
303 hv_ringbuffer_get_debuginfo(&hv_dev
->channel
->outbound
, &outbound
);
304 return sprintf(buf
, "%d\n", outbound
.current_read_index
);
306 static DEVICE_ATTR_RO(out_read_index
);
308 static ssize_t
out_write_index_show(struct device
*dev
,
309 struct device_attribute
*dev_attr
,
312 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
313 struct hv_ring_buffer_debug_info outbound
;
315 if (!hv_dev
->channel
)
317 hv_ringbuffer_get_debuginfo(&hv_dev
->channel
->outbound
, &outbound
);
318 return sprintf(buf
, "%d\n", outbound
.current_write_index
);
320 static DEVICE_ATTR_RO(out_write_index
);
322 static ssize_t
out_read_bytes_avail_show(struct device
*dev
,
323 struct device_attribute
*dev_attr
,
326 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
327 struct hv_ring_buffer_debug_info outbound
;
329 if (!hv_dev
->channel
)
331 hv_ringbuffer_get_debuginfo(&hv_dev
->channel
->outbound
, &outbound
);
332 return sprintf(buf
, "%d\n", outbound
.bytes_avail_toread
);
334 static DEVICE_ATTR_RO(out_read_bytes_avail
);
336 static ssize_t
out_write_bytes_avail_show(struct device
*dev
,
337 struct device_attribute
*dev_attr
,
340 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
341 struct hv_ring_buffer_debug_info outbound
;
343 if (!hv_dev
->channel
)
345 hv_ringbuffer_get_debuginfo(&hv_dev
->channel
->outbound
, &outbound
);
346 return sprintf(buf
, "%d\n", outbound
.bytes_avail_towrite
);
348 static DEVICE_ATTR_RO(out_write_bytes_avail
);
350 static ssize_t
in_intr_mask_show(struct device
*dev
,
351 struct device_attribute
*dev_attr
, char *buf
)
353 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
354 struct hv_ring_buffer_debug_info inbound
;
356 if (!hv_dev
->channel
)
358 hv_ringbuffer_get_debuginfo(&hv_dev
->channel
->inbound
, &inbound
);
359 return sprintf(buf
, "%d\n", inbound
.current_interrupt_mask
);
361 static DEVICE_ATTR_RO(in_intr_mask
);
363 static ssize_t
in_read_index_show(struct device
*dev
,
364 struct device_attribute
*dev_attr
, char *buf
)
366 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
367 struct hv_ring_buffer_debug_info inbound
;
369 if (!hv_dev
->channel
)
371 hv_ringbuffer_get_debuginfo(&hv_dev
->channel
->inbound
, &inbound
);
372 return sprintf(buf
, "%d\n", inbound
.current_read_index
);
374 static DEVICE_ATTR_RO(in_read_index
);
376 static ssize_t
in_write_index_show(struct device
*dev
,
377 struct device_attribute
*dev_attr
, char *buf
)
379 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
380 struct hv_ring_buffer_debug_info inbound
;
382 if (!hv_dev
->channel
)
384 hv_ringbuffer_get_debuginfo(&hv_dev
->channel
->inbound
, &inbound
);
385 return sprintf(buf
, "%d\n", inbound
.current_write_index
);
387 static DEVICE_ATTR_RO(in_write_index
);
389 static ssize_t
in_read_bytes_avail_show(struct device
*dev
,
390 struct device_attribute
*dev_attr
,
393 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
394 struct hv_ring_buffer_debug_info inbound
;
396 if (!hv_dev
->channel
)
398 hv_ringbuffer_get_debuginfo(&hv_dev
->channel
->inbound
, &inbound
);
399 return sprintf(buf
, "%d\n", inbound
.bytes_avail_toread
);
401 static DEVICE_ATTR_RO(in_read_bytes_avail
);
403 static ssize_t
in_write_bytes_avail_show(struct device
*dev
,
404 struct device_attribute
*dev_attr
,
407 struct hv_device
*hv_dev
= device_to_hv_device(dev
);
408 struct hv_ring_buffer_debug_info inbound
;
410 if (!hv_dev
->channel
)
412 hv_ringbuffer_get_debuginfo(&hv_dev
->channel
->inbound
, &inbound
);
413 return sprintf(buf
, "%d\n", inbound
.bytes_avail_towrite
);
415 static DEVICE_ATTR_RO(in_write_bytes_avail
);
417 /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
418 static struct attribute
*vmbus_attrs
[] = {
420 &dev_attr_state
.attr
,
421 &dev_attr_monitor_id
.attr
,
422 &dev_attr_class_id
.attr
,
423 &dev_attr_device_id
.attr
,
424 &dev_attr_modalias
.attr
,
425 &dev_attr_server_monitor_pending
.attr
,
426 &dev_attr_client_monitor_pending
.attr
,
427 &dev_attr_server_monitor_latency
.attr
,
428 &dev_attr_client_monitor_latency
.attr
,
429 &dev_attr_server_monitor_conn_id
.attr
,
430 &dev_attr_client_monitor_conn_id
.attr
,
431 &dev_attr_out_intr_mask
.attr
,
432 &dev_attr_out_read_index
.attr
,
433 &dev_attr_out_write_index
.attr
,
434 &dev_attr_out_read_bytes_avail
.attr
,
435 &dev_attr_out_write_bytes_avail
.attr
,
436 &dev_attr_in_intr_mask
.attr
,
437 &dev_attr_in_read_index
.attr
,
438 &dev_attr_in_write_index
.attr
,
439 &dev_attr_in_read_bytes_avail
.attr
,
440 &dev_attr_in_write_bytes_avail
.attr
,
443 ATTRIBUTE_GROUPS(vmbus
);
446 * vmbus_uevent - add uevent for our device
448 * This routine is invoked when a device is added or removed on the vmbus to
449 * generate a uevent to udev in the userspace. The udev will then look at its
450 * rule and the uevent generated here to load the appropriate driver
452 * The alias string will be of the form vmbus:guid where guid is the string
453 * representation of the device guid (each byte of the guid will be
454 * represented with two hex characters.
456 static int vmbus_uevent(struct device
*device
, struct kobj_uevent_env
*env
)
458 struct hv_device
*dev
= device_to_hv_device(device
);
460 char alias_name
[VMBUS_ALIAS_LEN
+ 1];
462 print_alias_name(dev
, alias_name
);
463 ret
= add_uevent_var(env
, "MODALIAS=vmbus:%s", alias_name
);
467 static const uuid_le null_guid
;
469 static inline bool is_null_guid(const __u8
*guid
)
471 if (memcmp(guid
, &null_guid
, sizeof(uuid_le
)))
477 * Return a matching hv_vmbus_device_id pointer.
478 * If there is no match, return NULL.
480 static const struct hv_vmbus_device_id
*hv_vmbus_get_id(
481 const struct hv_vmbus_device_id
*id
,
484 for (; !is_null_guid(id
->guid
); id
++)
485 if (!memcmp(&id
->guid
, guid
, sizeof(uuid_le
)))
494 * vmbus_match - Attempt to match the specified device to the specified driver
496 static int vmbus_match(struct device
*device
, struct device_driver
*driver
)
498 struct hv_driver
*drv
= drv_to_hv_drv(driver
);
499 struct hv_device
*hv_dev
= device_to_hv_device(device
);
501 if (hv_vmbus_get_id(drv
->id_table
, hv_dev
->dev_type
.b
))
508 * vmbus_probe - Add the new vmbus's child device
510 static int vmbus_probe(struct device
*child_device
)
513 struct hv_driver
*drv
=
514 drv_to_hv_drv(child_device
->driver
);
515 struct hv_device
*dev
= device_to_hv_device(child_device
);
516 const struct hv_vmbus_device_id
*dev_id
;
518 dev_id
= hv_vmbus_get_id(drv
->id_table
, dev
->dev_type
.b
);
520 ret
= drv
->probe(dev
, dev_id
);
522 pr_err("probe failed for device %s (%d)\n",
523 dev_name(child_device
), ret
);
526 pr_err("probe not set for driver %s\n",
527 dev_name(child_device
));
534 * vmbus_remove - Remove a vmbus device
536 static int vmbus_remove(struct device
*child_device
)
538 struct hv_driver
*drv
;
539 struct hv_device
*dev
= device_to_hv_device(child_device
);
540 u32 relid
= dev
->channel
->offermsg
.child_relid
;
542 if (child_device
->driver
) {
543 drv
= drv_to_hv_drv(child_device
->driver
);
547 hv_process_channel_removal(dev
->channel
, relid
);
548 pr_err("remove not set for driver %s\n",
549 dev_name(child_device
));
553 * We don't have a driver for this device; deal with the
554 * rescind message by removing the channel.
556 hv_process_channel_removal(dev
->channel
, relid
);
564 * vmbus_shutdown - Shutdown a vmbus device
566 static void vmbus_shutdown(struct device
*child_device
)
568 struct hv_driver
*drv
;
569 struct hv_device
*dev
= device_to_hv_device(child_device
);
572 /* The device may not be attached yet */
573 if (!child_device
->driver
)
576 drv
= drv_to_hv_drv(child_device
->driver
);
/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* Frees the object allocated in vmbus_device_create(). */
	kfree(hv_dev);
}
596 /* The one and only one */
597 static struct bus_type hv_bus
= {
599 .match
= vmbus_match
,
600 .shutdown
= vmbus_shutdown
,
601 .remove
= vmbus_remove
,
602 .probe
= vmbus_probe
,
603 .uevent
= vmbus_uevent
,
604 .dev_groups
= vmbus_groups
,
607 struct onmessage_work_context
{
608 struct work_struct work
;
609 struct hv_message msg
;
612 static void vmbus_onmessage_work(struct work_struct
*work
)
614 struct onmessage_work_context
*ctx
;
616 /* Do not process messages if we're in DISCONNECTED state */
617 if (vmbus_connection
.conn_state
== DISCONNECTED
)
620 ctx
= container_of(work
, struct onmessage_work_context
,
622 vmbus_onmessage(&ctx
->msg
);
626 static void hv_process_timer_expiration(struct hv_message
*msg
, int cpu
)
628 struct clock_event_device
*dev
= hv_context
.clk_evt
[cpu
];
630 if (dev
->event_handler
)
631 dev
->event_handler(dev
);
633 msg
->header
.message_type
= HVMSG_NONE
;
636 * Make sure the write to MessageType (ie set to
637 * HVMSG_NONE) happens before we read the
638 * MessagePending and EOMing. Otherwise, the EOMing
639 * will not deliver any more messages since there is
644 if (msg
->header
.message_flags
.msg_pending
) {
646 * This will cause message queue rescan to
647 * possibly deliver another msg from the
650 wrmsrl(HV_X64_MSR_EOM
, 0);
654 static void vmbus_on_msg_dpc(unsigned long data
)
656 int cpu
= smp_processor_id();
657 void *page_addr
= hv_context
.synic_message_page
[cpu
];
658 struct hv_message
*msg
= (struct hv_message
*)page_addr
+
660 struct vmbus_channel_message_header
*hdr
;
661 struct vmbus_channel_message_table_entry
*entry
;
662 struct onmessage_work_context
*ctx
;
665 if (msg
->header
.message_type
== HVMSG_NONE
)
669 hdr
= (struct vmbus_channel_message_header
*)msg
->u
.payload
;
671 if (hdr
->msgtype
>= CHANNELMSG_COUNT
) {
672 WARN_ONCE(1, "unknown msgtype=%d\n", hdr
->msgtype
);
676 entry
= &channel_message_table
[hdr
->msgtype
];
677 if (entry
->handler_type
== VMHT_BLOCKING
) {
678 ctx
= kmalloc(sizeof(*ctx
), GFP_ATOMIC
);
682 INIT_WORK(&ctx
->work
, vmbus_onmessage_work
);
683 memcpy(&ctx
->msg
, msg
, sizeof(*msg
));
685 queue_work(vmbus_connection
.work_queue
, &ctx
->work
);
687 entry
->message_handler(hdr
);
690 msg
->header
.message_type
= HVMSG_NONE
;
693 * Make sure the write to MessageType (ie set to
694 * HVMSG_NONE) happens before we read the
695 * MessagePending and EOMing. Otherwise, the EOMing
696 * will not deliver any more messages since there is
701 if (msg
->header
.message_flags
.msg_pending
) {
703 * This will cause message queue rescan to
704 * possibly deliver another msg from the
707 wrmsrl(HV_X64_MSR_EOM
, 0);
712 static void vmbus_isr(void)
714 int cpu
= smp_processor_id();
716 struct hv_message
*msg
;
717 union hv_synic_event_flags
*event
;
718 bool handled
= false;
720 page_addr
= hv_context
.synic_event_page
[cpu
];
721 if (page_addr
== NULL
)
724 event
= (union hv_synic_event_flags
*)page_addr
+
727 * Check for events before checking for messages. This is the order
728 * in which events and messages are checked in Windows guests on
729 * Hyper-V, and the Windows team suggested we do the same.
732 if ((vmbus_proto_version
== VERSION_WS2008
) ||
733 (vmbus_proto_version
== VERSION_WIN7
)) {
735 /* Since we are a child, we only need to check bit 0 */
736 if (sync_test_and_clear_bit(0,
737 (unsigned long *) &event
->flags32
[0])) {
742 * Our host is win8 or above. The signaling mechanism
743 * has changed and we can directly look at the event page.
744 * If bit n is set then we have an interrup on the channel
751 tasklet_schedule(hv_context
.event_dpc
[cpu
]);
754 page_addr
= hv_context
.synic_message_page
[cpu
];
755 msg
= (struct hv_message
*)page_addr
+ VMBUS_MESSAGE_SINT
;
757 /* Check if there are actual msgs to be processed */
758 if (msg
->header
.message_type
!= HVMSG_NONE
) {
759 if (msg
->header
.message_type
== HVMSG_TIMER_EXPIRED
)
760 hv_process_timer_expiration(msg
, cpu
);
762 tasklet_schedule(&msg_dpc
);
766 #ifdef CONFIG_HOTPLUG_CPU
767 static int hyperv_cpu_disable(void)
772 static void hv_cpu_hotplug_quirk(bool vmbus_loaded
)
774 static void *previous_cpu_disable
;
777 * Offlining a CPU when running on newer hypervisors (WS2012R2, Win8,
778 * ...) is not supported at this moment as channel interrupts are
779 * distributed across all of them.
782 if ((vmbus_proto_version
== VERSION_WS2008
) ||
783 (vmbus_proto_version
== VERSION_WIN7
))
787 previous_cpu_disable
= smp_ops
.cpu_disable
;
788 smp_ops
.cpu_disable
= hyperv_cpu_disable
;
789 pr_notice("CPU offlining is not supported by hypervisor\n");
790 } else if (previous_cpu_disable
)
791 smp_ops
.cpu_disable
= previous_cpu_disable
;
794 static void hv_cpu_hotplug_quirk(bool vmbus_loaded
)
800 * vmbus_bus_init -Main vmbus driver initialization routine.
803 * - initialize the vmbus driver context
804 * - invoke the vmbus hv main init routine
805 * - get the irq resource
806 * - retrieve the channel offers
808 static int vmbus_bus_init(int irq
)
812 /* Hypervisor initialization...setup hypercall page..etc */
815 pr_err("Unable to initialize the hypervisor - 0x%x\n", ret
);
819 tasklet_init(&msg_dpc
, vmbus_on_msg_dpc
, 0);
821 ret
= bus_register(&hv_bus
);
825 hv_setup_vmbus_irq(vmbus_isr
);
827 ret
= hv_synic_alloc();
831 * Initialize the per-cpu interrupt state and
832 * connect to the host.
834 on_each_cpu(hv_synic_init
, NULL
, 1);
835 ret
= vmbus_connect();
839 hv_cpu_hotplug_quirk(true);
842 * Only register if the crash MSRs are available
844 if (ms_hyperv
.features
& HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
) {
845 atomic_notifier_chain_register(&panic_notifier_list
,
846 &hyperv_panic_block
);
849 vmbus_request_offers();
855 hv_remove_vmbus_irq();
857 bus_unregister(&hv_bus
);
866 * __vmbus_child_driver_register - Register a vmbus's driver
867 * @drv: Pointer to driver structure you want to register
868 * @owner: owner module of the drv
869 * @mod_name: module name string
871 * Registers the given driver with Linux through the 'driver_register()' call
872 * and sets up the hyper-v vmbus handling for this driver.
873 * It will return the state of the 'driver_register()' call.
876 int __vmbus_driver_register(struct hv_driver
*hv_driver
, struct module
*owner
, const char *mod_name
)
880 pr_info("registering driver %s\n", hv_driver
->name
);
882 ret
= vmbus_exists();
886 hv_driver
->driver
.name
= hv_driver
->name
;
887 hv_driver
->driver
.owner
= owner
;
888 hv_driver
->driver
.mod_name
= mod_name
;
889 hv_driver
->driver
.bus
= &hv_bus
;
891 ret
= driver_register(&hv_driver
->driver
);
895 EXPORT_SYMBOL_GPL(__vmbus_driver_register
);
898 * vmbus_driver_unregister() - Unregister a vmbus's driver
899 * @drv: Pointer to driver structure you want to un-register
901 * Un-register the given driver that was previous registered with a call to
902 * vmbus_driver_register()
904 void vmbus_driver_unregister(struct hv_driver
*hv_driver
)
906 pr_info("unregistering driver %s\n", hv_driver
->name
);
909 driver_unregister(&hv_driver
->driver
);
911 EXPORT_SYMBOL_GPL(vmbus_driver_unregister
);
914 * vmbus_device_create - Creates and registers a new child device
917 struct hv_device
*vmbus_device_create(const uuid_le
*type
,
918 const uuid_le
*instance
,
919 struct vmbus_channel
*channel
)
921 struct hv_device
*child_device_obj
;
923 child_device_obj
= kzalloc(sizeof(struct hv_device
), GFP_KERNEL
);
924 if (!child_device_obj
) {
925 pr_err("Unable to allocate device object for child device\n");
929 child_device_obj
->channel
= channel
;
930 memcpy(&child_device_obj
->dev_type
, type
, sizeof(uuid_le
));
931 memcpy(&child_device_obj
->dev_instance
, instance
,
935 return child_device_obj
;
939 * vmbus_device_register - Register the child device
941 int vmbus_device_register(struct hv_device
*child_device_obj
)
945 dev_set_name(&child_device_obj
->device
, "vmbus_%d",
946 child_device_obj
->channel
->id
);
948 child_device_obj
->device
.bus
= &hv_bus
;
949 child_device_obj
->device
.parent
= &hv_acpi_dev
->dev
;
950 child_device_obj
->device
.release
= vmbus_device_release
;
953 * Register with the LDM. This will kick off the driver/device
954 * binding...which will eventually call vmbus_match() and vmbus_probe()
956 ret
= device_register(&child_device_obj
->device
);
959 pr_err("Unable to register child device\n");
961 pr_debug("child device %s registered\n",
962 dev_name(&child_device_obj
->device
));
968 * vmbus_device_unregister - Remove the specified child device
971 void vmbus_device_unregister(struct hv_device
*device_obj
)
973 pr_debug("child device %s unregistered\n",
974 dev_name(&device_obj
->device
));
977 * Kick off the process of unregistering the device.
978 * This will call vmbus_remove() and eventually vmbus_device_release()
980 device_unregister(&device_obj
->device
);
985 * VMBUS is an acpi enumerated device. Get the the information we
989 static acpi_status
vmbus_walk_resources(struct acpi_resource
*res
, void *ctx
)
992 case ACPI_RESOURCE_TYPE_IRQ
:
993 irq
= res
->data
.irq
.interrupts
[0];
996 case ACPI_RESOURCE_TYPE_ADDRESS64
:
997 hyperv_mmio
.start
= res
->data
.address64
.address
.minimum
;
998 hyperv_mmio
.end
= res
->data
.address64
.address
.maximum
;
1005 static int vmbus_acpi_add(struct acpi_device
*device
)
1008 int ret_val
= -ENODEV
;
1010 hv_acpi_dev
= device
;
1012 result
= acpi_walk_resources(device
->handle
, METHOD_NAME__CRS
,
1013 vmbus_walk_resources
, NULL
);
1015 if (ACPI_FAILURE(result
))
1018 * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
1019 * has the mmio ranges. Get that.
1021 if (device
->parent
) {
1022 result
= acpi_walk_resources(device
->parent
->handle
,
1024 vmbus_walk_resources
, NULL
);
1026 if (ACPI_FAILURE(result
))
1028 if (hyperv_mmio
.start
&& hyperv_mmio
.end
)
1029 request_resource(&iomem_resource
, &hyperv_mmio
);
1034 complete(&probe_event
);
1038 static int vmbus_acpi_remove(struct acpi_device
*device
)
1042 if (hyperv_mmio
.start
&& hyperv_mmio
.end
)
1043 ret
= release_resource(&hyperv_mmio
);
1047 static const struct acpi_device_id vmbus_acpi_device_ids
[] = {
1052 MODULE_DEVICE_TABLE(acpi
, vmbus_acpi_device_ids
);
1054 static struct acpi_driver vmbus_acpi_driver
= {
1056 .ids
= vmbus_acpi_device_ids
,
1058 .add
= vmbus_acpi_add
,
1059 .remove
= vmbus_acpi_remove
,
1063 static int __init
hv_acpi_init(void)
1067 if (x86_hyper
!= &x86_hyper_ms_hyperv
)
1070 init_completion(&probe_event
);
1073 * Get irq resources first.
1075 ret
= acpi_bus_register_driver(&vmbus_acpi_driver
);
1080 t
= wait_for_completion_timeout(&probe_event
, 5*HZ
);
1091 ret
= vmbus_bus_init(irq
);
1098 acpi_bus_unregister_driver(&vmbus_acpi_driver
);
1103 static void __exit
vmbus_exit(void)
1107 vmbus_connection
.conn_state
= DISCONNECTED
;
1108 hv_synic_clockevents_cleanup();
1110 hv_remove_vmbus_irq();
1111 tasklet_kill(&msg_dpc
);
1112 vmbus_free_channels();
1113 if (ms_hyperv
.features
& HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
) {
1114 atomic_notifier_chain_unregister(&panic_notifier_list
,
1115 &hyperv_panic_block
);
1117 bus_unregister(&hv_bus
);
1119 for_each_online_cpu(cpu
) {
1120 tasklet_kill(hv_context
.event_dpc
[cpu
]);
1121 smp_call_function_single(cpu
, hv_synic_cleanup
, NULL
, 1);
1123 acpi_bus_unregister_driver(&vmbus_acpi_driver
);
1124 hv_cpu_hotplug_quirk(false);
1128 MODULE_LICENSE("GPL");
1130 subsys_initcall(hv_acpi_init
);
1131 module_exit(vmbus_exit
);