// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct acpi_device *hv_acpi_dev;

static struct completion probe_event;

static int hyperv_cpuhp_online;

static void *hv_panic_page;
/* Values parsed from ACPI DSDT */
int vmbus_irq;
int vmbus_interrupt;
/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;
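/*
 * Example (assumed shell usage, not part of this file): panic-message
 * reporting can be turned off at runtime with
 * "echo 0 > /proc/sys/kernel/hyperv_record_panic_msg".
 */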
static int hyperv_report_reg(void)
{
	return !sysctl_record_panic_msg || !hv_panic_page;
}
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	vmbus_initiate_unload(true);

	/*
	 * Hyper-V should be notified only once about a panic. If we will be
	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
	 * the notification here.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
	    && hyperv_report_reg()) {
		regs = current_pt_regs();
		hyperv_report_panic(regs, val, false);
	}
	return NOTIFY_DONE;
}
static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = args;
	struct pt_regs *regs = die->regs;

	/* Don't notify Hyper-V if the die event is other than oops */
	if (val != DIE_OOPS)
		return NOTIFY_DONE;

	/*
	 * Hyper-V should be notified only once about a panic. If we will be
	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
	 * the notification here.
	 */
	if (hyperv_report_reg())
		hyperv_report_panic(regs, val, true);
	return NOTIFY_DONE;
}
static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};
static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);
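/*
 * hyperv_mmio_lock serializes all walks and updates of the hyperv_mmio
 * resource list (see vmbus_allocate_mmio() and vmbus_free_mmio() below).
 */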
static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}
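/*
 * Each trigger group in the monitor page tracks 32 channels, so
 * monitorid / 32 selects the group and monitorid % 32 selects the bit
 * (offset) within that group.
 */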
static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);
static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);
#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif
static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	mutex_lock(&vmbus_connection.channel_mutex);

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
			       channel->offermsg.child_relid, channel->target_cpu);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = hv_dev->driver_override;
	if (strlen(driver_override)) {
		hv_dev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		hv_dev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	const struct hv_device *hv_dev = device_to_hv_device(dev);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!hv_dev->channel->offermsg.monitor_allocated &&
	    (attr == &dev_attr_monitor_id.attr ||
	     attr == &dev_attr_server_monitor_pending.attr ||
	     attr == &dev_attr_client_monitor_pending.attr ||
	     attr == &dev_attr_server_monitor_latency.attr ||
	     attr == &dev_attr_client_monitor_latency.attr ||
	     attr == &dev_attr_server_monitor_conn_id.attr ||
	     attr == &dev_attr_client_monitor_conn_id.attr))
		return 0;

	return attr->mode;
}
static const struct attribute_group vmbus_dev_group = {
	.attrs = vmbus_dev_attrs,
	.is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);
/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rules and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	const char *format = "MODALIAS=vmbus:%*phN";

	return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !guid_is_null(&id->guid); id++)
		if (guid_equal(&id->guid, guid))
			return id;

	return NULL;
}
static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (guid_equal(&dynid->id.guid, guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return id;
}
static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
							struct hv_device *dev)
{
	const guid_t *guid = &dev->dev_type;
	const struct hv_vmbus_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	id = hv_vmbus_dynid_match(drv, guid);
	if (!id)
		id = hv_vmbus_dev_match(drv->id_table, guid);

	/* driver_override will always match, send a dummy id */
	if (!id && dev->driver_override)
		id = &vmbus_device_null;

	return id;
}
/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}
static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}
/*
 * store_new_id - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	if (hv_vmbus_dynid_match(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);
/*
 * store_remove_id - remove a device ID from this driver
 *
 * Removes a dynamic device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	retval = -ENODEV;
	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (guid_equal(&id->guid, &guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);
static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);
/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, hv_dev))
		return 1;

	return 0;
}
/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, dev);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);
	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}
/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}
/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}
#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->suspend)
		return -EOPNOTSUPP;

	return drv->suspend(dev);
}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->resume)
		return -EOPNOTSUPP;

	return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */
/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_debug_rm_dev_dir(hv_dev);

	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}
/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */
static const struct dev_pm_ops vmbus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_suspend,
	.thaw_noirq	= vmbus_resume,
	.poweroff_noirq	= vmbus_suspend,
	.restore_noirq	= vmbus_resume,
};
/* The one and only one */
static struct bus_type hv_bus = {
	.name =			"vmbus",
	.match =		vmbus_match,
	.shutdown =		vmbus_shutdown,
	.remove =		vmbus_remove,
	.probe =		vmbus_probe,
	.uevent =		vmbus_uevent,
	.dev_groups =		vmbus_dev_groups,
	.drv_groups =		vmbus_drv_groups,
	.pm =			&vmbus_pm,
};
struct onmessage_work_context {
	struct work_struct work;
	struct {
		struct hv_message_header header;
		u8 payload[];
	} msg;
};
static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage((struct vmbus_channel_message_header *)
			ctx->msg.payload);
	kfree(ctx);
}
)
1055 struct hv_per_cpu_context
*hv_cpu
= (void *)data
;
1056 void *page_addr
= hv_cpu
->synic_message_page
;
1057 struct hv_message
*msg
= (struct hv_message
*)page_addr
+
1059 struct vmbus_channel_message_header
*hdr
;
1060 const struct vmbus_channel_message_table_entry
*entry
;
1061 struct onmessage_work_context
*ctx
;
1062 u32 message_type
= msg
->header
.message_type
;
1065 * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
1066 * it is being used in 'struct vmbus_channel_message_header' definition
1067 * which is supposed to match hypervisor ABI.
1069 BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type
) != sizeof(u32
));
1071 if (message_type
== HVMSG_NONE
)
1075 hdr
= (struct vmbus_channel_message_header
*)msg
->u
.payload
;
1077 trace_vmbus_on_msg_dpc(hdr
);
1079 if (hdr
->msgtype
>= CHANNELMSG_COUNT
) {
1080 WARN_ONCE(1, "unknown msgtype=%d\n", hdr
->msgtype
);
1084 if (msg
->header
.payload_size
> HV_MESSAGE_PAYLOAD_BYTE_COUNT
) {
1085 WARN_ONCE(1, "payload size is too large (%d)\n",
1086 msg
->header
.payload_size
);
1090 entry
= &channel_message_table
[hdr
->msgtype
];
1092 if (!entry
->message_handler
)
1095 if (msg
->header
.payload_size
< entry
->min_payload_len
) {
1096 WARN_ONCE(1, "message too short: msgtype=%d len=%d\n",
1097 hdr
->msgtype
, msg
->header
.payload_size
);
1101 if (entry
->handler_type
== VMHT_BLOCKING
) {
1102 ctx
= kmalloc(sizeof(*ctx
) + msg
->header
.payload_size
,
1107 INIT_WORK(&ctx
->work
, vmbus_onmessage_work
);
1108 memcpy(&ctx
->msg
, msg
, sizeof(msg
->header
) +
1109 msg
->header
.payload_size
);
1112 * The host can generate a rescind message while we
1113 * may still be handling the original offer. We deal with
1114 * this condition by relying on the synchronization provided
1115 * by offer_in_progress and by channel_mutex. See also the
1116 * inline comments in vmbus_onoffer_rescind().
1118 switch (hdr
->msgtype
) {
1119 case CHANNELMSG_RESCIND_CHANNELOFFER
:
1121 * If we are handling the rescind message;
1122 * schedule the work on the global work queue.
1124 * The OFFER message and the RESCIND message should
1125 * not be handled by the same serialized work queue,
1126 * because the OFFER handler may call vmbus_open(),
1127 * which tries to open the channel by sending an
1128 * OPEN_CHANNEL message to the host and waits for
1129 * the host's response; however, if the host has
1130 * rescinded the channel before it receives the
1131 * OPEN_CHANNEL message, the host just silently
1132 * ignores the OPEN_CHANNEL message; as a result,
1133 * the guest's OFFER handler hangs for ever, if we
1134 * handle the RESCIND message in the same serialized
1135 * work queue: the RESCIND handler can not start to
1136 * run before the OFFER handler finishes.
1138 schedule_work(&ctx
->work
);
1141 case CHANNELMSG_OFFERCHANNEL
:
1143 * The host sends the offer message of a given channel
1144 * before sending the rescind message of the same
1145 * channel. These messages are sent to the guest's
1146 * connect CPU; the guest then starts processing them
1147 * in the tasklet handler on this CPU:
1151 * [vmbus_on_msg_dpc()]
1152 * atomic_inc() // CHANNELMSG_OFFERCHANNEL
1155 * [vmbus_on_msg_dpc()]
1156 * schedule_work() // CHANNELMSG_RESCIND_CHANNELOFFER
1158 * We rely on the memory-ordering properties of the
1159 * queue_work() and schedule_work() primitives, which
1160 * guarantee that the atomic increment will be visible
1161 * to the CPUs which will execute the offer & rescind
1162 * works by the time these works will start execution.
1164 atomic_inc(&vmbus_connection
.offer_in_progress
);
1168 queue_work(vmbus_connection
.work_queue
, &ctx
->work
);
1171 entry
->message_handler(hdr
);
1174 vmbus_signal_eom(msg
, message_type
);
#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections can not persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
	struct onmessage_work_context *ctx;
	struct vmbus_channel_rescind_offer *rescind;

	WARN_ON(!is_hvsock_channel(channel));

	/*
	 * Allocation size is small and the allocation should really not fail,
	 * otherwise the state of the hv_sock connections ends up in limbo.
	 */
	ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
		      GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * So far, these are not really used by Linux. Just set them to the
	 * reasonable values conforming to the definitions of the fields.
	 */
	ctx->msg.header.message_type = 1;
	ctx->msg.header.payload_size = sizeof(*rescind);

	/* These values are actually used by Linux. */
	rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
	rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
	rescind->child_relid = channel->offermsg.child_relid;

	INIT_WORK(&ctx->work, vmbus_onmessage_work);

	queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */
/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	if (vmbus_proto_version < VERSION_WIN8) {
		maxbits = MAX_NUM_CHANNELS_SUPPORTED;
		recv_int_page = vmbus_connection.recv_int_page;
	} else {
		/*
		 * When the host is win8 and beyond, the event page
		 * can be directly checked to get the id of the channel
		 * that has the interrupt pending.
		 */
		void *page_addr = hv_cpu->synic_event_page;
		union hv_synic_event_flags *event
			= (union hv_synic_event_flags *)page_addr +
						 VMBUS_MESSAGE_SINT;

		maxbits = HV_EVENT_FLAGS_COUNT;
		recv_int_page = event->flags;
	}

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		void (*callback_fn)(void *context);
		struct vmbus_channel *channel;

		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		rcu_read_lock();

		/*
		 * Pairs with the kfree_rcu() in vmbus_chan_release().
		 * Guarantees that the channel data structure doesn't
		 * get freed while the channel pointer below is being
		 * dereferenced.
		 */

		/* Find channel based on relid */
		channel = relid2channel(relid);
		if (channel == NULL)
			goto sched_unlock_rcu;

		if (channel->rescind)
			goto sched_unlock_rcu;

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer. Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section. See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock(&channel->sched_lock);

		callback_fn = channel->onchannel_callback;
		if (unlikely(callback_fn == NULL))
			goto sched_unlock;

		trace_vmbus_chan_sched(channel);

		++channel->interrupts;

		switch (channel->callback_mode) {
		case HV_CALL_ISR:
			(*callback_fn)(channel->channel_callback_context);
			break;

		case HV_CALL_BATCHED:
			hv_begin_read(&channel->inbound);
			fallthrough;
		case HV_CALL_DIRECT:
			tasklet_schedule(&channel->callback_event);
		}

sched_unlock:
		spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
		rcu_read_unlock();
	}
}
static void vmbus_isr(void)
{
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);
	void *page_addr = hv_cpu->synic_event_page;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	if (unlikely(page_addr == NULL))
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0, event->flags))
			handled = true;
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		vmbus_chan_sched(hv_cpu);

	page_addr = hv_cpu->synic_message_page;
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
			hv_stimer0_isr();
			vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
		} else
			tasklet_schedule(&hv_cpu->msg_dpc);
	}

	add_interrupt_randomness(hv_get_vector(), 0);
}
/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	size_t bytes_written;
	phys_addr_t panic_pa;

	/* We are only interested in panics. */
	if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
		return;

	panic_pa = virt_to_phys(hv_panic_page);

	/*
	 * Write dump contents to the page. No need to synchronize; panic should
	 * be single-threaded.
	 */
	kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
			     &bytes_written);
	if (bytes_written)
		hyperv_report_panic_msg(panic_pa, bytes_written);
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};
static void hv_kmsg_dump_register(void)
{
	int ret;

	hv_panic_page = hv_alloc_hyperv_zeroed_page();
	if (!hv_panic_page) {
		pr_err("Hyper-V: panic message page memory allocation failed\n");
		return;
	}

	ret = kmsg_dump_register(&hv_kmsg_dumper);
	if (ret) {
		pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
		hv_free_hyperv_page((unsigned long)hv_panic_page);
		hv_panic_page = NULL;
	}
}
static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static struct ctl_table hv_ctl_table[] = {
	{
		.procname	= "hyperv_record_panic_msg",
		.data		= &sysctl_record_panic_msg,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE
	},
	{}
};

static struct ctl_table hv_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= hv_ctl_table
	},
	{}
};
1440 * - initialize the vmbus driver context
1441 * - invoke the vmbus hv main init routine
1442 * - retrieve the channel offers
1444 static int vmbus_bus_init(void)
1450 pr_err("Unable to initialize the hypervisor - 0x%x\n", ret
);
1454 ret
= bus_register(&hv_bus
);
1458 ret
= hv_setup_vmbus_irq(vmbus_irq
, vmbus_isr
);
1462 ret
= hv_synic_alloc();
1467 * Initialize the per-cpu interrupt state and stimer state.
1468 * Then connect to the host.
1470 ret
= cpuhp_setup_state(CPUHP_AP_ONLINE_DYN
, "hyperv/vmbus:online",
1471 hv_synic_init
, hv_synic_cleanup
);
1474 hyperv_cpuhp_online
= ret
;
1476 ret
= vmbus_connect();
1481 * Only register if the crash MSRs are available
1483 if (ms_hyperv
.misc_features
& HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
) {
1484 u64 hyperv_crash_ctl
;
1486 * Sysctl registration is not fatal, since by default
1487 * reporting is enabled.
1489 hv_ctl_table_hdr
= register_sysctl_table(hv_root_table
);
1490 if (!hv_ctl_table_hdr
)
1491 pr_err("Hyper-V: sysctl table register error");
1494 * Register for panic kmsg callback only if the right
1495 * capability is supported by the hypervisor.
1497 hv_get_crash_ctl(hyperv_crash_ctl
);
1498 if (hyperv_crash_ctl
& HV_CRASH_CTL_CRASH_NOTIFY_MSG
)
1499 hv_kmsg_dump_register();
1501 register_die_notifier(&hyperv_die_block
);
1505 * Always register the panic notifier because we need to unload
1506 * the VMbus channel connection to prevent any VMbus
1507 * activity after the VM panics.
1509 atomic_notifier_chain_register(&panic_notifier_list
,
1510 &hyperv_panic_block
);
1512 vmbus_request_offers();
1517 cpuhp_remove_state(hyperv_cpuhp_online
);
1521 hv_remove_vmbus_irq();
1523 bus_unregister(&hv_bus
);
1524 unregister_sysctl_table(hv_ctl_table_hdr
);
1525 hv_ctl_table_hdr
= NULL
;
/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);

	kfree_rcu(channel, rcu);
}
struct vmbus_chan_attribute {
	struct attribute attr;
	ssize_t (*show)(struct vmbus_channel *chan, char *buf);
	ssize_t (*store)(struct vmbus_channel *chan,
			 const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
	struct vmbus_chan_attribute chan_attr_##_name \
		= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
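/*
 * Expansion sketch: VMBUS_CHAN_ATTR_RO(foo) declares
 * "struct vmbus_chan_attribute chan_attr_foo" with mode 0444 and
 * .show = foo_show, via the generic __ATTR_RO() helper.
 */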
static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(chan, buf);
}

static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(chan, buf, count);
}

static const struct sysfs_ops vmbus_chan_sysfs_ops = {
	.show = vmbus_chan_attr_show,
	.store = vmbus_chan_attr_store,
};
static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);
static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%u\n", channel->target_cpu);
}

static ssize_t target_cpu_store(struct vmbus_channel *channel,
				const char *buf, size_t count)
{
	u32 target_cpu, origin_cpu;
	ssize_t ret = count;

	if (vmbus_proto_version < VERSION_WIN10_V4_1)
		return -EIO;

	if (sscanf(buf, "%uu", &target_cpu) != 1)
		return -EIO;

	/* Validate target_cpu for the cpumask_test_cpu() operation below. */
	if (target_cpu >= nr_cpumask_bits)
		return -EINVAL;

	/* No CPUs should come up or down during this. */
	cpus_read_lock();

	if (!cpu_online(target_cpu)) {
		cpus_read_unlock();
		return -EINVAL;
	}

	/*
	 * Synchronizes target_cpu_store() and channel closure:
	 *
	 * { Initially: state = CHANNEL_OPENED }
	 *
	 * CPU1				CPU2
	 *
	 * [target_cpu_store()]		[vmbus_disconnect_ring()]
	 *
	 * LOCK channel_mutex		LOCK channel_mutex
	 * LOAD r1 = state		LOAD r2 = state
	 * IF (r1 == CHANNEL_OPENED)	IF (r2 == CHANNEL_OPENED)
	 *   SEND MODIFYCHANNEL		  STORE state = CHANNEL_OPEN
	 *   [...]			  SEND CLOSECHANNEL
	 * UNLOCK channel_mutex		UNLOCK channel_mutex
	 *
	 * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
	 *		CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
	 *
	 * Note.  The host processes the channel messages "sequentially", in
	 * the order in which they are received on a per-partition basis.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
	 * avoid sending the message and fail here for such channels.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	origin_cpu = channel->target_cpu;
	if (target_cpu == origin_cpu)
		goto cpu_store_unlock;

	if (vmbus_send_modifychannel(channel->offermsg.child_relid,
				     hv_cpu_number_to_vp_number(target_cpu))) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	/*
	 * Warning.  At this point, there is *no* guarantee that the host will
	 * have successfully processed the vmbus_send_modifychannel() request.
	 * See the header comment of vmbus_send_modifychannel() for more info.
	 *
	 * Lags in the processing of the above vmbus_send_modifychannel() can
	 * result in missed interrupts if the "old" target CPU is taken offline
	 * before Hyper-V starts sending interrupts to the "new" target CPU.
	 * But apart from this offlining scenario, the code tolerates such
	 * lags.  It will function correctly even if a channel interrupt comes
	 * in on a CPU that is different from the channel target_cpu value.
	 */

	channel->target_cpu = target_cpu;

	/* See init_vp_index(). */
	if (hv_is_perf_channel(channel))
		hv_update_alloced_cpus(origin_cpu, target_cpu);

	/* Currently set only for storvsc channels. */
	if (channel->change_target_cpu_callback) {
		(*channel->change_target_cpu_callback)(channel,
				origin_cpu, target_cpu);
	}

cpu_store_unlock:
	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();
	return ret;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
static ssize_t channel_pending_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_pending(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);

static ssize_t channel_latency_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_latency(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);

static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);

static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);

static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
					 char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_in_full);
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);

static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_out_empty);
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);

static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_first);
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);

static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_total);
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);

static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
					  char *buf)
{
	return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(struct vmbus_channel *channel,
				  char *buf)
{
	return sprintf(buf, "%u\n",
		       channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);
static struct attribute *vmbus_chan_attrs[] = {
	&chan_attr_out_mask.attr,
	&chan_attr_in_mask.attr,
	&chan_attr_read_avail.attr,
	&chan_attr_write_avail.attr,
	&chan_attr_cpu.attr,
	&chan_attr_pending.attr,
	&chan_attr_latency.attr,
	&chan_attr_interrupts.attr,
	&chan_attr_events.attr,
	&chan_attr_intr_in_full.attr,
	&chan_attr_intr_out_empty.attr,
	&chan_attr_out_full_first.attr,
	&chan_attr_out_full_total.attr,
	&chan_attr_monitor_id.attr,
	&chan_attr_subchannel_id.attr,
	NULL
};
/*
 * Channel-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int idx)
{
	const struct vmbus_channel *channel =
		container_of(kobj, struct vmbus_channel, kobj);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!channel->offermsg.monitor_allocated &&
	    (attr == &chan_attr_pending.attr ||
	     attr == &chan_attr_latency.attr ||
	     attr == &chan_attr_monitor_id.attr))
		return 0;

	return attr->mode;
}

static struct attribute_group vmbus_chan_group = {
	.attrs = vmbus_chan_attrs,
	.is_visible = vmbus_chan_attr_is_visible
};

static struct kobj_type vmbus_chan_ktype = {
	.sysfs_ops = &vmbus_chan_sysfs_ops,
	.release = vmbus_chan_release,
};
/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	const struct device *device = &dev->device;
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	kobj->kset = dev->channels_kset;
	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret)
		return ret;

	ret = sysfs_create_group(kobj, &vmbus_chan_group);

	if (ret) {
		/*
		 * The calling functions' error handling paths will cleanup the
		 * empty channel directory.
		 */
		dev_err(device, "Unable to set up channel sysfs files\n");
		return ret;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return 0;
}
/*
 * vmbus_remove_channel_attr_group - remove the channel's attribute group
 */
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{
	sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
}
/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	guid_copy(&child_device_obj->dev_type, type);
	guid_copy(&child_device_obj->dev_instance, instance);
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}
/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	struct kobject *kobj = &child_device_obj->device.kobj;
	int ret;

	dev_set_name(&child_device_obj->device, "%pUl",
		     &child_device_obj->channel->offermsg.offer.if_instance);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);
	if (ret) {
		pr_err("Unable to register child device\n");
		return ret;
	}

	child_device_obj->channels_kset = kset_create_and_add("channels",
							      NULL, kobj);
	if (!child_device_obj->channels_kset) {
		ret = -ENOMEM;
		goto err_dev_unregister;
	}

	ret = vmbus_add_channel_kobj(child_device_obj,
				     child_device_obj->channel);
	if (ret) {
		pr_err("Unable to register primary channel\n");
		goto err_kset_unregister;
	}
	hv_debug_add_dev_dir(child_device_obj);

	return 0;

err_kset_unregister:
	kset_unregister(child_device_obj->channels_kset);

err_dev_unregister:
	device_unregister(&child_device_obj->device);
	return ret;
}
/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	kset_unregister(device_obj->channels_kset);

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}
/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;
	struct resource r;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	/*
	 * The IRQ information is needed only on ARM64, which Hyper-V
	 * sets up in the extended format. IRQ information is present
	 * on x86/x64 in the non-extended format but it is not used by
	 * Linux. So don't bother checking for the non-extended format.
	 */
	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
		if (!acpi_dev_resource_interrupt(res, 0, &r)) {
			pr_err("Unable to parse Hyper-V ACPI interrupt\n");
			return AE_ERROR;
		}
		/* ARM64 INTID for VMbus */
		vmbus_interrupt = res->data.extended_irq.interrupts[0];
		/* Linux IRQ number */
		vmbus_irq = r.start;
		return AE_OK;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}
static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB.  The length seems to
	 * be underreported, particularly in a Generation 1 VM.  So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */

	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}
/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplied a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case. It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * the caller.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	mutex_lock(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region. Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {
		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		/* Round up to the next align boundary (align is a power of 2). */
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size,
							    dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size,
							    dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	mutex_unlock(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
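
/*
 * A hedged usage sketch (not from this file): pci-hyperv, for example,
 * requests its configuration window with min = 0 and max = -1, meaning
 * "anywhere in VMBus-owned MMIO space". The 0x1000 size/alignment here is
 * illustrative only:
 *
 *	struct resource *res;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&res, hdev, 0, -1, 0x1000, 0x1000, false);
 *	if (ret)
 *		return ret;
 *	... map and use [res->start, res->end] ...
 *	vmbus_free_mmio(res->start, resource_size(res));
 */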
/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	mutex_lock(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	mutex_unlock(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
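
/*
 * Note: callers are expected to pass back exactly the (start, size) pair
 * they were granted by vmbus_allocate_mmio(); the loop above drops the
 * matching shadow claims from the hyperv_mmio tree, and release_mem_region()
 * then drops the exclusive claim from the root iomem_resource tree.
 */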
static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}
#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
	struct vmbus_channel *channel, *sc;

	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here until the completion of any channel
		 * offers that are currently in progress.
		 */
		usleep_range(1000, 2000);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!is_hvsock_channel(channel))
			continue;

		vmbus_force_channel_rescinded(channel);
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	/*
	 * Wait until all the sub-channels and hv_sock channels have been
	 * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
	 * they would conflict with the new sub-channels that will be created
	 * in the resume path. hv_sock channels should also be destroyed, but
	 * a hv_sock channel of an established hv_sock connection cannot
	 * really be destroyed since it may still be referenced by the
	 * user-space application, so we just force the hv_sock channel to be
	 * rescinded by vmbus_force_channel_rescinded(), and the user-space
	 * application will thoroughly destroy the channel after hibernation.
	 *
	 * Note: the counter nr_chan_close_on_suspend may never go above 0 if
	 * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
	 */
	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
		wait_for_completion(&vmbus_connection.ready_for_suspend_event);

	if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
		pr_err("Cannot suspend due to a previously failed resume\n");
		return -EBUSY;
	}

	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		/*
		 * Remove the channel from the array of channels and invalidate
		 * the channel's relid. Upon resume, vmbus_onoffer() will fix
		 * up the relid (and other fields, if necessary) and add the
		 * channel back to the array.
		 */
		vmbus_channel_unmap_relid(channel);
		channel->offermsg.child_relid = INVALID_RELID;

		if (is_hvsock_channel(channel)) {
			if (!channel->rescind) {
				pr_err("hv_sock channel not rescinded!\n");
				WARN_ON_ONCE(1);
			}
			continue;
		}

		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			pr_err("Sub-channel not deleted!\n");
			WARN_ON_ONCE(1);
		}

		atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	vmbus_initiate_unload(false);

	/* Reset the event for the next resume. */
	reinit_completion(&vmbus_connection.ready_for_resume_event);

	return 0;
}
static int vmbus_bus_resume(struct device *dev)
{
	struct vmbus_channel_msginfo *msginfo;
	size_t msgsize;
	int ret;

	/*
	 * We only use the 'vmbus_proto_version', which was in use before
	 * hibernation, to re-negotiate with the host.
	 */
	if (!vmbus_proto_version) {
		pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
		return -EINVAL;
	}

	msgsize = sizeof(*msginfo) +
		  sizeof(struct vmbus_channel_initiate_contact);

	msginfo = kzalloc(msgsize, GFP_KERNEL);

	if (msginfo == NULL)
		return -ENOMEM;

	ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);

	kfree(msginfo);

	if (ret != 0)
		return ret;

	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);

	vmbus_request_offers();

	if (wait_for_completion_timeout(
		&vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
		pr_err("Some vmbus device is missing after resume\n");

	/* Reset the event for the next suspend. */
	reinit_completion(&vmbus_connection.ready_for_suspend_event);

	return 0;
}
#else
#define vmbus_bus_suspend	NULL
#define vmbus_bus_resume	NULL
#endif /* CONFIG_PM_SLEEP */
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
/*
 * Note: we must use the "noirq" ops, otherwise hibernation cannot work with
 * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
 * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
 * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
 * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
 * resume callback must also run via the "noirq" ops.
 *
 * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
 * earlier in this file before vmbus_pm.
 */
static const struct dev_pm_ops vmbus_bus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_bus_suspend,
	.thaw_noirq	= vmbus_bus_resume,
	.poweroff_noirq	= vmbus_bus_suspend,
	.restore_noirq	= vmbus_bus_resume
};
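
/*
 * For orientation (standard kernel hibernation semantics, summarized here):
 * freeze_noirq/thaw_noirq bracket creation of the hibernation image, while
 * poweroff_noirq runs when the system shuts down after saving the image and
 * restore_noirq runs when a freshly booted kernel loads it back. At each of
 * these boundaries VMBus tears down or re-negotiates its host connection via
 * vmbus_bus_suspend()/vmbus_bus_resume().
 */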
static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
	.drv.pm = &vmbus_bus_pm,
};
static void hv_kexec_handler(void)
{
	hv_stimer_global_cleanup();
	vmbus_initiate_unload(false);
	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
	mb();
	cpuhp_remove_state(hyperv_cpuhp_online);
}
static void hv_crash_handler(struct pt_regs *regs)
{
	int cpu;

	vmbus_initiate_unload(true);
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	cpu = smp_processor_id();
	hv_stimer_cleanup(cpu);
	hv_synic_disable_regs(cpu);
}
static int hv_synic_suspend(void)
{
	/*
	 * When we reach here, all the non-boot CPUs have been offlined.
	 * If we're in a legacy configuration where stimer Direct Mode is
	 * not enabled, the stimers on the non-boot CPUs have been unbound
	 * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
	 * hv_stimer_cleanup() -> clockevents_unbind_device().
	 *
	 * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
	 * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
	 * 1) it's unnecessary as interrupts remain disabled between
	 *    syscore_suspend() and syscore_resume(): see create_image() and
	 *    resume_target_kernel();
	 * 2) the stimer on CPU0 is automatically disabled later by
	 *    syscore_suspend() -> timekeeping_suspend() -> tick_suspend()
	 *    -> ... -> clockevents_shutdown() -> ... -> hv_ce_shutdown();
	 * 3) a warning would be triggered if we call
	 *    clockevents_unbind_device(), which may sleep, in an
	 *    interrupts-disabled context.
	 */
	hv_synic_disable_regs(0);

	return 0;
}
static void hv_synic_resume(void)
{
	hv_synic_enable_regs(0);

	/*
	 * Note: we don't need to call hv_stimer_init(0), because the timer
	 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
	 * automatically re-enabled in timekeeping_resume().
	 */
}
/* The callbacks run only on CPU0, with irqs_disabled. */
static struct syscore_ops hv_synic_syscore_ops = {
	.suspend = hv_synic_suspend,
	.resume = hv_synic_resume,
};
static int __init hv_acpi_init(void)
{
	int ret, t;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}
	hv_debug_init();

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	register_syscore_ops(&hv_synic_syscore_ops);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}
static void __exit vmbus_exit(void)
{
	int cpu;

	unregister_syscore_ops(&hv_synic_syscore_ops);

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_stimer_global_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_kill(&hv_cpu->msg_dpc);
	}
	hv_debug_rm_all_dir();

	vmbus_free_channels();
	kfree(vmbus_connection.channels);

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		kmsg_dump_unregister(&hv_kmsg_dumper);
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}

	free_page((unsigned long)hv_panic_page);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	bus_unregister(&hv_bus);

	cpuhp_remove_state(hyperv_cpuhp_online);
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);