/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
static struct acpi_device  *hv_acpi_dev;

static struct tasklet_struct msg_dpc;
static struct completion probe_event;
static int irq;
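
/*
 * MMIO range reported for VMBus by the ACPI tables; it is claimed against
 * iomem_resource in vmbus_acpi_add() once vmbus_walk_resources() has filled
 * in the start and end addresses.
 */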
struct resource hyperv_mmio = {
	.name  = "hyperv mmio",
	.flags = IORESOURCE_MEM,
};
EXPORT_SYMBOL_GPL(hyperv_mmio);
static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}
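
/*
 * Each GUID byte is rendered as two hex characters, so the alias buffer
 * needs twice the GUID size; callers allocate VMBUS_ALIAS_LEN + 1 to leave
 * room for the terminating NUL written by sprintf().
 */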
#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;

	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
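
/*
 * Helpers for locating a channel's slot in the shared monitor pages:
 * the channel's monitor ID selects a trigger group (monitorid / 32) and
 * an offset within that group (monitorid % 32).
 */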
static u8 channel_monitor_group(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}
static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}
static u32 channel_pending(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	return monitor_page->trigger_group[monitor_group].pending;
}
static u32 channel_latency(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->latency[monitor_group][monitor_offset];
}
static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
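
/*
 * The *_show() routines below back the read-only sysfs attributes that are
 * collected in vmbus_attrs[] and exposed for every child device.
 */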
static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);
static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);
static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);
static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);
static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(server_monitor_pending);
static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);
static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);
static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);
static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);
static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
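
/*
 * The next group of attributes reports debug state for the outbound and
 * inbound ring buffers, obtained via hv_ringbuffer_get_debuginfo().
 */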
static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);
static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);
static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);
static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);
static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);
static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);
static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);
static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);
static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);
static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus);
/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. udev will then look at its rules
 * and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}
static const uuid_le null_guid;

static inline bool is_null_guid(const __u8 *guid)
{
	if (memcmp(guid, &null_guid, sizeof(uuid_le)))
		return false;
	return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
					const struct hv_vmbus_device_id *id,
					const __u8 *guid)
{
	for (; !is_null_guid(id->guid); id++)
		if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
			return id;

	return NULL;
}
/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b))
		return 1;

	return 0;
}
/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);
	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}
/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv = drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);

	if (drv->remove)
		drv->remove(dev);
	else
		pr_err("remove not set for driver %s\n",
		       dev_name(child_device));

	return 0;
}
/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}
/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);

	kfree(hv_dev);
}
/* The one and only one */
static struct bus_type hv_bus = {
	.name =			"vmbus",
	.match =		vmbus_match,
	.shutdown =		vmbus_shutdown,
	.remove =		vmbus_remove,
	.probe =		vmbus_probe,
	.uevent =		vmbus_uevent,
	.dev_groups =		vmbus_groups,
};
struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}
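
/*
 * Called from vmbus_isr() when the host posts an HVMSG_TIMER_EXPIRED
 * message: dispatch to the per-cpu clockevent handler and then EOM the
 * message slot so the hypervisor can deliver the next message.
 */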
static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
	struct clock_event_device *dev = hv_context.clk_evt[cpu];

	if (dev->event_handler)
		dev->event_handler(dev);

	msg->header.message_type = HVMSG_NONE;

	/*
	 * Make sure the write to MessageType (ie set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	mb();

	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor.
		 */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}
}
static void vmbus_on_msg_dpc(unsigned long data)
{
	int cpu = smp_processor_id();
	void *page_addr = hv_context.synic_message_page[cpu];
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct onmessage_work_context *ctx;

	while (1) {
		if (msg->header.message_type == HVMSG_NONE) {
			/* no msg */
			break;
		} else {
			ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
			if (ctx == NULL)
				continue;
			INIT_WORK(&ctx->work, vmbus_onmessage_work);
			memcpy(&ctx->msg, msg, sizeof(*msg));
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}

		msg->header.message_type = HVMSG_NONE;

		/*
		 * Make sure the write to MessageType (ie set to
		 * HVMSG_NONE) happens before we read the
		 * MessagePending and EOMing. Otherwise, the EOMing
		 * will not deliver any more messages since there is
		 * no empty slot.
		 */
		mb();

		if (msg->header.message_flags.msg_pending) {
			/*
			 * This will cause message queue rescan to
			 * possibly deliver another msg from the
			 * hypervisor.
			 */
			wrmsrl(HV_X64_MSR_EOM, 0);
		}
	}
}
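
/*
 * Top-level VMBus interrupt handler: check the SYNIC event page first
 * (scheduling the per-cpu event tasklet when channel interrupts are
 * signalled), then the message page, handing timer messages to
 * hv_process_timer_expiration() and everything else to msg_dpc.
 */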
static void vmbus_isr(void)
{
	int cpu = smp_processor_id();
	void *page_addr;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	page_addr = hv_context.synic_event_page[cpu];
	if (page_addr == NULL)
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0,
			(unsigned long *) &event->flags32[0])) {
			handled = true;
		}
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		tasklet_schedule(hv_context.event_dpc[cpu]);

	page_addr = hv_context.synic_message_page[cpu];
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, cpu);
		else
			tasklet_schedule(&msg_dpc);
	}
}
/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- get the irq resource
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(int irq)
{
	int ret;

	/* Hypervisor initialization...setup hypercall page..etc */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	on_each_cpu(hv_synic_init, NULL, 1);
	ret = vmbus_connect();
	if (ret)
		goto err_alloc;

	vmbus_request_offers();

	return 0;

err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);

	return ret;
}
/**
 * __vmbus_driver_register() - Register a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the driver
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
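
/*
 * Typical usage from a child driver (illustrative sketch only: the id table
 * contents and the my_probe/my_remove callbacks are hypothetical, not part
 * of this file):
 *
 *	static const struct hv_vmbus_device_id id_table[] = {
 *		{ .guid = { ...device type GUID bytes... } },
 *		{ },			// terminating null-GUID entry
 *	};
 *
 *	static struct hv_driver my_driver = {
 *		.name     = "my_driver",
 *		.id_table = id_table,
 *		.probe    = my_probe,
 *		.remove   = my_remove,
 *	};
 *
 *	// vmbus_driver_register() in <linux/hyperv.h> wraps
 *	// __vmbus_driver_register() with THIS_MODULE/KBUILD_MODNAME.
 *	ret = vmbus_driver_register(&my_driver);
 */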
/**
 * vmbus_driver_unregister() - Unregister a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists())
		driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));

	return child_device_obj;
}
/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	int ret = 0;

	static atomic_t device_num = ATOMIC_INIT(0);

	dev_set_name(&child_device_obj->device, "vmbus_0_%d",
		     atomic_inc_return(&device_num));

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);

	if (ret)
		pr_err("Unable to register child device\n");
	else
		pr_debug("child device %s registered\n",
			 dev_name(&child_device_obj->device));

	return ret;
}
/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}
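
/*
 * ACPI enumeration path: vmbus_acpi_add() runs when the VMBus ACPI node is
 * found, walks its _CRS (and the parent's on Gen2 firmware) through
 * vmbus_walk_resources() to record the IRQ and MMIO range, and then
 * completes probe_event so hv_acpi_init() can continue with vmbus_bus_init().
 */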
/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	switch (res->type) {
	case ACPI_RESOURCE_TYPE_IRQ:
		irq = res->data.irq.interrupts[0];
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		hyperv_mmio.start = res->data.address64.address.minimum;
		hyperv_mmio.end = res->data.address64.address.maximum;
		break;
	}

	return AE_OK;
}
static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
					vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
	 * has the mmio ranges. Get that.
	 */
	if (device->parent) {
		result = acpi_walk_resources(device->parent->handle,
					METHOD_NAME__CRS,
					vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			goto acpi_walk_err;
		if (hyperv_mmio.start && hyperv_mmio.end)
			request_resource(&iomem_resource, &hyperv_mmio);
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	return ret_val;
}
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
	},
};
static int __init hv_acpi_init(void)
{
	int ret, t;

	if (x86_hyper != &x86_hyper_ms_hyperv)
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get irq resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (irq <= 0) {
		ret = -ENODEV;
		goto cleanup;
	}

	ret = vmbus_bus_init(irq);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}
static void __exit vmbus_exit(void)
{
	hv_remove_vmbus_irq();
	vmbus_free_channels();
	bus_unregister(&hv_bus);
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}


MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);