/*
 * Hyper-V guest/hypervisor interaction
 *
 * Copyright (c) 2015-2018 Virtuozzo International GmbH.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "sysemu/kvm.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/queue.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "hw/hyperv/hyperv.h"
#include "qom/object.h"
#include "target/i386/kvm/hyperv-proto.h"
#include "target/i386/cpu.h"
#include "exec/cpu-all.h"

struct SynICState {
    DeviceState parent_obj;

    CPUState *cs;

    bool sctl_enabled;
    hwaddr msg_page_addr;
    hwaddr event_page_addr;
    MemoryRegion msg_page_mr;
    MemoryRegion event_page_mr;
    struct hyperv_message_page *msg_page;
    struct hyperv_event_flags_page *event_page;

    QemuMutex sint_routes_mutex;
    QLIST_HEAD(, HvSintRoute) sint_routes;
};

#define TYPE_SYNIC "hyperv-synic"
OBJECT_DECLARE_SIMPLE_TYPE(SynICState, SYNIC)

static bool synic_enabled;

bool hyperv_is_synic_enabled(void)
{
    return synic_enabled;
}

static SynICState *get_synic(CPUState *cs)
{
    return SYNIC(object_resolve_path_component(OBJECT(cs), "synic"));
}
static void synic_update(SynICState *synic, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    synic->sctl_enabled = sctl_enable;
    if (synic->msg_page_addr != msg_page_addr) {
        if (synic->msg_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->msg_page_mr);
        }
        if (msg_page_addr) {
            memory_region_add_subregion(get_system_memory(), msg_page_addr,
                                        &synic->msg_page_mr);
        }
        synic->msg_page_addr = msg_page_addr;
    }
    if (synic->event_page_addr != event_page_addr) {
        if (synic->event_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->event_page_mr);
        }
        if (event_page_addr) {
            memory_region_add_subregion(get_system_memory(), event_page_addr,
                                        &synic->event_page_mr);
        }
        synic->event_page_addr = event_page_addr;
    }
}

void hyperv_synic_update(CPUState *cs, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    SynICState *synic = get_synic(cs);

    if (!synic) {
        return;
    }

    synic_update(synic, sctl_enable, msg_page_addr, event_page_addr);
}
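
/*
 * Back the message and event-flag pages with RAM regions owned by the SynIC
 * object; mapping them into guest memory is done later by synic_update().
 */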
static void synic_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SynICState *synic = SYNIC(dev);
    char *msgp_name, *eventp_name;
    uint32_t vp_index;

    /* memory region names have to be globally unique */
    vp_index = hyperv_vp_index(synic->cs);
    msgp_name = g_strdup_printf("synic-%u-msg-page", vp_index);
    eventp_name = g_strdup_printf("synic-%u-event-page", vp_index);

    memory_region_init_ram(&synic->msg_page_mr, obj, msgp_name,
                           sizeof(*synic->msg_page), &error_abort);
    memory_region_init_ram(&synic->event_page_mr, obj, eventp_name,
                           sizeof(*synic->event_page), &error_abort);
    synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr);
    synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr);
    qemu_mutex_init(&synic->sint_routes_mutex);
    QLIST_INIT(&synic->sint_routes);

    g_free(msgp_name);
    g_free(eventp_name);
}

static void synic_reset(DeviceState *dev)
{
    SynICState *synic = SYNIC(dev);

    memset(synic->msg_page, 0, sizeof(*synic->msg_page));
    memset(synic->event_page, 0, sizeof(*synic->event_page));
    synic_update(synic, false, 0, 0);
    assert(QLIST_EMPTY(&synic->sint_routes));
}

static void synic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = synic_realize;
    device_class_set_legacy_reset(dc, synic_reset);
    dc->user_creatable = false;
}

void hyperv_synic_add(CPUState *cs)
{
    Object *obj;
    SynICState *synic;

    obj = object_new(TYPE_SYNIC);
    synic = SYNIC(obj);
    synic->cs = cs;
    object_property_add_child(OBJECT(cs), "synic", obj);
    object_unref(obj);
    qdev_realize(DEVICE(obj), NULL, &error_abort);
    synic_enabled = true;
}

void hyperv_synic_reset(CPUState *cs)
{
    SynICState *synic = get_synic(cs);

    if (synic) {
        device_cold_reset(DEVICE(synic));
    }
}

static const TypeInfo synic_type_info = {
    .name = TYPE_SYNIC,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SynICState),
    .class_init = synic_class_init,
};

static void synic_register_types(void)
{
    type_register_static(&synic_type_info);
}

type_init(synic_register_types)

/*
 * KVM has its own message producers (SynIC timers).  To guarantee
 * serialization with both KVM vcpu and the guest cpu, the messages are first
 * staged in an intermediate area and then posted to the SynIC message page in
 * the vcpu thread.
 */
typedef struct HvSintStagedMessage {
    /* message content staged by hyperv_post_msg */
    struct hyperv_message msg;
    /* callback + data (r/o) to complete the processing in a BH */
    HvSintMsgCb cb;
    void *cb_data;
    /* message posting status filled by cpu_post_msg */
    int status;
    /* passing the buck: */
    enum {
        /* initial state */
        HV_STAGED_MSG_FREE,
        /*
         * hyperv_post_msg (e.g. in main loop) grabs the staged area (FREE ->
         * BUSY), copies msg, and schedules cpu_post_msg on the assigned cpu
         */
        HV_STAGED_MSG_BUSY,
        /*
         * cpu_post_msg (vcpu thread) tries to copy staged msg to msg slot,
         * notify the guest, records the status, marks the posting done (BUSY
         * -> POSTED), and schedules sint_msg_bh BH
         */
        HV_STAGED_MSG_POSTED,
        /*
         * sint_msg_bh (BH) verifies that the posting is done, runs the
         * callback, and starts over (POSTED -> FREE)
         */
    } state;
} HvSintStagedMessage;
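
/*
 * Refcounted descriptor of a single SINT on a single vcpu: the KVM GSI/irqfd
 * plumbing used to assert it, plus the optional staged-message state for
 * producers that need completion callbacks.
 */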
struct HvSintRoute {
    uint32_t sint;
    SynICState *synic;
    int gsi;
    EventNotifier sint_set_notifier;
    EventNotifier sint_ack_notifier;

    HvSintStagedMessage *staged_msg;

    unsigned refcount;
    QLIST_ENTRY(HvSintRoute) link;
};

static CPUState *hyperv_find_vcpu(uint32_t vp_index)
{
    CPUState *cs = qemu_get_cpu(vp_index);
    assert(hyperv_vp_index(cs) == vp_index);
    return cs;
}

/*
 * BH to complete the processing of a staged message.
 */
static void sint_msg_bh(void *opaque)
{
    HvSintRoute *sint_route = opaque;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    if (qatomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) {
        /* status not ready yet (spurious ack from guest?), ignore */
        return;
    }

    staged_msg->cb(staged_msg->cb_data, staged_msg->status);
    staged_msg->status = 0;

    /* staged message processing finished, ready to start over */
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_FREE);
    /* drop the reference taken in hyperv_post_msg */
    hyperv_sint_route_unref(sint_route);
}

/*
 * Worker to transfer the message from the staging area into the SynIC message
 * page in vcpu context.
 */
static void cpu_post_msg(CPUState *cs, run_on_cpu_data data)
{
    HvSintRoute *sint_route = data.host_ptr;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;
    SynICState *synic = sint_route->synic;
    struct hyperv_message *dst_msg;
    bool wait_for_sint_ack = false;

    assert(staged_msg->state == HV_STAGED_MSG_BUSY);

    if (!synic->msg_page_addr) {
        staged_msg->status = -ENXIO;
        goto posted;
    }

    dst_msg = &synic->msg_page->slot[sint_route->sint];

    if (dst_msg->header.message_type != HV_MESSAGE_NONE) {
        dst_msg->header.message_flags |= HV_MESSAGE_FLAG_PENDING;
        staged_msg->status = -EAGAIN;
        wait_for_sint_ack = true;
    } else {
        memcpy(dst_msg, &staged_msg->msg, sizeof(*dst_msg));
        staged_msg->status = hyperv_sint_route_set_sint(sint_route);
    }

    memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page));

posted:
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED);
    /*
     * Notify the msg originator of the progress made; if the slot was busy we
     * set msg_pending flag in it so it will be the guest who will do EOM and
     * trigger the notification from KVM via sint_ack_notifier
     */
    if (!wait_for_sint_ack) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh,
                                sint_route);
    }
}

/*
 * Post a Hyper-V message to the staging area, for delivery to guest in the
 * vcpu thread.
 */
int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg)
{
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    assert(staged_msg);

    /* grab the staging area */
    if (qatomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE,
                        HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) {
        return -EAGAIN;
    }

    memcpy(&staged_msg->msg, src_msg, sizeof(*src_msg));

    /* hold a reference on sint_route until the callback is finished */
    hyperv_sint_route_ref(sint_route);

    /* schedule message posting attempt in vcpu thread */
    async_run_on_cpu(sint_route->synic->cs, cpu_post_msg,
                     RUN_ON_CPU_HOST_PTR(sint_route));
    return 0;
}
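
/*
 * A minimal producer sketch (hypothetical caller; assumes a sint_route
 * created with a completion callback via hyperv_sint_route_new):
 *
 *     struct hyperv_message msg = { 0 };
 *     msg.header.message_type = ...;      // fill in type and payload
 *     if (hyperv_post_msg(sint_route, &msg) == -EAGAIN) {
 *         // staging area busy: retry once the previous message's
 *         // completion callback has run
 *     }
 */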

static void sint_ack_handler(EventNotifier *notifier)
{
    HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
                                           sint_ack_notifier);
    event_notifier_test_and_clear(notifier);

    /*
     * the guest consumed the previous message so complete the current one with
     * -EAGAIN and let the msg originator retry
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh, sint_route);
}

/*
 * Set given event flag for a given sint on a given vcpu, and signal the sint.
 */
int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno)
{
    int ret;
    SynICState *synic = sint_route->synic;
    unsigned long *flags, set_mask;
    unsigned set_idx;

    if (eventno > HV_EVENT_FLAGS_COUNT) {
        return -EINVAL;
    }
    if (!synic->sctl_enabled || !synic->event_page_addr) {
        return -ENXIO;
    }

    set_idx = BIT_WORD(eventno);
    set_mask = BIT_MASK(eventno);
    flags = synic->event_page->slot[sint_route->sint].flags;

    if ((qatomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) {
        memory_region_set_dirty(&synic->event_page_mr, 0,
                                sizeof(*synic->event_page));
        ret = hyperv_sint_route_set_sint(sint_route);
    } else {
        ret = 0;
    }
    return ret;
}
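
/*
 * Allocate a virtual IRQ and install a KVM_IRQ_ROUTING_HV_SINT routing entry
 * for it, so that signaling the associated irqfd injects the SINT into the
 * given vcpu.
 */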
static int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }
    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
    kroute.flags = 0;
    kroute.u.hv_sint.vcpu = vcpu;
    kroute.u.hv_sint.sint = sint;

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}
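
/*
 * Create a route for delivering messages and/or events to the given SINT of
 * the given vcpu.  A non-NULL cb enables message staging with completion
 * notification; the GSI/irqfd pair is only set up if SynIC is enabled at
 * creation time.
 */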
HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint,
                                   HvSintMsgCb cb, void *cb_data)
{
    HvSintRoute *sint_route = NULL;
    EventNotifier *ack_notifier = NULL;
    int r, gsi;
    CPUState *cs;
    SynICState *synic;
    bool ack_event_initialized = false;

    cs = hyperv_find_vcpu(vp_index);
    if (!cs) {
        return NULL;
    }

    synic = get_synic(cs);
    if (!synic) {
        return NULL;
    }

    sint_route = g_new0(HvSintRoute, 1);
    if (!sint_route) {
        return NULL;
    }

    sint_route->synic = synic;
    sint_route->sint = sint;
    sint_route->refcount = 1;

    ack_notifier = cb ? &sint_route->sint_ack_notifier : NULL;
    if (ack_notifier) {
        sint_route->staged_msg = g_new0(HvSintStagedMessage, 1);
        if (!sint_route->staged_msg) {
            goto cleanup_err_sint;
        }
        sint_route->staged_msg->cb = cb;
        sint_route->staged_msg->cb_data = cb_data;

        r = event_notifier_init(ack_notifier, false);
        if (r) {
            goto cleanup_err_sint;
        }
        event_notifier_set_handler(ack_notifier, sint_ack_handler);
        ack_event_initialized = true;
    }

    /* See if we are done or we need to setup a GSI for this SintRoute */
    if (!synic->sctl_enabled) {
        goto out;
    }

    /* We need to setup a GSI for this SintRoute */
    r = event_notifier_init(&sint_route->sint_set_notifier, false);
    if (r) {
        goto cleanup_err_sint;
    }

    gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint);
    if (gsi < 0) {
        goto cleanup_err_sint_notifier;
    }

    r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &sint_route->sint_set_notifier,
                                           NULL, gsi);
    if (r) {
        goto cleanup_err_irqfd;
    }
    sint_route->gsi = gsi;
out:
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_INSERT_HEAD(&synic->sint_routes, sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);
    return sint_route;

cleanup_err_irqfd:
    kvm_irqchip_release_virq(kvm_state, gsi);

cleanup_err_sint_notifier:
    event_notifier_cleanup(&sint_route->sint_set_notifier);

cleanup_err_sint:
    if (ack_event_initialized) {
        event_notifier_set_handler(ack_notifier, NULL);
        event_notifier_cleanup(ack_notifier);
    }

    g_free(sint_route->staged_msg);
    g_free(sint_route);
    return NULL;
}

void hyperv_sint_route_ref(HvSintRoute *sint_route)
{
    sint_route->refcount++;
}

void hyperv_sint_route_unref(HvSintRoute *sint_route)
{
    SynICState *synic;

    if (!sint_route) {
        return;
    }

    assert(sint_route->refcount > 0);

    if (--sint_route->refcount) {
        return;
    }

    synic = sint_route->synic;
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_REMOVE(sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);

    if (sint_route->gsi) {
        kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
                                              &sint_route->sint_set_notifier,
                                              sint_route->gsi);
        kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
        event_notifier_cleanup(&sint_route->sint_set_notifier);
    }

    if (sint_route->staged_msg) {
        event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
        event_notifier_cleanup(&sint_route->sint_ack_notifier);
        g_free(sint_route->staged_msg);
    }
    g_free(sint_route);
}

int hyperv_sint_route_set_sint(HvSintRoute *sint_route)
{
    if (!sint_route->gsi) {
        return 0;
    }

    return event_notifier_set(&sint_route->sint_set_notifier);
}
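
/*
 * Userspace connection-id handlers.  Both lists are RCU-protected: hypercall
 * paths walk them under the RCU read lock, while registration and removal
 * serialize on handlers_mutex.
 */
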
typedef struct MsgHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(MsgHandler) link;
    uint32_t conn_id;
    HvMsgHandler handler;
    void *data;
} MsgHandler;

typedef struct EventFlagHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(EventFlagHandler) link;
    uint32_t conn_id;
    EventNotifier *notifier;
} EventFlagHandler;

static QLIST_HEAD(, MsgHandler) msg_handlers;
static QLIST_HEAD(, EventFlagHandler) event_flag_handlers;
static QemuMutex handlers_mutex;

static void __attribute__((constructor)) hv_init(void)
{
    QLIST_INIT(&msg_handlers);
    QLIST_INIT(&event_flag_handlers);
    qemu_mutex_init(&handlers_mutex);
}

int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
{
    int ret;
    MsgHandler *mh;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(mh, &msg_handlers, link) {
        if (mh->conn_id == conn_id) {
            if (handler) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(mh, link);
                g_free_rcu(mh, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (handler) {
        mh = g_new(MsgHandler, 1);
        mh->conn_id = conn_id;
        mh->handler = handler;
        mh->data = data;
        QLIST_INSERT_HEAD_RCU(&msg_handlers, mh, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}
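
/*
 * HvPostMessage hypercall: map the guest input page, validate it, and
 * dispatch to the handler registered for the target connection id.
 */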
uint16_t hyperv_hcall_post_message(uint64_t param, bool fast)
{
    uint16_t ret;
    hwaddr len;
    struct hyperv_post_message_input *msg;
    MsgHandler *mh;

    if (fast) {
        return HV_STATUS_INVALID_HYPERCALL_CODE;
    }
    if (param & (__alignof__(*msg) - 1)) {
        return HV_STATUS_INVALID_ALIGNMENT;
    }

    len = sizeof(*msg);
    msg = cpu_physical_memory_map(param, &len, 0);
    if (len < sizeof(*msg)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto unmap;
    }
    if (msg->payload_size > sizeof(msg->payload)) {
        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
        goto unmap;
    }

    ret = HV_STATUS_INVALID_CONNECTION_ID;
    WITH_RCU_READ_LOCK_GUARD() {
        QLIST_FOREACH_RCU(mh, &msg_handlers, link) {
            if (mh->conn_id == (msg->connection_id & HV_CONNECTION_ID_MASK)) {
                ret = mh->handler(msg, mh->data);
                break;
            }
        }
    }

unmap:
    cpu_physical_memory_unmap(msg, len, 0, 0);
    return ret;
}

static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    int ret;
    EventFlagHandler *handler;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(handler, &event_flag_handlers, link) {
        if (handler->conn_id == conn_id) {
            if (notifier) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(handler, link);
                g_free_rcu(handler, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (notifier) {
        handler = g_new(EventFlagHandler, 1);
        handler->conn_id = conn_id;
        handler->notifier = notifier;
        QLIST_INSERT_HEAD_RCU(&event_flag_handlers, handler, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}

static bool process_event_flags_userspace;

int hyperv_set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    if (!process_event_flags_userspace &&
        !kvm_check_extension(kvm_state, KVM_CAP_HYPERV_EVENTFD)) {
        process_event_flags_userspace = true;

        warn_report("Hyper-V event signaling is not supported by this kernel; "
                    "using slower userspace hypercall processing");
    }

    if (!process_event_flags_userspace) {
        struct kvm_hyperv_eventfd hvevfd = {
            .conn_id = conn_id,
            .fd = notifier ? event_notifier_get_fd(notifier) : -1,
            .flags = notifier ? 0 : KVM_HYPERV_EVENTFD_DEASSIGN,
        };

        return kvm_vm_ioctl(kvm_state, KVM_HYPERV_EVENTFD, &hvevfd);
    }
    return set_event_flag_handler(conn_id, notifier);
}
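
/*
 * HvSignalEvent hypercall.  In the fast variant the parameter arrives in a
 * register; otherwise param is a guest physical address from which the
 * actual parameter is loaded.
 */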
uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast)
{
    EventFlagHandler *handler;

    if (unlikely(!fast)) {
        hwaddr addr = param;

        if (addr & (__alignof__(addr) - 1)) {
            return HV_STATUS_INVALID_ALIGNMENT;
        }

        param = ldq_phys(&address_space_memory, addr);
    }

    /*
     * Per spec, bits 32-47 contain the extra "flag number".  However, we
     * have no use for it, and in all known usecases it is zero, so just
     * report lookup failure if it isn't.
     */
    if (param & 0xffff00000000ULL) {
        return HV_STATUS_INVALID_PORT_ID;
    }
    /* remaining bits are reserved-zero */
    if (param & ~HV_CONNECTION_ID_MASK) {
        return HV_STATUS_INVALID_HYPERCALL_INPUT;
    }

    RCU_READ_LOCK_GUARD();
    QLIST_FOREACH_RCU(handler, &event_flag_handlers, link) {
        if (handler->conn_id == param) {
            event_notifier_set(handler->notifier);
            return 0;
        }
    }
    return HV_STATUS_INVALID_CONNECTION_ID;
}

static HvSynDbgHandler hv_syndbg_handler;
static void *hv_syndbg_context;

void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context)
{
    assert(!hv_syndbg_handler);
    hv_syndbg_handler = handler;
    hv_syndbg_context = context;
}
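
/*
 * Synthetic debugger (SynDbg) hypercalls and helpers.  All of them funnel
 * into the single registered hv_syndbg_handler; without one they fail with
 * an appropriate status.
 */
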
uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa)
{
    uint16_t ret;
    HvSynDbgMsg msg;
    struct hyperv_reset_debug_session_output *reset_dbg_session = NULL;
    hwaddr len;

    if (!hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    len = sizeof(*reset_dbg_session);
    reset_dbg_session = cpu_physical_memory_map(outgpa, &len, 1);
    if (!reset_dbg_session || len < sizeof(*reset_dbg_session)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_CONNECTION_INFO;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret) {
        goto cleanup;
    }

    reset_dbg_session->host_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->host_port = msg.u.connection_info.host_port;
    /* The following fields are only used as validation for KDVM */
    memset(&reset_dbg_session->host_mac, 0,
           sizeof(reset_dbg_session->host_mac));
    reset_dbg_session->target_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->target_port = msg.u.connection_info.host_port;
    memset(&reset_dbg_session->target_mac, 0,
           sizeof(reset_dbg_session->target_mac));
cleanup:
    if (reset_dbg_session) {
        cpu_physical_memory_unmap(reset_dbg_session,
                                  sizeof(*reset_dbg_session), 1, len);
    }

    return ret;
}

uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa,
                                        bool fast)
{
    uint16_t ret;
    struct hyperv_retrieve_debug_data_input *debug_data_in = NULL;
    struct hyperv_retrieve_debug_data_output *debug_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*debug_data_in);
    debug_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!debug_data_in || in_len < sizeof(*debug_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    out_len = sizeof(*debug_data_out);
    debug_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!debug_data_out || out_len < sizeof(*debug_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = outgpa + sizeof(*debug_data_out);
    msg.u.recv.count = TARGET_PAGE_SIZE - sizeof(*debug_data_out);
    msg.u.recv.options = debug_data_in->options;
    msg.u.recv.timeout = debug_data_in->timeout;
    msg.u.recv.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret == HV_STATUS_NO_DATA) {
        debug_data_out->retrieved_count = 0;
        debug_data_out->remaining_count = debug_data_in->count;
        goto cleanup;
    } else if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    debug_data_out->retrieved_count = msg.u.recv.retrieved_count;
    debug_data_out->remaining_count =
        debug_data_in->count - msg.u.recv.retrieved_count;
cleanup:
    if (debug_data_out) {
        cpu_physical_memory_unmap(debug_data_out, sizeof(*debug_data_out), 1,
                                  out_len);
    }

    if (debug_data_in) {
        cpu_physical_memory_unmap(debug_data_in, sizeof(*debug_data_in), 0,
                                  in_len);
    }

    return ret;
}
, uint64_t outgpa
, bool fast
)
853 struct hyperv_post_debug_data_input
*post_data_in
= NULL
;
854 struct hyperv_post_debug_data_output
*post_data_out
= NULL
;
855 hwaddr in_len
, out_len
;
858 if (fast
|| !hv_syndbg_handler
) {
859 ret
= HV_STATUS_INVALID_HYPERCALL_CODE
;
863 in_len
= sizeof(*post_data_in
);
864 post_data_in
= cpu_physical_memory_map(ingpa
, &in_len
, 0);
865 if (!post_data_in
|| in_len
< sizeof(*post_data_in
)) {
866 ret
= HV_STATUS_INSUFFICIENT_MEMORY
;
870 if (post_data_in
->count
> TARGET_PAGE_SIZE
- sizeof(*post_data_in
)) {
871 ret
= HV_STATUS_INVALID_PARAMETER
;
875 out_len
= sizeof(*post_data_out
);
876 post_data_out
= cpu_physical_memory_map(outgpa
, &out_len
, 1);
877 if (!post_data_out
|| out_len
< sizeof(*post_data_out
)) {
878 ret
= HV_STATUS_INSUFFICIENT_MEMORY
;
882 msg
.type
= HV_SYNDBG_MSG_SEND
;
883 msg
.u
.send
.buf_gpa
= ingpa
+ sizeof(*post_data_in
);
884 msg
.u
.send
.count
= post_data_in
->count
;
885 msg
.u
.send
.is_raw
= true;
886 ret
= hv_syndbg_handler(hv_syndbg_context
, &msg
);
887 if (ret
!= HV_STATUS_SUCCESS
) {
891 post_data_out
->pending_count
= msg
.u
.send
.pending_count
;
892 ret
= post_data_out
->pending_count
? HV_STATUS_INSUFFICIENT_BUFFERS
:
896 cpu_physical_memory_unmap(post_data_out
,
897 sizeof(*post_data_out
), 1, out_len
);
901 cpu_physical_memory_unmap(post_data_in
,
902 sizeof(*post_data_in
), 0, in_len
);

uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa;
    msg.u.send.count = count;
    msg.u.send.is_raw = false;
    if (hv_syndbg_handler(hv_syndbg_context, &msg)) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    return HV_SYNDBG_STATUS_SEND_SUCCESS;
}

uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count)
{
    uint16_t ret;
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = ingpa;
    msg.u.recv.count = count;
    msg.u.recv.options = 0;
    msg.u.recv.timeout = 0;
    msg.u.recv.is_raw = false;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        return 0;
    }

    return HV_SYNDBG_STATUS_SET_SIZE(HV_SYNDBG_STATUS_RECV_SUCCESS,
                                     msg.u.recv.retrieved_count);
}

void hyperv_syndbg_set_pending_page(uint64_t ingpa)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return;
    }

    msg.type = HV_SYNDBG_MSG_SET_PENDING_PAGE;
    msg.u.pending_page.buf_gpa = ingpa;
    hv_syndbg_handler(hv_syndbg_context, &msg);
}

uint64_t hyperv_syndbg_query_options(void)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return 0;
    }

    msg.type = HV_SYNDBG_MSG_QUERY_OPTIONS;
    if (hv_syndbg_handler(hv_syndbg_context, &msg) != HV_STATUS_SUCCESS) {
        return 0;
    }

    return msg.u.query_options.options;
}

static bool vmbus_recommended_features_enabled;

bool hyperv_are_vmbus_recommended_features_enabled(void)
{
    return vmbus_recommended_features_enabled;
}

void hyperv_set_vmbus_recommended_features_enabled(void)
{
    vmbus_recommended_features_enabled = true;
}