/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "x86.h"
#include "lapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}
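
/*
 * Note on the SINT register layout used by the helpers above: the target
 * vector sits in the low bits (HV_SYNIC_SINT_VECTOR_MASK), while MASKED and
 * AUTO_EOI are separate control bits, which is why a masked SINT reports no
 * vector here.
 */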

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				       int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	int i;
	u64 sint_value;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	if (vector < 16 && !host)
		return 1;
	/*
	 * Guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by synic, and a
	 * bitmap of vectors with auto-eoi behavior. The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */

	atomic64_set(&synic->sint[sint], data);

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
	return 0;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vcpu_id)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	if (vcpu_id >= atomic_read(&kvm->online_vcpus))
		return NULL;
	vcpu = kvm_get_vcpu(kvm, vcpu_id);
	if (!vcpu)
		return NULL;
	synic = vcpu_to_synic(vcpu);
	return (synic->active) ? synic : NULL;
}

static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
					 u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *msg;
	struct hv_message_page *msg_page;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page)) {
		vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
			 gpa);
		return;
	}
	msg_page = kmap_atomic(page);

	msg = &msg_page->sint_message[sint];
	msg->header.message_flags.msg_pending = 0;

	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}
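
/*
 * Clearing msg_pending above drops the "message pending" indication in the
 * guest-visible message slot; it happens once the guest has acknowledged the
 * SINT (see kvm_hv_notify_acked_sint() below), so the next timer message can
 * be delivered.
 */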

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx, stimers_pending;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
		synic_clear_sint_msg_pending(synic, sint);

	/* Try to deliver pending Hyper-V SynIC timers messages */
	stimers_pending = 0;
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending &&
		    (stimer->config & HV_STIMER_ENABLE) &&
		    HV_STIMER_SINT(stimer->config) == sint) {
			set_bit(stimer->index,
				hv_vcpu->stimer_pending_bitmap);
			stimers_pending++;
		}
	}
	if (stimers_pending)
		kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
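
/*
 * synic_exit() forwards guest SynIC MSR writes to userspace as a
 * KVM_EXIT_HYPERV_SYNIC exit, so the VMM (e.g. QEMU) can track the current
 * control value and the event/message page addresses.
 */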

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	int ret;

	if (!synic->active)
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if (data & HV_SYNIC_SIEFP_ENABLE)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if (data & HV_SYNIC_SIMP_ENABLE)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
{
	int ret;

	if (!synic->active)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.dest_id = kvm_apic_id(vcpu->arch.apic);
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, NULL, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vcpu_id);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	int i;

	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vcpu_id, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vcpu_id);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}

static u64 get_time_ref_counter(struct kvm *kvm)
{
	return div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
}
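
/*
 * The Hyper-V partition reference time counter is defined in 100ns units;
 * kvmclock values are nanoseconds, hence the division by 100 above.
 */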

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	set_bit(stimer->index,
		vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);

	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;

	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);

	return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config & HV_STIMER_PERIODIC) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;

				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;

		trace_kvm_hv_stimer_start_periodic(
					stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);

		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire timer according to Hypervisor Top-Level Functional
		 * specification v4 (15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}

	trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}
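
/*
 * stimer->exp_time and stimer->count are expressed in reference counter
 * ticks (100ns units), while hrtimers take nanoseconds, hence the
 * multiplication by 100 when arming the timer above.
 */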

static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);

	stimer_cleanup(stimer);
	if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
		config &= ~HV_STIMER_ENABLE;
	stimer->config = config;
	stimer_mark_pending(stimer, false);
	return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);

	stimer_cleanup(stimer);
	stimer->count = count;
	if (stimer->count == 0)
		stimer->config &= ~HV_STIMER_ENABLE;
	else if (stimer->config & HV_STIMER_AUTOENABLE)
		stimer->config |= HV_STIMER_ENABLE;
	stimer_mark_pending(stimer, false);
	return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config;
	return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}

static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *dst_msg;
	int r;
	struct hv_message_page *msg_page;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		return -EFAULT;

	msg_page = kmap_atomic(page);
	dst_msg = &msg_page->sint_message[sint];
	if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
			 src_msg->header.message_type) != HVMSG_NONE) {
		dst_msg->header.message_flags.msg_pending = 1;
		r = -EAGAIN;
	} else {
		memcpy(&dst_msg->u.payload, &src_msg->u.payload,
		       src_msg->header.payload_size);
		dst_msg->header.message_type = src_msg->header.message_type;
		dst_msg->header.payload_size = src_msg->header.payload_size;
		r = synic_set_irq(synic, sint);
	}
	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
	return r;
}
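
/*
 * Message delivery protocol: sync_cmpxchg() claims an empty (HVMSG_NONE)
 * slot in the guest's message page.  If the slot is still occupied, only
 * msg_pending is set, so that the guest's later EOM write triggers a retry
 * via kvm_hv_notify_acked_sint().
 */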

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(vcpu_to_synic(vcpu),
				 HV_STIMER_SINT(stimer->config), msg);
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r;

	stimer->msg_pending = true;
	r = stimer_send_msg(stimer);
	trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config & HV_STIMER_PERIODIC))
			stimer->config &= ~HV_STIMER_ENABLE;
	}
}
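
/*
 * After a successful delivery a one-shot timer disables itself; a periodic
 * timer stays enabled and is re-armed by kvm_hv_process_stimers() below.
 */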

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config & HV_STIMER_ENABLE) {
				exp_time = stimer->exp_time;

				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}

				if ((stimer->config & HV_STIMER_ENABLE) &&
				    stimer->count)
					stimer_start(stimer);
				else
					stimer_cleanup(stimer);
			}
		}
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);
}

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);

	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer->timer.function = stimer_timer_callback;
	stimer_prepare_msg(stimer);
}

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	synic_init(&hv_vcpu->synic);

	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu)
{
	/*
	 * Hyper-V SynIC auto EOI SINT's are
	 * not compatible with APICV, so deactivate APICV
	 */
	kvm_vcpu_deactivate_apicv(vcpu);
	vcpu_to_synic(vcpu)->active = true;
	return 0;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
		r = true;
		break;
	}

	return r;
}
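
/*
 * Partition-wide MSRs live in kvm->arch.hyperv and are serialized with
 * kvm->lock in kvm_hv_{set,get}_msr_common() further down; everything else
 * is per-vcpu state in vcpu->arch.hyperv.
 */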

static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	*pdata = hv->hv_crash_param[index];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (host)
		hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

	if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
			  hv->hv_crash_param[0],
			  hv->hv_crash_param[1],
			  hv->hv_crash_param[2],
			  hv->hv_crash_param[3],
			  hv->hv_crash_param[4]);

		/* Send notification about crash to user space */
		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
	}

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 data)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	hv->hv_crash_param[index] = data;
	return 0;
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		hv->hv_hypercall = data;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC: {
		u64 gfn;
		HV_REFERENCE_TSC_PAGE tsc_ref;

		memset(&tsc_ref, 0, sizeof(tsc_ref));
		hv->hv_tsc_page = data;
		if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
			break;
		gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
		if (kvm_write_guest(kvm,
				    gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
				    &tsc_ref, sizeof(tsc_ref)))
			return 1;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	cputime_t utime, stime;

	task_cputime_adjusted(current, &utime, &stime);
	return div_u64(cputime_to_nsecs(utime + stime), 100);
}
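
/*
 * HV_X64_MSR_VP_RUNTIME reports the virtual processor's run time in 100ns
 * units.  runtime_offset (writable by the host only, see below) rebases the
 * reported value, presumably so it can be kept consistent across
 * save/restore.
 */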

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			hv->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		if (__clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		hv->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
					 data, host);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
					data, host);
	}
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = get_time_ref_counter(kvm);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		int r;
		struct kvm_vcpu *v;

		kvm_for_each_vcpu(r, v, vcpu->kvm) {
			if (v == vcpu) {
				data = r;
				break;
			}
		}
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_APIC_ASSIST_PAGE:
		data = hv->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv->runtime_offset;
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
					 pdata);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
					pdata);
	}
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;

	longmode = is_64_bit_mode(vcpu);
	if (longmode)
		kvm_register_write(vcpu, VCPU_REGS_RAX, result);
	else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
	}
}
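
/*
 * Hypercall results are returned in RAX in 64-bit mode and split across
 * EDX:EAX in 32-bit mode, mirroring how the input registers are read in
 * kvm_hv_hypercall() below.
 */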

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
	return 1;
}
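
/*
 * Hypercall input value layout used below: call code in bits 0-15, the
 * "fast" flag in bit 16, rep count in bits 32-43 and rep start index in
 * bits 48-59.  Parameters arrive in RCX/RDX/R8 in 64-bit mode, or split
 * across EDX:EAX, EBX:ECX and EDI:ESI in 32-bit mode.
 */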

int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;

	/* hypercall generates UD from non zero cpl and real mode */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	longmode = is_64_bit_mode(vcpu);

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	/* Hypercall continuation is not supported yet */
	if (rep_cnt || rep_idx) {
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		goto set_result;
	}

	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		kvm_vcpu_on_spin(vcpu);
		break;
	case HVCALL_POST_MESSAGE:
	case HVCALL_SIGNAL_EVENT:
		/* don't bother userspace if it has no way to handle it */
		if (!vcpu_to_synic(vcpu)->active) {
			res = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = param;
		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

set_result:
	ret = res | (((u64)rep_done & 0xfff) << 32);
	kvm_hv_hypercall_set_result(vcpu, ret);
	return 1;
}