/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
        return atomic64_read(&synic->sint[sint]);
}
static inline int synic_get_sint_vector(u64 sint_value)
{
        if (sint_value & HV_SYNIC_SINT_MASKED)
                return -1;
        return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}
static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
                                       int vector)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        return true;
        }
        return false;
}
static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
                                      int vector)
{
        int i;
        u64 sint_value;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                sint_value = synic_read_sint(synic, i);
                if (synic_get_sint_vector(sint_value) == vector &&
                    sint_value & HV_SYNIC_SINT_AUTO_EOI)
                        return true;
        }
        return false;
}
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
                          u64 data, bool host)
{
        int vector;

        vector = data & HV_SYNIC_SINT_VECTOR_MASK;
        if (vector < 16 && !host)
                return 1;
        /*
         * Guest may configure multiple SINTs to use the same vector, so
         * we maintain a bitmap of vectors handled by synic, and a
         * bitmap of vectors with auto-eoi behavior.  The bitmaps are
         * updated here, and atomically queried on fast paths.
         */

        atomic64_set(&synic->sint[sint], data);

        if (synic_has_vector_connected(synic, vector))
                __set_bit(vector, synic->vec_bitmap);
        else
                __clear_bit(vector, synic->vec_bitmap);

        if (synic_has_vector_auto_eoi(synic, vector))
                __set_bit(vector, synic->auto_eoi_bitmap);
        else
                __clear_bit(vector, synic->auto_eoi_bitmap);

        /* Load SynIC vectors into EOI exit bitmap */
        kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
        return 0;
}
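/*
 * Illustrative example (not from the code above, just an instance of it): if
 * the guest programs SINT0 and SINT1 with the same vector 0x40 and only SINT1
 * carries HV_SYNIC_SINT_AUTO_EOI, then masking SINT1 later leaves vector 0x40
 * set in vec_bitmap (SINT0 still uses it) but clears it from auto_eoi_bitmap.
 */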
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
        struct kvm_vcpu *vcpu = NULL;
        int i;

        if (vpidx < KVM_MAX_VCPUS)
                vcpu = kvm_get_vcpu(kvm, vpidx);
        if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
                        return vcpu;
        return NULL;
}
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_hv_synic *synic;

        vcpu = get_vcpu_by_vpidx(kvm, vpidx);
        if (!vcpu)
                return NULL;
        synic = vcpu_to_synic(vcpu);
        return (synic->active) ? synic : NULL;
}
static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
                                         u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *msg;
        struct hv_message_page *msg_page;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page)) {
                vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
                         gpa);
                return;
        }
        msg_page = kmap_atomic(page);

        msg = &msg_page->sint_message[sint];
        msg->header.message_flags.msg_pending = 0;

        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        int gsi, idx, stimers_pending;

        trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

        if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
                synic_clear_sint_msg_pending(synic, sint);

        /* Try to deliver pending Hyper-V SynIC timers messages */
        stimers_pending = 0;
        for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
                stimer = &hv_vcpu->stimer[idx];
                if (stimer->msg_pending &&
                    (stimer->config & HV_STIMER_ENABLE) &&
                    HV_STIMER_SINT(stimer->config) == sint) {
                        set_bit(stimer->index,
                                hv_vcpu->stimer_pending_bitmap);
                        stimers_pending++;
                }
        }
        if (stimers_pending)
                kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = atomic_read(&synic->sint_to_gsi[sint]);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

        hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
        hv_vcpu->exit.u.synic.msr = msr;
        hv_vcpu->exit.u.synic.control = synic->control;
        hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
        hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
        kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
                         u32 msr, u64 data, bool host)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        int ret;

        if (!synic->active)
                return 1;

        trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                synic->control = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SVERSION:
                if (!host) {
                        ret = 1;
                        break;
                }
                synic->version = data;
                break;
        case HV_X64_MSR_SIEFP:
                if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
                    !synic->dont_zero_synic_pages)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->evt_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SIMP:
                if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
                    !synic->dont_zero_synic_pages)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->msg_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_EOM: {
                int i;

                for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                        kvm_hv_notify_acked_sint(vcpu, i);
                break;
        }
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
{
        int ret;

        if (!synic->active)
                return 1;

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                *pdata = synic->control;
                break;
        case HV_X64_MSR_SVERSION:
                *pdata = synic->version;
                break;
        case HV_X64_MSR_SIEFP:
                *pdata = synic->evt_page;
                break;
        case HV_X64_MSR_SIMP:
                *pdata = synic->msg_page;
                break;
        case HV_X64_MSR_EOM:
                *pdata = 0;
                break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_lapic_irq irq;
        int ret, vector;

        if (sint >= ARRAY_SIZE(synic->sint))
                return -EINVAL;

        vector = synic_get_sint_vector(synic_read_sint(synic, sint));
        if (vector < 0)
                return -ENOENT;

        memset(&irq, 0, sizeof(irq));
        irq.shorthand = APIC_DEST_SELF;
        irq.dest_mode = APIC_DEST_PHYSICAL;
        irq.delivery_mode = APIC_DM_FIXED;
        irq.vector = vector;
        irq.level = 1;

        ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
        trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
        return ret;
}
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vpidx);
        if (!synic)
                return -EINVAL;

        return synic_set_irq(synic, sint);
}
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        int i;

        trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        kvm_hv_notify_acked_sint(vcpu, i);
}
static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vpidx);
        if (!synic)
                return -EINVAL;

        if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
                return -EINVAL;

        atomic_set(&synic->sint_to_gsi[sint], gsi);
        return 0;
}
void kvm_hv_irq_routing_update(struct kvm *kvm)
{
        struct kvm_irq_routing_table *irq_rt;
        struct kvm_kernel_irq_routing_entry *e;
        u32 gsi;

        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                        lockdep_is_held(&kvm->irq_lock));

        for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
                hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                        if (e->type == KVM_IRQ_ROUTING_HV_SINT)
                                kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
                                                    e->hv_sint.sint, gsi);
                }
        }
}
static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
        int i;

        memset(synic, 0, sizeof(*synic));
        synic->version = HV_SYNIC_VERSION_1;
        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
                atomic_set(&synic->sint_to_gsi[i], -1);
        }
}
static u64 get_time_ref_counter(struct kvm *kvm)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct kvm_vcpu *vcpu;
        u64 tsc;

        /*
         * The guest has not set up the TSC page or the clock isn't
         * stable, fall back to get_kvmclock_ns.
         */
        if (!hv->tsc_ref.tsc_sequence)
                return div_u64(get_kvmclock_ns(kvm), 100);

        vcpu = kvm_get_vcpu(kvm, 0);
        tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
                + hv->tsc_ref.tsc_offset;
}
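/*
 * The reference counter ticks in 100ns units.  For illustration: with no TSC
 * page set up, get_kvmclock_ns() == 1,234,500 ns reads back as 12,345; with
 * the TSC page active, the same scale/offset pair the guest sees (computed in
 * compute_tsc_page_parameters() below) is applied to the host-read TSC, so
 * both views of the clock stay consistent.
 */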
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
                                bool vcpu_kick)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        set_bit(stimer->index,
                vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
        if (vcpu_kick)
                kvm_vcpu_kick(vcpu);
}
static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
                                    stimer->index);

        hrtimer_cancel(&stimer->timer);
        clear_bit(stimer->index,
                  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        stimer->msg_pending = false;
        stimer->exp_time = 0;
}
static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
        struct kvm_vcpu_hv_stimer *stimer;

        stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
        trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
                                     stimer->index);
        stimer_mark_pending(stimer, true);

        return HRTIMER_NORESTART;
}
/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
        u64 time_now;
        ktime_t ktime_now;

        time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
        ktime_now = ktime_get();

        if (stimer->config & HV_STIMER_PERIODIC) {
                if (stimer->exp_time) {
                        if (time_now >= stimer->exp_time) {
                                u64 remainder;

                                div64_u64_rem(time_now - stimer->exp_time,
                                              stimer->count, &remainder);
                                stimer->exp_time =
                                        time_now + (stimer->count - remainder);
                        }
                } else
                        stimer->exp_time = time_now + stimer->count;

                trace_kvm_hv_stimer_start_periodic(
                                        stimer_to_vcpu(stimer)->vcpu_id,
                                        stimer->index,
                                        time_now, stimer->exp_time);

                hrtimer_start(&stimer->timer,
                              ktime_add_ns(ktime_now,
                                           100 * (stimer->exp_time - time_now)),
                              HRTIMER_MODE_ABS);
                return 0;
        }
        stimer->exp_time = stimer->count;
        if (time_now >= stimer->count) {
                /*
                 * Expire timer according to Hypervisor Top-Level Functional
                 * Specification v4(15.3.1):
                 * "If a one shot is enabled and the specified count is in
                 * the past, it will expire immediately."
                 */
                stimer_mark_pending(stimer, false);
                return 0;
        }

        trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
                                           stimer->index,
                                           time_now, stimer->count);

        hrtimer_start(&stimer->timer,
                      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
                      HRTIMER_MODE_ABS);
        return 0;
}
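/*
 * Worked example for the periodic catch-up above (illustrative numbers only):
 * with count == 100000 (10 ms in 100ns units) and time_now 250000 ticks past
 * the stale exp_time, remainder == 50000 and the new exp_time becomes
 * time_now + 50000, i.e. the next expiry stays on the original period grid.
 */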
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
                             bool host)
{
        trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, config, host);

        stimer_cleanup(stimer);
        if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
                config &= ~HV_STIMER_ENABLE;
        stimer->config = config;
        stimer_mark_pending(stimer, false);
        return 0;
}
static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
                            bool host)
{
        trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
                                      stimer->index, count, host);

        stimer_cleanup(stimer);
        stimer->count = count;
        if (stimer->count == 0)
                stimer->config &= ~HV_STIMER_ENABLE;
        else if (stimer->config & HV_STIMER_AUTOENABLE)
                stimer->config |= HV_STIMER_ENABLE;
        stimer_mark_pending(stimer, false);
        return 0;
}
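/*
 * Usage note: a guest that has already written a config with
 * HV_STIMER_AUTOENABLE set can (re)arm the timer with a single write to the
 * COUNT MSR, while writing a count of 0 disarms it.  The actual hrtimer
 * (re)start is deferred to kvm_hv_process_stimers() via
 * stimer_mark_pending(..., false).
 */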
static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
        *pconfig = stimer->config;
        return 0;
}
static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
        *pcount = stimer->count;
        return 0;
}
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
                             struct hv_message *src_msg)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *dst_msg;
        int r;
        struct hv_message_page *msg_page;

        if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
                return -ENOENT;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                return -EFAULT;

        msg_page = kmap_atomic(page);
        dst_msg = &msg_page->sint_message[sint];
        if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
                         src_msg->header.message_type) != HVMSG_NONE) {
                dst_msg->header.message_flags.msg_pending = 1;
                r = -EAGAIN;
        } else {
                memcpy(&dst_msg->u.payload, &src_msg->u.payload,
                       src_msg->header.payload_size);
                dst_msg->header.message_type = src_msg->header.message_type;
                dst_msg->header.payload_size = src_msg->header.payload_size;
                r = synic_set_irq(synic, sint);
                if (r >= 1)
                        r = 0;
                else if (r == 0)
                        r = -EFAULT;
        }
        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
        return r;
}
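/*
 * Message-slot protocol, as implemented above: the slot in the SIMP page is
 * claimed by cmpxchg'ing message_type from HVMSG_NONE.  If the slot is still
 * in use, msg_pending is set instead, so that once the guest consumes the
 * current message and writes HV_X64_MSR_EOM, kvm_hv_notify_acked_sint()
 * re-queues the timer message and delivery is retried.
 */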
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        payload->expiration_time = stimer->exp_time;
        payload->delivery_time = get_time_ref_counter(vcpu->kvm);
        return synic_deliver_msg(vcpu_to_synic(vcpu),
                                 HV_STIMER_SINT(stimer->config), msg);
}
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
        int r;

        stimer->msg_pending = true;
        r = stimer_send_msg(stimer);
        trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, r);
        if (!r) {
                stimer->msg_pending = false;
                if (!(stimer->config & HV_STIMER_PERIODIC))
                        stimer->config &= ~HV_STIMER_ENABLE;
        }
}
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        u64 time_now, exp_time;
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
                        stimer = &hv_vcpu->stimer[i];
                        if (stimer->config & HV_STIMER_ENABLE) {
                                exp_time = stimer->exp_time;

                                if (exp_time) {
                                        time_now =
                                                get_time_ref_counter(vcpu->kvm);
                                        if (time_now >= exp_time)
                                                stimer_expiration(stimer);
                                }

                                if ((stimer->config & HV_STIMER_ENABLE) &&
                                    stimer->count) {
                                        if (!stimer->msg_pending)
                                                stimer_start(stimer);
                                } else
                                        stimer_cleanup(stimer);
                        }
                }
}
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_cleanup(&hv_vcpu->stimer[i]);
}
static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        memset(&msg->header, 0, sizeof(msg->header));
        msg->header.message_type = HVMSG_TIMER_EXPIRED;
        msg->header.payload_size = sizeof(*payload);

        payload->timer_index = stimer->index;
        payload->expiration_time = 0;
        payload->delivery_time = 0;
}
static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
        memset(stimer, 0, sizeof(*stimer));
        stimer->index = timer_index;
        hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        stimer->timer.function = stimer_timer_callback;
        stimer_prepare_msg(stimer);
}
void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        synic_init(&hv_vcpu->synic);

        bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_init(&hv_vcpu->stimer[i], i);
}
void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);

        hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
}
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

        /*
         * Hyper-V SynIC auto EOI SINTs are
         * not compatible with APICV, so deactivate APICV
         */
        kvm_vcpu_deactivate_apicv(vcpu);
        synic->active = true;
        synic->dont_zero_synic_pages = dont_zero_synic_pages;
        return 0;
}
static bool kvm_hv_msr_partition_wide(u32 msr)
{
        bool r = false;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
        case HV_X64_MSR_RESET:
                r = true;
                break;
        }

        return r;
}
static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
        size_t size = ARRAY_SIZE(hv->hv_crash_param);

        if (WARN_ON_ONCE(index >= size))
                return -EINVAL;

        *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
        return 0;
}
static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        *pdata = hv->hv_crash_ctl;
        return 0;
}
static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (host)
                hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

        if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

                vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
                           hv->hv_crash_param[0],
                           hv->hv_crash_param[1],
                           hv->hv_crash_param[2],
                           hv->hv_crash_param[3],
                           hv->hv_crash_param[4]);

                /* Send notification about crash to user space */
                kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
        }

        return 0;
}
static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 data)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
        size_t size = ARRAY_SIZE(hv->hv_crash_param);

        if (WARN_ON_ONCE(index >= size))
                return -EINVAL;

        hv->hv_crash_param[array_index_nospec(index, size)] = data;
        return 0;
}
/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
                                        HV_REFERENCE_TSC_PAGE *tsc_ref)
{
        u64 max_mul;

        if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
                return false;

        /*
         * check if scale would overflow, if so we use the time ref counter
         *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
         *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
         *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
         */
        max_mul = 100ull << (32 - hv_clock->tsc_shift);
        if (hv_clock->tsc_to_system_mul >= max_mul)
                return false;

        /*
         * Otherwise compute the scale and offset according to the formulas
         * derived above.
         */
        tsc_ref->tsc_scale =
                mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
                                hv_clock->tsc_to_system_mul,
                                100);

        tsc_ref->tsc_offset = hv_clock->system_time;
        do_div(tsc_ref->tsc_offset, 100);
        tsc_ref->tsc_offset -=
                mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
        return true;
}
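/*
 * Worked example (illustrative numbers only): for tsc_to_system_mul = 2^31
 * and tsc_shift = 1 the kvmclock tick is 2^31 * 2^(1-32) = 1 ns, and
 * scale = 2^31 * 2^(32+1) / 100 = 2^64 / 100, so ticks * scale / 2^64 equals
 * ticks / 100, i.e. one reference-counter unit per 100 ns as expected.
 */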
void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        u32 tsc_seq;
        u64 gfn;

        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);

        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                return;

        mutex_lock(&kvm->arch.hyperv.hv_lock);
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                goto out_unlock;

        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
        /*
         * Because the TSC parameters only vary when there is a
         * change in the master clock, do not bother with caching.
         */
        if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                    &tsc_seq, sizeof(tsc_seq))))
                goto out_unlock;

        /*
         * While we're computing and writing the parameters, force the
         * guest to use the time reference count MSR.
         */
        hv->tsc_ref.tsc_sequence = 0;
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
                goto out_unlock;

        if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
                goto out_unlock;

        /* Ensure sequence is zero before writing the rest of the struct.  */
        smp_wmb();
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
                goto out_unlock;

        /*
         * Now switch to the TSC page mechanism by writing the sequence.
         */
        tsc_seq++;
        if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
                tsc_seq = 1;

        /* Write the struct entirely before the non-zero sequence.  */
        smp_wmb();

        hv->tsc_ref.tsc_sequence = tsc_seq;
        kvm_write_guest(kvm, gfn_to_gpa(gfn),
                        &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
out_unlock:
        mutex_unlock(&kvm->arch.hyperv.hv_lock);
}
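/*
 * The sequence field acts like a seqlock generation: 0 (and 0xFFFFFFFF for
 * older guests) means "TSC page invalid, use HV_X64_MSR_TIME_REF_COUNT".  A
 * guest reader is expected to do roughly: read tsc_sequence; if it is valid,
 * read scale/offset and compute (tsc * scale >> 64) + offset; then re-read
 * tsc_sequence and retry if it changed.
 */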
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                             bool host)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                hv->hv_guest_os_id = data;
                /* setting guest os id to zero disables hypercall page */
                if (!hv->hv_guest_os_id)
                        hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
                break;
        case HV_X64_MSR_HYPERCALL: {
                u64 gfn;
                unsigned long addr;
                u8 instructions[4];

                /* if guest os id is not set hypercall should remain disabled */
                if (!hv->hv_guest_os_id)
                        break;
                if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
                        hv->hv_hypercall = data;
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
                addr = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
                ((unsigned char *)instructions)[3] = 0xc3; /* ret */
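                /*
                 * For reference (illustrative, the exact bytes come from
                 * ->patch_hypercall()): on VMX this is vmcall (0f 01 c1) and
                 * on SVM vmmcall (0f 01 d9), so the guest-visible page starts
                 * with e.g. "0f 01 c1 c3", i.e. vmcall; ret.
                 */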
                if (__copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                hv->hv_hypercall = data;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_REFERENCE_TSC:
                hv->hv_tsc_page = data;
                if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 data);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        case HV_X64_MSR_RESET:
                if (data == 1) {
                        vcpu_debug(vcpu, "hyper-v reset requested\n");
                        kvm_make_request(KVM_REQ_HV_RESET, vcpu);
                }
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }
        return 0;
}
/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);

        return div_u64(utime + stime, 100);
}
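/*
 * E.g. utime + stime == 1,234,567 ns of accumulated task CPU time reads back
 * as 12,345 through HV_X64_MSR_VP_RUNTIME (the remainder is truncated).
 */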
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX:
                if (!host)
                        return 1;
                hv->vp_index = (u32)data;
                break;
        case HV_X64_MSR_APIC_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;

                if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
                        hv->hv_vapic = data;
                        if (kvm_lapic_enable_pv_eoi(vcpu, 0))
                                return 1;
                        break;
                }
                gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
                addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                hv->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
                                            gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
                        return 1;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
        case HV_X64_MSR_VP_RUNTIME:
                if (!host)
                        return 1;
                hv->runtime_offset = data - current_task_runtime_100ns();
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
                                         data, host);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
                                        data, host);
        }
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }

        return 0;
}
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                data = hv->hv_guest_os_id;
                break;
        case HV_X64_MSR_HYPERCALL:
                data = hv->hv_hypercall;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
                data = get_time_ref_counter(kvm);
                break;
        case HV_X64_MSR_REFERENCE_TSC:
                data = hv->hv_tsc_page;
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_get_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 pdata);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
        case HV_X64_MSR_RESET:
                data = 0;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }

        *pdata = data;
        return 0;
}
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX:
                data = hv->vp_index;
                break;
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        case HV_X64_MSR_APIC_ASSIST_PAGE:
                data = hv->hv_vapic;
                break;
        case HV_X64_MSR_VP_RUNTIME:
                data = current_task_runtime_100ns() + hv->runtime_offset;
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
                                         pdata);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
                                        pdata);
        }
        case HV_X64_MSR_TSC_FREQUENCY:
                data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
                break;
        case HV_X64_MSR_APIC_FREQUENCY:
                data = APIC_BUS_FREQUENCY;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
}
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata);
}
bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
        return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
}
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
        bool longmode;

        longmode = is_64_bit_mode(vcpu);
        if (longmode)
                kvm_register_write(vcpu, VCPU_REGS_RAX, result);
        else {
                kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
                kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
        }
}
static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
        return kvm_skip_emulated_instruction(vcpu);
}
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
        u64 param, ingpa, outgpa, ret;
        uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
        bool fast, longmode;

        /*
         * hypercall generates UD from non zero cpl and real mode
         * per HYPER-V spec
         */
        if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        longmode = is_64_bit_mode(vcpu);

        if (!longmode) {
                param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
                ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
                outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
                         (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
        }
#ifdef CONFIG_X86_64
        else {
                param = kvm_register_read(vcpu, VCPU_REGS_RCX);
                ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
                outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
        }
#endif

        code = param & 0xffff;
        fast = (param >> 16) & 0x1;
        rep_cnt = (param >> 32) & 0xfff;
        rep_idx = (param >> 48) & 0xfff;
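        /*
         * Hypercall input value layout, matching the masks above (per the
         * Hyper-V TLFS): bits 0-15 call code, bit 16 fast-call flag,
         * bits 32-43 rep count, bits 48-59 rep start index.
         */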

        trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

        /* Hypercall continuation is not supported yet */
        if (rep_cnt || rep_idx) {
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                goto set_result;
        }

        switch (code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
                kvm_vcpu_on_spin(vcpu, true);
                break;
        case HVCALL_POST_MESSAGE:
        case HVCALL_SIGNAL_EVENT:
                /* don't bother userspace if it has no way to handle it */
                if (!vcpu_to_synic(vcpu)->active) {
                        res = HV_STATUS_INVALID_HYPERCALL_CODE;
                        break;
                }
                vcpu->run->exit_reason = KVM_EXIT_HYPERV;
                vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
                vcpu->run->hyperv.u.hcall.input = param;
                vcpu->run->hyperv.u.hcall.params[0] = ingpa;
                vcpu->run->hyperv.u.hcall.params[1] = outgpa;
                vcpu->arch.complete_userspace_io =
                                kvm_hv_hypercall_complete_userspace;
                return 0;
        default:
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }

set_result:
        ret = res | (((u64)rep_done & 0xfff) << 32);
        kvm_hv_hypercall_set_result(vcpu, ret);
        return 1;
}