/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
                                bool vcpu_kick);

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
        return atomic64_read(&synic->sint[sint]);
}
static inline int synic_get_sint_vector(u64 sint_value)
{
        if (sint_value & HV_SYNIC_SINT_MASKED)
                return -1;
        return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}
static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
                                       int vector)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        return true;
        }
        return false;
}
static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
                                      int vector)
{
        int i;
        u64 sint_value;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                sint_value = synic_read_sint(synic, i);
                if (synic_get_sint_vector(sint_value) == vector &&
                    sint_value & HV_SYNIC_SINT_AUTO_EOI)
                        return true;
        }
        return false;
}
static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
                                int vector)
{
        if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
                return;

        if (synic_has_vector_connected(synic, vector))
                __set_bit(vector, synic->vec_bitmap);
        else
                __clear_bit(vector, synic->vec_bitmap);

        if (synic_has_vector_auto_eoi(synic, vector))
                __set_bit(vector, synic->auto_eoi_bitmap);
        else
                __clear_bit(vector, synic->auto_eoi_bitmap);
}
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
                          u64 data, bool host)
{
        int vector, old_vector;
        bool masked;

        vector = data & HV_SYNIC_SINT_VECTOR_MASK;
        masked = data & HV_SYNIC_SINT_MASKED;

        /*
         * Valid vectors are 16-255, however, nested Hyper-V attempts to write
         * default '0x10000' value on boot and this should not #GP. We need to
         * allow zero-initing the register from host as well.
         */
        if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
                return 1;
        /*
         * Guest may configure multiple SINTs to use the same vector, so
         * we maintain a bitmap of vectors handled by synic, and a
         * bitmap of vectors with auto-eoi behavior. The bitmaps are
         * updated here, and atomically queried on fast paths.
         */
        old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;

        atomic64_set(&synic->sint[sint], data);

        synic_update_vector(synic, old_vector);

        synic_update_vector(synic, vector);

        /* Load SynIC vectors into EOI exit bitmap */
        kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
        return 0;
}
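
/*
 * Illustrative note (added, not part of the original source): a SINT
 * register value packs the delivery vector in its low byte
 * (HV_SYNIC_SINT_VECTOR_MASK) alongside the HV_SYNIC_SINT_MASKED and
 * HV_SYNIC_SINT_AUTO_EOI control bits, so the default '0x10000' mentioned
 * above decodes as "masked, vector 0" (assuming the usual bit-16 masked
 * flag) and is accepted here without injecting a #GP.
 */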
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
        struct kvm_vcpu *vcpu = NULL;
        int i;

        if (vpidx >= KVM_MAX_VCPUS)
                return NULL;

        vcpu = kvm_get_vcpu(kvm, vpidx);
        if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
                        return vcpu;
        return NULL;
}
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_hv_synic *synic;

        vcpu = get_vcpu_by_vpidx(kvm, vpidx);
        if (!vcpu)
                return NULL;
        synic = vcpu_to_synic(vcpu);
        return (synic->active) ? synic : NULL;
}
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        int gsi, idx;

        trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

        /* Try to deliver pending Hyper-V SynIC timer messages */
        for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
                stimer = &hv_vcpu->stimer[idx];
                if (stimer->msg_pending && stimer->config.enable &&
                    !stimer->config.direct_mode &&
                    stimer->config.sintx == sint)
                        stimer_mark_pending(stimer, false);
        }

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = atomic_read(&synic->sint_to_gsi[sint]);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

        hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
        hv_vcpu->exit.u.synic.msr = msr;
        hv_vcpu->exit.u.synic.control = synic->control;
        hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
        hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

        kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
                         u32 msr, u64 data, bool host)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        int ret;

        if (!synic->active && !host)
                return 1;

        trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                synic->control = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SVERSION:
                if (!host) {
                        ret = 1;
                        break;
                }
                synic->version = data;
                break;
        case HV_X64_MSR_SIEFP:
                if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
                    !synic->dont_zero_synic_pages)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->evt_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SIMP:
                if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
                    !synic->dont_zero_synic_pages)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->msg_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_EOM: {
                int i;

                for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                        kvm_hv_notify_acked_sint(vcpu, i);
                break;
        }
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
                         bool host)
{
        int ret;

        if (!synic->active && !host)
                return 1;

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                *pdata = synic->control;
                break;
        case HV_X64_MSR_SVERSION:
                *pdata = synic->version;
                break;
        case HV_X64_MSR_SIEFP:
                *pdata = synic->evt_page;
                break;
        case HV_X64_MSR_SIMP:
                *pdata = synic->msg_page;
                break;
        case HV_X64_MSR_EOM:
                *pdata = 0;
                break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_lapic_irq irq;
        int ret, vector;

        if (sint >= ARRAY_SIZE(synic->sint))
                return -EINVAL;

        vector = synic_get_sint_vector(synic_read_sint(synic, sint));
        if (vector < 0)
                return -ENOENT;

        memset(&irq, 0, sizeof(irq));
        irq.shorthand = APIC_DEST_SELF;
        irq.dest_mode = APIC_DEST_PHYSICAL;
        irq.delivery_mode = APIC_DM_FIXED;
        irq.vector = vector;
        irq.level = 1;

        ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
        trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
        return ret;
}
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vpidx);
        if (!synic)
                return -EINVAL;

        return synic_set_irq(synic, sint);
}
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        int i;

        trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        kvm_hv_notify_acked_sint(vcpu, i);
}
static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vpidx);
        if (!synic)
                return -EINVAL;

        if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
                return -EINVAL;

        atomic_set(&synic->sint_to_gsi[sint], gsi);
        return 0;
}
void kvm_hv_irq_routing_update(struct kvm *kvm)
{
        struct kvm_irq_routing_table *irq_rt;
        struct kvm_kernel_irq_routing_entry *e;
        u32 gsi;

        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                        lockdep_is_held(&kvm->irq_lock));

        for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
                hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                        if (e->type == KVM_IRQ_ROUTING_HV_SINT)
                                kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
                                                    e->hv_sint.sint, gsi);
                }
        }
}
static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
        int i;

        memset(synic, 0, sizeof(*synic));
        synic->version = HV_SYNIC_VERSION_1;
        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
                atomic_set(&synic->sint_to_gsi[i], -1);
        }
}
static u64 get_time_ref_counter(struct kvm *kvm)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct kvm_vcpu *vcpu;
        u64 tsc;

        /*
         * The guest has not set up the TSC page or the clock isn't
         * stable, fall back to get_kvmclock_ns.
         */
        if (!hv->tsc_ref.tsc_sequence)
                return div_u64(get_kvmclock_ns(kvm), 100);

        vcpu = kvm_get_vcpu(kvm, 0);
        tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
                + hv->tsc_ref.tsc_offset;
}
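
/*
 * Worked example (added, not in the original): the reference counter ticks
 * in 100ns units, so without an active TSC page a guest that reads
 * HV_X64_MSR_TIME_REF_COUNT one second after the kvmclock epoch sees
 * roughly get_kvmclock_ns() / 100 == 10,000,000.
 */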
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
                                bool vcpu_kick)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        set_bit(stimer->index,
                vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
        if (vcpu_kick)
                kvm_vcpu_kick(vcpu);
}
static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
                                    stimer->index);

        hrtimer_cancel(&stimer->timer);
        clear_bit(stimer->index,
                  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        stimer->msg_pending = false;
        stimer->exp_time = 0;
}
static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
        struct kvm_vcpu_hv_stimer *stimer;

        stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
        trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
                                     stimer->index);
        stimer_mark_pending(stimer, true);

        return HRTIMER_NORESTART;
}
/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
        u64 time_now;
        ktime_t ktime_now;

        time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
        ktime_now = ktime_get();

        if (stimer->config.periodic) {
                if (stimer->exp_time) {
                        if (time_now >= stimer->exp_time) {
                                u64 remainder;

                                div64_u64_rem(time_now - stimer->exp_time,
                                              stimer->count, &remainder);
                                stimer->exp_time =
                                        time_now + (stimer->count - remainder);
                        }
                } else
                        stimer->exp_time = time_now + stimer->count;

                trace_kvm_hv_stimer_start_periodic(
                                        stimer_to_vcpu(stimer)->vcpu_id,
                                        stimer->index,
                                        time_now, stimer->exp_time);

                hrtimer_start(&stimer->timer,
                              ktime_add_ns(ktime_now,
                                           100 * (stimer->exp_time - time_now)),
                              HRTIMER_MODE_ABS);
                return 0;
        }
        stimer->exp_time = stimer->count;
        if (time_now >= stimer->count) {
                /*
                 * Expire timer according to Hypervisor Top-Level Functional
                 * Specification v4 (15.3.1):
                 * "If a one shot is enabled and the specified count is in
                 * the past, it will expire immediately."
                 */
                stimer_mark_pending(stimer, false);
                return 0;
        }

        trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
                                           stimer->index,
                                           time_now, stimer->count);

        hrtimer_start(&stimer->timer,
                      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
                      HRTIMER_MODE_ABS);
        return 0;
}
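
/*
 * Note (added for clarity): stimer counts and expiration times are in the
 * 100ns units returned by get_time_ref_counter(), while hrtimer_start()
 * expects nanoseconds relative to ktime_now, hence the "100 *" scaling in
 * both hrtimer_start() calls above.
 */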
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
                             bool host)
{
        union hv_stimer_config new_config = {.as_uint64 = config},
                old_config = {.as_uint64 = stimer->config.as_uint64};

        trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, config, host);

        stimer_cleanup(stimer);
        if (old_config.enable &&
            !new_config.direct_mode && new_config.sintx == 0)
                new_config.enable = 0;
        stimer->config.as_uint64 = new_config.as_uint64;

        if (stimer->config.enable)
                stimer_mark_pending(stimer, false);

        return 0;
}
static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
                            bool host)
{
        trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
                                      stimer->index, count, host);

        stimer_cleanup(stimer);
        stimer->count = count;
        if (stimer->count == 0)
                stimer->config.enable = 0;
        else if (stimer->config.auto_enable)
                stimer->config.enable = 1;

        if (stimer->config.enable)
                stimer_mark_pending(stimer, false);

        return 0;
}
static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
        *pconfig = stimer->config.as_uint64;
        return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
        *pcount = stimer->count;
        return 0;
}
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
                             struct hv_message *src_msg, bool no_retry)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
        gfn_t msg_page_gfn;
        struct hv_message_header hv_hdr;
        int r;

        if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
                return -ENOENT;

        msg_page_gfn = synic->msg_page >> PAGE_SHIFT;

        /*
         * Strictly following the spec-mandated ordering would assume setting
         * .msg_pending before checking .message_type. However, this function
         * is only called in vcpu context so the entire update is atomic from
         * guest POV and thus the exact order here doesn't matter.
         */
        r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
                                     msg_off + offsetof(struct hv_message,
                                                        header.message_type),
                                     sizeof(hv_hdr.message_type));
        if (r < 0)
                return r;

        if (hv_hdr.message_type != HVMSG_NONE) {
                if (no_retry)
                        return 0;

                hv_hdr.message_flags.msg_pending = 1;
                r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
                                              &hv_hdr.message_flags,
                                              msg_off +
                                              offsetof(struct hv_message,
                                                       header.message_flags),
                                              sizeof(hv_hdr.message_flags));
                if (r < 0)
                        return r;
                return -EAGAIN;
        }

        r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
                                      sizeof(src_msg->header) +
                                      src_msg->header.payload_size);
        if (r < 0)
                return r;

        r = synic_set_irq(synic, sint);
        if (r < 0)
                return r;
        return 0;
}
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        /*
         * To avoid piling up periodic ticks, don't retry message
         * delivery for them (within "lazy" lost ticks policy).
         */
        bool no_retry = stimer->config.periodic;

        payload->expiration_time = stimer->exp_time;
        payload->delivery_time = get_time_ref_counter(vcpu->kvm);
        return synic_deliver_msg(vcpu_to_synic(vcpu),
                                 stimer->config.sintx, msg,
                                 no_retry);
}
static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct kvm_lapic_irq irq = {
                .delivery_mode = APIC_DM_FIXED,
                .vector = stimer->config.apic_vector
        };

        return !kvm_apic_set_irq(vcpu, &irq, NULL);
}
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
        int r, direct = stimer->config.direct_mode;

        stimer->msg_pending = true;
        if (!direct)
                r = stimer_send_msg(stimer);
        else
                r = stimer_notify_direct(stimer);
        trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, direct, r);
        if (!r) {
                stimer->msg_pending = false;
                if (!(stimer->config.periodic))
                        stimer->config.enable = 0;
        }
}
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        u64 time_now, exp_time;
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
                        stimer = &hv_vcpu->stimer[i];
                        if (stimer->config.enable) {
                                exp_time = stimer->exp_time;

                                if (exp_time) {
                                        time_now =
                                                get_time_ref_counter(vcpu->kvm);
                                        if (time_now >= exp_time)
                                                stimer_expiration(stimer);
                                }

                                if ((stimer->config.enable) &&
                                    stimer->count) {
                                        if (!stimer->msg_pending)
                                                stimer_start(stimer);
                                } else
                                        stimer_cleanup(stimer);
                        }
                }
}
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_cleanup(&hv_vcpu->stimer[i]);
}
bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{
        if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
                return false;
        return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
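
/*
 * Note (added for clarity): the VP assist page counts as enabled only when
 * both the Hyper-V enable bit is set in hv_vapic and the pv_eoi guest cache
 * that backs it (set up in kvm_hv_set_msr() for HV_X64_MSR_VP_ASSIST_PAGE)
 * is active, i.e. KVM_MSR_ENABLED is set in pv_eoi.msr_val.
 */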
bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
                            struct hv_vp_assist_page *assist_page)
{
        if (!kvm_hv_assist_page_enabled(vcpu))
                return false;
        return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
                                      assist_page, sizeof(*assist_page));
}
EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        memset(&msg->header, 0, sizeof(msg->header));
        msg->header.message_type = HVMSG_TIMER_EXPIRED;
        msg->header.payload_size = sizeof(*payload);

        payload->timer_index = stimer->index;
        payload->expiration_time = 0;
        payload->delivery_time = 0;
}
static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
        memset(stimer, 0, sizeof(*stimer));
        stimer->index = timer_index;
        hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        stimer->timer.function = stimer_timer_callback;
        stimer_prepare_msg(stimer);
}
void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        synic_init(&hv_vcpu->synic);

        bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_init(&hv_vcpu->stimer[i], i);
}
void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);

        hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
}
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

        /*
         * Hyper-V SynIC auto EOI SINTs are not compatible with APICV,
         * so deactivate APICV.
         */
        kvm_vcpu_deactivate_apicv(vcpu);
        synic->active = true;
        synic->dont_zero_synic_pages = dont_zero_synic_pages;
        return 0;
}
static bool kvm_hv_msr_partition_wide(u32 msr)
{
        bool r = false;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
        case HV_X64_MSR_RESET:
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
        case HV_X64_MSR_TSC_EMULATION_STATUS:
                r = true;
                break;
        }

        return r;
}
static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        *pdata = hv->hv_crash_param[index];
        return 0;
}
static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        *pdata = hv->hv_crash_ctl;
        return 0;
}
static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (host)
                hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;

        if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) {

                vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
                          hv->hv_crash_param[0],
                          hv->hv_crash_param[1],
                          hv->hv_crash_param[2],
                          hv->hv_crash_param[3],
                          hv->hv_crash_param[4]);

                /* Send notification about crash to user space */
                kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
        }

        return 0;
}
static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 data)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        hv->hv_crash_param[index] = data;
        return 0;
}
/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
                                        HV_REFERENCE_TSC_PAGE *tsc_ref)
{
        u64 max_mul;

        if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
                return false;

        /*
         * Check if scale would overflow; if so we use the time ref counter:
         *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
         *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
         *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
         */
        max_mul = 100ull << (32 - hv_clock->tsc_shift);
        if (hv_clock->tsc_to_system_mul >= max_mul)
                return false;

        /*
         * Otherwise compute the scale and offset according to the formulas
         * derived above.
         */
        tsc_ref->tsc_scale =
                mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
                                hv_clock->tsc_to_system_mul,
                                100);

        tsc_ref->tsc_offset = hv_clock->system_time;
        do_div(tsc_ref->tsc_offset, 100);
        tsc_ref->tsc_offset -=
                mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
        return true;
}
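
/*
 * Worked example (added, not in the original): for a 1 GHz guest TSC the
 * kvmclock parameters can be tsc_shift = 0, tsc_to_system_mul = 2^32
 * (so nsec == ticks). The formulas above then give
 *    scale = 2^32 * 2^32 / 100 = 2^64 / 100
 * and the Hyper-V page yields ticks * scale / 2^64 = ticks / 100, i.e. one
 * 100ns unit per 100 TSC ticks, as expected for a 1 GHz counter. The
 * overflow check also passes, since 2^32 < 100 * 2^32.
 */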
void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        u32 tsc_seq;
        u64 gfn;

        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);

        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                return;

        mutex_lock(&kvm->arch.hyperv.hv_lock);
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                goto out_unlock;

        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
        /*
         * Because the TSC parameters only vary when there is a
         * change in the master clock, do not bother with caching.
         */
        if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                    &tsc_seq, sizeof(tsc_seq))))
                goto out_unlock;

        /*
         * While we're computing and writing the parameters, force the
         * guest to use the time reference count MSR.
         */
        hv->tsc_ref.tsc_sequence = 0;
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
                goto out_unlock;

        if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
                goto out_unlock;

        /* Ensure sequence is zero before writing the rest of the struct. */
        smp_wmb();
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
                goto out_unlock;

        /*
         * Now switch to the TSC page mechanism by writing the sequence.
         */
        tsc_seq++;
        if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
                tsc_seq = 1;

        /* Write the struct entirely before the non-zero sequence. */
        smp_wmb();

        hv->tsc_ref.tsc_sequence = tsc_seq;
        kvm_write_guest(kvm, gfn_to_gpa(gfn),
                        &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
out_unlock:
        mutex_unlock(&kvm->arch.hyperv.hv_lock);
}
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                             bool host)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                hv->hv_guest_os_id = data;
                /* setting guest os id to zero disables hypercall page */
                if (!hv->hv_guest_os_id)
                        hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
                break;
        case HV_X64_MSR_HYPERCALL: {
                u64 gfn;
                unsigned long addr;
                u8 instructions[4];

                /* if guest os id is not set hypercall should remain disabled */
                if (!hv->hv_guest_os_id)
                        break;
                if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
                        hv->hv_hypercall = data;
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
                addr = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
                ((unsigned char *)instructions)[3] = 0xc3; /* ret */
                if (__copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                hv->hv_hypercall = data;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_REFERENCE_TSC:
                hv->hv_tsc_page = data;
                if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 data);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        case HV_X64_MSR_RESET:
                if (data == 1) {
                        vcpu_debug(vcpu, "hyper-v reset requested\n");
                        kvm_make_request(KVM_REQ_HV_RESET, vcpu);
                }
                break;
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
                hv->hv_reenlightenment_control = data;
                break;
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
                hv->hv_tsc_emulation_control = data;
                break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
                hv->hv_tsc_emulation_status = data;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
                /* read-only, but still ignore it if host-initiated */
                if (!host)
                        return 1;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }
        return 0;
}
/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);

        return div_u64(utime + stime, 100);
}
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX: {
                struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
                int vcpu_idx = kvm_vcpu_get_idx(vcpu);
                u32 new_vp_index = (u32)data;

                if (!host || new_vp_index >= KVM_MAX_VCPUS)
                        return 1;

                if (new_vp_index == hv_vcpu->vp_index)
                        return 0;

                /*
                 * The VP index is initialized to vcpu_idx by
                 * kvm_hv_vcpu_postcreate so they initially match.  Now the
                 * VP index is changing, adjust num_mismatched_vp_indexes if
                 * it now matches or no longer matches vcpu_idx.
                 */
                if (hv_vcpu->vp_index == vcpu_idx)
                        atomic_inc(&hv->num_mismatched_vp_indexes);
                else if (new_vp_index == vcpu_idx)
                        atomic_dec(&hv->num_mismatched_vp_indexes);

                hv_vcpu->vp_index = new_vp_index;
                break;
        }
        case HV_X64_MSR_VP_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;

                if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
                        hv_vcpu->hv_vapic = data;
                        if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
                                return 1;
                        break;
                }
                gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
                addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;

                /*
                 * Clear apic_assist portion of struct hv_vp_assist_page
                 * only, there can be valuable data in the rest which needs
                 * to be preserved e.g. on migration.
                 */
                if (__clear_user((void __user *)addr, sizeof(u32)))
                        return 1;
                hv_vcpu->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
                                            gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
                                            sizeof(struct hv_vp_assist_page)))
                        return 1;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
        case HV_X64_MSR_VP_RUNTIME:
                if (!host)
                        return 1;
                hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
                                         data, host);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
                                        data, host);
        }
        case HV_X64_MSR_TSC_FREQUENCY:
        case HV_X64_MSR_APIC_FREQUENCY:
                /* read-only, but still ignore it if host-initiated */
                if (!host)
                        return 1;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }

        return 0;
}
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                data = hv->hv_guest_os_id;
                break;
        case HV_X64_MSR_HYPERCALL:
                data = hv->hv_hypercall;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
                data = get_time_ref_counter(kvm);
                break;
        case HV_X64_MSR_REFERENCE_TSC:
                data = hv->hv_tsc_page;
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_get_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 pdata);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
        case HV_X64_MSR_RESET:
                data = 0;
                break;
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
                data = hv->hv_reenlightenment_control;
                break;
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
                data = hv->hv_tsc_emulation_control;
                break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
                data = hv->hv_tsc_emulation_status;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }

        *pdata = data;
        return 0;
}
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
                          bool host)
{
        u64 data = 0;
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX:
                data = hv_vcpu->vp_index;
                break;
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        case HV_X64_MSR_VP_ASSIST_PAGE:
                data = hv_vcpu->hv_vapic;
                break;
        case HV_X64_MSR_VP_RUNTIME:
                data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
                                         pdata);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
                                        pdata);
        }
        case HV_X64_MSR_TSC_FREQUENCY:
                data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
                break;
        case HV_X64_MSR_APIC_FREQUENCY:
                data = APIC_BUS_FREQUENCY;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata, host);
}
static __always_inline unsigned long *sparse_set_to_vcpu_mask(
        struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
        u64 *vp_bitmap, unsigned long *vcpu_bitmap)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct kvm_vcpu *vcpu;
        int i, bank, sbank = 0;

        memset(vp_bitmap, 0,
               KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
        for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
                         KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
                vp_bitmap[bank] = sparse_banks[sbank++];

        if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
                /* for all vcpus vp_index == vcpu_idx */
                return (unsigned long *)vp_bitmap;
        }

        bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index,
                             (unsigned long *)vp_bitmap))
                        __set_bit(i, vcpu_bitmap);
        }
        return vcpu_bitmap;
}
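
/*
 * Illustrative note (added, not in the original): in the sparse VP-set
 * format each 64-bit "bank" covers 64 consecutive VP indexes, and
 * valid_bank_mask selects which banks are present in sparse_banks[].
 * E.g. valid_bank_mask = 0x5 carries banks 0 and 2, so sparse_banks[0]
 * describes VPs 0-63 and sparse_banks[1] describes VPs 128-191.
 */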
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
                            u16 rep_cnt, bool ex)
{
        struct kvm *kvm = current_vcpu->kvm;
        struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv;
        struct hv_tlb_flush_ex flush_ex;
        struct hv_tlb_flush flush;
        u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
        DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
        unsigned long *vcpu_mask;
        u64 valid_bank_mask;
        u64 sparse_banks[64];
        int sparse_banks_len;
        bool all_cpus;

        if (!ex) {
                if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;

                trace_kvm_hv_flush_tlb(flush.processor_mask,
                                       flush.address_space, flush.flags);

                valid_bank_mask = BIT_ULL(0);
                sparse_banks[0] = flush.processor_mask;

                /*
                 * Work around possible WS2012 bug: it sends hypercalls
                 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
                 * while also expecting us to flush something and crashing if
                 * we don't. Let's treat processor_mask == 0 same as
                 * HV_FLUSH_ALL_PROCESSORS.
                 */
                all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
                        flush.processor_mask == 0;
        } else {
                if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
                                            sizeof(flush_ex))))
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;

                trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
                                          flush_ex.hv_vp_set.format,
                                          flush_ex.address_space,
                                          flush_ex.flags);

                valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
                all_cpus = flush_ex.hv_vp_set.format !=
                        HV_GENERIC_SET_SPARSE_4K;

                sparse_banks_len =
                        bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
                        sizeof(sparse_banks[0]);

                if (!sparse_banks_len && !all_cpus)
                        goto ret_success;

                if (!all_cpus &&
                    kvm_read_guest(kvm,
                                   ingpa + offsetof(struct hv_tlb_flush_ex,
                                                    hv_vp_set.bank_contents),
                                   sparse_banks,
                                   sparse_banks_len))
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;
        }

        cpumask_clear(&hv_vcpu->tlb_flush);

        vcpu_mask = all_cpus ? NULL :
                sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
                                        vp_bitmap, vcpu_bitmap);

        /*
         * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
         * analyze it here, flush TLB regardless of the specified address space.
         */
        kvm_make_vcpus_request_mask(kvm,
                                    KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
                                    vcpu_mask, &hv_vcpu->tlb_flush);

ret_success:
        /* We always do full TLB flush, set rep_done = rep_cnt. */
        return (u64)HV_STATUS_SUCCESS |
                ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}
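
/*
 * Note (added for clarity): rep hypercalls report the number of completed
 * reps in the bits starting at HV_HYPERCALL_REP_COMP_OFFSET of the result,
 * next to the low status word; since the flush above always covers every
 * requested entry, the full rep_cnt is handed back to the guest.
 */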
static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
                                 unsigned long *vcpu_bitmap)
{
        struct kvm_lapic_irq irq = {
                .delivery_mode = APIC_DM_FIXED,
                .vector = vector
        };
        struct kvm_vcpu *vcpu;
        int i;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
                        continue;

                /* We fail only when APIC is disabled */
                kvm_apic_set_irq(vcpu, &irq, NULL);
        }
}
static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
                           bool ex, bool fast)
{
        struct kvm *kvm = current_vcpu->kvm;
        struct hv_send_ipi_ex send_ipi_ex;
        struct hv_send_ipi send_ipi;
        u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
        DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
        unsigned long *vcpu_mask;
        unsigned long valid_bank_mask;
        u64 sparse_banks[64];
        int sparse_banks_len;
        u32 vector;
        bool all_cpus;

        if (!ex) {
                if (!fast) {
                        if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
                                                    sizeof(send_ipi))))
                                return HV_STATUS_INVALID_HYPERCALL_INPUT;
                        sparse_banks[0] = send_ipi.cpu_mask;
                        vector = send_ipi.vector;
                } else {
                        /* 'reserved' part of hv_send_ipi should be 0 */
                        if (unlikely(ingpa >> 32 != 0))
                                return HV_STATUS_INVALID_HYPERCALL_INPUT;
                        sparse_banks[0] = outgpa;
                        vector = (u32)ingpa;
                }
                all_cpus = false;
                valid_bank_mask = BIT_ULL(0);

                trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
        } else {
                if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
                                            sizeof(send_ipi_ex))))
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;

                trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
                                         send_ipi_ex.vp_set.format,
                                         send_ipi_ex.vp_set.valid_bank_mask);

                vector = send_ipi_ex.vector;
                valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
                sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
                        sizeof(sparse_banks[0]);

                all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;

                if (!sparse_banks_len)
                        goto ret_success;

                if (!all_cpus &&
                    kvm_read_guest(kvm,
                                   ingpa + offsetof(struct hv_send_ipi_ex,
                                                    vp_set.bank_contents),
                                   sparse_banks,
                                   sparse_banks_len))
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;
        }

        if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
                return HV_STATUS_INVALID_HYPERCALL_INPUT;

        vcpu_mask = all_cpus ? NULL :
                sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
                                        vp_bitmap, vcpu_bitmap);

        kvm_send_ipi_to_many(kvm, vector, vcpu_mask);

ret_success:
        return HV_STATUS_SUCCESS;
}
bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
        return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
}
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
        bool longmode;

        longmode = is_64_bit_mode(vcpu);
        if (longmode)
                kvm_register_write(vcpu, VCPU_REGS_RAX, result);
        else {
                kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
                kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
        }
}
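
/*
 * Note (added for clarity): in long mode the 64-bit hypercall result goes
 * entirely in RAX; for 32-bit guests it is split across EDX:EAX, mirroring
 * the input register convention used in kvm_hv_hypercall() below.
 */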
static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
        kvm_hv_hypercall_set_result(vcpu, result);
        ++vcpu->stat.hypercalls;
        return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
        return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}
static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
{
        struct eventfd_ctx *eventfd;

        if (unlikely(!fast)) {
                u64 gpa = param;
                int ret;

                if ((gpa & (__alignof__(param) - 1)) ||
                    offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
                        return HV_STATUS_INVALID_ALIGNMENT;

                ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
                if (ret < 0)
                        return HV_STATUS_INVALID_ALIGNMENT;
        }

        /*
         * Per spec, bits 32-47 contain the extra "flag number". However, we
         * have no use for it, and in all known usecases it is zero, so just
         * report lookup failure if it isn't.
         */
        if (param & 0xffff00000000ULL)
                return HV_STATUS_INVALID_PORT_ID;
        /* remaining bits are reserved-zero */
        if (param & ~KVM_HYPERV_CONN_ID_MASK)
                return HV_STATUS_INVALID_HYPERCALL_INPUT;

        /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
        rcu_read_lock();
        eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
        rcu_read_unlock();
        if (!eventfd)
                return HV_STATUS_INVALID_PORT_ID;

        eventfd_signal(eventfd, 1);
        return HV_STATUS_SUCCESS;
}
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
        u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
        uint16_t code, rep_idx, rep_cnt;
        bool fast, longmode, rep;

        /*
         * hypercall generates UD from non zero cpl and real mode
         * per HYPER-V spec
         */
        if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        longmode = is_64_bit_mode(vcpu);

        if (!longmode) {
                param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
                ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
                outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
        }
#ifdef CONFIG_X86_64
        else {
                param = kvm_register_read(vcpu, VCPU_REGS_RCX);
                ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
                outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
        }
#endif

        code = param & 0xffff;
        fast = !!(param & HV_HYPERCALL_FAST_BIT);
        rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
        rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
        rep = !!(rep_cnt || rep_idx);

        trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

        switch (code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
                if (unlikely(rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                kvm_vcpu_on_spin(vcpu, true);
                break;
        case HVCALL_SIGNAL_EVENT:
                if (unlikely(rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
                if (ret != HV_STATUS_INVALID_PORT_ID)
                        break;
                /* fall through - maybe userspace knows this conn_id. */
        case HVCALL_POST_MESSAGE:
                /* don't bother userspace if it has no way to handle it */
                if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                vcpu->run->exit_reason = KVM_EXIT_HYPERV;
                vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
                vcpu->run->hyperv.u.hcall.input = param;
                vcpu->run->hyperv.u.hcall.params[0] = ingpa;
                vcpu->run->hyperv.u.hcall.params[1] = outgpa;
                vcpu->arch.complete_userspace_io =
                                kvm_hv_hypercall_complete_userspace;
                return 0;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
                if (unlikely(fast || !rep_cnt || rep_idx)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
                break;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
                if (unlikely(fast || rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
                break;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
                if (unlikely(fast || !rep_cnt || rep_idx)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
                break;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
                if (unlikely(fast || rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
                break;
        case HVCALL_SEND_IPI:
                if (unlikely(rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
                break;
        case HVCALL_SEND_IPI_EX:
                if (unlikely(fast || rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
                break;
        default:
                ret = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }

        return kvm_hv_hypercall_complete(vcpu, ret);
}
void kvm_hv_init_vm(struct kvm *kvm)
{
        mutex_init(&kvm->arch.hyperv.hv_lock);
        idr_init(&kvm->arch.hyperv.conn_to_evt);
}
void kvm_hv_destroy_vm(struct kvm *kvm)
{
        struct eventfd_ctx *eventfd;
        int i;

        idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i)
                eventfd_ctx_put(eventfd);
        idr_destroy(&kvm->arch.hyperv.conn_to_evt);
}
static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct eventfd_ctx *eventfd;
        int ret;

        eventfd = eventfd_ctx_fdget(fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&hv->hv_lock);
        ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
                        GFP_KERNEL_ACCOUNT);
        mutex_unlock(&hv->hv_lock);

        if (ret >= 0)
                return 0;

        if (ret == -ENOSPC)
                ret = -EEXIST;
        eventfd_ctx_put(eventfd);
        return ret;
}
static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct eventfd_ctx *eventfd;

        mutex_lock(&hv->hv_lock);
        eventfd = idr_remove(&hv->conn_to_evt, conn_id);
        mutex_unlock(&hv->hv_lock);

        if (!eventfd)
                return -ENOENT;

        synchronize_srcu(&kvm->srcu);
        eventfd_ctx_put(eventfd);
        return 0;
}
*kvm
, struct kvm_hyperv_eventfd
*args
)
1777 if ((args
->flags
& ~KVM_HYPERV_EVENTFD_DEASSIGN
) ||
1778 (args
->conn_id
& ~KVM_HYPERV_CONN_ID_MASK
))
1781 if (args
->flags
== KVM_HYPERV_EVENTFD_DEASSIGN
)
1782 return kvm_hv_eventfd_deassign(kvm
, args
->conn_id
);
1783 return kvm_hv_eventfd_assign(kvm
, args
->conn_id
, args
->fd
);
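
/*
 * Usage sketch (added, not part of the kernel source): userspace wires a
 * connection id to an eventfd roughly as follows; error handling omitted
 * and the file descriptors are illustrative.
 *
 *	struct kvm_hyperv_eventfd hvevfd = {
 *		.conn_id = 1,
 *		.fd	 = eventfd(0, EFD_CLOEXEC),
 *		.flags	 = 0,
 *	};
 *	ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevfd);
 *
 * Passing KVM_HYPERV_EVENTFD_DEASSIGN in .flags removes the binding again.
 */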
int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
                                struct kvm_cpuid_entry2 __user *entries)
{
        uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
        struct kvm_cpuid_entry2 cpuid_entries[] = {
                { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
                { .function = HYPERV_CPUID_INTERFACE },
                { .function = HYPERV_CPUID_VERSION },
                { .function = HYPERV_CPUID_FEATURES },
                { .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
                { .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
                { .function = HYPERV_CPUID_NESTED_FEATURES },
        };
        int i, nent = ARRAY_SIZE(cpuid_entries);

        /* Skip NESTED_FEATURES if eVMCS is not supported */
        if (!evmcs_ver)
                --nent;

        if (cpuid->nent < nent)
                return -E2BIG;

        if (cpuid->nent > nent)
                cpuid->nent = nent;

        for (i = 0; i < nent; i++) {
                struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
                u32 signature[3];

                switch (ent->function) {
                case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
                        memcpy(signature, "Linux KVM Hv", 12);

                        ent->eax = HYPERV_CPUID_NESTED_FEATURES;
                        ent->ebx = signature[0];
                        ent->ecx = signature[1];
                        ent->edx = signature[2];
                        break;

                case HYPERV_CPUID_INTERFACE:
                        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
                        ent->eax = signature[0];
                        break;

                case HYPERV_CPUID_VERSION:
                        /*
                         * We implement some Hyper-V 2016 functions so let's use
                         * this version.
                         */
                        ent->eax = 0x00003839;
                        ent->ebx = 0x000A0000;
                        break;

                case HYPERV_CPUID_FEATURES:
                        ent->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
                        ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
                        ent->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
                        ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
                        ent->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
                        ent->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
                        ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
                        ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
                        ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
                        ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
                        ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;

                        ent->ebx |= HV_X64_POST_MESSAGES;
                        ent->ebx |= HV_X64_SIGNAL_EVENTS;

                        ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
                        ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
                        ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;

                        break;

                case HYPERV_CPUID_ENLIGHTMENT_INFO:
                        ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
                        ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
                        ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
                        ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
                        ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
                        if (evmcs_ver)
                                ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;

                        /*
                         * Default number of spinlock retry attempts, matches
                         * HyperV 2016.
                         */
                        ent->ebx = 0x00000FFF;

                        break;

                case HYPERV_CPUID_IMPLEMENT_LIMITS:
                        /* Maximum number of virtual processors */
                        ent->eax = KVM_MAX_VCPUS;
                        /*
                         * Maximum number of logical processors, matches
                         * HyperV 2016.
                         */
                        ent->ebx = 64;

                        break;

                case HYPERV_CPUID_NESTED_FEATURES:
                        ent->eax = evmcs_ver;

                        break;

                default:
                        break;
                }
        }

        if (copy_to_user(entries, cpuid_entries,
                         nent * sizeof(struct kvm_cpuid_entry2)))
                return -EFAULT;

        return 0;
}