// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#define pr_fmt(fmt) "kvm-guest: " fmt

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <linux/syscore_ops.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <asm/svm.h>
#include <asm/e820/api.h>

DEFINE_STATIC_KEY_FALSE_RO(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}
early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;

static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_READ_MOSTLY(bool, async_pf_enabled);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

static int has_guest_poll = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

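/*
 * Queue @n in the per-CPU hash bucket for @token. Returns false if a wakeup
 * for this token already arrived (a dummy entry is found), in which case the
 * caller does not need to sleep.
 */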
static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *e;

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> the wakeup was delivered ahead of the #PF */
		hlist_del(&e->link);
		raw_spin_unlock(&b->lock);
		kfree(e);
		return false;
	}

	n->token = token;
	n->cpu = smp_processor_id();
	init_swait_queue_head(&n->wq);
	hlist_add_head(&n->link, &b->list);
	raw_spin_unlock(&b->lock);
	return true;
}

/*
 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
 * @token:	Token to identify the sleep node entry
 *
 * Invoked from the async pagefault handling code or from the VM exit page
 * fault handler. In both cases RCU is watching.
 */
void kvm_async_pf_task_wait_schedule(u32 token)
{
	struct kvm_task_sleep_node n;
	DECLARE_SWAITQUEUE(wait);

	lockdep_assert_irqs_disabled();

	if (!kvm_async_pf_queue_task(token, &n))
		return;

	for (;;) {
		prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		local_irq_enable();
		schedule();
		local_irq_disable();
	}
	finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

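/*
 * Wake every task that is waiting for an async #PF on the current CPU, e.g.
 * when the wake-all token (~0) is received or the CPU is going offline.
 */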
static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		struct kvm_task_sleep_node *n;
		struct hlist_node *p, *next;

		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			n = hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

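/*
 * Called when the host signals that the page for @token is present again.
 * If no task is sleeping on the token yet, a dummy node is queued so that a
 * later kvm_async_pf_task_wait_schedule() returns immediately.
 */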
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n, *dummy = NULL;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * Async #PF not yet handled, add a dummy entry for the token.
		 * Allocating the token must be done outside of the raw lock
		 * as the allocator is preemptible on PREEMPT_RT kernels.
		 */
		if (!dummy) {
			raw_spin_unlock(&b->lock);
			dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);

			/*
			 * Continue looping on allocation failure, eventually
			 * the async #PF will be handled and allocating a new
			 * node will be unnecessary.
			 */
			if (!dummy)
				cpu_relax();

			/*
			 * Recheck for async #PF completion before enqueueing
			 * the dummy token to avoid duplicate list entries.
			 */
			goto again;
		}
		dummy->token = token;
		dummy->cpu = smp_processor_id();
		init_swait_queue_head(&dummy->wq);
		hlist_add_head(&dummy->link, &b->list);
		dummy = NULL;
	} else {
		apf_task_wake_one(n);
	}
	raw_spin_unlock(&b->lock);

	/* A dummy token might be allocated and ultimately not used. */
	kfree(dummy);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

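/*
 * Read and clear the per-CPU async #PF reason flags shared with the host.
 * Returns 0 when async #PF is not enabled on this CPU.
 */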
noinstr u32 kvm_read_and_reset_apf_flags(void)
{
	u32 flags = 0;

	if (__this_cpu_read(async_pf_enabled)) {
		flags = __this_cpu_read(apf_reason.flags);
		__this_cpu_write(apf_reason.flags, 0);
	}

	return flags;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);

noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	u32 flags = kvm_read_and_reset_apf_flags();
	irqentry_state_t state;

	if (!flags)
		return false;

	state = irqentry_enter(regs);
	instrumentation_begin();

	/*
	 * If the host managed to inject an async #PF into an interrupt
	 * disabled region, then die hard as this is not going to end well
	 * and the host side is seriously broken.
	 */
	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
		panic("Host injected async #PF in interrupt disabled region\n");

	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		if (unlikely(!(user_mode(regs))))
			panic("Host injected async #PF in kernel mode\n");
		/* Page is swapped out by the host. */
		kvm_async_pf_task_wait_schedule(token);
	} else {
		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
	}

	instrumentation_end();
	irqentry_exit(regs, state);
	return true;
}

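/*
 * Interrupt handler for the "page ready" notifications delivered through
 * the hypervisor callback vector.
 */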
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	u32 token;

	apic_eoi();

	inc_irq_stat(irq_hv_callback_count);

	if (__this_cpu_read(async_pf_enabled)) {
		token = __this_cpu_read(apf_reason.token);
		kvm_async_pf_task_wake(token);
		__this_cpu_write(apf_reason.token, 0);
		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
	}

	set_irq_regs(old_regs);
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_debug("stealtime: cpu %d, msr %llx\n", cpu,
		 (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace __maybe_unused void kvm_guest_apic_eoi_write(void)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	native_apic_msr_write(APIC_EOI, APIC_EOI_ACK);
}

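/*
 * Per-CPU enablement of the paravirt features: async #PF delivery, PV EOI
 * and steal time accounting.
 */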
static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		u64 pa;

		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
		pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(async_pf_enabled, true);
		pr_debug("setup async PF for cpu %d\n", smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(async_pf_enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(async_pf_enabled, false);

	pr_debug("disable async PF for cpu %d\n", smp_processor_id());
}

static void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

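/*
 * Read the steal time published by the host, retrying while the version
 * field indicates a concurrent update (odd or changed version).
 */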
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory region pointed
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (cc_vendor != CC_VENDOR_AMD ||
	    !cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

static void kvm_guest_cpu_offline(bool shutdown)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
		wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0);
	kvm_pv_disable_apf();
	if (!shutdown)
		apf_task_wake_all();
	kvmclock_disable();
}

static int kvm_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_init();
	local_irq_restore(flags);
	return 0;
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

static bool pv_tlb_flush_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
		!boot_cpu_has(X86_FEATURE_MWAIT) &&
		(num_possible_cpus() != 1));
}

static bool pv_ipi_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) &&
		(num_possible_cpus() != 1));
}

static bool pv_sched_yield_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
		!boot_cpu_has(X86_FEATURE_MWAIT) &&
		(num_possible_cpus() != 1));
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

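/*
 * Send an IPI to every CPU in @mask with as few hypercalls as possible by
 * packing APIC IDs into bitmap clusters of KVM_IPI_CLUSTER_SIZE bits.
 */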
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	u32 apic_id, icr;
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
				  ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
			  ret);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
	const struct cpumask *local_mask;

	cpumask_copy(new_mask, mask);
	cpumask_clear_cpu(this_cpu, new_mask);
	local_mask = new_mask;
	__send_ipi_mask(local_mask, vector);
}

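/*
 * Tell the host that SEV live migration is supported when the EFI variable
 * set by the guest firmware (OVMF) indicates that it has been enabled.
 */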
static int __init setup_efi_kvm_sev_migration(void)
{
	efi_char16_t efi_sev_live_migration_enabled[] = L"SevLiveMigrationEnabled";
	efi_guid_t efi_variable_guid = AMD_SEV_MEM_ENCRYPT_GUID;
	efi_status_t status;
	unsigned long size;
	bool enabled;

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) ||
	    !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
		return 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
		pr_info("%s : EFI runtime services are not enabled\n", __func__);
		return 0;
	}

	size = sizeof(enabled);

	/* Get variable contents into buffer */
	status = efi.get_variable(efi_sev_live_migration_enabled,
				  &efi_variable_guid, NULL, &size, &enabled);

	if (status == EFI_NOT_FOUND) {
		pr_info("%s : EFI live migration variable not found\n", __func__);
		return 0;
	}

	if (status != EFI_SUCCESS) {
		pr_info("%s : EFI variable retrieval failed\n", __func__);
		return 0;
	}

	if (enabled == 0) {
		pr_info("%s: live migration disabled in EFI\n", __func__);
		return 0;
	}

	pr_info("%s : live migration enabled in EFI\n", __func__);
	wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY);

	return 1;
}

late_initcall(setup_efi_kvm_sev_migration);

/*
 * Set the IPI entry points
 */
static __init void kvm_setup_pv_ipi(void)
{
	apic_update_callback(send_IPI_mask, kvm_send_ipi_mask);
	apic_update_callback(send_IPI_mask_allbutself, kvm_send_ipi_mask_allbutself);
	pr_info("setup PV IPIs\n");
}

static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}

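/*
 * PV TLB flush: skip IPIs to preempted vCPUs by setting KVM_VCPU_FLUSH_TLB
 * in their steal_time record; the host flushes their TLB when they are
 * scheduled in again.
 */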
static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs, and
	 * queue flush_on_enter for preempted vCPUs.
	 */
	for_each_cpu(cpu, flushmask) {
		/*
		 * The local vCPU is never preempted, so we do not explicitly
		 * skip check for local vCPU - it will never be cleared from
		 * flushmask.
		 */
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_multi(flushmask, info);
}

static __init int kvm_alloc_cpumask(void)
{
	int cpu;

	if (!kvm_para_available() || nopv)
		return 0;

	if (pv_tlb_flush_supported() || pv_ipi_supported())
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}

	return 0;
}
arch_initcall(kvm_alloc_cpumask);

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_offline(false);
	local_irq_restore(flags);
	return 0;
}

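/*
 * Syscore suspend/resume: tear down the PV features on suspend and remember
 * the halt-poll state so it can be restored on resume.
 */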
static int kvm_suspend(void)
{
	u64 val = 0;

	kvm_guest_cpu_offline(false);

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
	if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		rdmsrl(MSR_KVM_POLL_CONTROL, val);
	has_guest_poll = !(val & 1);
#endif
	return 0;
}

static void kvm_resume(void)
{
	kvm_cpu_online(raw_smp_processor_id());

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
	if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
		wrmsrl(MSR_KVM_POLL_CONTROL, 0);
#endif
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend	= kvm_suspend,
	.resume		= kvm_resume,
};

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	kvm_guest_cpu_offline(true);
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

/*
 * After a PV feature is registered, the host will keep writing to the
 * registered memory location. If the guest happens to shutdown, this memory
 * won't be valid. In cases like kexec, where a new kernel is installed, this
 * means a random memory location would keep being written to.
 */
#ifdef CONFIG_CRASH_DUMP
static void kvm_crash_shutdown(struct pt_regs *regs)
{
	kvm_guest_cpu_offline(true);
	native_machine_crash_shutdown(regs);
}
#endif

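/*
 * vcpu_is_preempted() backend: report whether the host has preempted @cpu,
 * based on the KVM_VCPU_PREEMPTED flag in its steal_time record.
 */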
#if defined(CONFIG_X86_32) || !defined(CONFIG_SMP)
bool __kvm_vcpu_is_preempted(long cpu);

__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
#define PV_VCPU_PREEMPTED_ASM						     \
 "movq __per_cpu_offset(,%rdi,8), %rax\n\t"				     \
 "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t"   \
 "setne %al\n\t"

DEFINE_ASM_FUNC(__raw_callee_save___kvm_vcpu_is_preempted,
		PV_VCPU_PREEMPTED_ASM, .text);
#endif

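/*
 * Main guest-side initialization, run as the hypervisor's guest_late_init
 * hook: wires up all paravirt callbacks advertised through CPUID.
 */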
static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		static_call_update(pv_steal_clock, kvm_steal_clock);

		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_update_callback(eoi, kvm_guest_apic_eoi_write);

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		static_branch_enable(&kvm_async_pf_enabled);
		sysvec_install(HYPERVISOR_CALLBACK_VECTOR, sysvec_kvm_asyncpf_interrupt);
	}

#ifdef CONFIG_SMP
	if (pv_tlb_flush_supported()) {
		pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
		pr_info("KVM setup pv remote TLB flush\n");
	}

	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (pv_sched_yield_supported()) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("setup PV sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

#ifdef CONFIG_CRASH_DUMP
	machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif

	register_syscore_ops(&kvm_syscore_ops);

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base(KVM_SIGNATURE, 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#ifdef CONFIG_SMP
	if (pv_ipi_supported())
		kvm_setup_pv_ipi();
#endif
}

static bool __init kvm_msi_ext_dest_id(void)
{
	return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
}

static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
{
	kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
			   KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
}

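/*
 * Early platform setup: report kernel page encryption status to the host
 * for SEV live migration and register the APIC post-init hook.
 */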
static void __init kvm_init_platform(void)
{
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
	    kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
		unsigned long nr_pages;
		int i;

		pv_ops.mmu.notify_page_enc_status_changed =
			kvm_sev_hc_page_enc_status;

		/*
		 * Reset the host's shared pages list related to kernel
		 * specific page encryption status settings before we load a
		 * new kernel by kexec. Reset the page encryption status
		 * during early boot instead of just before kexec to avoid SMP
		 * races during kvm_pv_guest_cpu_reboot().
		 * NOTE: We cannot reset the complete shared pages list
		 * here as we need to retain the UEFI/OVMF firmware
		 * specific settings.
		 */

		for (i = 0; i < e820_table->nr_entries; i++) {
			struct e820_entry *entry = &e820_table->entries[i];

			if (entry->type != E820_TYPE_RAM)
				continue;

			nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE);

			kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr,
				       nr_pages,
				       KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
		}

		/*
		 * Ensure that _bss_decrypted section is marked as decrypted in the
		 * shared pages list.
		 */
		early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
						__end_bss_decrypted - __start_bss_decrypted, 0);

		/*
		 * If not booted using EFI, enable Live migration support.
		 */
		if (!efi_enabled(EFI_BOOT))
			wrmsrl(MSR_KVM_MIGRATION_CONTROL,
			       KVM_MIGRATION_READY);
	}

	x86_platform.apic_post_init = kvm_apic_init;

	/* Set WB as the default cache mode for SEV-SNP and TDX */
	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}

#if defined(CONFIG_AMD_MEM_ENCRYPT)
static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* RAX and CPL are already in the GHCB */
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
}

static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* No checking of the return state needed */
	return true;
}
#endif

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name				= "KVM",
	.detect				= kvm_detect,
	.type				= X86_HYPER_KVM,
	.init.guest_late_init		= kvm_guest_init,
	.init.x2apic_available		= kvm_para_available,
	.init.msi_ext_dest_id		= kvm_msi_ext_dest_id,
	.init.init_platform		= kvm_init_platform,
#if defined(CONFIG_AMD_MEM_ENCRYPT)
	.runtime.sev_es_hcall_prepare	= kvm_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish	= kvm_sev_es_hcall_finish,
#endif
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	unsigned long flags = 0;
	u32 apicid;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

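/*
 * kvm_wait(), defined below, halts the vCPU while the lock byte still holds
 * the expected value; kvm_kick_cpu() (or any interrupt) wakes it up again.
 */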
#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	if (in_nmi())
		return;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occurs to save us.
	 */
	if (irqs_disabled()) {
		if (READ_ONCE(*ptr) == val)
			halt();
	} else {
		local_irq_disable();

		/* safe_halt() will enable IRQ */
		if (READ_ONCE(*ptr) == val)
			safe_halt();
		else
			local_irq_enable();
	}
}

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/*
	 * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
	 * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
	 * preferred over native qspinlock when vCPU is preempted.
	 */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
		pr_info("PV spinlocks disabled, no host support\n");
		return;
	}

	/*
	 * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
	 * are available.
	 */
	if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
		pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
		goto out;
	}

	if (num_possible_cpus() == 1) {
		pr_info("PV spinlocks disabled, single CPU\n");
		goto out;
	}

	if (nopvspin) {
		pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
		goto out;
	}

	pr_info("PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	/*
	 * When PV spinlock is enabled which is preferred over
	 * virt_spin_lock(), virt_spin_lock_key's value is meaningless.
	 * Just disable it anyway.
	 */
out:
	static_branch_disable(&virt_spin_lock_key);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("host does not support poll control\n");
		pr_err_once("host upgrade recommended\n");
		return;
	}

	/* Enabling guest halt poll disables host halt poll */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Disabling guest halt poll re-enables host halt poll */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif