/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}
early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int __init parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}
early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
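
/*
 * apf_reason and steal_time are shared with the hypervisor: their guest
 * physical addresses are handed to the host through MSR_KVM_ASYNC_PF_EN
 * and MSR_KVM_STEAL_TIME below.  They are placed in decrypted memory so
 * the host can still access them when the guest runs with SEV memory
 * encryption; sev_map_percpu_data() takes care of the remapping before
 * the MSRs are written.
 */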

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
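
/*
 * Tasks that fault on a page the host has paged out ("page not present"
 * async PF) park themselves in this small hash table, keyed by the token
 * the host supplied.  The later "page ready" notification looks the same
 * token up and wakes the sleeper.
 */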

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 *		      (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exist -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
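
/*
 * Wake one sleeper: unhash the node first so the waiter's hlist_unhashed()
 * check makes it leave its wait loop.  Halted waiters sit in
 * native_safe_halt(), so they are poked with a reschedule IPI rather than
 * a swait wakeup.
 */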

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];

		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}
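
/*
 * Handle a "page ready" notification from the host.  A token of ~0 is a
 * broadcast and wakes every sleeper that parked on this CPU.  If the
 * wakeup arrives before the corresponding fault has been handled, a dummy
 * node is left behind so that kvm_async_pf_task_wait() can return
 * immediately once it runs.
 */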

void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
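
/*
 * Replacement #PF entry point installed by kvm_apf_trap_init().  A zero
 * reason means an ordinary page fault, which is forwarded to
 * do_page_fault(); otherwise the host injected the fault to tell us the
 * page is either gone ("not present": go to sleep) or back again
 * ("ready": wake the sleeper).
 */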

dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}
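
/*
 * Tell the hypervisor where this CPU's steal time accounting area lives.
 * The MSR value carries the physical address of the per-CPU
 * kvm_steal_time structure, with KVM_MSR_ENABLED set in the low bit to
 * arm it.
 */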

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}
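
/*
 * Per-CPU guest setup: register the async PF reason area, the PV EOI flag
 * word and the steal time area with the hypervisor for the current CPU.
 * On SMP this runs on the boot CPU from kvm_smp_prepare_boot_cpu() and on
 * every other CPU from the kvm_cpu_online() hotplug callback.
 */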

static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};
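
/*
 * Read the steal time published by the host for @cpu.  The host bumps
 * src->version before and after updating the record (a seqcount-style
 * protocol), so retry while the version is odd or changes under us.
 */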

static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory region pointed
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
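
/*
 * Paravirtual remote TLB flush: rather than sending an IPI to a vCPU the
 * host has preempted, set KVM_VCPU_FLUSH_TLB in its steal_time.preempted
 * byte and let the hypervisor do the flush when it next runs that vCPU.
 * Only the CPUs left in the mask get the conventional IPI based flush.
 */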

static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs. And
	 * queue flush_on_enter for pre-empted vCPUs
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}
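
/*
 * Main guest-side initialization, wired up below as
 * x86_hyper_kvm.init.guest_late_init.  Probes the KVM_FEATURE_* CPUID
 * bits and installs the corresponding paravirt hooks.
 */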

static void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);
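
/*
 * Allocate the per-CPU flush masks used by kvm_flush_tlb_others().  The
 * feature checks mirror the ones in kvm_guest_init() that decide whether
 * the paravirtual TLB flush hook is installed at all.
 */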

static __init int kvm_setup_pv_tlb_flush(void)
{
	int cpu;

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}
		pr_info("KVM setup pv remote TLB flush\n");
	}

	return 0;
}
arch_initcall(kvm_setup_pv_tlb_flush);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>
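
/*
 * Paravirtual qspinlock "wait" hook: halt this vCPU until the lock holder
 * kicks it via kvm_kick_cpu() (KVM_HC_KICK_CPU), instead of burning cycles
 * spinning on the lock byte.
 */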

static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_lock_ops.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */