// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}
early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;
/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
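
/*
 * Tasks waiting on an async 'page not present' fault are parked on one of
 * KVM_TASK_SLEEP_HASHSIZE swait queues, hashed by the fault token.
 */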
struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						   u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}
/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 *		      (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
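
/*
 * Wake a single waiter: kick the CPU out of halt with a reschedule IPI if
 * it halted in kvm_async_pf_task_wait(), otherwise wake its swait queue.
 */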
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}
static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];

		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}
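
/*
 * Wake the task waiting on @token. If the 'page ready' notification arrives
 * before the corresponding 'page not present' fault was handled, queue a
 * dummy node so the later waiter returns immediately.
 */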
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
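
/*
 * Read and clear the async PF reason that the host wrote into this CPU's
 * shared apf_reason area before injecting the page fault.
 */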
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code, address);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		kvm_async_pf_task_wait((u32)address, !user_mode(regs));
		break;
	case KVM_PV_REASON_PAGE_READY:
		kvm_async_pf_task_wake((u32)address);
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);
static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}
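
/*
 * Tell the host where this CPU's steal time accounting area lives by
 * writing its physical address, with the enable bit set, to
 * MSR_KVM_STEAL_TIME.
 */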
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}
static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}
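
/*
 * Per-CPU guest setup: register the async PF shared area, the PV EOI flag
 * and the steal time area with the hypervisor through their MSRs.
 */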
static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPTION
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}
static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}
static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}
static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};
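
/*
 * Read the accumulated steal time for @cpu. The host updates the structure
 * under a version counter that is odd while an update is in progress, so
 * retry until a consistent snapshot is seen.
 */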
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}
void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}
static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}
/*
 * Iterate through all possible CPUs and map the memory region pointed
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}
static bool pv_tlb_flush_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}
static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

#ifdef CONFIG_SMP

static bool pv_ipi_supported(void)
{
	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
}
static bool pv_sched_yield_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
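
/*
 * Send an IPI to every CPU in @mask via the KVM_HC_SEND_IPI hypercall.
 * Destination APIC IDs are collected into a 128-bit bitmap anchored at
 * 'min'; whenever the next ID does not fit in the current cluster, the
 * pending bitmap is flushed with one hypercall and a new cluster started.
 */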
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
	__uint128_t ipi_bitmap = 0;
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	icr = APIC_DM_FIXED | vector;

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
	}

	local_irq_restore(flags);
}
static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
	const struct cpumask *local_mask;

	cpumask_copy(new_mask, mask);
	cpumask_clear_cpu(this_cpu, new_mask);
	local_mask = new_mask;
	__send_ipi_mask(local_mask, vector);
}
/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	pr_info("KVM setup pv IPIs\n");
}
static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}
static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		static_branch_disable(&virt_spin_lock_key);
}
static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}
static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}
static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif	/* CONFIG_SMP */
static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}
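
/*
 * PV remote TLB flush: CPUs whose vCPU is currently preempted are dropped
 * from the flush mask; setting KVM_VCPU_FLUSH_TLB makes the host flush the
 * vCPU's TLB when it is scheduled in again.
 */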
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
				 const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs. And
	 * queue flush_on_enter for pre-empted vCPUs
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}
static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_ops.time.steal_clock = kvm_steal_clock;
	}

	if (pv_tlb_flush_supported()) {
		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
		pr_info("KVM setup pv remote TLB flush\n");
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (pv_sched_yield_supported()) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("KVM setup pv sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}
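
/*
 * Locate the KVM CPUID leaf by scanning the hypervisor CPUID range for the
 * "KVMKVMKVM" signature. Returns 0 when not running on KVM.
 */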
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}
bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);
static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#if defined(CONFIG_SMP)
	if (pv_ipi_supported())
		kvm_setup_pv_ipi();
#endif
}

static void __init kvm_init_platform(void)
{
	x86_platform.apic_post_init = kvm_apic_init;
}
const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
	.init.init_platform	= kvm_init_platform,
};
static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);
static __init int kvm_alloc_cpumask(void)
{
	int cpu;
	bool alloc = false;

	if (!kvm_para_available() || nopv)
		return 0;

	if (pv_tlb_flush_supported())
		alloc = true;

#if defined(CONFIG_SMP)
	if (pv_ipi_supported())
		alloc = true;
#endif

	if (alloc)
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}

	return 0;
}
arch_initcall(kvm_alloc_cpumask);
#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}
791 #include <asm/qspinlock.h>
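
/*
 * PV wait: instead of spinning on the lock byte, halt the vCPU until the
 * lock holder kicks it via KVM_HC_KICK_CPU.
 */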
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}
#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif
/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		return;

	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
	if (num_possible_cpus() == 1)
		return;

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
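
/*
 * MSR_KVM_POLL_CONTROL tells the host whether to halt-poll on behalf of
 * this vCPU: writing 0 disables host-side polling while the guest polls
 * itself, writing 1 re-enables it.
 */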
#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("kvm: host does not support poll control\n");
		pr_err_once("kvm: host upgrade recommended\n");
		return;
	}

	/* Enabling guest halt poll disables host halt poll */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Disabling guest halt poll re-enables host halt poll */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);

#endif