arch/x86/kernel/kvm.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);
static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;
/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
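/*
 * Async page faults are tracked in a small hash table of sleeper lists,
 * keyed by the token the host passes with the fault.  A task that faults
 * on a page the host still has to bring in parks itself on one of these
 * lists until the matching "page ready" wakeup arrives.
 */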
struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};
static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}
/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 * (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exist -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}
static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}
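/*
 * Wake up the task waiting on @token, or - if the wakeup arrives before
 * the fault side managed to queue itself - leave a dummy entry behind so
 * kvm_async_pf_task_wait() can see the wakeup was already delivered.
 * A token of ~0 wakes every sleeper that went to sleep on this CPU.
 */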
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
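/*
 * Paravirtual #PF entry point.  Reasons other than the two async-PF
 * values (including 0) are treated as a normal page fault; otherwise the
 * faulting address carries the async-PF token used to park or wake the
 * affected task.
 */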
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code, address);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		kvm_async_pf_task_wait((u32)address, !user_mode(regs));
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)address);
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);
static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}
static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}
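/*
 * Per-CPU guest setup: tell the host where this CPU's apf_reason and
 * PV EOI words live (via MSR_KVM_ASYNC_PF_EN / MSR_KVM_PV_EOI_EN) and
 * register the steal time area if available.  Called on the boot CPU and
 * from the CPU hotplug online callback.
 */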
static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPTION
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}
static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}
static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}
static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}
static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};
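/*
 * Read the steal time of @cpu from the shared kvm_steal_time area.  The
 * version field works like a seqcount: the host is expected to make it odd
 * while updating the record and even once the update is complete, so we
 * retry until we observe a stable, even version.
 */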
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}
void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}
static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}
/*
 * Iterate through all possible CPUs and map the memory region pointed
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}
#ifdef CONFIG_SMP
#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
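/*
 * Send IPIs through the KVM_HC_SEND_IPI hypercall.  Destination APIC IDs
 * are collected into a bitmap covering a window of KVM_IPI_CLUSTER_SIZE
 * consecutive IDs starting at @min; whenever a target falls outside the
 * current window the bitmap is flushed with a hypercall and a new window
 * is started.
 */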
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
	}

	local_irq_restore(flags);
}
static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}
static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask new_mask;
	const struct cpumask *local_mask;

	cpumask_copy(&new_mask, mask);
	cpumask_clear_cpu(this_cpu, &new_mask);
	local_mask = &new_mask;
	__send_ipi_mask(local_mask, vector);
}
/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	pr_info("KVM setup pv IPIs\n");
}
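/*
 * PV variant of the smp_call_function IPI: after sending the IPIs the
 * normal way, yield to one preempted target vCPU via KVM_HC_SCHED_YIELD
 * so it gets a chance to run and handle the call sooner.
 */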
static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}
static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		static_branch_disable(&virt_spin_lock_key);
}
static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}
static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}
static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}
static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif
static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}
static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
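/*
 * PV remote TLB flush: vCPUs the host reports as preempted cannot be
 * running stale translations, so instead of IPIing them we set
 * KVM_VCPU_FLUSH_TLB in their steal_time.preempted word so the host can
 * flush the TLB when the vCPU is scheduled in again; everyone left in the
 * mask is flushed through the native path.
 */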
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs. And
	 * queue flush_on_enter for pre-empted vCPUs
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}
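/*
 * Late guest init: wire up the paravirt hooks (async PF, steal time,
 * PV TLB flush, PV EOI and the SMP callbacks) according to the features
 * the host advertises through KVM_CPUID_FEATURES.
 */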
static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_ops.time.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("KVM setup pv sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}
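/*
 * Cache the CPUID base so the hypervisor signature scan is only done once.
 */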
static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}
bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);
unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);
static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}
static void __init kvm_apic_init(void)
{
#if defined(CONFIG_SMP)
	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
		kvm_setup_pv_ipi();
#endif
}
static void __init kvm_init_platform(void)
{
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;
}
const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
	.init.init_platform	= kvm_init_platform,
};
static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);
static __init int kvm_setup_pv_tlb_flush(void)
{
	int cpu;

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}
		pr_info("KVM setup pv remote TLB flush\n");
	}

	return 0;
}
arch_initcall(kvm_setup_pv_tlb_flush);
#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}
#include <asm/qspinlock.h>
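/*
 * pvqspinlock wait hook: halt until kvm_kick_cpu() wakes us, but only
 * after re-checking that the lock byte still holds the value we went to
 * sleep on, so a kick that raced with us is not lost.
 */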
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}
#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif
/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		return;

	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
	if (num_possible_cpus() == 1)
		return;

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}
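/*
 * Guest-side haltpoll and host-side halt polling are redundant, so when
 * the guest enables haltpoll we ask the host to stop polling by writing 0
 * to MSR_KVM_POLL_CONTROL (and write 1 again on the disable path),
 * assuming the host advertises KVM_FEATURE_POLL_CONTROL.
 */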
void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("kvm: host does not support poll control\n");
		pr_err_once("kvm: host upgrade recommended\n");
		return;
	}

	/* Enable guest halt poll disables host halt poll */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);
void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Enable guest halt poll disables host halt poll */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif