arch/x86/hyperv/hv_spinlock.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Hyper-V specific spinlock code.
 *
 * Copyright (C) 2018, Intel, Inc.
 *
 * Author : Yi Sun <yi.y.sun@intel.com>
 */
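/*
 * Overview (summarizing the comments below): a vCPU that must wait for a
 * queued spinlock parks itself in hv_qlock_wait() by reading the
 * HV_X64_MSR_GUEST_IDLE MSR; the CPU releasing the lock wakes it with an
 * IPI via hv_qlock_kick().
 */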
#define pr_fmt(fmt) "Hyper-V: " fmt

#include <linux/spinlock.h>

#include <asm/mshyperv.h>
#include <asm/paravirt.h>
#include <asm/apic.h>
static bool __initdata hv_pvspin = true;
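/*
 * Kick side of the wait/kick protocol: the IPI terminates the 'idle'
 * state the target vCPU entered in hv_qlock_wait().
 */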
static void hv_qlock_kick(int cpu)
{
	apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
}
static void hv_qlock_wait(u8 *byte, u8 val)
{
	unsigned long msr_val;
	unsigned long flags;

	if (in_nmi())
		return;

	/*
	 * Reading HV_X64_MSR_GUEST_IDLE MSR tells the hypervisor that the
	 * vCPU can be put into 'idle' state. This 'idle' state is
	 * terminated by an IPI, usually from hv_qlock_kick(), even if
	 * interrupts are disabled on the vCPU.
	 *
	 * To prevent a race against the unlock path it is required to
	 * disable interrupts before accessing the HV_X64_MSR_GUEST_IDLE
	 * MSR. Otherwise, if the IPI from hv_qlock_kick() arrives between
	 * the lock value check and the rdmsrl() then the vCPU might be put
	 * into 'idle' state by the hypervisor and kept in that state for
	 * an unspecified amount of time.
	 */
	local_irq_save(flags);

	/*
	 * Only issue the rdmsrl() when the lock state has not changed.
	 */
	if (READ_ONCE(*byte) == val)
		rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);

	local_irq_restore(flags);
}
/*
 * Hyper-V does not support this so far.
 */
__visible bool hv_vcpu_is_preempted(int vcpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);
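/*
 * Install the paravirt qspinlock callbacks. This requires that
 * "hv_nopvspin" was not given on the command line, an APIC is present,
 * the hypervisor recommends cluster IPIs and the guest-idle MSR is
 * available (needed by hv_qlock_wait()).
 */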
void __init hv_init_spinlocks(void)
{
	if (!hv_pvspin || !apic ||
	    !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
	    !(ms_hyperv.features & HV_X64_MSR_GUEST_IDLE_AVAILABLE)) {
		pr_info("PV spinlocks disabled\n");
		return;
	}
	pr_info("PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = hv_qlock_wait;
	pv_ops.lock.kick = hv_qlock_kick;
	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
}
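/*
 * "hv_nopvspin" on the kernel command line disables PV spinlocks even
 * when the hypervisor supports them.
 */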
static __init int hv_parse_nopvspin(char *arg)
{
	hv_pvspin = false;
	return 0;
}
early_param("hv_nopvspin", hv_parse_nopvspin);
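/*
 * Note: hv_init_spinlocks() is called from outside this file; at the time
 * of this snapshot it is believed to be invoked from the Hyper-V platform
 * setup code in arch/x86/kernel/cpu/mshyperv.c when
 * CONFIG_PARAVIRT_SPINLOCKS is enabled.
 */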