/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */
#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>

#include <asm/timer.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
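
/*
 * apf_reason and steal_time are shared between guest and host: their
 * physical addresses are handed to the hypervisor below via
 * MSR_KVM_ASYNC_PF_EN and MSR_KVM_STEAL_TIME. The __aligned(64) keeps
 * each per-cpu copy cache-line aligned.
 */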
/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
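
/*
 * Tasks sleeping on a not-yet-present page are tracked in a small hash
 * table keyed by the async PF token, one bucket lock per entry. Lookups,
 * insertions, and wakeups below all run under b->lock.
 */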
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}
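
/*
 * kvm_async_pf_task_wait() and kvm_async_pf_task_wake() are the two halves
 * of the async PF handshake: the host reports a missing page together with
 * a token, the faulting side parks the task on the hashed swait queue for
 * that token, and the later "page ready" notification wakes it. If the
 * wakeup arrives first, the waker leaves a dummy node behind so the waiter
 * can return immediately instead of sleeping forever.
 */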
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* a dummy entry exists -> the wakeup was delivered ahead of the #PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swait_active(&n->wq))
		swake_up(&n->wq);
}
static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];

		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add a dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while another cpu
			 * handles the async PF, then retry.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		trace_do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		kvm_async_pf_task_wake((u32)read_cr2());
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);
static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	/*
	 * KVM isn't paravirt in the sense of paravirt_enabled. A KVM
	 * guest kernel works like a bare metal kernel with additional
	 * features, and paravirt_enabled is about features that are
	 * missing from bare metal.
	 */
	pv_info.paravirt_enabled = 0;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}
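
/*
 * Steal time accounting: each CPU publishes the physical address of its
 * kvm_steal_time area to the host through MSR_KVM_STEAL_TIME, with the
 * low KVM_MSR_ENABLED bit set. The host then keeps the structure updated
 * with the time this vCPU spent involuntarily preempted.
 */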
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}
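
/*
 * Paravirtual EOI: the guest publishes a per-cpu flag word through
 * MSR_KVM_PV_EOI_EN. When the host sets KVM_PV_EOI_BIT before injecting an
 * interrupt, the guest may acknowledge it by simply clearing the bit again,
 * skipping the (trapping) APIC EOI register write.
 */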
static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for locks or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}
static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	kvm_register_steal_time();
}
static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}
static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
	 * memory. The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}
static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};
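
/*
 * kvm_steal_clock() reads the host-updated steal value with a seqcount-style
 * retry loop: an odd version means the host is in the middle of an update,
 * and a version change across the read means the value may be torn, so the
 * read is retried until a consistent snapshot is seen.
 */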
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}
void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
}
static void kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
}
static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}
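
/*
 * kvm_guest_init() is the main guest-side setup hook: it probes the KVM
 * CPUID features and, for each one present, points the relevant pv_ops /
 * x86_init hooks at the KVM implementations above.
 */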
void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (cpu_has_hypervisor)
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}
bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}
const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);
static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);
#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}
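
/*
 * The kick above is the "wake" half of the paravirtual spinlock protocol
 * (KVM_FEATURE_PV_UNHALT): a waiter halts its vCPU instead of burning
 * cycles spinning, and the lock holder kicks the target vCPU's APIC ID via
 * the KVM_HC_KICK_CPU hypercall when the lock becomes available.
 */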
#ifdef CONFIG_QUEUED_SPINLOCKS

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * halt until it's our turn and we are kicked. Note that we do a safe
	 * halt for the irqs-enabled case to avoid hanging when the lock info
	 * is overwritten in the irq spinlock slowpath and no spurious
	 * interrupt occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}
#else /* !CONFIG_QUEUED_SPINLOCKS */

enum kvm_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};

#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS 30

static struct kvm_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;
static inline void check_zero(void)
{
	u8 ret;
	u8 old;

	old = READ_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one fellow resets the stat */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}
static inline u64 spin_time_start(void)
{
	return sched_clock();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index;

	index = ilog2(delta);

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u64 delta;

	delta = sched_clock() - start;
	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}
static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

static struct dentry *kvm_init_debugfs(void)
{
	d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
	if (!d_kvm_debug)
		printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");

	return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
	struct dentry *d_kvm;

	d_kvm = kvm_init_debugfs();
	if (d_kvm == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif /* CONFIG_KVM_DEBUG_FS */
struct kvm_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
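
/*
 * For the ticket-lock slowpath each waiting CPU records which lock and
 * which ticket it is waiting for in its klock_waiting slot and sets itself
 * in waiting_cpus; kvm_unlock_kick() scans that mask to find the CPU that
 * owns the ticket being released and kicks only that one.
 */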
__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	struct kvm_lock_waiting *w;
	int cpu;
	u64 start;
	unsigned long flags;
	__ticket_t head;

	w = this_cpu_ptr(&klock_waiting);
	cpu = smp_processor_id();
	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);

	/*
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	add_stats(TAKEN_SLOW, 1);

	/*
	 * This uses set_bit, which is atomic but we should not rely on its
	 * reordering guarantees. So a barrier is needed after this call.
	 */
	cpumask_set_cpu(cpu, &waiting_cpus);
	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/* make sure enter_slowpath, which is atomic, does not cross the read */
	smp_mb__after_atomic();

	/*
	 * check again to make sure it didn't become free while
	 * we weren't looking.
	 */
	head = READ_ONCE(lock->tickets.head);
	if (__tickets_equal(head, want)) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/*
	 * halt until it's our turn and we are kicked. Note that we do a safe
	 * halt for the irqs-enabled case to avoid hanging when the lock info
	 * is overwritten in the irq spinlock slowpath and no spurious
	 * interrupt occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;
	local_irq_restore(flags);
	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);
/* Kick a vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);
	for_each_cpu(cpu, &waiting_cpus) {
		const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);

		if (READ_ONCE(w->lock) == lock &&
		    READ_ONCE(w->want) == ticket) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			kvm_kick_cpu(cpu);
			break;
		}
	}
}

#endif /* !CONFIG_QUEUED_SPINLOCKS */
/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

#ifdef CONFIG_QUEUED_SPINLOCKS
	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;
#else /* !CONFIG_QUEUED_SPINLOCKS */
	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
	pv_lock_ops.unlock_kick = kvm_unlock_kick;
#endif
}
static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif /* CONFIG_PARAVIRT_SPINLOCKS */