/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>

#define MMU_QUEUE_SIZE 1024

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;

static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

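/*
 * MMU ops are buffered per-cpu in this queue and flushed to the host in
 * batches with a single hypercall; see kvm_deferred_mmu_op() and
 * mmu_queue_flush() below.
 */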
struct kvm_para_state {
	u8 mmu_queue[MMU_QUEUE_SIZE];
	int mmu_queue_len;
};

static DEFINE_PER_CPU(struct kvm_para_state, para_state);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);

static int has_steal_clock = 0;

static struct kvm_para_state *kvm_para_state(void)
{
	return &per_cpu(para_state, raw_smp_processor_id());
}

/*
 * No need for any "IO delay" on KVM.
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
	struct mm_struct *mm;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

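/*
 * Called from the async #PF handler when the host reports
 * KVM_PV_REASON_PAGE_NOT_PRESENT: the faulting page is being paged in
 * by the host, so sleep (or, when scheduling is impossible, halt) until
 * the matching "page ready" token arrives via kvm_async_pf_task_wake().
 */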
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);
	int cpu, idle;

	cpu = get_cpu();
	idle = idle_cpu(cpu);
	put_cpu();

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.mm = current->active_mm;
	n.halted = idle || preempt_count() > 1;
	atomic_inc(&n.mm->mm_count);
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

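/* Unhash the node and wake its sleeper; halted vcpus get an IPI instead. */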
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (!n->mm)
		return;
	mmdrop(n->mm);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];

		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}

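/*
 * Called when the host signals KVM_PV_REASON_PAGE_READY.  A token of ~0
 * means "wake everything on this cpu".  If the wakeup arrives before the
 * corresponding wait, a dummy node is left behind so the waiter can
 * detect the completed wakeup and return immediately.
 */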
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * Async PF was not yet handled.
		 * Add a dummy entry for the token.
		 */
		n = kmalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while another cpu
			 * handles the async PF.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		n->mm = NULL;
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	spin_unlock(&b->lock);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__get_cpu_var(apf_reason).enabled) {
		reason = __get_cpu_var(apf_reason).reason;
		__get_cpu_var(apf_reason).reason = 0;
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

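/*
 * Replacement #PF handler, installed via kvm_apf_trap_init() below.
 * A zero reason means a genuine page fault; otherwise the fault is a
 * host-injected async PF notification and CR2 carries the token.
 */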
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		kvm_async_pf_task_wait((u32)read_cr2());
		break;
	case KVM_PV_REASON_PAGE_READY:
		kvm_async_pf_task_wake((u32)read_cr2());
		break;
	}
}

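/*
 * Submit a buffer of MMU ops to the host via the KVM_HC_MMU_OP
 * hypercall.  The hypercall returns the number of bytes it consumed,
 * so the remainder is resubmitted until the whole buffer is processed.
 */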
static void kvm_mmu_op(void *buffer, unsigned len)
{
	int r;
	unsigned long a1, a2;

	do {
		a1 = __pa(buffer);
		a2 = 0;   /* on i386 __pa() always returns <4G */
		r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
		buffer += r;
		len -= r;
	} while (len);
}

static void mmu_queue_flush(struct kvm_para_state *state)
{
	if (state->mmu_queue_len) {
		kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
		state->mmu_queue_len = 0;
	}
}

static void kvm_deferred_mmu_op(void *buffer, int len)
{
	struct kvm_para_state *state = kvm_para_state();

	/* Outside lazy MMU mode, issue the op immediately. */
	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
		kvm_mmu_op(buffer, len);
		return;
	}
	/* Flush first if the queued ops plus this one would overflow. */
	if (state->mmu_queue_len + len > sizeof state->mmu_queue)
		mmu_queue_flush(state);
	memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
	state->mmu_queue_len += len;
}

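/*
 * Queue a WRITE_PTE op for the host.  The destination must be passed as
 * a physical address; with CONFIG_HIGHPTE the pte page may be a highmem
 * kmap_atomic mapping, where __pa() is not valid, so the physical
 * address is computed from the underlying struct page instead.
 */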
static void kvm_mmu_write(void *dest, u64 val)
{
	__u64 pte_phys;
	struct kvm_mmu_op_write_pte wpte;

#ifdef CONFIG_HIGHPTE
	struct page *page;
	unsigned long dst = (unsigned long) dest;

	page = kmap_atomic_to_page(dest);
	pte_phys = page_to_pfn(page);
	pte_phys <<= PAGE_SHIFT;
	pte_phys += (dst & ~(PAGE_MASK));
#else
	pte_phys = (unsigned long)__pa(dest);
#endif
	wpte.header.op = KVM_MMU_OP_WRITE_PTE;
	wpte.pte_val = val;
	wpte.pte_phys = pte_phys;

	kvm_deferred_mmu_op(&wpte, sizeof wpte);
}

/*
 * We only need to hook operations that are MMU writes.  We hook these so that
 * we can use lazy MMU mode to batch these operations.  We could probably
 * improve the performance of the host code if we used some of the information
 * here to simplify processing of batched writes.
 */
static void kvm_set_pte(pte_t *ptep, pte_t pte)
{
	kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pte)
{
	kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	kvm_mmu_write(pmdp, pmd_val(pmd));
}

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_pte_clear(struct mm_struct *mm,
			  unsigned long addr, pte_t *ptep)
{
	kvm_mmu_write(ptep, 0);
}

static void kvm_pmd_clear(pmd_t *pmdp)
{
	kvm_mmu_write(pmdp, 0);
}
#endif

static void kvm_set_pud(pud_t *pudp, pud_t pud)
{
	kvm_mmu_write(pudp, pud_val(pud));
}

#if PAGETABLE_LEVELS == 4
static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	kvm_mmu_write(pgdp, pgd_val(pgd));
}
#endif
#endif /* PAGETABLE_LEVELS >= 3 */

static void kvm_flush_tlb(void)
{
	struct kvm_mmu_op_flush_tlb ftlb = {
		.header.op = KVM_MMU_OP_FLUSH_TLB,
	};

	kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
}

static void kvm_release_pt(unsigned long pfn)
{
	struct kvm_mmu_op_release_pt rpt = {
		.header.op = KVM_MMU_OP_RELEASE_PT,
		.pt_phys = (u64)pfn << PAGE_SHIFT,
	};

	kvm_mmu_op(&rpt, sizeof rpt);
}

static void kvm_enter_lazy_mmu(void)
{
	paravirt_enter_lazy_mmu();
}

static void kvm_leave_lazy_mmu(void)
{
	struct kvm_para_state *state = kvm_para_state();

	mmu_queue_flush(state);
	paravirt_leave_lazy_mmu();
}

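/*
 * Install the paravirt hooks for every feature the host advertises
 * through the KVM CPUID leaves (queried via kvm_para_has_feature()).
 */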
static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

	if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
		pv_mmu_ops.set_pte = kvm_set_pte;
		pv_mmu_ops.set_pte_at = kvm_set_pte_at;
		pv_mmu_ops.set_pmd = kvm_set_pmd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
		pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
		pv_mmu_ops.pte_clear = kvm_pte_clear;
		pv_mmu_ops.pmd_clear = kvm_pmd_clear;
#endif
		pv_mmu_ops.set_pud = kvm_set_pud;
#if PAGETABLE_LEVELS == 4
		pv_mmu_ops.set_pgd = kvm_set_pgd;
#endif
#endif
		pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
		pv_mmu_ops.release_pte = kvm_release_pt;
		pv_mmu_ops.release_pmd = kvm_release_pt;
		pv_mmu_ops.release_pud = kvm_release_pt;

		pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
		pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
	}
#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

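/*
 * Tell the host where this cpu's steal-time accounting area lives by
 * writing its physical address, with the enable bit set, into
 * MSR_KVM_STEAL_TIME.  The host updates the area while the vcpu is
 * scheduled out.
 */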
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
	printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
	       cpu, (unsigned long)__pa(st));
}

void __cpuinit kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = __pa(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__get_cpu_var(apf_reason).enabled = 1;
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void *unused)
{
	if (!__get_cpu_var(apf_reason).enabled)
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__get_cpu_var(apf_reason).enabled = 0;

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_disable_apf, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

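/*
 * Read the steal time published by the host.  The version field acts as
 * a seqcount: it is odd while the host is updating the area, so retry
 * until the same even version is seen before and after reading "steal".
 */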
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
#ifdef CONFIG_KVM_CLOCK
	WARN_ON(kvm_register_clock("primary cpu clock"));
#endif
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
}

static void __cpuinit kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	kvm_pv_disable_apf(NULL);
	apf_task_wake_all();
}

static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
	.notifier_call	= kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, &async_page_fault);
}

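/*
 * Main guest-side entry point, called during early boot once a KVM
 * hypervisor has been detected: set up paravirt ops, the async PF
 * sleeper hash and trap handler, steal time, and cpu hotplug hooks.
 */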
void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif
}

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		jump_label_inc(&paravirt_steal_enabled);
		if (steal_acc)
			jump_label_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);