/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

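/*
 * Illustrative sketch (not part of the original file): accesses to
 * per-vcpu state are bracketed by vcpu_load()/vcpu_put(), as the arch
 * ioctl handlers do, roughly:
 *
 *	vcpu_load(vcpu);
 *	... touch vcpu registers / arch state ...
 *	vcpu_put(vcpu);
 *
 * The pair takes the vcpu mutex and registers a preempt notifier so the
 * loaded guest state follows the task if the scheduler moves it.
 */
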
static void ack_flush(void *_completed)
{
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		return;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

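/*
 * Note: the flush itself is deferred. Each vcpu sees KVM_REQ_TLB_FLUSH in
 * vcpu->requests on its next guest entry and flushes there; the IPI sent
 * by smp_call_function_mask() only exists to kick currently-running vcpus
 * out of guest mode, which is why ack_flush() is empty.
 */
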
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		*memslot = old;
		goto out_free;
	}

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

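/*
 * Illustrative usage sketch (not from the original source): userspace
 * reaches __kvm_set_memory_region() through the KVM_SET_USER_MEMORY_REGION
 * vm ioctl, along the lines of:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 64 << 20,
 *		.userspace_addr  = (__u64)ram,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 *
 * where "ram" is anonymous memory obtained with mmap(). Both memory_size
 * and guest_phys_addr must be page aligned, per the sanity checks above.
 */
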
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&current->mm->mmap_sem);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&current->mm->mmap_sem);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

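/*
 * Worked example: the bitmap copied out above holds one bit per page in
 * the slot, rounded up to a BITS_PER_LONG multiple. On a 64-bit host, a
 * 64-page slot needs n = ALIGN(64, 64) / 8 = 8 bytes.
 */
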
int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

*gfn_to_memslot(struct kvm
*kvm
, gfn_t gfn
)
426 gfn
= unalias_gfn(kvm
, gfn
);
427 return __gfn_to_memslot(kvm
, gfn
);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

/*
 * Requires current->mm->mmap_sem to be held
 */
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return bad_page;
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (npages != 1) {
		get_page(bad_page);
		return bad_page;
	}

	return page[0];
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_page_dirty(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

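/*
 * Worked example (illustrative): with 4K pages, a copy of len = 5000
 * bytes starting at offset = 3000 into the first page is split by
 * next_segment() into 4096 - 3000 = 1096 bytes, then the remaining
 * 5000 - 1096 = 3904 bytes from the start of the next page.
 */
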
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

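/*
 * Usage sketch (illustrative only; "demo_desc" and "fetch_guest_desc" are
 * made-up names): kvm_read_guest() hides the page-crossing logic, so a
 * caller can pull a guest-physical structure over in one call:
 *
 *	struct demo_desc { u32 base; u32 limit; };
 *
 *	static int fetch_guest_desc(struct kvm *kvm, gpa_t gpa,
 *				    struct demo_desc *d)
 *	{
 *		return kvm_read_guest(kvm, gpa, d, sizeof(*d));
 *	}
 *
 * The copy may legitimately straddle a page boundary; the loop above
 * splits it per page via next_segment().
 */
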
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && !kvm_arch_vcpu_runnable(vcpu)) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* Pass p (NULL when no mask was supplied) so userspace can
		 * clear the sigset as well as set it. */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

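/*
 * Illustrative userspace sketch (not from the original source): the vcpu
 * ioctls above are typically driven from a VMM loop along the lines of:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		... handle mmio / pio / hlt exits ...
 *		}
 *	}
 *
 * where mmap_size comes from KVM_GET_VCPU_MMAP_SIZE on /dev/kvm, and the
 * shared run page is the one served by kvm_vcpu_fault() above.
 */
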
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm *kvm = vma->vm_file->private_data;
	struct page *page;

	if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
		return VM_FAULT_SIGBUS;
	page = gfn_to_page(kvm, vmf->pgoff);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return VM_FAULT_SIGBUS;
	}
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}
	kvm->filp = file;

	return fd;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension((long)argp);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

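/*
 * Illustrative userspace sketch (not from the original source): these are
 * the ioctls of the /dev/kvm character device itself, e.g.:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int ver    = ioctl(kvm_fd, KVM_GET_API_VERSION, 0);
 *	int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *
 * KVM_CREATE_VM returns a new file descriptor whose ioctls are handled by
 * kvm_vm_ioctl() above.
 */
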
static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

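/*
 * Illustrative sketch (hypothetical device, not from the original source):
 * an emulated device supplies a struct kvm_io_device with in_range(),
 * read() and write() callbacks and hangs it off one of the buses, e.g.:
 *
 *	kvm_io_bus_register_dev(&kvm->pio_bus, &mydev->dev);
 *
 * kvm_io_bus_find_dev() then routes guest port or mmio accesses to the
 * first registered device whose in_range() claims the address.
 */
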
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static u64 vm_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		total += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static u64 vcpu_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

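/*
 * Note: these two hooks make vcpu state migrate with the task. When the
 * scheduler preempts a vcpu thread, kvm_sched_out() saves guest state via
 * kvm_arch_vcpu_put(); when the thread runs again, possibly on another
 * CPU, kvm_sched_in() reloads it with kvm_arch_vcpu_load().
 */
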
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 0, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

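/*
 * Illustrative sketch (simplified from the arch modules, not verbatim):
 * kvm_init() is called from an arch module's init routine, e.g.:
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				THIS_MODULE);
 *	}
 *
 * vcpu_size lets this generic code size kvm_vcpu_cache for the full
 * arch-specific vcpu structure, with fx_save-compatible alignment.
 */
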
void kvm_exit(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);