/*
 * Kernel-based Virtual Machine control library
 *
 * This library provides an API to control the kvm hardware virtualization
 * module.
 *
 * Copyright (C) 2006 Qumranet
 *
 * Authors:
 *  Avi Kivity <avi@qumranet.com>
 *  Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the GNU LGPL license, version 2.
 */
#define __user /* temporary, until installed via make headers_install */

#include <linux/kvm.h>

#define EXPECTED_KVM_API_VERSION 12

#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
#error libkvm: userspace and kernel version mismatch
#endif

#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/ioctl.h>

#include "libkvm.h"
#include "kvm-abi-10.h"

#if defined(__x86_64__) || defined(__i386__)
#define PAGE_SIZE 4096ul
#define PAGE_MASK (~(PAGE_SIZE - 1))
#endif
int kvm_abi = EXPECTED_KVM_API_VERSION;

int free_slots[KVM_MAX_NUM_MEM_REGIONS];
unsigned long phys_addr_slots[KVM_MAX_NUM_MEM_REGIONS];
static void init_slots(void)
{
        int i;

        for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
                free_slots[i] = 0;
}
int get_free_slot(kvm_context_t kvm)
{
        int i;
        int tss_ext;

#ifdef KVM_CAP_SET_TSS_ADDR
        tss_ext = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
#else
        tss_ext = 0;
#endif

        /*
         * on older kernels where the set tss ioctl is not supported we must
         * save slot 0 to hold the extended memory, as the vmx will use the
         * last 3 pages of this slot.
         */
        if (tss_ext > 0)
                i = 0;
        else
                i = 1;

        for (; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
                if (!free_slots[i])
                        return i;

        return -1;
}
void register_slot(int slot, unsigned long phys_addr)
{
        free_slots[slot] = 1;
        phys_addr_slots[slot] = phys_addr;
}
int get_slot(unsigned long phys_addr)
{
        int i;

        for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
                if (free_slots[i] && phys_addr_slots[i] == phys_addr)
                        return i;

        return -1;
}
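/*
 * Together these helpers form the slot registry: get_free_slot() picks an
 * unused index (reserving slot 0 on older kernels without
 * KVM_CAP_SET_TSS_ADDR), register_slot() marks it in use and records its
 * guest physical base, and get_slot() maps a guest physical address back to
 * its slot, or -1. A minimal sketch of the lifecycle (illustrative only;
 * "kvm" is assumed to be an initialized context):
 *
 *	int slot = get_free_slot(kvm);
 *
 *	register_slot(slot, 0x100000);
 *	assert(get_slot(0x100000) == slot);
 */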
/*
 * memory regions parameters
 */
void kvm_memory_region_save_params(kvm_context_t kvm,
                                   struct kvm_memory_region *mem)
{
        if (!mem || (mem->slot >= KVM_MAX_NUM_MEM_REGIONS)) {
                fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
                return;
        }
        kvm->mem_regions[mem->slot] = *mem;
}
#ifdef KVM_CAP_USER_MEMORY
void kvm_userspace_memory_region_save_params(kvm_context_t kvm,
                                struct kvm_userspace_memory_region *mem)
{
        struct kvm_memory_region kvm_mem;

        kvm_mem.slot = mem->slot;
        kvm_mem.memory_size = mem->memory_size;
        kvm_mem.guest_phys_addr = mem->guest_phys_addr;

        kvm_memory_region_save_params(kvm, &kvm_mem);
}
#endif /* KVM_CAP_USER_MEMORY */
void kvm_memory_region_clear_params(kvm_context_t kvm, int regnum)
{
        if (regnum >= KVM_MAX_NUM_MEM_REGIONS) {
                fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
                return;
        }
        kvm->mem_regions[regnum].memory_size = 0;
}
/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(kvm_context_t kvm, int regnum, __u32 flag)
{
        int r;
        struct kvm_memory_region *mem;

        if (regnum >= KVM_MAX_NUM_MEM_REGIONS) {
                fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
                return 1;
        }
        mem = &kvm->mem_regions[regnum];
        if (mem->memory_size == 0) /* not used */
                return 0;
        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) /* log already enabled */
                return 0;

        mem->flags |= flag; /* temporarily turn the flag on */
        r = ioctl(kvm->vm_fd, KVM_SET_MEMORY_REGION, mem);
        mem->flags &= ~flag; /* back to previous value */
        if (r == -1)
                fprintf(stderr, "%s: %m\n", __FUNCTION__);
        return r;
}
static int kvm_dirty_pages_log_change_all(kvm_context_t kvm, __u32 flag)
{
        int i, r;

        for (i = r = 0; i < KVM_MAX_NUM_MEM_REGIONS && r == 0; i++) {
                r = kvm_dirty_pages_log_change(kvm, i, flag);
        }
        return r;
}
/*
 * Enable dirty page logging for all memory regions
 */
int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
{
        if (kvm->dirty_pages_log_all)
                return 0;
        kvm->dirty_pages_log_all = 1;
        return kvm_dirty_pages_log_change_all(kvm, KVM_MEM_LOG_DIRTY_PAGES);
}
/*
 * Enable dirty page logging only for memory regions that were created with
 * dirty logging enabled (disable for all other memory regions).
 */
int kvm_dirty_pages_log_reset(kvm_context_t kvm)
{
        if (!kvm->dirty_pages_log_all)
                return 0;
        kvm->dirty_pages_log_all = 0;
        return kvm_dirty_pages_log_change_all(kvm, 0);
}
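/*
 * Typical use of the two entry points above during migration (illustrative
 * only): log everything while copying memory, then return to the per-region
 * settings saved at creation time.
 *
 *	kvm_dirty_pages_log_enable_all(kvm);
 *	... repeatedly harvest bitmaps with kvm_get_dirty_pages() ...
 *	kvm_dirty_pages_log_reset(kvm);
 */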
kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
                       void *opaque)
{
        int fd;
        kvm_context_t kvm;
        int r;

        fd = open("/dev/kvm", O_RDWR);
        if (fd == -1) {
                perror("open /dev/kvm");
                return NULL;
        }
        r = ioctl(fd, KVM_GET_API_VERSION, 0);
        if (r == -1) {
                fprintf(stderr, "kvm kernel version too old: "
                        "KVM_GET_API_VERSION ioctl not supported\n");
                goto out_close;
        }
        if (r < EXPECTED_KVM_API_VERSION && r != 10) {
                fprintf(stderr, "kvm kernel version too old: "
                        "We expect API version %d or newer, but got "
                        "version %d\n",
                        EXPECTED_KVM_API_VERSION, r);
                goto out_close;
        }
        if (r > EXPECTED_KVM_API_VERSION) {
                fprintf(stderr, "kvm userspace version too old\n");
                goto out_close;
        }
        kvm_abi = r;
        kvm = malloc(sizeof(*kvm));
        if (!kvm)
                goto out_close;
        kvm->fd = fd;
        kvm->vm_fd = -1;
        kvm->callbacks = callbacks;
        kvm->opaque = opaque;
        kvm->dirty_pages_log_all = 0;
        kvm->no_irqchip_creation = 0;
        memset(&kvm->mem_regions, 0, sizeof(kvm->mem_regions));

        return kvm;

out_close:
        close(fd);
        return NULL;
}
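/*
 * A minimal initialization sketch (illustrative only; "my_inb", "my_outb"
 * and "my_cookie" are hypothetical caller-supplied names). The callback
 * table is how libkvm hands device I/O and other events back to the user:
 *
 *	static struct kvm_callbacks my_callbacks = {
 *		.inb  = my_inb,
 *		.outb = my_outb,
 *		... remaining handlers ...
 *	};
 *
 *	kvm_context_t kvm = kvm_init(&my_callbacks, my_cookie);
 *	if (!kvm)
 *		exit(1);
 */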
void kvm_finalize(kvm_context_t kvm)
{
        if (kvm->vcpu_fd[0] != -1)
                close(kvm->vcpu_fd[0]);
        if (kvm->vm_fd != -1)
                close(kvm->vm_fd);
        close(kvm->fd);
        free(kvm);
}
void kvm_disable_irqchip_creation(kvm_context_t kvm)
{
        kvm->no_irqchip_creation = 1;
}
int kvm_create_vcpu(kvm_context_t kvm, int slot)
{
        long mmap_size;
        int r;

        r = ioctl(kvm->vm_fd, KVM_CREATE_VCPU, slot);
        if (r == -1) {
                r = -errno;
                fprintf(stderr, "kvm_create_vcpu: %m\n");
                return r;
        }
        kvm->vcpu_fd[slot] = r;

        mmap_size = ioctl(kvm->fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        if (mmap_size == -1) {
                r = -errno;
                fprintf(stderr, "get vcpu mmap size: %m\n");
                return r;
        }
        kvm->run[slot] = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED,
                              kvm->vcpu_fd[slot], 0);
        if (kvm->run[slot] == MAP_FAILED) {
                r = -errno;
                fprintf(stderr, "mmap vcpu area: %m\n");
                return r;
        }
        return 0;
}
int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
{
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
        int r;

        r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
                  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
        if (r > 0) {
                r = ioctl(kvm->vm_fd, KVM_SET_NR_MMU_PAGES, nrshadow_pages);
                if (r == -1) {
                        fprintf(stderr, "kvm_set_shadow_pages: %m\n");
                        return -errno;
                }
                return 0;
        }
#endif
        return -1;
}
int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
{
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
        int r;

        r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
                  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
        if (r > 0) {
                *nrshadow_pages = ioctl(kvm->vm_fd, KVM_GET_NR_MMU_PAGES);
                return 0;
        }
#endif
        return -1;
}
int kvm_create_vm(kvm_context_t kvm)
{
        int fd = kvm->fd;

        kvm->vcpu_fd[0] = -1;

        fd = ioctl(fd, KVM_CREATE_VM, 0);
        if (fd == -1) {
                fprintf(stderr, "kvm_create_vm: %m\n");
                return -1;
        }
        kvm->vm_fd = fd;
        return 0;
}
static int kvm_create_default_phys_mem(kvm_context_t kvm,
                                       unsigned long phys_mem_bytes,
                                       void **vm_mem)
{
        unsigned long memory = (phys_mem_bytes + PAGE_SIZE - 1) & PAGE_MASK;
        int r;

#ifdef KVM_CAP_USER_MEMORY
        r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
        if (r > 0)
                r = kvm_alloc_userspace_memory(kvm, memory, vm_mem);
        else
#endif
                r = kvm_alloc_kernel_memory(kvm, memory, vm_mem);
        if (r < 0)
                return r;

        r = kvm_arch_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
        if (r < 0)
                return r;

        kvm->physical_memory = *vm_mem;
        return 0;
}
void kvm_create_irqchip(kvm_context_t kvm)
{
        int r;

        kvm->irqchip_in_kernel = 0;
#ifdef KVM_CAP_IRQCHIP
        if (!kvm->no_irqchip_creation) {
                r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
                if (r > 0) { /* kernel irqchip supported */
                        r = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
                        if (r >= 0)
                                kvm->irqchip_in_kernel = 1;
                        else
                                fprintf(stderr, "Create kernel PIC irqchip failed\n");
                }
        }
#endif
}
int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
{
        int r;

        r = kvm_create_vm(kvm);
        if (r < 0)
                return r;
        r = kvm_arch_create(kvm, phys_mem_bytes, vm_mem);
        if (r < 0)
                return r;
        r = kvm_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
        if (r < 0)
                return r;
        kvm_create_irqchip(kvm);
        r = kvm_create_vcpu(kvm, 0);
        if (r < 0)
                return r;

        return 0;
}
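/*
 * Typical bring-up (illustrative only): kvm_create() already chains VM
 * creation, arch setup, default guest memory, the irqchip and vcpu 0, so a
 * caller usually needs no more than:
 *
 *	void *guest_mem;
 *
 *	if (kvm_create(kvm, 128 * 1024 * 1024, &guest_mem) < 0)
 *		exit(1);
 *	... load the guest image into guest_mem, set up registers ...
 */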
#ifdef KVM_CAP_USER_MEMORY
void *kvm_create_userspace_phys_mem(kvm_context_t kvm, unsigned long phys_start,
                                    unsigned long len, int log, int writable)
{
        int r;
        int prot = PROT_READ;
        void *ptr;
        struct kvm_userspace_memory_region memory = {
                .memory_size = len,
                .guest_phys_addr = phys_start,
                .flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
        };

        if (writable)
                prot |= PROT_WRITE;

        ptr = mmap(NULL, len, prot, MAP_ANONYMOUS | MAP_SHARED, -1, 0);
        if (ptr == MAP_FAILED) {
                fprintf(stderr, "create_userspace_phys_mem: %s\n", strerror(errno));
                return NULL;
        }

        memset(ptr, 0, len);

        memory.userspace_addr = (unsigned long)ptr;
        memory.slot = get_free_slot(kvm);
        r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &memory);
        if (r == -1) {
                fprintf(stderr, "create_userspace_phys_mem: %s\n", strerror(errno));
                return NULL;
        }
        register_slot(memory.slot, memory.guest_phys_addr);

        kvm_userspace_memory_region_save_params(kvm, &memory);

        return ptr;
}
#endif /* KVM_CAP_USER_MEMORY */
void *kvm_create_phys_mem(kvm_context_t kvm, unsigned long phys_start,
                          unsigned long len, int log, int writable)
{
#ifdef KVM_CAP_USER_MEMORY
        int r;

        r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
        if (r > 0)
                return kvm_create_userspace_phys_mem(kvm, phys_start, len,
                                                     log, writable);
        else
#endif
                return kvm_create_kernel_phys_mem(kvm, phys_start, len,
                                                  log, writable);
}
int kvm_register_userspace_phys_mem(kvm_context_t kvm,
                        unsigned long phys_start, void *userspace_addr,
                        unsigned long len, int log)
{
#ifdef KVM_CAP_USER_MEMORY
        struct kvm_userspace_memory_region memory = {
                .memory_size = len,
                .guest_phys_addr = phys_start,
                .userspace_addr = (intptr_t)userspace_addr,
                .flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
        };
        int r;

        if (!kvm->physical_memory)
                kvm->physical_memory = userspace_addr - phys_start;

        memory.slot = get_free_slot(kvm);
        r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &memory);
        if (r == -1) {
                fprintf(stderr, "create_userspace_phys_mem: %s\n", strerror(errno));
                return -1;
        }
        register_slot(memory.slot, memory.guest_phys_addr);

        kvm_userspace_memory_region_save_params(kvm, &memory);
        return 0;
#else
        return -ENOSYS;
#endif
}
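/*
 * Usage sketch (illustrative only): back a guest region with memory the
 * caller already owns, e.g. an anonymous mapping, instead of having libkvm
 * allocate it:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_ANONYMOUS | MAP_SHARED, -1, 0);
 *
 *	if (p != MAP_FAILED &&
 *	    kvm_register_userspace_phys_mem(kvm, phys_start, p, len, 0) == 0)
 *		... region is now guest-visible at phys_start ...
 */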
/*
 * destroy/free a whole slot.
 * phys_start, len and slot are the params passed to kvm_create_phys_mem()
 */
void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
                          unsigned long len)
{
        int slot;
        struct kvm_memory_region *mem;

        slot = get_slot(phys_start);

        if (slot < 0 || slot >= KVM_MAX_NUM_MEM_REGIONS) {
                fprintf(stderr, "BUG: %s: invalid parameters (slot=%d)\n",
                        __FUNCTION__, slot);
                return;
        }
        mem = &kvm->mem_regions[slot];
        if (phys_start != mem->guest_phys_addr) {
                fprintf(stderr,
                        "WARNING: %s: phys_start is 0x%lx expecting 0x%llx\n",
                        __FUNCTION__, phys_start, mem->guest_phys_addr);
                phys_start = mem->guest_phys_addr;
        }
        kvm_create_phys_mem(kvm, phys_start, 0, 0, 0);
}
static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
{
        int r;
        struct kvm_dirty_log log = {
                .slot = slot,
        };

        log.dirty_bitmap = buf;

        r = ioctl(kvm->vm_fd, ioctl_num, &log);
        if (r == -1)
                return -errno;
        return 0;
}
int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
{
        int slot;

        slot = get_slot(phys_addr);
        return kvm_get_map(kvm, KVM_GET_DIRTY_LOG, slot, buf);
}
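/*
 * The bitmap filled in by kvm_get_dirty_pages() carries one bit per guest
 * page. A minimal walk (illustrative only; "phys_start" and "len" are the
 * hypothetical base and size the region was created with, and LSB-first
 * bit order is assumed):
 *
 *	unsigned char bitmap[len / PAGE_SIZE / 8];
 *	unsigned long i;
 *
 *	kvm_get_dirty_pages(kvm, phys_start, bitmap);
 *	for (i = 0; i < len / PAGE_SIZE; ++i)
 *		if (bitmap[i / 8] & (1 << (i % 8)))
 *			... page i was written since the last call ...
 */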
int kvm_get_mem_map(kvm_context_t kvm, unsigned long phys_addr, void *buf)
{
        int slot;

        slot = get_slot(phys_addr);
#ifdef KVM_GET_MEM_MAP
        return kvm_get_map(kvm, KVM_GET_MEM_MAP, slot, buf);
#else /* not KVM_GET_MEM_MAP ==> fake it: all pages exist */
        unsigned long i, n, m, npages;
        unsigned char v;

        if (slot < 0 || slot >= KVM_MAX_NUM_MEM_REGIONS) {
                errno = EINVAL;
                return -1;
        }
        npages = kvm->mem_regions[slot].memory_size / PAGE_SIZE;
        n = npages / 8;
        m = npages % 8;
        memset(buf, 0xff, n); /* all pages exist */
        v = 0;
        for (i = 0; i <= m; i++) /* last byte may not be "aligned" */
                v |= 1 << (7 - i);
        if (v)
                *(unsigned char *)(buf + n) = v;
        return 0;
#endif /* KVM_GET_MEM_MAP */
}
#ifdef KVM_CAP_IRQCHIP
int kvm_set_irq_level(kvm_context_t kvm, int irq, int level)
{
        struct kvm_irq_level event;
        int r;

        if (!kvm->irqchip_in_kernel)
                return 0;
        event.level = level;
        event.irq = irq;
        r = ioctl(kvm->vm_fd, KVM_IRQ_LINE, &event);
        if (r == -1)
                perror("kvm_set_irq_level");
        return 1;
}
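/*
 * With the in-kernel irqchip a device model raises and lowers interrupt
 * lines rather than injecting vectors itself. Sketch (illustrative only):
 *
 *	kvm_set_irq_level(kvm, 4, 1);	... assert IRQ 4 ...
 *	kvm_set_irq_level(kvm, 4, 0);	... deassert it ...
 */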
int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
        int r;

        if (!kvm->irqchip_in_kernel)
                return 0;
        r = ioctl(kvm->vm_fd, KVM_GET_IRQCHIP, chip);
        if (r == -1) {
                r = -errno;
                perror("kvm_get_irqchip");
        }
        return r;
}
int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
        int r;

        if (!kvm->irqchip_in_kernel)
                return 0;
        r = ioctl(kvm->vm_fd, KVM_SET_IRQCHIP, chip);
        if (r == -1) {
                r = -errno;
                perror("kvm_set_irqchip");
        }
        return r;
}
#endif /* KVM_CAP_IRQCHIP */
static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
{
        uint16_t addr = run->io.port;
        int r;
        int i;
        void *p = (void *)run + run->io.data_offset;

        for (i = 0; i < run->io.count; ++i) {
                switch (run->io.direction) {
                case KVM_EXIT_IO_IN:
                        switch (run->io.size) {
                        case 1:
                                r = kvm->callbacks->inb(kvm->opaque, addr, p);
                                break;
                        case 2:
                                r = kvm->callbacks->inw(kvm->opaque, addr, p);
                                break;
                        case 4:
                                r = kvm->callbacks->inl(kvm->opaque, addr, p);
                                break;
                        default:
                                fprintf(stderr, "bad I/O size %d\n", run->io.size);
                                return -EMSGSIZE;
                        }
                        break;
                case KVM_EXIT_IO_OUT:
                        switch (run->io.size) {
                        case 1:
                                r = kvm->callbacks->outb(kvm->opaque, addr,
                                                         *(uint8_t *)p);
                                break;
                        case 2:
                                r = kvm->callbacks->outw(kvm->opaque, addr,
                                                         *(uint16_t *)p);
                                break;
                        case 4:
                                r = kvm->callbacks->outl(kvm->opaque, addr,
                                                         *(uint32_t *)p);
                                break;
                        default:
                                fprintf(stderr, "bad I/O size %d\n", run->io.size);
                                return -EMSGSIZE;
                        }
                        break;
                default:
                        fprintf(stderr, "bad I/O direction %d\n", run->io.direction);
                        return -EPROTO;
                }

                p += run->io.size; /* advance through the data area for string I/O */
        }

        return 0;
}
int handle_debug(kvm_context_t kvm, int vcpu)
{
        return kvm->callbacks->debug(kvm->opaque, vcpu);
}
int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_REGS, regs);
}

int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_REGS, regs);
}

int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_FPU, fpu);
}

int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_FPU, fpu);
}

int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_SREGS, sregs);
}

int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SREGS, sregs);
}
static int handle_mmio(kvm_context_t kvm, struct kvm_run *kvm_run)
{
        unsigned long addr = kvm_run->mmio.phys_addr;
        void *data = kvm_run->mmio.data;
        int r = -1;

        /* hack: Red Hat 7.1 generates these weird accesses. */
        if (addr == 0xa0000 && kvm_run->mmio.len == 3)
                return 0;

        if (kvm_run->mmio.is_write) {
                switch (kvm_run->mmio.len) {
                case 1:
                        r = kvm->callbacks->writeb(kvm->opaque, addr, *(uint8_t *)data);
                        break;
                case 2:
                        r = kvm->callbacks->writew(kvm->opaque, addr, *(uint16_t *)data);
                        break;
                case 4:
                        r = kvm->callbacks->writel(kvm->opaque, addr, *(uint32_t *)data);
                        break;
                case 8:
                        r = kvm->callbacks->writeq(kvm->opaque, addr, *(uint64_t *)data);
                        break;
                }
        } else {
                switch (kvm_run->mmio.len) {
                case 1:
                        r = kvm->callbacks->readb(kvm->opaque, addr, (uint8_t *)data);
                        break;
                case 2:
                        r = kvm->callbacks->readw(kvm->opaque, addr, (uint16_t *)data);
                        break;
                case 4:
                        r = kvm->callbacks->readl(kvm->opaque, addr, (uint32_t *)data);
                        break;
                case 8:
                        r = kvm->callbacks->readq(kvm->opaque, addr, (uint64_t *)data);
                        break;
                }
        }
        return r;
}
int handle_io_window(kvm_context_t kvm)
{
        return kvm->callbacks->io_window(kvm->opaque);
}

int handle_halt(kvm_context_t kvm, int vcpu)
{
        return kvm->callbacks->halt(kvm->opaque, vcpu);
}

int handle_shutdown(kvm_context_t kvm, int vcpu)
{
        return kvm->callbacks->shutdown(kvm->opaque, vcpu);
}

int try_push_interrupts(kvm_context_t kvm)
{
        return kvm->callbacks->try_push_interrupts(kvm->opaque);
}

void post_kvm_run(kvm_context_t kvm, int vcpu)
{
        kvm->callbacks->post_kvm_run(kvm->opaque, vcpu);
}

int pre_kvm_run(kvm_context_t kvm, int vcpu)
{
        return kvm->callbacks->pre_kvm_run(kvm->opaque, vcpu);
}
int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
{
        struct kvm_run *run = kvm->run[vcpu];

        if (kvm_abi == 10)
                return ((struct kvm_run_abi10 *)run)->if_flag;
        return run->if_flag;
}
int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu)
{
        struct kvm_run *run = kvm->run[vcpu];

        if (kvm_abi == 10)
                return ((struct kvm_run_abi10 *)run)->ready_for_interrupt_injection;
        return run->ready_for_interrupt_injection;
}
void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8)
{
        struct kvm_run *run = kvm->run[vcpu];

        if (kvm_abi == 10) {
                ((struct kvm_run_abi10 *)run)->cr8 = cr8;
                return;
        }
        run->cr8 = cr8;
}
__u64 kvm_get_cr8(kvm_context_t kvm, int vcpu)
{
        return kvm->run[vcpu]->cr8;
}
int kvm_run(kvm_context_t kvm, int vcpu)
{
        int r;
        int fd = kvm->vcpu_fd[vcpu];
        struct kvm_run *run = kvm->run[vcpu];

        if (kvm_abi == 10)
                return kvm_run_abi10(kvm, vcpu);

again:
        if (!kvm->irqchip_in_kernel)
                run->request_interrupt_window = try_push_interrupts(kvm);
        r = pre_kvm_run(kvm, vcpu);
        if (r)
                return r;
        r = ioctl(fd, KVM_RUN, 0);
        post_kvm_run(kvm, vcpu);

        if (r == -1 && errno != EINTR && errno != EAGAIN) {
                r = -errno;
                fprintf(stderr, "kvm_run: %m\n");
                return r;
        }
        if (r == -1) {
                r = handle_io_window(kvm);
                goto more;
        }
        switch (run->exit_reason) {
        case KVM_EXIT_UNKNOWN:
                fprintf(stderr, "unhandled vm exit: 0x%x vcpu_id %d\n",
                        (unsigned)run->hw.hardware_exit_reason, vcpu);
                kvm_show_regs(kvm, vcpu);
                abort();
                break;
        case KVM_EXIT_FAIL_ENTRY:
                fprintf(stderr, "kvm_run: failed entry, reason %u\n",
                        (unsigned)run->fail_entry.hardware_entry_failure_reason & 0xffff);
                return -ENOEXEC;
        case KVM_EXIT_EXCEPTION:
                fprintf(stderr, "exception %d (%x)\n",
                        run->ex.exception,
                        run->ex.error_code);
                kvm_show_regs(kvm, vcpu);
                kvm_show_code(kvm, vcpu);
                abort();
                break;
        case KVM_EXIT_IO:
                r = handle_io(kvm, run, vcpu);
                break;
        case KVM_EXIT_DEBUG:
                r = handle_debug(kvm, vcpu);
                break;
        case KVM_EXIT_MMIO:
                r = handle_mmio(kvm, run);
                break;
        case KVM_EXIT_HLT:
                r = handle_halt(kvm, vcpu);
                break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
                break;
        case KVM_EXIT_SHUTDOWN:
                r = handle_shutdown(kvm, vcpu);
                break;
#ifdef KVM_EXIT_SET_TPR
        case KVM_EXIT_SET_TPR:
                break;
#endif
        default:
                fprintf(stderr, "unhandled vm exit: 0x%x\n", run->exit_reason);
                kvm_show_regs(kvm, vcpu);
                abort();
        }
more:
        if (!r)
                goto again;
        return r;
}
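/*
 * Note that kvm_run() keeps re-entering the guest (the "again" label) while
 * every exit is consumed with r == 0, so it returns only when a callback
 * reports nonzero or an error occurs. A caller's vcpu loop is therefore
 * just (illustrative only):
 *
 *	for (;;) {
 *		int r = kvm_run(kvm, 0);
 *
 *		if (r < 0)
 *			break;	... fatal error, inspect r ...
 *		... r > 0: a callback asked for control; service the
 *		    request, then loop to re-enter the guest ...
 *	}
 */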
int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq)
{
        struct kvm_interrupt intr;

        intr.irq = irq;
        return ioctl(kvm->vcpu_fd[vcpu], KVM_INTERRUPT, &intr);
}
int kvm_guest_debug(kvm_context_t kvm, int vcpu, struct kvm_debug_guest *dbg)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_DEBUG_GUEST, dbg);
}
int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
                    struct kvm_cpuid_entry *entries)
{
        int r;
        struct kvm_cpuid *cpuid;

        cpuid = malloc(sizeof(*cpuid) + nent * sizeof(*entries));
        if (!cpuid)
                return -ENOMEM;

        cpuid->nent = nent;
        memcpy(cpuid->entries, entries, nent * sizeof(*entries));
        r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_CPUID, cpuid);

        free(cpuid);
        return r;
}
int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
{
        struct kvm_signal_mask *sigmask;
        int r;

        if (!sigset) {
                r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, NULL);
                if (r == -1)
                        r = -errno;
                return r;
        }
        sigmask = malloc(sizeof(*sigmask) + sizeof(*sigset));
        if (!sigmask)
                return -ENOMEM;

        sigmask->len = 8;
        memcpy(sigmask->sigset, sigset, sizeof(*sigset));
        r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, sigmask);
        if (r == -1)
                r = -errno;
        free(sigmask);
        return r;
}
int kvm_irqchip_in_kernel(kvm_context_t kvm)
{
        return kvm->irqchip_in_kernel;
}