kvm: libkvm: move kvm_get_apic to libkvm-x86.c
[kvm-userspace.git] / libkvm / libkvm.c
/*
 * Kernel-based Virtual Machine control library
 *
 * This library provides an API to control the kvm hardware virtualization
 * module.
 *
 * Copyright (C) 2006 Qumranet
 *
 * Authors:
 *
 *	Avi Kivity   <avi@qumranet.com>
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the GNU LGPL license, version 2.
 */

#ifndef __user
#define __user /* temporary, until installed via make headers_install */
#endif
#include <linux/kvm.h>

#define EXPECTED_KVM_API_VERSION 12

#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
#error libkvm: userspace and kernel version mismatch
#endif

#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include "libkvm.h"
#include "kvm-abi-10.h"

#if defined(__x86_64__) || defined(__i386__)
#include "kvm-x86.h"
#endif
int kvm_abi = EXPECTED_KVM_API_VERSION;

/* free_slots[i] is nonzero while memory slot i is in use;
 * phys_addr_slots[i] remembers the guest physical address it maps. */
int free_slots[KVM_MAX_NUM_MEM_REGIONS];
unsigned long phys_addr_slots[KVM_MAX_NUM_MEM_REGIONS];

void init_slots(void)
{
	int i;

	for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
		free_slots[i] = 0;
}
int get_free_slot(kvm_context_t kvm)
{
	int i;
	int tss_ext;

#ifdef KVM_CAP_SET_TSS_ADDR
	tss_ext = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
#else
	tss_ext = 0;
#endif

	/*
	 * on older kernels where the set tss ioctl is not supported we must
	 * save slot 0 to hold the extended memory, as the vmx will use the
	 * last 3 pages of this slot.
	 */
	if (tss_ext > 0)
		i = 0;
	else
		i = 1;

	for (; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
		if (!free_slots[i])
			return i;
	return -1;
}
void register_slot(int slot, unsigned long phys_addr)
{
	free_slots[slot] = 1;
	phys_addr_slots[slot] = phys_addr;
}

int get_slot(unsigned long phys_addr)
{
	int i;

	for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
		if (free_slots[i] && phys_addr_slots[i] == phys_addr)
			return i;
	return -1;
}
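
/*
 * A typical slot lifecycle, as the rest of this file uses it (a sketch,
 * not a public API):
 *
 *	int slot = get_free_slot(kvm);         (pick an unused slot)
 *	... KVM_SET_USER_MEMORY_REGION ...     (tell the kernel about it)
 *	register_slot(slot, guest_phys_addr);  (record it as busy)
 *	... later ...
 *	slot = get_slot(guest_phys_addr);      (find it again by address)
 */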
/*
 * memory regions parameters
 */
void kvm_memory_region_save_params(kvm_context_t kvm,
				   struct kvm_memory_region *mem)
{
	if (!mem || (mem->slot >= KVM_MAX_NUM_MEM_REGIONS)) {
		fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
		return;
	}
	kvm->mem_regions[mem->slot] = *mem;
}

#ifdef KVM_CAP_USER_MEMORY

void kvm_userspace_memory_region_save_params(kvm_context_t kvm,
				struct kvm_userspace_memory_region *mem)
{
	struct kvm_memory_region kvm_mem;

	kvm_mem.slot = mem->slot;
	kvm_mem.memory_size = mem->memory_size;
	kvm_mem.guest_phys_addr = mem->guest_phys_addr;
	kvm_mem.flags = mem->flags;	/* keep the saved flags valid, too */

	kvm_memory_region_save_params(kvm, &kvm_mem);
}

#endif

void kvm_memory_region_clear_params(kvm_context_t kvm, int regnum)
{
	if (regnum >= KVM_MAX_NUM_MEM_REGIONS) {
		fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
		return;
	}
	kvm->mem_regions[regnum].memory_size = 0;
}
/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(kvm_context_t kvm, int regnum, __u32 flag)
{
	int r;
	struct kvm_memory_region *mem;

	if (regnum >= KVM_MAX_NUM_MEM_REGIONS) {
		fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
		return 1;
	}
	mem = &kvm->mem_regions[regnum];
	if (mem->memory_size == 0)	/* not used */
		return 0;
	if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)	/* log already enabled */
		return 0;
	mem->flags |= flag;	/* temporarily turn on the flag */
	r = ioctl(kvm->vm_fd, KVM_SET_MEMORY_REGION, mem);
	mem->flags &= ~flag;	/* back to previous value */
	if (r == -1) {
		fprintf(stderr, "%s: %m\n", __FUNCTION__);
	}
	return r;
}
static int kvm_dirty_pages_log_change_all(kvm_context_t kvm, __u32 flag)
{
	int i, r;

	for (i = r = 0; i < KVM_MAX_NUM_MEM_REGIONS && r == 0; i++) {
		r = kvm_dirty_pages_log_change(kvm, i, flag);
	}
	return r;
}

/*
 * Enable dirty page logging for all memory regions
 */
int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
{
	if (kvm->dirty_pages_log_all)
		return 0;
	kvm->dirty_pages_log_all = 1;
	return kvm_dirty_pages_log_change_all(kvm, KVM_MEM_LOG_DIRTY_PAGES);
}

/*
 * Enable dirty page logging only for memory regions that were created with
 * dirty logging enabled (disable for all other memory regions).
 */
int kvm_dirty_pages_log_reset(kvm_context_t kvm)
{
	if (!kvm->dirty_pages_log_all)
		return 0;
	kvm->dirty_pages_log_all = 0;
	return kvm_dirty_pages_log_change_all(kvm, 0);
}
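
/*
 * A dirty-logging cycle, e.g. while scanning guest memory for changed
 * pages (a sketch of how a caller might combine the helpers above;
 * `bitmap' and `phys_start' are hypothetical caller variables):
 *
 *	kvm_dirty_pages_log_enable_all(kvm);
 *	... let the guest run for a while ...
 *	kvm_get_dirty_pages(kvm, phys_start, bitmap);
 *	... process the bitmap: each set bit marks one dirty page ...
 *	kvm_dirty_pages_log_reset(kvm);
 */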
kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
		       void *opaque)
{
	int fd;
	kvm_context_t kvm;
	int r;

	fd = open("/dev/kvm", O_RDWR);
	if (fd == -1) {
		perror("open /dev/kvm");
		return NULL;
	}
	r = ioctl(fd, KVM_GET_API_VERSION, 0);
	if (r == -1) {
		fprintf(stderr, "kvm kernel version too old: "
			"KVM_GET_API_VERSION ioctl not supported\n");
		goto out_close;
	}
	if (r < EXPECTED_KVM_API_VERSION && r != 10) {
		fprintf(stderr, "kvm kernel version too old: "
			"We expect API version %d or newer, but got "
			"version %d\n",
			EXPECTED_KVM_API_VERSION, r);
		goto out_close;
	}
	if (r > EXPECTED_KVM_API_VERSION) {
		fprintf(stderr, "kvm userspace version too old\n");
		goto out_close;
	}
	kvm_abi = r;
	kvm = malloc(sizeof(*kvm));
	if (!kvm)
		goto out_close;
	kvm->fd = fd;
	kvm->vm_fd = -1;
	kvm->callbacks = callbacks;
	kvm->opaque = opaque;
	kvm->dirty_pages_log_all = 0;
	kvm->no_irqchip_creation = 0;
	memset(&kvm->mem_regions, 0, sizeof(kvm->mem_regions));

	return kvm;
out_close:
	close(fd);
	return NULL;
}
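
/*
 * Typical use (a sketch): the caller fills a struct kvm_callbacks with
 * its I/O and MMIO handlers, then does
 *
 *	kvm_context_t kvm = kvm_init(&my_callbacks, my_opaque);
 *	if (!kvm)
 *		exit(1);
 *
 * where `my_callbacks' and `my_opaque' are the caller's own; the opaque
 * pointer is passed back verbatim to every callback.
 */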
void kvm_finalize(kvm_context_t kvm)
{
	if (kvm->vcpu_fd[0] != -1)
		close(kvm->vcpu_fd[0]);
	if (kvm->vm_fd != -1)
		close(kvm->vm_fd);
	close(kvm->fd);
	free(kvm);
}

void kvm_disable_irqchip_creation(kvm_context_t kvm)
{
	kvm->no_irqchip_creation = 1;
}
int kvm_create_vcpu(kvm_context_t kvm, int slot)
{
	long mmap_size;
	int r;

	r = ioctl(kvm->vm_fd, KVM_CREATE_VCPU, slot);
	if (r == -1) {
		r = -errno;
		fprintf(stderr, "kvm_create_vcpu: %m\n");
		return r;
	}
	kvm->vcpu_fd[slot] = r;
	mmap_size = ioctl(kvm->fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	if (mmap_size == -1) {
		r = -errno;
		fprintf(stderr, "get vcpu mmap size: %m\n");
		return r;
	}
	kvm->run[slot] = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED,
			      kvm->vcpu_fd[slot], 0);
	if (kvm->run[slot] == MAP_FAILED) {
		r = -errno;
		fprintf(stderr, "mmap vcpu area: %m\n");
		return r;
	}
	return 0;
}
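
/*
 * Note: the mmap above maps the vcpu's shared "struct kvm_run" area.
 * The kernel writes exit information (exit_reason, io, mmio, ...) into
 * it on every return from KVM_RUN, so kvm->run[slot] can be read
 * directly after the ioctl with no extra copy.
 */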
int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
{
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
	int r;

	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
		  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
	if (r > 0) {
		r = ioctl(kvm->vm_fd, KVM_SET_NR_MMU_PAGES, nrshadow_pages);
		if (r == -1) {
			fprintf(stderr, "kvm_set_shadow_pages: %m\n");
			return -errno;
		}
		return 0;
	}
#endif
	return -1;
}

int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
{
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
	int r;

	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
		  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
	if (r > 0) {
		*nrshadow_pages = ioctl(kvm->vm_fd, KVM_GET_NR_MMU_PAGES);
		return 0;
	}
#endif
	return -1;
}
int kvm_create_vm(kvm_context_t kvm)
{
	int fd = kvm->fd;

	kvm->vcpu_fd[0] = -1;

	fd = ioctl(fd, KVM_CREATE_VM, 0);
	if (fd == -1) {
		fprintf(stderr, "kvm_create_vm: %m\n");
		return -1;
	}
	kvm->vm_fd = fd;
	return 0;
}
static int kvm_create_default_phys_mem(kvm_context_t kvm,
				       unsigned long phys_mem_bytes,
				       void **vm_mem)
{
	unsigned long memory = (phys_mem_bytes + PAGE_SIZE - 1) & PAGE_MASK;
	int r;

#ifdef KVM_CAP_USER_MEMORY
	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
	if (r > 0)
		r = kvm_alloc_userspace_memory(kvm, memory, vm_mem);
	else
#endif
		r = kvm_alloc_kernel_memory(kvm, memory, vm_mem);
	if (r < 0)
		return r;

	r = kvm_arch_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
	if (r < 0)
		return r;

	kvm->physical_memory = *vm_mem;
	return 0;
}
void kvm_create_irqchip(kvm_context_t kvm)
{
	int r;

	kvm->irqchip_in_kernel = 0;
#ifdef KVM_CAP_IRQCHIP
	if (!kvm->no_irqchip_creation) {
		r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
		if (r > 0) {	/* kernel irqchip supported */
			r = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
			if (r >= 0)
				kvm->irqchip_in_kernel = 1;
			else
				fprintf(stderr,
					"Create kernel PIC irqchip failed\n");
		}
	}
#endif
}
int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
{
	int r;

	r = kvm_create_vm(kvm);
	if (r < 0)
		return r;
	r = kvm_arch_create(kvm, phys_mem_bytes, vm_mem);
	if (r < 0)
		return r;
	init_slots();
	r = kvm_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
	if (r < 0)
		return r;
	kvm_create_irqchip(kvm);
	r = kvm_create_vcpu(kvm, 0);
	if (r < 0)
		return r;

	return 0;
}
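
/*
 * Putting the pieces together, a minimal caller might look like this
 * (a sketch; error handling and the callback table are elided, and
 * `ram_size' is a hypothetical caller variable):
 *
 *	void *ram;
 *	kvm_context_t kvm = kvm_init(&my_callbacks, NULL);
 *	kvm_create(kvm, ram_size, &ram);   (VM + memory + irqchip + vcpu 0)
 *	... load guest code into ram, set up registers ...
 *	kvm_run(kvm, 0);                   (enter the guest)
 */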
#ifdef KVM_CAP_USER_MEMORY

void *kvm_create_userspace_phys_mem(kvm_context_t kvm, unsigned long phys_start,
				    unsigned long len, int log, int writable)
{
	int r;
	int prot = PROT_READ;
	void *ptr;
	struct kvm_userspace_memory_region memory = {
		.memory_size = len,
		.guest_phys_addr = phys_start,
		.flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
	};

	if (writable)
		prot |= PROT_WRITE;

	ptr = mmap(NULL, len, prot, MAP_ANONYMOUS | MAP_SHARED, -1, 0);
	if (ptr == MAP_FAILED) {
		fprintf(stderr, "create_userspace_phys_mem: %s\n",
			strerror(errno));
		return NULL;
	}

	memset(ptr, 0, len);

	memory.userspace_addr = (unsigned long)ptr;
	memory.slot = get_free_slot(kvm);
	r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &memory);
	if (r == -1) {
		fprintf(stderr, "create_userspace_phys_mem: %s\n",
			strerror(errno));
		return NULL;
	}
	register_slot(memory.slot, memory.guest_phys_addr);

	kvm_userspace_memory_region_save_params(kvm, &memory);

	return ptr;
}

#endif
void *kvm_create_phys_mem(kvm_context_t kvm, unsigned long phys_start,
			  unsigned long len, int log, int writable)
{
#ifdef KVM_CAP_USER_MEMORY
	int r;

	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
	if (r > 0)
		return kvm_create_userspace_phys_mem(kvm, phys_start, len,
						     log, writable);
	else
#endif
		return kvm_create_kernel_phys_mem(kvm, phys_start, len,
						  log, writable);
}
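
/*
 * Example (a sketch): mapping 640 KiB of guest RAM at guest physical
 * address 0, writable and without dirty logging:
 *
 *	void *low_mem = kvm_create_phys_mem(kvm, 0, 640 * 1024, 0, 1);
 *
 * The returned pointer is the userspace view of that guest memory;
 * NULL indicates failure.
 */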
int kvm_register_userspace_phys_mem(kvm_context_t kvm,
			unsigned long phys_start, void *userspace_addr,
			unsigned long len, int log)
{
#ifdef KVM_CAP_USER_MEMORY
	struct kvm_userspace_memory_region memory = {
		.memory_size = len,
		.guest_phys_addr = phys_start,
		.userspace_addr = (intptr_t)userspace_addr,
		.flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
	};
	int r;

	if (!kvm->physical_memory)
		kvm->physical_memory = userspace_addr - phys_start;

	memory.slot = get_free_slot(kvm);
	r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &memory);
	if (r == -1) {
		fprintf(stderr, "%s: %s\n", __FUNCTION__, strerror(errno));
		return -1;
	}
	register_slot(memory.slot, memory.guest_phys_addr);

	kvm_userspace_memory_region_save_params(kvm, &memory);
	return 0;
#else
	return -ENOSYS;
#endif
}
/* destroy/free a whole slot.
 * phys_start, len and slot are the params passed to kvm_create_phys_mem()
 */
void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
			  unsigned long len)
{
	int slot;
	struct kvm_memory_region *mem;

	slot = get_slot(phys_start);

	if (slot < 0 || slot >= KVM_MAX_NUM_MEM_REGIONS) {
		fprintf(stderr, "BUG: %s: invalid parameters (slot=%d)\n",
			__FUNCTION__, slot);
		return;
	}
	mem = &kvm->mem_regions[slot];
	if (phys_start != mem->guest_phys_addr) {
		fprintf(stderr,
			"WARNING: %s: phys_start is 0x%lx expecting 0x%llx\n",
			__FUNCTION__, phys_start, mem->guest_phys_addr);
		phys_start = mem->guest_phys_addr;
	}
	kvm_create_phys_mem(kvm, phys_start, 0, 0, 0);
}
static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
{
	int r;
	struct kvm_dirty_log log = {
		.slot = slot,
	};

	log.dirty_bitmap = buf;

	r = ioctl(kvm->vm_fd, ioctl_num, &log);
	if (r == -1)
		return -errno;
	return 0;
}

int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
{
	int slot;

	slot = get_slot(phys_addr);
	return kvm_get_map(kvm, KVM_GET_DIRTY_LOG, slot, buf);
}

int kvm_get_mem_map(kvm_context_t kvm, unsigned long phys_addr, void *buf)
{
	int slot;

	slot = get_slot(phys_addr);
#ifdef KVM_GET_MEM_MAP
	return kvm_get_map(kvm, KVM_GET_MEM_MAP, slot, buf);
#else	/* not KVM_GET_MEM_MAP ==> fake it: all pages exist */
	unsigned long i, n, m, npages;
	unsigned char v;

	if (slot < 0 || slot >= KVM_MAX_NUM_MEM_REGIONS) {
		errno = EINVAL;
		return -1;
	}
	npages = kvm->mem_regions[slot].memory_size / PAGE_SIZE;
	n = npages / 8;
	m = npages % 8;
	memset(buf, 0xff, n);	/* all pages exist */
	v = 0;
	for (i = 0; i < m; i++)	/* last byte may not be "aligned" */
		v |= 1 << (7 - i);
	if (v)
		*(unsigned char *)(buf + n) = v;
	return 0;
#endif	/* KVM_GET_MEM_MAP */
}
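
/*
 * Worked example for the fake bitmap above: a 13-page region gives
 * n = 13/8 = 1 full 0xff byte and m = 13%8 = 5 leftover pages, so the
 * second byte gets bits 7..3 set: v = 0x80|0x40|0x20|0x10|0x08 = 0xf8.
 * (This fallback fills each byte most-significant-bit first.)
 */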
#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq_level(kvm_context_t kvm, int irq, int level)
{
	struct kvm_irq_level event;
	int r;

	if (!kvm->irqchip_in_kernel)
		return 0;
	event.level = level;
	event.irq = irq;
	r = ioctl(kvm->vm_fd, KVM_IRQ_LINE, &event);
	if (r == -1)
		perror("kvm_set_irq_level");
	return 1;
}

int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
	int r;

	if (!kvm->irqchip_in_kernel)
		return 0;
	r = ioctl(kvm->vm_fd, KVM_GET_IRQCHIP, chip);
	if (r == -1) {
		r = -errno;
		perror("kvm_get_irqchip");
	}
	return r;
}

int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
	int r;

	if (!kvm->irqchip_in_kernel)
		return 0;
	r = ioctl(kvm->vm_fd, KVM_SET_IRQCHIP, chip);
	if (r == -1) {
		r = -errno;
		perror("kvm_set_irqchip");
	}
	return r;
}

#endif
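
/*
 * Example (a sketch): pulsing an edge-triggered interrupt line on the
 * in-kernel PIC, e.g. line 4 for a 16550 UART model:
 *
 *	kvm_set_irq_level(kvm, 4, 1);   (raise)
 *	kvm_set_irq_level(kvm, 4, 0);   (lower)
 *
 * Both calls are no-ops when the irqchip is emulated in userspace.
 */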
static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
{
	uint16_t addr = run->io.port;
	int r;
	int i;
	void *p = (void *)run + run->io.data_offset;

	for (i = 0; i < run->io.count; ++i) {
		switch (run->io.direction) {
		case KVM_EXIT_IO_IN:
			switch (run->io.size) {
			case 1:
				r = kvm->callbacks->inb(kvm->opaque, addr, p);
				break;
			case 2:
				r = kvm->callbacks->inw(kvm->opaque, addr, p);
				break;
			case 4:
				r = kvm->callbacks->inl(kvm->opaque, addr, p);
				break;
			default:
				fprintf(stderr, "bad I/O size %d\n", run->io.size);
				return -EMSGSIZE;
			}
			break;
		case KVM_EXIT_IO_OUT:
			switch (run->io.size) {
			case 1:
				r = kvm->callbacks->outb(kvm->opaque, addr,
							 *(uint8_t *)p);
				break;
			case 2:
				r = kvm->callbacks->outw(kvm->opaque, addr,
							 *(uint16_t *)p);
				break;
			case 4:
				r = kvm->callbacks->outl(kvm->opaque, addr,
							 *(uint32_t *)p);
				break;
			default:
				fprintf(stderr, "bad I/O size %d\n", run->io.size);
				return -EMSGSIZE;
			}
			break;
		default:
			fprintf(stderr, "bad I/O direction %d\n",
				run->io.direction);
			return -EPROTO;
		}

		p += run->io.size;
	}

	return 0;
}
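
/*
 * Note on handle_io() above: for string instructions (rep ins/outs) the
 * kernel batches run->io.count transfers into a single exit, and
 * run->io.data_offset is the offset of the data buffer within the
 * mmap'ed kvm_run area, which is why `p' is computed relative to `run'
 * rather than being a separate kernel pointer.
 */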
int handle_debug(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->debug(kvm->opaque, vcpu);
}

int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_REGS, regs);
}

int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_REGS, regs);
}

int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_FPU, fpu);
}

int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_FPU, fpu);
}

int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_SREGS, sregs);
}

int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SREGS, sregs);
}
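
/*
 * These are thin ioctl wrappers; a read-modify-write of vcpu state is
 * the usual pattern (a sketch; x86 register names assumed, and
 * `entry_point' is a hypothetical caller variable):
 *
 *	struct kvm_regs regs;
 *	kvm_get_regs(kvm, 0, &regs);
 *	regs.rip = entry_point;
 *	kvm_set_regs(kvm, 0, &regs);
 */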
static int handle_mmio(kvm_context_t kvm, struct kvm_run *kvm_run)
{
	unsigned long addr = kvm_run->mmio.phys_addr;
	void *data = kvm_run->mmio.data;
	int r = -1;

	/* hack: Red Hat 7.1 generates these weird accesses. */
	if (addr == 0xa0000 && kvm_run->mmio.len == 3)
		return 0;

	if (kvm_run->mmio.is_write) {
		switch (kvm_run->mmio.len) {
		case 1:
			r = kvm->callbacks->writeb(kvm->opaque, addr,
						   *(uint8_t *)data);
			break;
		case 2:
			r = kvm->callbacks->writew(kvm->opaque, addr,
						   *(uint16_t *)data);
			break;
		case 4:
			r = kvm->callbacks->writel(kvm->opaque, addr,
						   *(uint32_t *)data);
			break;
		case 8:
			r = kvm->callbacks->writeq(kvm->opaque, addr,
						   *(uint64_t *)data);
			break;
		}
	} else {
		switch (kvm_run->mmio.len) {
		case 1:
			r = kvm->callbacks->readb(kvm->opaque, addr,
						  (uint8_t *)data);
			break;
		case 2:
			r = kvm->callbacks->readw(kvm->opaque, addr,
						  (uint16_t *)data);
			break;
		case 4:
			r = kvm->callbacks->readl(kvm->opaque, addr,
						  (uint32_t *)data);
			break;
		case 8:
			r = kvm->callbacks->readq(kvm->opaque, addr,
						  (uint64_t *)data);
			break;
		}
	}
	return r;
}
int handle_io_window(kvm_context_t kvm)
{
	return kvm->callbacks->io_window(kvm->opaque);
}

int handle_halt(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->halt(kvm->opaque, vcpu);
}

int handle_shutdown(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->shutdown(kvm->opaque, vcpu);
}

int try_push_interrupts(kvm_context_t kvm)
{
	return kvm->callbacks->try_push_interrupts(kvm->opaque);
}

void post_kvm_run(kvm_context_t kvm, int vcpu)
{
	kvm->callbacks->post_kvm_run(kvm->opaque, vcpu);
}

int pre_kvm_run(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->pre_kvm_run(kvm->opaque, vcpu);
}
int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
{
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10)
		return ((struct kvm_run_abi10 *)run)->if_flag;
	return run->if_flag;
}

int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu)
{
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10)
		return ((struct kvm_run_abi10 *)run)->ready_for_interrupt_injection;
	return run->ready_for_interrupt_injection;
}

void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8)
{
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10) {
		((struct kvm_run_abi10 *)run)->cr8 = cr8;
		return;
	}
	run->cr8 = cr8;
}

__u64 kvm_get_cr8(kvm_context_t kvm, int vcpu)
{
	return kvm->run[vcpu]->cr8;
}
int kvm_run(kvm_context_t kvm, int vcpu)
{
	int r;
	int fd = kvm->vcpu_fd[vcpu];
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10)
		return kvm_run_abi10(kvm, vcpu);

again:
	if (!kvm->irqchip_in_kernel)
		run->request_interrupt_window = try_push_interrupts(kvm);
	r = pre_kvm_run(kvm, vcpu);
	if (r)
		return r;
	r = ioctl(fd, KVM_RUN, 0);
	post_kvm_run(kvm, vcpu);

	if (r == -1 && errno != EINTR && errno != EAGAIN) {
		r = -errno;
		fprintf(stderr, "kvm_run: %m\n");
		return r;
	}
	if (r == -1) {
		r = handle_io_window(kvm);
		goto more;
	}
	switch (run->exit_reason) {
	case KVM_EXIT_UNKNOWN:
		fprintf(stderr, "unhandled vm exit: 0x%x vcpu_id %d\n",
			(unsigned)run->hw.hardware_exit_reason, vcpu);
		kvm_show_regs(kvm, vcpu);
		abort();
		break;
	case KVM_EXIT_FAIL_ENTRY:
		fprintf(stderr, "kvm_run: failed entry, reason %u\n",
			(unsigned)run->fail_entry.hardware_entry_failure_reason & 0xffff);
		return -ENOEXEC;
		break;
	case KVM_EXIT_EXCEPTION:
		fprintf(stderr, "exception %d (%x)\n",
			run->ex.exception,
			run->ex.error_code);
		kvm_show_regs(kvm, vcpu);
		kvm_show_code(kvm, vcpu);
		abort();
		break;
	case KVM_EXIT_IO:
		r = handle_io(kvm, run, vcpu);
		break;
	case KVM_EXIT_DEBUG:
		r = handle_debug(kvm, vcpu);
		break;
	case KVM_EXIT_MMIO:
		r = handle_mmio(kvm, run);
		break;
	case KVM_EXIT_HLT:
		r = handle_halt(kvm, vcpu);
		break;
	case KVM_EXIT_IRQ_WINDOW_OPEN:
		break;
	case KVM_EXIT_SHUTDOWN:
		r = handle_shutdown(kvm, vcpu);
		break;
#ifdef KVM_EXIT_SET_TPR
	case KVM_EXIT_SET_TPR:
		break;
#endif
	default:
		fprintf(stderr, "unhandled vm exit: 0x%x\n", run->exit_reason);
		kvm_show_regs(kvm, vcpu);
		abort();
		break;
	}
more:
	if (!r)
		goto again;
	return r;
}
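
/*
 * kvm_run() loops internally and only returns when a handler or
 * callback reports a nonzero result, so a vcpu thread is typically
 * just (a sketch):
 *
 *	for (;;) {
 *		int r = kvm_run(kvm, vcpu);
 *		if (r < 0)
 *			break;   (fatal: failed entry, I/O error, ...)
 *		... service whatever made a callback return nonzero ...
 *	}
 */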
int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq)
{
	struct kvm_interrupt intr;

	intr.irq = irq;
	return ioctl(kvm->vcpu_fd[vcpu], KVM_INTERRUPT, &intr);
}

int kvm_guest_debug(kvm_context_t kvm, int vcpu, struct kvm_debug_guest *dbg)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_DEBUG_GUEST, dbg);
}
int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
		    struct kvm_cpuid_entry *entries)
{
	struct kvm_cpuid *cpuid;
	int r;

	cpuid = malloc(sizeof(*cpuid) + nent * sizeof(*entries));
	if (!cpuid)
		return -ENOMEM;

	cpuid->nent = nent;
	memcpy(cpuid->entries, entries, nent * sizeof(*entries));
	r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_CPUID, cpuid);

	free(cpuid);
	return r;
}
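
/*
 * struct kvm_cpuid carries a variable-length entry table, hence the
 * single malloc of header plus `nent' entries above. A caller might
 * fill the table like this (a sketch; the values are illustrative
 * only):
 *
 *	struct kvm_cpuid_entry ent = {
 *		.function = 0,      (max standard leaf / vendor id)
 *		.eax = 1, .ebx = 0, .ecx = 0, .edx = 0,
 *	};
 *	kvm_setup_cpuid(kvm, 0, 1, &ent);
 */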
int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
{
	struct kvm_signal_mask *sigmask;
	int r;

	if (!sigset) {
		r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, NULL);
		if (r == -1)
			r = -errno;
		return r;
	}
	sigmask = malloc(sizeof(*sigmask) + sizeof(*sigset));
	if (!sigmask)
		return -ENOMEM;

	sigmask->len = 8;	/* 8 bytes: the kernel's 64-bit sigset size */
	memcpy(sigmask->sigset, sigset, sizeof(*sigset));
	r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, sigmask);
	if (r == -1)
		r = -errno;
	free(sigmask);
	return r;
}
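
/*
 * Usage note (a sketch): a vcpu thread usually blocks its wakeup signal
 * with pthread_sigmask() and installs a mask that unblocks it only
 * while inside KVM_RUN, so the signal can kick the vcpu out of guest
 * mode without racing the rest of the thread:
 *
 *	sigset_t set;
 *	pthread_sigmask(SIG_BLOCK, NULL, &set);   (current mask)
 *	sigdelset(&set, SIGUSR1);                 (hypothetical kick signal)
 *	kvm_set_signal_mask(kvm, vcpu, &set);
 */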
int kvm_irqchip_in_kernel(kvm_context_t kvm)
{
	return kvm->irqchip_in_kernel;
}