2 * Copyright (c) 2008 Brent Stephens <brents@rice.edu>
3 * Copyright (c) 2008 Diego Ongaro <diego.ongaro@rice.edu>
4 * Copyright (c) 2008 Oleg Pesok <olegpesok@gmail.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 #include <sys/syscall.h>
40 #include <sys/param.h>
43 #include "libfkvm-common.h"
/*
 * _is_valid_vcpu_slot: range-check a vcpu slot index (0 <= slot < MAX_VCPUS),
 * logging the calling function's name on failure via the macro below.
 * NOTE(review): this extraction is missing interleaved lines (the fused
 * original numbering jumps 47->49->51->55); the `if (!valid)` guard and
 * `return valid;` are presumably among the elided lines — confirm upstream.
 */
47 _is_valid_vcpu_slot(int slot
, const char *func
)
49 bool valid
= (slot
>= 0 && slot
< MAX_VCPUS
);
51 fprintf(stderr
, "%s: Invalid vcpu slot provided: %d\n",
/* Convenience wrapper: injects the caller's name for the diagnostic above. */
55 #define is_valid_vcpu_slot(slot) _is_valid_vcpu_slot((slot), __FUNCTION__)
/*
 * cpu_virtual_memory_rw: copy between `buf` and guest *virtual* memory,
 * apparently one page at a time: clamp the chunk to the remainder of the
 * current page, translate gva -> gpa with kvm_get_phys_addr(), then delegate
 * to cpu_physical_memory_rw() (is_write selects direction).
 * NOTE(review): surrounding loop/return lines are elided in this extraction.
 */
58 cpu_virtual_memory_rw(unsigned long gvaddr
,
/* Bytes available on this page: between 1 and PAGE_SIZE. */
69 on_this_page
= MIN(len
, PAGE_SIZE
- (gvaddr
& PAGE_MASK
)); // 1 - 4096
71 gpaddr
= kvm_get_phys_addr(gvaddr
);
76 printf("guest virtual 0x%lx -> physical 0x%lx\n", gvaddr
, gpaddr
);
79 cpu_physical_memory_rw(gpaddr
, buf
, on_this_page
, is_write
);
85 /* Helper Functions */
/*
 * handle_exit_io: VM-exit handler for port I/O. Fetches regs/sregs, lets
 * emulate_ioio() perform the access, advances rip to the intercept's
 * next_rip, and writes the registers back.
 * NOTE(review): "®s" below is mojibake for "&regs" — restore before compiling.
 * Error-check lines between the fused numbers are elided in this extraction.
 */
87 handle_exit_io(kvm_context_t kvm
, int vcpu
, struct kvm_run
*kvm_run
)
90 struct kvm_sregs sregs
;
93 error
= kvm_get_regs(kvm
, vcpu
, ®s
);
97 error
= kvm_get_sregs(kvm
, vcpu
, &sregs
);
101 error
= emulate_ioio(kvm
,
107 kvm_run
->u
.io
.string
,
/* Skip the emulated IN/OUT instruction (next_rip from the exit info). */
112 regs
.rip
= kvm_run
->u
.io
.next_rip
;
114 error
= kvm_set_regs(kvm
, vcpu
, ®s
);
/*
 * handle_exit_mmio: VM-exit handler for memory-mapped I/O. Reads regs/sregs,
 * calls emulate_mmio() on the faulting guest-physical address, then dumps a
 * verbose register/address trace and writes the (possibly updated) regs back.
 * NOTE(review): "®s" is mojibake for "&regs". `loc`, `error`, `error2` and
 * `disasm_inst` declarations are among the elided lines — confirm upstream.
 */
122 handle_exit_mmio(kvm_context_t kvm
, int vcpu
, struct kvm_run
*kvm_run
)
124 /* TODO: These regs structures and the syscalls needed to fill them
125 * should be eliminated when kvm_run is implemented with a shared page */
126 struct kvm_regs regs
;
127 struct kvm_sregs sregs
;
132 printf("Memory Mapped IO Call\n");
133 printf("Guest Physical Address : 0x%" PRIx64
"\n",
134 kvm_run
->u
.mmio
.fault_gpa
);
137 error
= kvm_get_regs(kvm
, vcpu
, ®s
);
141 error
= kvm_get_sregs(kvm
, vcpu
, &sregs
);
145 error
= emulate_mmio(kvm
, ®s
, &sregs
, kvm_run
->u
.mmio
.fault_gpa
);
/* Linear address of the faulting instruction: rip relative to CS base. */
150 loc
= kvm_run
->u
.mmio
.rip
+ kvm_run
->u
.mmio
.cs_base
;
151 printf("Instruction Pointer : 0x%" PRIx64
"\n", kvm_run
->u
.mmio
.rip
);
152 printf("CS Base Address : 0x%" PRIx64
"\n", kvm_run
->u
.mmio
.cs_base
);
153 printf("ES Base Address : 0x%" PRIx64
"\n", sregs
.es
.base
);
154 printf("rIP+cs_base : 0x%" PRIx64
"\n", loc
);
155 printf("Guest Base : %p\n", get_guest_base(kvm
));
/* RSI/RDI printed twice: direct struct field vs. kvm_regs_get() accessor. */
156 printf("RSI : 0x%" PRIx64
"\n", regs
.rsi
);
157 printf("RSI : 0x%" PRIx64
"\n", kvm_regs_get(®s
, KVM_REG_RSI
));
158 printf("RDI : 0x%" PRIx64
"\n", regs
.rdi
);
159 printf("RDI : 0x%" PRIx64
"\n", kvm_regs_get(®s
, KVM_REG_RDI
));
160 printf("RCX : 0x%" PRIx64
"\n", kvm_regs_get(®s
, KVM_REG_RCX
));
161 printf("default operand size : %d\n", (int)disasm_inst
.operand_size
);
164 error2
= kvm_set_regs(kvm
, vcpu
, ®s
);
/*
 * handle_exit_cr: VM-exit handler for control-register accesses. Pulls the
 * CR number and read/write direction from the exit info, fetches regs/sregs,
 * emulates the access via emulate_cr(), then writes BOTH register sets back
 * (CR writes can change sregs, e.g. CR0/CR4 bits).
 * NOTE(review): "®s" is mojibake for "&regs"; error-check lines and the
 * declarations of `cr_num`/`is_write`/`error` are elided in this extraction.
 */
172 handle_exit_cr(kvm_context_t kvm
, int vcpu
, struct kvm_run
*kvm_run
)
177 struct kvm_regs regs
;
178 struct kvm_sregs sregs
;
181 cr_num
= kvm_run
->u
.cr
.cr_num
;
182 is_write
= kvm_run
->u
.cr
.is_write
;
207 error
= kvm_get_regs(kvm
, vcpu
, ®s
);
211 error
= kvm_get_sregs(kvm
, vcpu
, &sregs
);
215 error
= emulate_cr(kvm
, ®s
, &sregs
, cr_num
, is_write
);
219 error
= kvm_set_regs(kvm
, vcpu
, ®s
);
223 error
= kvm_set_sregs(kvm
, vcpu
, &sregs
);
/*
 * get_cpuid_entry: linear search of the per-vcpu CPUID table installed by
 * kvm_setup_cpuid(); returns a pointer to the entry whose .function matches
 * `func`. The not-found `return NULL;` path is among the lines elided from
 * this extraction — confirm upstream.
 */
230 static struct kvm_cpuid_entry
*
231 get_cpuid_entry(struct kvm_context
*kvm
, int vcpu
, int func
)
234 for (i
= 0; i
< kvm
->ncpuid_entries
[vcpu
]; i
++) {
235 if (kvm
->cpuid_entries
[vcpu
][i
].function
== func
)
236 return &kvm
->cpuid_entries
[vcpu
][i
];
242 /* LIBKVM Functions */
244 /* TODO: stop faking this function */
/*
 * kvm_get_msr_list: fabricates a one-entry MSR index list (just MSR_STAR)
 * instead of querying the kernel — see the TODO above. Caller owns the
 * malloc'd list. NOTE(review): the malloc NULL-check and the nmsrs/return
 * lines are elided in this extraction.
 */
245 struct kvm_msr_list
*
246 kvm_get_msr_list(kvm_context_t kvm
)
248 struct kvm_msr_list
*msrs
;
/* Room for the header plus exactly one index. */
250 msrs
= malloc(sizeof(*msrs
) + 1*sizeof(*msrs
->indices
));
255 msrs
->indices
[0] = MSR_STAR
;
/*
 * kvm_get_msrs: read `n` MSR entries for a vcpu via the fkvm syscall layer.
 * NOTE(review): this *get* function invokes SYS_fkvm_set_regs and its failure
 * message says "kvm_set_msrs" — both look like copy-paste from kvm_set_msrs
 * below; expected SYS_fkvm_get_regs / "kvm_get_msrs". Confirm against the
 * fkvm syscall table before changing.
 */
260 kvm_get_msrs(kvm_context_t kvm
, int vcpu
, struct kvm_msr_entry
*msrs
, int n
)
264 error
= syscall(SYS_fkvm_set_regs
, FKVM_REGS_TYPE_MSRS
, msrs
, n
);
266 printf("kvm_set_msrs failed (errno=%d)\n", errno
);
/*
 * kvm_set_msrs: write `n` MSR entries for a vcpu. Traces each index/data
 * pair, then hands the whole array to the fkvm set-regs syscall with the
 * MSRS type selector. NOTE(review): `i`/`error` declarations, the vcpu-slot
 * check, and the return path are elided in this extraction.
 */
274 kvm_set_msrs(kvm_context_t kvm
, int vcpu
, struct kvm_msr_entry
*msrs
, int n
)
280 printf("kvm_set_msrs:\n");
281 for (i
= 0; i
< n
; i
++) {
282 printf("idx: 0x%" PRIx32
" to data: 0x%" PRIx64
"\n",
283 msrs
[i
].index
, msrs
[i
].data
);
288 error
= syscall(SYS_fkvm_set_regs
, FKVM_REGS_TYPE_MSRS
, msrs
, n
);
290 printf("kvm_set_msrs failed (errno=%d)\n", errno
);
/*
 * Group of deliberately-unimplemented libkvm entry points. Each one either
 * warns loudly that the request was ignored or (per the elided bodies)
 * returns a benign value so QEMU keeps running without the feature.
 */
/* Dirty-page logging enable: acknowledged with a warning, not implemented. */
298 kvm_dirty_pages_log_enable_slot(kvm_context_t kvm
,
299 uint64_t phys_start
, uint64_t len
)
301 fprintf(stderr
, "WARNING: we just ignored kvm_dirty_pages_log_enable_slot\n");
302 fprintf(stderr
, "dirty_pages_log_enable_slot: %" PRIx64
" len %" PRIx64
"\n", phys_start
, len
);
/* Dirty-page logging disable: body elided in this extraction. */
308 kvm_dirty_pages_log_disable_slot(kvm_context_t kvm
,
309 uint64_t phys_start
, uint64_t len
)
/* NMI injection: body elided in this extraction. */
316 kvm_inject_nmi(kvm_context_t kvm
, int vcpu
)
323 kvm_is_ready_for_nmi_injection(kvm_context_t kvm
, int vcpu
)
/* Virtual APIC page registration: body elided in this extraction. */
330 kvm_enable_vapic(kvm_context_t kvm
, int vcpu
, uint64_t vapic
)
/* TPR access reporting: ignored with a warning. */
337 kvm_enable_tpr_access_reporting(kvm_context_t kvm
, int vcpu
)
339 fprintf(stderr
, "WARNING: we just ignored kvm_enable_tpr_access\n");
340 /* TODO: we shouldn't just ignore this request... */
/*
 * kvm_init: allocate and zero the library context, then stash the caller's
 * callback table and opaque cookie. NOTE(review): the malloc NULL-check and
 * `return kvm;` are among the lines elided from this extraction.
 */
345 kvm_init(struct kvm_callbacks
*callbacks
, void *opaque
)
349 kvm
= malloc(sizeof(*kvm
));
352 memset(kvm
, 0, sizeof(*kvm
));
353 kvm
->callbacks
= callbacks
;
354 kvm
->opaque
= opaque
;
/*
 * kvm_finalize: tear down the context — free every per-vcpu kvm_run page
 * that was allocated by kvm_create_vcpu().
 * NOTE(review): the `if (... != NULL)` guard is redundant (free(NULL) is a
 * no-op), and freeing `kvm` itself is not visible here — likely among the
 * elided lines.
 */
360 kvm_finalize(kvm_context_t kvm
)
364 for (i
= 0; i
< MAX_VCPUS
; i
++) {
365 if(kvm
->kvm_run
[i
] != NULL
)
366 free(kvm
->kvm_run
[i
]);
/* irqchip/PIT creation opt-outs: bodies elided in this extraction. */
375 kvm_disable_irqchip_creation(kvm_context_t kvm
)
382 kvm_disable_pit_creation(kvm_context_t kvm
)
/*
 * kvm_create: create the VM via kvm_create_vm(), then record the requested
 * physical-memory size and the caller's pointer slot in the context.
 * NOTE(review): the error check on `r` and the return are elided here.
 */
389 kvm_create(kvm_context_t kvm
, unsigned long phys_mem_bytes
, void **phys_mem
)
393 r
= kvm_create_vm(kvm
);
394 //kvm_arch_create(kvm, ...);
396 kvm
->phys_mem_bytes
= phys_mem_bytes
;
397 kvm
->phys_mem_ptr
= phys_mem
;
/* kvm_create_vm: thin wrapper over the fkvm create-VM syscall. */
402 kvm_create_vm(kvm_context_t kvm
)
406 r
= syscall(SYS_fkvm_create_vm
);
/* Capability probe / irqchip creation: bodies elided in this extraction. */
414 kvm_check_extension(kvm_context_t kvm
, int ext
)
421 kvm_create_irqchip(kvm_context_t kvm
)
/*
 * kvm_create_vcpu: validate the slot, allocate a zeroed kvm_run page for it,
 * ask the kernel to create the vcpu, and register the page in the context.
 * NOTE(review): malloc NULL-check, the free-on-syscall-failure path, and the
 * returns are among the lines elided from this extraction.
 */
428 kvm_create_vcpu(kvm_context_t kvm
, int slot
)
433 if (!is_valid_vcpu_slot(slot
))
436 run
= malloc(sizeof(*run
));
440 error
= syscall(SYS_fkvm_create_vcpu
);
442 printf("kvm_create_vcpu failed\n");
447 memset(run
, 0, sizeof(*run
));
448 kvm
->kvm_run
[slot
] = run
;
/*
 * kvm_run: the main vcpu execution loop. Per iteration (loop braces elided
 * in this extraction): ask the callbacks whether to request an interrupt
 * window, call pre_kvm_run, enter the guest via SYS_fkvm_vm_run, call
 * post_kvm_run, then dispatch on kvm_run->exit_reason. The debug printf
 * blocks before/after the syscall dump rip and cs.base for tracing.
 * NOTE(review): "®s" is mojibake for "&regs" throughout; most case-body
 * endings, breaks, and the loop/return structure are elided — the fused
 * numbering (454..673) shows large gaps.
 */
454 kvm_run(kvm_context_t kvm
, int vcpu
)
456 struct kvm_run
*kvm_run
;
459 //printf("kvm_run: entering\n");
461 if (!is_valid_vcpu_slot(vcpu
))
464 kvm_run
= kvm
->kvm_run
[vcpu
];
469 struct kvm_regs regs
;
470 struct kvm_sregs sregs
;
/* Let the embedder decide whether it wants an interrupt-window exit. */
473 kvm_run
->request_interrupt_window
= kvm
->callbacks
->try_push_interrupts(kvm
->opaque
);
475 error
= kvm
->callbacks
->pre_kvm_run(kvm
->opaque
, vcpu
);
/* Pre-entry trace of guest rip / cs.base. */
480 kvm_get_regs(kvm
, vcpu
, ®s
);
481 kvm_get_sregs(kvm
, vcpu
, &sregs
);
482 printf("Begin kvm_run\n");
483 printf("Rip = %" PRIx64
"\n", regs
.rip
);
484 printf("cs.base = %" PRIx64
"\n", sregs
.cs
.base
);
/* Enter the guest; on return kvm_run holds the exit information. */
488 error
= syscall(SYS_fkvm_vm_run
, kvm_run
);
490 printf("kvm_run failed (errno %d)\n", errno
);
491 printf("exit_reason: %d\n", kvm_run
->exit_reason
);
492 kvm
->callbacks
->post_kvm_run(kvm
->opaque
, vcpu
);
/* Post-entry trace, mirroring the pre-entry one. */
497 kvm_get_regs(kvm
, vcpu
, ®s
);
498 kvm_get_sregs(kvm
, vcpu
, &sregs
);
499 printf("After kvm_run\n");
500 printf("Rip = %" PRIx64
"\n", regs
.rip
);
501 printf("cs.base = %" PRIx64
"\n", sregs
.cs
.base
);
502 printf("exit_reason: %d\n", kvm_run
->exit_reason
);
/* NOTE(review): post_kvm_run appears twice (lines 492 and 506 of the
 * original) — possibly on different branches; confirm upstream. */
506 kvm
->callbacks
->post_kvm_run(kvm
->opaque
, vcpu
);
/* Dispatch the VM exit. Many case bodies are elided in this extraction. */
508 switch(kvm_run
->exit_reason
) {
510 case KVM_EXIT_UNKNOWN
: {
515 case KVM_EXIT_EXCEPTION
: {
/* Port I/O exit: delegate to handle_exit_io(). */
521 error
= handle_exit_io(kvm
, vcpu
, kvm_run
);
525 case KVM_EXIT_HYPERCALL
: {
530 case KVM_EXIT_DEBUG
: {
/* HLT exit: trace rip/cs.base, then let the embedder's halt callback run. */
536 struct kvm_regs regs
;
537 struct kvm_sregs sregs
;
538 kvm_get_regs(kvm
, vcpu
, ®s
);
539 kvm_get_sregs(kvm
, vcpu
, &sregs
);
540 printf("KVM_EXIT_HLT:\n");
541 printf("rip = %" PRIx64
"\n", regs
.rip
);
542 printf("cs.base = %" PRIx64
"\n", sregs
.cs
.base
);
544 error
= kvm
->callbacks
->halt(kvm
->opaque
, vcpu
);
545 /* error should be 1, we want to exit this loop */
/* MMIO exit: delegate to handle_exit_mmio(). */
551 case KVM_EXIT_MMIO
: {
552 error
= handle_exit_mmio(kvm
, vcpu
, kvm_run
);
556 case KVM_EXIT_IRQ_WINDOW_OPEN
: {
561 case KVM_EXIT_SHUTDOWN
: {
566 case KVM_EXIT_FAIL_ENTRY
: {
571 case KVM_EXIT_INTR
: {
576 case KVM_EXIT_S390_SIEIC
: {
581 case KVM_EXIT_S390_RESET
: {
596 case KVM_EXIT_NMI_WINDOW_OPEN
: {
/* CPUID exit: answer from the user-supplied table (kvm_setup_cpuid). */
601 case KVM_EXIT_CPUID
: {
602 struct kvm_regs regs
;
604 struct kvm_cpuid_entry
*cpuid_entry
;
606 func
= kvm_run
->u
.cpuid
.fn
;
608 error
= kvm_get_regs(kvm
, vcpu
, ®s
);
612 cpuid_entry
= get_cpuid_entry(kvm
, vcpu
, func
);
/* Unknown leaf: fall back to leaf 0 to learn the highest basic leaf,
 * then retry with that — mirrors real CPUID out-of-range behavior. */
613 if (cpuid_entry
== NULL
) {
614 printf("CPUID func 0x%" PRIx32
" not found\n", func
)
;
616 cpuid_entry
= get_cpuid_entry(kvm
, vcpu
, 0);
617 if (cpuid_entry
== NULL
)
620 func
= cpuid_entry
->eax
;
/* NOTE(review): " not found" in this message looks like copy-paste
 * residue from line 614 — probably meant just the leaf value. */
621 printf("Using highest basic info leaf 0x%" PRIx32
" not found\n", func
);
623 cpuid_entry
= get_cpuid_entry(kvm
, vcpu
, func
);
624 if (cpuid_entry
== NULL
)
/* Publish the answer in the guest's rax..rdx. */
628 regs
.rax
= cpuid_entry
->eax
;
629 regs
.rbx
= cpuid_entry
->ebx
;
630 regs
.rcx
= cpuid_entry
->ecx
;
631 regs
.rdx
= cpuid_entry
->edx
;
635 printf("CPUID func 0x%" PRIx32
":\n", func
);
636 printf("eax: 0x%" PRIx32
"\n", (uint32_t) regs
.rax
);
637 printf("ebx: 0x%" PRIx32
"\n", (uint32_t) regs
.rbx
);
638 printf("ecx: 0x%" PRIx32
"\n", (uint32_t) regs
.rcx
);
639 printf("edx: 0x%" PRIx32
"\n", (uint32_t) regs
.rdx
);
642 error
= kvm_set_regs(kvm
, vcpu
, ®s
);
/* Control-register exit: delegate to handle_exit_cr(). */
650 error
= handle_exit_cr(kvm
, vcpu
, kvm_run
);
/* Exception intercept: trace and disassemble the faulting instruction. */
661 case KVM_EXIT_EXCP
: {
662 struct kvm_regs regs
;
663 struct kvm_sregs sregs
;
665 kvm_get_regs(kvm
, vcpu
, ®s
);
666 kvm_get_sregs(kvm
, vcpu
, &sregs
);
667 printf("KVM_EXIT_EXCP\n");
668 (void) get_x86_insn(sregs
.cs
.base
+ regs
.rip
, &insn
);
673 case KVM_EXIT_CONTINUE
: {
/*
 * Accessors over the per-vcpu kvm_run page: each validates the slot first
 * (the early-return after the guard is elided in this extraction).
 */
/* Guest IF flag as reported on the shared run page. */
689 kvm_get_interrupt_flag(kvm_context_t kvm
, int vcpu
)
691 if (!is_valid_vcpu_slot(vcpu
))
694 return kvm
->kvm_run
[vcpu
]->if_flag
;
/* APIC base: body beyond the slot check is elided in this extraction. */
698 kvm_get_apic_base(kvm_context_t kvm
, int vcpu
)
700 if (!is_valid_vcpu_slot(vcpu
))
/* Whether the kernel says an interrupt may be injected right now. */
707 kvm_is_ready_for_interrupt_injection(kvm_context_t kvm
, int vcpu
)
709 if (!is_valid_vcpu_slot(vcpu
))
712 return kvm
->kvm_run
[vcpu
]->ready_for_interrupt_injection
;
/*
 * kvm_get_regs / kvm_set_regs: fetch or store the general-purpose register
 * set through the fkvm get/set-regs syscalls with the REGS type selector.
 * NOTE(review): the failure message in kvm_get_regs says "kvm_set_regs
 * failed" — visible copy-paste bug; should read "kvm_get_regs failed".
 */
716 kvm_get_regs(kvm_context_t kvm
, int vcpu
, struct kvm_regs
*regs
)
720 if (!is_valid_vcpu_slot(vcpu
))
723 error
= syscall(SYS_fkvm_get_regs
, FKVM_REGS_TYPE_REGS
, regs
, 0);
725 printf("kvm_set_regs failed (errno=%d)\n", errno
);
733 kvm_set_regs(kvm_context_t kvm
, int vcpu
, struct kvm_regs
*regs
)
737 if (!is_valid_vcpu_slot(vcpu
))
740 error
= syscall(SYS_fkvm_set_regs
, FKVM_REGS_TYPE_REGS
, regs
, 0);
742 printf("kvm_set_regs failed (errno=%d)\n", errno
);
/* Fake per-vcpu FPU state backing store — the get/set below apparently
 * round-trip through this array instead of the kernel (bodies elided). */
749 struct kvm_fpu fpus
[MAX_VCPUS
]; // TODO: temporary. For faking
/* kvm_get_fpu: slot-checked; the actual copy from fpus[] is elided here. */
752 kvm_get_fpu(kvm_context_t kvm
, int vcpu
, struct kvm_fpu
*fpu
)
754 if (!is_valid_vcpu_slot(vcpu
))
759 //fprintf(stderr, "WARNING: we just ignored kvm_get_fpu\n");
760 /* TODO: we shouldn't just ignore this request... */
/* kvm_set_fpu: slot-checked, then deliberately ignored with a warning. */
765 kvm_set_fpu(kvm_context_t kvm
, int vcpu
, struct kvm_fpu
*fpu
)
767 if (!is_valid_vcpu_slot(vcpu
))
772 fprintf(stderr
, "WARNING: we just ignored kvm_set_fpu\n");
773 /* TODO: we shouldn't just ignore this request... */
/*
 * kvm_get_sregs / kvm_set_sregs: fetch or store the system-register set
 * (segments, CRs, descriptor tables) via the fkvm get/set-regs syscalls
 * with the SREGS type selector. Return paths are elided in this extraction.
 */
778 kvm_get_sregs(kvm_context_t kvm
, int vcpu
, struct kvm_sregs
*regs
)
782 if(!is_valid_vcpu_slot(vcpu
))
785 error
= syscall(SYS_fkvm_get_regs
, FKVM_REGS_TYPE_SREGS
, regs
, 0);
787 printf("kvm_get_sregs failed (errno=%d)\n", errno
);
794 kvm_set_sregs(kvm_context_t kvm
, int vcpu
, struct kvm_sregs
*regs
)
798 if (!is_valid_vcpu_slot(vcpu
))
801 error
= syscall(SYS_fkvm_set_regs
, FKVM_REGS_TYPE_SREGS
, regs
, 0);
803 printf("kvm_set_sregs failed (errno=%d)\n", errno
);
/* MP-state get/set: bodies elided in this extraction (likely stubs). */
811 kvm_get_mpstate(kvm_context_t kvm
, int vcpu
, void *mp_state
)
818 kvm_set_mpstate(kvm_context_t kvm
, int vcpu
, void *mp_state
)
/*
 * kvm_inject_irq: queue virtual interrupt `irq` for the vcpu via the fkvm
 * inject-virq syscall. NOTE(review): `vcpu` is not passed to the syscall —
 * presumably the kernel side tracks the current vcpu; confirm upstream.
 */
825 kvm_inject_irq(kvm_context_t kvm
, int vcpu
, unsigned irq
)
828 error
= syscall(SYS_fkvm_inject_virq
, irq
);
830 printf("kvm_inject_irq failed (errno=%d)\n", errno
);
/* Guest-debug control: body elided in this extraction. */
837 kvm_guest_debug(kvm_context_t kvm
, int vcpu
, struct kvm_debug_guest
*dbg
)
843 /* TODO: it'd be easy to move this into the kernel for fewer vm_run round-trips.
844 add some stats to see if its worth it */
/*
 * kvm_setup_cpuid: install the embedder-provided CPUID table for a vcpu.
 * Frees any previous table, allocates a copy of `entries` (nent elements),
 * and records the count; get_cpuid_entry() consults this on KVM_EXIT_CPUID.
 * NOTE(review): the `!= NULL` guard before free() is redundant; the
 * malloc-failure and return paths are elided in this extraction.
 */
846 kvm_setup_cpuid(kvm_context_t kvm
, int vcpu
,
847 int nent
, const struct kvm_cpuid_entry
*entries
)
849 if (!is_valid_vcpu_slot(vcpu
))
852 if (kvm
->cpuid_entries
[vcpu
] != NULL
)
853 free(kvm
->cpuid_entries
[vcpu
]);
/* Zero the count first so a failed realloc below can't leave a stale size. */
854 kvm
->ncpuid_entries
[vcpu
] = 0;
856 kvm
->cpuid_entries
[vcpu
] = malloc(sizeof(entries
[0]) * nent
);
857 if (kvm
->cpuid_entries
[vcpu
] == NULL
)
859 /* TODO: free this when finalizing vcpu */
861 memcpy(kvm
->cpuid_entries
[vcpu
], entries
, sizeof(entries
[0]) * nent
);
862 kvm
->ncpuid_entries
[vcpu
] = nent
;
/* Shadow-page count get/set: bodies elided in this extraction. */
867 kvm_set_shadow_pages(kvm_context_t kvm
, unsigned int nrshadow_pages
)
874 kvm_get_shadow_pages(kvm_context_t kvm
, unsigned int *nrshadow_pages
)
/* CR8 (task-priority) is exchanged through the shared kvm_run page. */
881 kvm_set_cr8(kvm_context_t kvm
, int vcpu
, uint64_t cr8
)
883 if (!is_valid_vcpu_slot(vcpu
))
886 kvm
->kvm_run
[vcpu
]->cr8
= cr8
;
890 kvm_get_cr8(kvm_context_t kvm
, int vcpu
)
892 if (!is_valid_vcpu_slot(vcpu
))
895 return kvm
->kvm_run
[vcpu
]->cr8
;
/* Signal-mask control for vm_run: acknowledged with a warning, ignored. */
899 kvm_set_signal_mask(kvm_context_t kvm
, int vcpu
, const sigset_t
*sigset
)
901 if (!is_valid_vcpu_slot(vcpu
))
904 fprintf(stderr
, "WARNING: we just ignored kvm_set_signal_mask\n");
905 /* TODO: we shouldn't just ignore this request... */
/* Diagnostic dumps: bodies elided in this extraction. */
910 kvm_dump_vcpu(kvm_context_t kvm
, int vcpu
)
917 kvm_show_regs(kvm_context_t kvm
, int vcpu
)
/* Legacy phys-mem allocator entry point: body elided in this extraction. */
925 kvm_create_phys_mem(kvm_context_t kvm
, unsigned long phys_start
,
926 unsigned long len
, int log
, int writable
)
/*
 * kvm_destroy_phys_mem: unregister a guest-physical region via the fkvm
 * unset-user-mem-region syscall; logs strerror-style on failure (the
 * trailing argument of the fprintf is elided in this extraction).
 */
933 kvm_destroy_phys_mem(kvm_context_t kvm
, unsigned long phys_start
,
938 error
= syscall(SYS_fkvm_unset_user_mem_region
, len
, phys_start
);
940 fprintf(stderr
, "destroy_userspace_phys_mem: %s",
/* Region-intersection probe: body elided in this extraction. */
948 kvm_is_intersecting_mem(kvm_context_t kvm
, unsigned long phys_start
)
/*
 * kvm_is_containing_region: probe whether [phys_start, phys_start+size) is
 * fully covered by a registered region by calling set_user_mem_region with
 * a NULL userspace address — the kernel treats that as a containment query
 * (per the comment below) and reports EFAULT when not contained.
 */
955 kvm_is_containing_region(kvm_context_t kvm
,
956 unsigned long phys_start
, unsigned long size
)
960 /* The fkvm_set_user_mem_region checks for a containing region
961 * when userspace_addr = NULL */
962 error
= syscall(SYS_fkvm_set_user_mem_region
, NULL
, size
, phys_start
);
965 if (errno
== EFAULT
) {
969 fprintf(stderr
, "kvm_is_containing_region: %s\n",
/*
 * kvm_register_phys_mem: map host memory at `userspace_addr` into guest
 * physical space at `phys_start` for `len` bytes (dirty-log flag `log`
 * appears unused in the visible lines).
 */
976 kvm_register_phys_mem(kvm_context_t kvm
, unsigned long phys_start
,
977 void *userspace_addr
, unsigned long len
, int log
)
981 error
= syscall(SYS_fkvm_set_user_mem_region
,
982 userspace_addr
, len
, phys_start
);
985 fprintf(stderr
, "create_userspace_phys_mem: %s\n",
/* kvm_unregister_memory_area: forwards to kvm_destroy_phys_mem(). */
994 kvm_unregister_memory_area(kvm_context_t kvm
, uint64_t phys_start
,
997 kvm_destroy_phys_mem(kvm
, phys_start
, len
);
/* The remaining memory helpers below have their bodies elided in this
 * extraction — only the signatures survive. */
1002 kvm_is_allocated_mem(kvm_context_t kvm
, unsigned long phys_start
,
1010 kvm_create_mem_hole(kvm_context_t kvm
, unsigned long phys_start
,
1018 kvm_register_userspace_phys_mem(kvm_context_t kvm
,
1019 unsigned long phys_start
, void *userspace_addr
,
1020 unsigned long len
, int log
)
1027 kvm_get_dirty_pages(kvm_context_t kvm
, unsigned long phys_addr
, void *buf
)
/* Range variant takes a per-chunk callback over the dirty bitmap. */
1034 kvm_get_dirty_pages_range(kvm_context_t kvm
, unsigned long phys_addr
,
1035 unsigned long end_addr
, void *buf
, void*opaque
,
1036 int (*cb
)(unsigned long start
, unsigned long len
,
1037 void*bitmap
, void *opaque
))
/*
 * Tail group of unimplemented libkvm entry points: coalesced MMIO, memory
 * aliases, memory maps, IRQ levels, dirty-log global controls, and in-kernel
 * irqchip/PIT/LAPIC queries. Bodies are elided in this extraction; the
 * surviving comments show these report "not supported" rather than acting.
 */
1044 kvm_register_coalesced_mmio(kvm_context_t kvm
,
1045 uint64_t addr
, uint32_t size
)
1047 /* let's act like we can't do this... */
1052 kvm_unregister_coalesced_mmio(kvm_context_t kvm
,
1053 uint64_t addr
, uint32_t size
)
1055 /* let's act like we can't do this... */
1060 kvm_create_memory_alias(kvm_context_t kvm
,
1061 uint64_t phys_start
, uint64_t len
,
1062 uint64_t target_phys
)
1069 kvm_destroy_memory_alias(kvm_context_t kvm
, uint64_t phys_start
)
1076 kvm_get_mem_map(kvm_context_t kvm
, unsigned long phys_addr
, void *bitmap
)
1083 kvm_get_mem_map_range(kvm_context_t kvm
, unsigned long phys_addr
,
1084 unsigned long len
, void *buf
, void *opaque
,
1085 int (*cb
)(unsigned long start
,unsigned long len
,
1086 void* bitmap
, void* opaque
))
1093 kvm_set_irq_level(kvm_context_t kvm
, int irq
, int level
)
1100 kvm_dirty_pages_log_enable_all(kvm_context_t kvm
)
1107 kvm_dirty_pages_log_reset(kvm_context_t kvm
)
1114 kvm_irqchip_in_kernel(kvm_context_t kvm
)
1120 kvm_has_sync_mmu(kvm_context_t kvm
)
1127 kvm_get_irqchip(kvm_context_t kvm
, void *chip
)
1134 kvm_set_irqchip(kvm_context_t kvm
, void *chip
)
1141 kvm_get_lapic(kvm_context_t kvm
, int vcpu
, void *s
)
1148 kvm_set_lapic(kvm_context_t kvm
, int vcpu
, void *s
)
1155 kvm_pit_in_kernel(kvm_context_t kvm
)
1162 kvm_init_coalesced_mmio(kvm_context_t kvm
)
1164 /* let's act like we can't do this... */