11 #include <sys/types.h>
/*
 * kvm_set_tss_addr(): tell the kernel which guest-physical address to use
 * for KVM's private TSS working area (needed on Intel/VMX hosts).
 * Compiled in only when the headers advertise KVM_CAP_SET_TSS_ADDR.
 * NOTE(review): several original lines (braces, result checks, returns)
 * were lost in extraction; the visible fragments are kept verbatim.
 */
16 int kvm_set_tss_addr(kvm_context_t kvm
, unsigned long addr
)
18 #ifdef KVM_CAP_SET_TSS_ADDR
/* Probe the system fd: does this kernel support KVM_SET_TSS_ADDR? */
21 r
= ioctl(kvm
->fd
, KVM_CHECK_EXTENSION
, KVM_CAP_SET_TSS_ADDR
);
/* Install the address on the VM fd. */
23 r
= ioctl(kvm
->vm_fd
, KVM_SET_TSS_ADDR
, addr
);
/* %m is the glibc printf extension expanding to strerror(errno). */
25 fprintf(stderr
, "kvm_set_tss_addr: %m\n");
/*
 * kvm_init_tss(): one-time setup helper that, when the kernel supports
 * KVM_CAP_SET_TSS_ADDR, places the TSS area at the fixed address
 * 0xfffbd000 (three pages below the BIOS, per the comment fragment below).
 * NOTE(review): intermediate lines were lost in extraction.
 */
34 static int kvm_init_tss(kvm_context_t kvm
)
36 #ifdef KVM_CAP_SET_TSS_ADDR
/* Check for the capability before attempting to use it. */
39 r
= ioctl(kvm
->fd
, KVM_CHECK_EXTENSION
, KVM_CAP_SET_TSS_ADDR
);
42 * this address is 3 pages before the bios, and the bios should present
45 r
= kvm_set_tss_addr(kvm
, 0xfffbd000);
47 fprintf(stderr
, "kvm_init_tss: unable to set tss addr\n");
/*
 * kvm_create_pit(): create the in-kernel PIT (i8254) for this VM unless
 * the client disabled it via no_pit_creation.  pit_in_kernel records
 * whether the kernel model ended up active.
 * NOTE(review): the success/error checks between the visible fragments
 * were lost in extraction.
 */
56 int kvm_create_pit(kvm_context_t kvm
)
/* Default to userspace PIT; flipped to 1 below only on success. */
61 kvm
->pit_in_kernel
= 0;
62 if (!kvm
->no_pit_creation
) {
/* Does the kernel offer an in-kernel PIT at all? */
63 r
= ioctl(kvm
->fd
, KVM_CHECK_EXTENSION
, KVM_CAP_PIT
);
65 r
= ioctl(kvm
->vm_fd
, KVM_CREATE_PIT
);
67 kvm
->pit_in_kernel
= 1;
/* NOTE(review): message says "PIC" but this path creates the PIT —
 * looks like an upstream copy/paste slip in the error string. */
69 fprintf(stderr
, "Create kernel PIC irqchip failed\n");
/*
 * kvm_arch_create(): x86 arch hook run at VM creation time.  Performs,
 * in order: TSS address setup, in-kernel PIT creation, and coalesced-MMIO
 * initialization.  NOTE(review): the third parameter and the per-step
 * error checks were lost in extraction.
 */
78 int kvm_arch_create(kvm_context_t kvm
, unsigned long phys_mem_bytes
,
83 r
= kvm_init_tss(kvm
);
87 r
= kvm_create_pit(kvm
);
91 r
= kvm_init_coalesced_mmio(kvm
);
98 #ifdef KVM_EXIT_TPR_ACCESS
/*
 * handle_tpr_access(): forward a KVM_EXIT_TPR_ACCESS vmexit to the
 * client's tpr_access callback, passing the access details from the
 * shared kvm_run structure.  NOTE(review): one argument line (between
 * original lines 102 and 104, presumably run->tpr_access.rip) was lost
 * in extraction.
 */
100 static int handle_tpr_access(kvm_context_t kvm
, struct kvm_run
*run
, int vcpu
)
102 return kvm
->callbacks
->tpr_access(kvm
->opaque
, vcpu
,
104 run
->tpr_access
.is_write
);
/*
 * kvm_enable_vapic(): hand the kernel the guest address of the virtual
 * APIC page for @vcpu via KVM_SET_VAPIC_ADDR.
 * NOTE(review): the initializer body of `va` and the return paths were
 * lost in extraction; presumably va.vapic_addr = vapic — confirm against
 * upstream.
 */
108 int kvm_enable_vapic(kvm_context_t kvm
, int vcpu
, uint64_t vapic
)
111 struct kvm_vapic_addr va
= {
115 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_SET_VAPIC_ADDR
, &va
);
118 perror("kvm_enable_vapic");
/*
 * kvm_arch_run(): x86-specific vmexit dispatch, called from the generic
 * run loop.  Handles the TPR-related exit reasons when the headers
 * define them.  NOTE(review): the case bodies and default handling were
 * lost in extraction.
 */
126 int kvm_arch_run(struct kvm_run
*run
,kvm_context_t kvm
, int vcpu
)
130 switch (run
->exit_reason
) {
131 #ifdef KVM_EXIT_SET_TPR
132 case KVM_EXIT_SET_TPR
:
135 #ifdef KVM_EXIT_TPR_ACCESS
136 case KVM_EXIT_TPR_ACCESS
:
137 r
= handle_tpr_access(kvm
, run
, vcpu
);
/* Userspace bookkeeping of active memory aliases, one entry per slot.
 * NOTE(review): the struct member declarations were lost in extraction;
 * from the uses below the entries carry at least .start and .len, with
 * len == 0 marking a free slot. */
148 #define MAX_ALIAS_SLOTS 4
152 } kvm_aliases
[MAX_ALIAS_SLOTS
];
/*
 * get_alias_slot(): linear search for the table entry whose .start
 * matches @start, so an existing alias can be found and reprogrammed.
 * NOTE(review): the return statements (slot index / not-found value)
 * were lost in extraction.
 */
154 static int get_alias_slot(uint64_t start
)
158 for (i
=0; i
<MAX_ALIAS_SLOTS
; i
++)
159 if (kvm_aliases
[i
].start
== start
)
/*
 * get_free_alias_slot(): find an unused table entry; a slot with
 * len == 0 is considered free.  NOTE(review): the return statements
 * were lost in extraction.
 */
163 static int get_free_alias_slot(void)
167 for (i
=0; i
<MAX_ALIAS_SLOTS
; i
++)
168 if (kvm_aliases
[i
].len
== 0)
173 static void register_alias(int slot
, uint64_t start
, uint64_t len
)
175 kvm_aliases
[slot
].start
= start
;
176 kvm_aliases
[slot
].len
= len
;
/*
 * kvm_create_memory_alias(): program a KVM_SET_MEMORY_ALIAS mapping so
 * guest-physical phys_start aliases target_phys, and mirror it in the
 * local slot table.  Reuses the slot of an existing alias at the same
 * start address, otherwise takes a free slot.
 * NOTE(review): the remaining parameters, the struct's slot/size fields,
 * the origin of `fd`, and the error checks were lost in extraction.
 */
179 int kvm_create_memory_alias(kvm_context_t kvm
,
182 uint64_t target_phys
)
184 struct kvm_memory_alias alias
= {
186 .guest_phys_addr
= phys_start
,
188 .target_phys_addr
= target_phys
,
/* Prefer the slot already holding an alias for this start address... */
194 slot
= get_alias_slot(phys_start
);
/* ...otherwise grab an unused one. */
196 slot
= get_free_alias_slot();
201 r
= ioctl(fd
, KVM_SET_MEMORY_ALIAS
, &alias
);
/* Keep the userspace table in sync with what the kernel now holds. */
205 register_alias(slot
, phys_start
, len
);
209 int kvm_destroy_memory_alias(kvm_context_t kvm
, uint64_t phys_start
)
211 return kvm_create_memory_alias(kvm
, phys_start
, 0, 0);
214 #ifdef KVM_CAP_IRQCHIP
/*
 * kvm_get_lapic(): read the vcpu's in-kernel local APIC state into *s
 * via KVM_GET_LAPIC.  Only meaningful when the irqchip is in the kernel;
 * the guard below bails out otherwise (its return value was lost in
 * extraction).
 */
216 int kvm_get_lapic(kvm_context_t kvm
, int vcpu
, struct kvm_lapic_state
*s
)
219 if (!kvm
->irqchip_in_kernel
)
221 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_GET_LAPIC
, s
);
224 perror("kvm_get_lapic");
/*
 * kvm_set_lapic(): write the local APIC state *s into the kernel for
 * @vcpu via KVM_SET_LAPIC.  Mirror image of kvm_get_lapic; same
 * irqchip_in_kernel guard (its return value was lost in extraction).
 */
229 int kvm_set_lapic(kvm_context_t kvm
, int vcpu
, struct kvm_lapic_state
*s
)
232 if (!kvm
->irqchip_in_kernel
)
234 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_SET_LAPIC
, s
);
237 perror("kvm_set_lapic");
/*
 * kvm_get_pit(): read the in-kernel PIT state into *s via KVM_GET_PIT
 * on the VM fd.  Guarded on pit_in_kernel (guard's return value was
 * lost in extraction).
 */
246 int kvm_get_pit(kvm_context_t kvm
, struct kvm_pit_state
*s
)
249 if (!kvm
->pit_in_kernel
)
251 r
= ioctl(kvm
->vm_fd
, KVM_GET_PIT
, s
);
254 perror("kvm_get_pit");
/*
 * kvm_set_pit(): write PIT state *s into the kernel via KVM_SET_PIT on
 * the VM fd.  Mirror image of kvm_get_pit, same pit_in_kernel guard.
 */
259 int kvm_set_pit(kvm_context_t kvm
, struct kvm_pit_state
*s
)
262 if (!kvm
->pit_in_kernel
)
264 r
= ioctl(kvm
->vm_fd
, KVM_SET_PIT
, s
);
267 perror("kvm_set_pit");
/*
 * kvm_show_code(): debugging dump of the guest code bytes around the
 * current instruction pointer, printed to stderr as hex.  Bytes are read
 * through the client's mmio_read callback; " xx" stands in for a byte
 * that could not be read, and " -->" marks a position in the stream
 * (presumably the current rip — the surrounding condition was lost in
 * extraction).
 */
274 void kvm_show_code(kvm_context_t kvm
, int vcpu
)
276 #define SHOW_CODE_LEN 50
277 int fd
= kvm
->vcpu_fd
[vcpu
];
278 struct kvm_regs regs
;
279 struct kvm_sregs sregs
;
/* 3 chars per byte (" %02x" or " xx"/" -->") plus the terminator. */
283 char code_str
[SHOW_CODE_LEN
* 3 + 1];
286 r
= ioctl(fd
, KVM_GET_SREGS
, &sregs
);
288 perror("KVM_GET_SREGS");
/* NOTE(review): "®s" is an HTML-entity-mangled "&regs" from extraction
 * ("&reg" collapsed to the (R) sign); left verbatim here. */
291 r
= ioctl(fd
, KVM_GET_REGS
, ®s
);
293 perror("KVM_GET_REGS");
/* Linear address of the current instruction: cs base + rip. */
296 rip
= sregs
.cs
.base
+ regs
.rip
;
/* Show some context before rip; capped (see the > 20 test below). */
297 back_offset
= regs
.rip
;
298 if (back_offset
> 20)
301 for (n
= -back_offset
; n
< SHOW_CODE_LEN
-back_offset
; ++n
) {
303 strcat(code_str
, " -->");
304 r
= kvm
->callbacks
->mmio_read(kvm
->opaque
, rip
+ n
, &code
, 1);
306 strcat(code_str
, " xx");
/* Append the byte as two hex digits. */
309 sprintf(code_str
+ strlen(code_str
), " %02x", code
);
311 fprintf(stderr
, "code:%s\n", code_str
);
/*
 * kvm_get_msr_list(): return the kernel's list of supported MSR indices.
 * Caller owns the returned buffer and must free() it.
 * Classic two-call pattern: the first KVM_GET_MSR_INDEX_LIST call is
 * expected to fail with E2BIG while filling in sizer.nmsrs with the
 * required count; the second call, with a correctly sized buffer,
 * retrieves the actual list.  NOTE(review): sizer's pre-call
 * initialization and the error-path returns were lost in extraction.
 */
316 * Returns available msr list. User must free.
318 struct kvm_msr_list
*kvm_get_msr_list(kvm_context_t kvm
)
320 struct kvm_msr_list sizer
, *msrs
;
324 r
= ioctl(kvm
->fd
, KVM_GET_MSR_INDEX_LIST
, &sizer
);
/* E2BIG here is the expected "tell me the size" outcome, not an error. */
325 if (r
== -1 && errno
!= E2BIG
)
/* Header plus nmsrs trailing indices (flexible-array style). */
327 msrs
= malloc(sizeof *msrs
+ sizer
.nmsrs
* sizeof *msrs
->indices
);
332 msrs
->nmsrs
= sizer
.nmsrs
;
333 r
= ioctl(kvm
->fd
, KVM_GET_MSR_INDEX_LIST
, msrs
);
/*
 * kvm_get_msrs(): read n MSR values for @vcpu.  Marshals the caller's
 * entries into a heap-allocated struct kvm_msrs (header + n entries),
 * issues KVM_GET_MSRS, then copies the kernel-filled entries back out.
 * NOTE(review): the count parameter's declaration, kmsrs->nmsrs setup,
 * malloc check and free() were lost in extraction.
 */
343 int kvm_get_msrs(kvm_context_t kvm
, int vcpu
, struct kvm_msr_entry
*msrs
,
346 struct kvm_msrs
*kmsrs
= malloc(sizeof *kmsrs
+ n
* sizeof *msrs
);
/* Copy the caller's requested indices in... */
354 memcpy(kmsrs
->entries
, msrs
, n
* sizeof *msrs
);
355 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_GET_MSRS
, kmsrs
);
/* ...and the kernel-provided values back out. */
357 memcpy(msrs
, kmsrs
->entries
, n
* sizeof *msrs
);
/*
 * kvm_set_msrs(): write n MSR values for @vcpu.  Same marshalling shape
 * as kvm_get_msrs but one-directional: copy the caller's entries in and
 * issue KVM_SET_MSRS.  NOTE(review): count parameter, nmsrs setup and
 * cleanup lines were lost in extraction.
 */
363 int kvm_set_msrs(kvm_context_t kvm
, int vcpu
, struct kvm_msr_entry
*msrs
,
366 struct kvm_msrs
*kmsrs
= malloc(sizeof *kmsrs
+ n
* sizeof *msrs
);
374 memcpy(kmsrs
->entries
, msrs
, n
* sizeof *msrs
);
375 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_SET_MSRS
, kmsrs
);
/*
 * print_seg(): dump one segment register (selector, base/limit, and the
 * access flags) in human-readable form for kvm_show_regs.
 * NOTE(review): the fprintf call line itself and the trailing arguments
 * were lost in extraction, so the output stream used here cannot be
 * confirmed from this extract.
 */
382 static void print_seg(FILE *file
, const char *name
, struct kvm_segment
*seg
)
385 "%s %04x (%08llx/%08x p %d dpl %d db %d s %d type %x l %d"
387 name
, seg
->selector
, seg
->base
, seg
->limit
, seg
->present
,
388 seg
->dpl
, seg
->db
, seg
->s
, seg
->type
, seg
->l
, seg
->g
,
392 static void print_dt(FILE *file
, const char *name
, struct kvm_dtable
*dt
)
394 fprintf(stderr
, "%s %llx/%x\n", name
, dt
->base
, dt
->limit
);
/*
 * kvm_show_regs(): debugging dump of the full vcpu register state to
 * stderr: general-purpose registers (KVM_GET_REGS), then segment and
 * descriptor-table registers and control registers (KVM_GET_SREGS).
 */
397 void kvm_show_regs(kvm_context_t kvm
, int vcpu
)
399 int fd
= kvm
->vcpu_fd
[vcpu
];
400 struct kvm_regs regs
;
401 struct kvm_sregs sregs
;
/* NOTE(review): "®s" is an HTML-entity-mangled "&regs" from extraction;
 * left verbatim here. */
404 r
= ioctl(fd
, KVM_GET_REGS
, ®s
);
406 perror("KVM_GET_REGS");
/* GPR dump — the fprintf call line was lost in extraction; format
 * string and argument list survive below. */
410 "rax %016llx rbx %016llx rcx %016llx rdx %016llx\n"
411 "rsi %016llx rdi %016llx rsp %016llx rbp %016llx\n"
412 "r8 %016llx r9 %016llx r10 %016llx r11 %016llx\n"
413 "r12 %016llx r13 %016llx r14 %016llx r15 %016llx\n"
414 "rip %016llx rflags %08llx\n",
415 regs
.rax
, regs
.rbx
, regs
.rcx
, regs
.rdx
,
416 regs
.rsi
, regs
.rdi
, regs
.rsp
, regs
.rbp
,
417 regs
.r8
, regs
.r9
, regs
.r10
, regs
.r11
,
418 regs
.r12
, regs
.r13
, regs
.r14
, regs
.r15
,
419 regs
.rip
, regs
.rflags
);
420 r
= ioctl(fd
, KVM_GET_SREGS
, &sregs
);
422 perror("KVM_GET_SREGS");
/* Segment registers, task register and LDT... */
425 print_seg(stderr
, "cs", &sregs
.cs
);
426 print_seg(stderr
, "ds", &sregs
.ds
);
427 print_seg(stderr
, "es", &sregs
.es
);
428 print_seg(stderr
, "ss", &sregs
.ss
);
429 print_seg(stderr
, "fs", &sregs
.fs
);
430 print_seg(stderr
, "gs", &sregs
.gs
);
431 print_seg(stderr
, "tr", &sregs
.tr
);
432 print_seg(stderr
, "ldt", &sregs
.ldt
);
/* ...descriptor tables... */
433 print_dt(stderr
, "gdt", &sregs
.gdt
);
434 print_dt(stderr
, "idt", &sregs
.idt
);
/* ...and control registers (format continues on lost lines). */
435 fprintf(stderr
, "cr0 %llx cr2 %llx cr3 %llx cr4 %llx cr8 %llx"
437 sregs
.cr0
, sregs
.cr2
, sregs
.cr3
, sregs
.cr4
, sregs
.cr8
,
441 uint64_t kvm_get_apic_base(kvm_context_t kvm
, int vcpu
)
443 struct kvm_run
*run
= kvm
->run
[vcpu
];
445 return run
->apic_base
;
/*
 * kvm_set_cr8(): publish a new cr8 (task-priority) value into the
 * vcpu's shared kvm_run area.  NOTE(review): the line performing the
 * actual store (presumably run->cr8 = cr8;) was lost in extraction.
 */
448 void kvm_set_cr8(kvm_context_t kvm
, int vcpu
, uint64_t cr8
)
450 struct kvm_run
*run
= kvm
->run
[vcpu
];
455 __u64
kvm_get_cr8(kvm_context_t kvm
, int vcpu
)
457 return kvm
->run
[vcpu
]->cr8
;
/*
 * kvm_setup_cpuid(): install the guest-visible CPUID table for @vcpu.
 * Allocates a struct kvm_cpuid header plus nent trailing entries, copies
 * the caller's entries in, and issues KVM_SET_CPUID.
 * NOTE(review): the cpuid->nent assignment, malloc check and cleanup
 * were lost in extraction.
 */
460 int kvm_setup_cpuid(kvm_context_t kvm
, int vcpu
, int nent
,
461 struct kvm_cpuid_entry
*entries
)
463 struct kvm_cpuid
*cpuid
;
466 cpuid
= malloc(sizeof(*cpuid
) + nent
* sizeof(*entries
));
471 memcpy(cpuid
->entries
, entries
, nent
* sizeof(*entries
));
472 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_SET_CPUID
, cpuid
);
/*
 * kvm_set_shadow_pages(): set the size of the kernel's shadow-MMU page
 * cache for this VM via KVM_SET_NR_MMU_PAGES, after checking the
 * KVM_CAP_MMU_SHADOW_CACHE_CONTROL capability.
 * NOTE(review): the capability-result check and returns were lost in
 * extraction.
 */
478 int kvm_set_shadow_pages(kvm_context_t kvm
, unsigned int nrshadow_pages
)
480 #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
483 r
= ioctl(kvm
->fd
, KVM_CHECK_EXTENSION
,
484 KVM_CAP_MMU_SHADOW_CACHE_CONTROL
);
486 r
= ioctl(kvm
->vm_fd
, KVM_SET_NR_MMU_PAGES
, nrshadow_pages
);
/* %m expands to strerror(errno) (glibc extension). */
488 fprintf(stderr
, "kvm_set_shadow_pages: %m\n");
/*
 * kvm_get_shadow_pages(): query the shadow-MMU page cache size via
 * KVM_GET_NR_MMU_PAGES; the ioctl's return value IS the count and is
 * stored straight into *nrshadow_pages.  NOTE(review): no error check
 * on that ioctl is visible here — a -1 failure would be stored as the
 * count; confirm against the full file.
 */
497 int kvm_get_shadow_pages(kvm_context_t kvm
, unsigned int *nrshadow_pages
)
499 #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
502 r
= ioctl(kvm
->fd
, KVM_CHECK_EXTENSION
,
503 KVM_CAP_MMU_SHADOW_CACHE_CONTROL
);
505 *nrshadow_pages
= ioctl(kvm
->vm_fd
, KVM_GET_NR_MMU_PAGES
);
/*
 * tpr_access_reporting(): common worker for enabling/disabling TPR
 * access reporting on @vcpu via KVM_TPR_ACCESS_REPORTING, guarded on the
 * KVM_CAP_VAPIC capability.  NOTE(review): the initializer of `tac`
 * (presumably .enabled = enabled) and the return paths were lost in
 * extraction.
 */
514 static int tpr_access_reporting(kvm_context_t kvm
, int vcpu
, int enabled
)
517 struct kvm_tpr_access_ctl tac
= {
521 r
= ioctl(kvm
->fd
, KVM_CHECK_EXTENSION
, KVM_CAP_VAPIC
);
/* Capability absent (0) or query failed (-1): nothing to do. */
522 if (r
== -1 || r
== 0)
524 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_TPR_ACCESS_REPORTING
, &tac
);
527 perror("KVM_TPR_ACCESS_REPORTING");
533 int kvm_enable_tpr_access_reporting(kvm_context_t kvm
, int vcpu
)
535 return tpr_access_reporting(kvm
, vcpu
, 1);
538 int kvm_disable_tpr_access_reporting(kvm_context_t kvm
, int vcpu
)
540 return tpr_access_reporting(kvm
, vcpu
, 0);