/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

#ifdef KVM_CAP_EXT_CPUID

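/* Fetch KVM_GET_SUPPORTED_CPUID into a table sized for "max" entries.
 * Returns NULL when the table was too small; the caller is expected to
 * retry with a larger value of max. */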
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

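/* Ask the kernel which CPUID bits it can expose to the guest.  The entry
 * count is not known up front, so the probe buffer is doubled until the
 * kernel stops reporting E2BIG.  Returns all-ones (so that callers trim
 * nothing) when KVM_CAP_EXT_CPUID is unavailable. */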
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
        return -1U;
    }

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                if (function == 0x80000001) {
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec.  The mask
                     * copies every leaf-1 EDX bit except 11 (SYSCALL), 20 (NX)
                     * and 29 (LM), which only exist in leaf 0x80000001.
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, R_EDX);
                    ret |= cpuid_1_edx & 0xdfeff7ff;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}

#else

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
{
    return -1U;
}

#endif

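/* Clear every feature bit that the host kernel cannot virtualize; the
 * loop is a bit-by-bit equivalent of "*features &= supported". */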
static void kvm_trim_features(uint32_t *features, uint32_t supported)
{
    int i;
    uint32_t mask;

    for (i = 0; i < 32; ++i) {
        mask = 1U << i;
        if ((*features & mask) && !(supported & mask)) {
            *features &= ~mask;
        }
    }
}

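/* Build the vcpu's complete CPUID table and install it via KVM_SET_CPUID2.
 * Leaf 2 is stateful and must be read EAX[7:0] times; leaves 4, 0xb and
 * 0xd take a significant index in ECX, so each subleaf gets its own
 * entry. */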
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;

    env->mp_state = KVM_MP_STATE_RUNNABLE;

    kvm_trim_features(&env->cpuid_features,
                      kvm_arch_get_supported_cpuid(env, 1, R_EDX));

    /* Preserve the hypervisor bit across trimming; the host CPU does not
     * report it as supported. */
    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    kvm_trim_features(&env->cpuid_ext_features,
                      kvm_arch_get_supported_cpuid(env, 1, R_ECX));
    env->cpuid_ext_features |= i;

    kvm_trim_features(&env->cpuid_ext2_features,
                      kvm_arch_get_supported_cpuid(env, 0x80000001, R_EDX));
    kvm_trim_features(&env->cpuid_ext3_features,
                      kvm_arch_get_supported_cpuid(env, 0x80000001, R_ECX));

    cpuid_i = 0;

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 until all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0)
                    break;
                if (i == 0xb && !(c->ecx & 0xff00))
                    break;
                if (i == 0xd && c->eax == 0)
                    break;

                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }
    cpuid_data.cpuid.nent = cpuid_i;

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}

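/* Probe, once, whether the kernel's MSR save/restore list contains
 * MSR_STAR.  The static cache is tri-state: 0 = not probed yet,
 * -1 = probed and absent, 1 = probed and present. */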
static int kvm_has_msr_star(CPUState *env)
{
    static int has_msr_star;
    int ret;

    /* first time */
    if (has_msr_star == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        has_msr_star = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore. */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
        /* the size probe is expected to fail with E2BIG after filling in
         * nmsrs; any other error means we cannot probe at all */
        if (ret < 0 && ret != -E2BIG)
            return 0;

        /* Old kernel modules had a bug and could write beyond the provided
           memory.  Allocate at least a safe amount of 1K. */
        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
                                              msr_list.nmsrs *
                                              sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    break;
                }
            }
        }

        qemu_free(kvm_msr_list);
    }

    if (has_msr_star == 1)
        return 1;
    return 0;
}

int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    /* create vm86 tss.  KVM uses vm86 mode to emulate 16-bit code
     * directly.  In order to use vm86 mode, a TSS is needed.  Since this
     * must be part of guest physical memory, we need to allocate it.  Older
     * versions of KVM just assumed that it would be at the end of physical
     * memory but that doesn't work with more than 4GB of memory.  We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        return ret;
    }

    /* this address is 3 pages before the bios, and the bios should present
     * as unavailable memory.  FIXME, need to ensure the e820 map deals with
     * this?
     */
    return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
}

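/* In vm86 mode every segment is a present 16-bit read/write data segment
 * (type 3) at DPL 3; only selector, base and limit come from the QEMU
 * segment cache. */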
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

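/* Reassemble QEMU's packed descriptor-flags word from KVM's discrete
 * segment fields; multiplying a 0/1 field by its mask yields either the
 * mask or zero. */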
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set)
        *kvm_reg = *qemu_reg;
    else
        *qemu_reg = *kvm_reg;
}

static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}

static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    /* the FPU stack top (TOP) occupies FSW bits 11-13 */
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    /* ftwx is the abridged tag word: 1 = valid, the inverse of QEMU's
     * fptags (1 = empty) */
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}

static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memcpy(sregs.interrupt_bitmap,
           env->interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env);
    sregs.apic_base = cpu_get_apic_base(env);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

static int kvm_put_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
    kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
#ifdef TARGET_X86_64
    /* FIXME if lm capable */
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif
    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    memcpy(env->interrupt_bitmap,
           sregs.interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);

#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

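    /* Recompute the derived hflags (CPL, CPU mode and segment shortcuts)
     * from the state just fetched; hflags bits outside the recomputed set
     * are carried over from the previous value. */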
    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                      HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
    msrs[n++].index = MSR_VM_HSAVE_PA;
#ifdef TARGET_X86_64
    /* FIXME lm_capable_kernel */
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;
#endif
    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;

    /* KVM_GET_MSRS returns the number of entries it actually read */
    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        }
    }

    return 0;
}

int kvm_arch_put_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_mp_state(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_mp_state(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;
            intr.irq = irq;
            /* FIXME: errors */
            dprintf("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;

    dprintf("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env);

    return 0;
}

int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag)
        env->eflags |= IF_MASK;
    else
        env->eflags &= ~IF_MASK;

    cpu_set_apic_tpr(env, run->cr8);
    cpu_set_apic_base(env, run->apic_base);

    return 0;
}

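/* Returns 0 to leave the vcpu halted (no unmasked interrupt or NMI is
 * pending that could wake it up), 1 to resume execution immediately. */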
static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        dprintf("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;  /* one-byte INT3 opcode */

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
        return -EINVAL;
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
        return -EINVAL;
    return 0;
}

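/* x86 provides four debug address registers (DR0-DR3), so at most four
 * hardware breakpoints or watchpoints can be armed at any time. */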
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1))
                return -EINVAL;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    /* keep the array dense: move the last entry into the freed slot */
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

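/* Decode a guest debug exit.  For #DB (exception 1), DR6 bit 14 (BS)
 * signals a single-step trap and DR6 bits 0-3 identify the hardware
 * breakpoint that fired; the matching DR7 R/W field (0 = execute,
 * 1 = write, 3 = read/write) distinguishes breakpoints from watchpoints.
 * Anything we did not set ourselves is reinjected into the guest. */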
int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled)
                handle = 1;
        } else {
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
        handle = 1;

    if (!handle)
        kvm_update_guest_debug(cpu_single_env,
                               (arch_info->exception == 1) ?
                               KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);

    return handle;
}

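/* Translate the armed breakpoints into KVM_SET_GUEST_DEBUG state.
 * debugreg[0-3] hold the addresses and debugreg[7] mirrors DR7: 0x0600
 * keeps GE and the always-one bit 10 set, (2 << (n * 2)) is the global
 * enable bit for slot n, and each slot's R/W and LEN fields sit at bits
 * 16 + 4n and 18 + 4n. */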
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */