/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */
#include "config-host.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <signal.h>
#include <pthread.h>
#include <sys/utsname.h>
#include <sys/syscall.h>
extern void perror(const char *s);
kvm_context_t kvm_context;

static int qemu_kvm_reset_requested;

pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t qemu_aio_cond = PTHREAD_COND_INITIALIZER;
__thread struct vcpu_info *vcpu;
struct qemu_kvm_signal_table {
    sigset_t sigset;        /* signals taken via sigtimedwait() */
    sigset_t negsigset;     /* complement: everything else */
};

static struct qemu_kvm_signal_table io_signal_table;
static struct qemu_kvm_signal_table vcpu_signal_table;
#define SIG_IPI (SIGRTMIN+4)
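/*
 * Per-vcpu bookkeeping, indexed by cpu_index. These are the members
 * this file touches; the array bound is an assumption (one slot per
 * possible vcpu).
 */
struct vcpu_info {
    CPUState *env;
    int sipi_needed;
    int init;
    pthread_t thread;
    int signalled;
    int stop;
    int stopped;
    int reload_regs;
} vcpu_info[256];

static pthread_t io_thread;     /* the thread running kvm_main_loop() */

extern int smp_cpus;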
static inline unsigned long kvm_get_thread_id(void)
{
    return syscall(SYS_gettid);
}
CPUState *qemu_kvm_cpu_env(int index)
{
    return vcpu_info[index].env;
}
/* SIG_IPI only needs to interrupt blocking calls; the handler is a no-op. */
static void sig_ipi_handler(int n)
{
}
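/*
 * Kick another vcpu thread so it notices a newly raised interrupt
 * request: mark it signalled, then knock it out of KVM_RUN (or out of
 * sigtimedwait()) with SIG_IPI.
 */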
void kvm_update_interrupt_request(CPUState *env)
{
    if (vcpu && env != vcpu->env && !vcpu_info[env->cpu_index].signalled) {
        vcpu_info[env->cpu_index].signalled = 1;
        if (vcpu_info[env->cpu_index].thread)
            pthread_kill(vcpu_info[env->cpu_index].thread, SIG_IPI);
    }
}
void kvm_update_after_sipi(CPUState *env)
{
    vcpu_info[env->cpu_index].sipi_needed = 1;
    kvm_update_interrupt_request(env);
}
void kvm_apic_init(CPUState *env)
{
    if (env->cpu_index != 0)
        vcpu_info[env->cpu_index].init = 1;
    kvm_update_interrupt_request(env);
}
static int try_push_interrupts(void *opaque)
{
    return kvm_arch_try_push_interrupts(opaque);
}
static void post_kvm_run(void *opaque, int vcpu)
{
    pthread_mutex_lock(&qemu_mutex);
    kvm_arch_post_kvm_run(opaque, vcpu);
}
static int pre_kvm_run(void *opaque, int vcpu)
{
    CPUState *env = qemu_kvm_cpu_env(vcpu);

    kvm_arch_pre_kvm_run(opaque, vcpu);

    if (env->interrupt_request & CPU_INTERRUPT_EXIT)
        return 1;
    pthread_mutex_unlock(&qemu_mutex);
    return 0;
}
void kvm_load_registers(CPUState *env)
{
    kvm_arch_load_regs(env);
}
void kvm_save_registers(CPUState *env)
{
    kvm_arch_save_regs(env);
}
int kvm_cpu_exec(CPUState *env)
{
    int r;

    r = kvm_run(kvm_context, env->cpu_index);
    if (r < 0) {
        printf("kvm_run returned %d\n", r);
        exit(1);
    }

    return 0;
}
extern int vm_running;
static int has_work(CPUState *env)
{
    if (!vm_running || (env && vcpu_info[env->cpu_index].stopped))
        return 0;
    if (!(env->hflags & HF_HALTED_MASK))
        return 1;
    return kvm_arch_has_work(env);
}
static int kvm_process_signal(int si_signo)
{
    struct sigaction sa;

    switch (si_signo) {
    case SIGUSR2:
        /* an aio completed: wake anyone in qemu_kvm_aio_wait() */
        pthread_cond_signal(&qemu_aio_cond);
        break;
    default:
        /* synchronously dispatch the signal's registered handler */
        sigaction(si_signo, NULL, &sa);
        sa.sa_handler(si_signo);
        break;
    }

    return 1;
}
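/*
 * Wait up to 'timeout' ms for one signal from 'waitset' and dispatch
 * it. A pending stop request for this vcpu is acknowledged here by
 * flipping stop -> stopped and waking the io thread with SIGUSR1.
 * Returns nonzero if a signal was processed.
 */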
static int kvm_eat_signal(struct qemu_kvm_signal_table *waitset, CPUState *env,
                          int timeout)
{
    struct timespec ts;
    int r, e, ret = 0;
    siginfo_t siginfo;

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;
    r = sigtimedwait(&waitset->sigset, &siginfo, &ts);
    if (r == -1 && (errno == EAGAIN || errno == EINTR) && !timeout)
        return 0;
    e = errno;                  /* save errno across the lock */
    pthread_mutex_lock(&qemu_mutex);
    if (vcpu)
        cpu_single_env = vcpu->env;
    if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
        printf("sigtimedwait: %s\n", strerror(e));
        exit(1);
    }
    if (r != -1)
        ret = kvm_process_signal(siginfo.si_signo);

    if (env && vcpu_info[env->cpu_index].stop) {
        vcpu_info[env->cpu_index].stop = 0;
        vcpu_info[env->cpu_index].stopped = 1;
        pthread_kill(io_thread, SIGUSR1);
    }
    pthread_mutex_unlock(&qemu_mutex);

    return ret;
}
static void kvm_eat_signals(CPUState *env, int timeout)
{
    int r = 0;
    struct qemu_kvm_signal_table *waitset = &vcpu_signal_table;

    /* drain everything that is already pending */
    while (kvm_eat_signal(waitset, env, 0))
        r = 1;
    if (!r && timeout) {
        /* nothing was pending: block once, then drain again */
        r = kvm_eat_signal(waitset, env, timeout);
        if (r)
            while (kvm_eat_signal(waitset, env, 0))
                ;
    }
}
static void kvm_main_loop_wait(CPUState *env, int timeout)
{
    pthread_mutex_unlock(&qemu_mutex);
    kvm_eat_signals(env, timeout);
    pthread_mutex_lock(&qemu_mutex);
    cpu_single_env = env;
    vcpu_info[env->cpu_index].signalled = 0;
}
static int all_threads_paused(void)
{
    int i;

    for (i = 0; i < smp_cpus; ++i)
        if (vcpu_info[i].stop)
            return 0;
    return 1;
}
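/*
 * Pause/resume handshake: the io thread sets .stop and sends SIG_IPI;
 * each vcpu thread answers in kvm_eat_signal() by clearing .stop,
 * setting .stopped and signalling back with SIGUSR1.
 */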
static void pause_all_threads(void)
{
    int i;

    for (i = 0; i < smp_cpus; ++i) {
        vcpu_info[i].stop = 1;
        pthread_kill(vcpu_info[i].thread, SIG_IPI);
    }
    while (!all_threads_paused()) {
        pthread_mutex_unlock(&qemu_mutex);
        kvm_eat_signal(&io_signal_table, NULL, 1000);
        pthread_mutex_lock(&qemu_mutex);
        cpu_single_env = NULL;
    }
}
static void resume_all_threads(void)
{
    int i;

    for (i = 0; i < smp_cpus; ++i) {
        vcpu_info[i].stop = 0;
        vcpu_info[i].stopped = 0;
        pthread_kill(vcpu_info[i].thread, SIG_IPI);
    }
}
static void kvm_vm_state_change_handler(void *context, int running)
{
    if (running)
        resume_all_threads();
    else
        pause_all_threads();
}
static void update_regs_for_sipi(CPUState *env)
{
    kvm_arch_update_regs_for_sipi(env);
    vcpu_info[env->cpu_index].sipi_needed = 0;
    vcpu_info[env->cpu_index].init = 0;
}
static void update_regs_for_init(CPUState *env)
{
    cpu_reset(env);
    kvm_arch_load_regs(env);
}
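/*
 * Unblock SIG_IPI only while the vcpu executes inside KVM_RUN, so the
 * kernel can be interrupted there; everywhere else, signals are taken
 * synchronously via sigtimedwait() in kvm_eat_signal().
 */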
static void setup_kernel_sigmask(CPUState *env)
{
    sigset_t set;

    sigprocmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    kvm_set_signal_mask(kvm_context, env->cpu_index, &set);
}
void qemu_kvm_system_reset_request(void)
{
    int i;

    for (i = 0; i < smp_cpus; ++i) {
        vcpu_info[i].reload_regs = 1;
        pthread_kill(vcpu_info[i].thread, SIG_IPI);
    }
    qemu_system_reset();
}
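/*
 * Per-vcpu execution loop: wait until there is work, fold pending
 * SIPI/INIT state into the registers when the irqchip is emulated in
 * userspace, run the guest, then handle signals and reset requests.
 */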
static int kvm_main_loop_cpu(CPUState *env)
{
    struct vcpu_info *info = &vcpu_info[env->cpu_index];

    setup_kernel_sigmask(env);
    pthread_mutex_lock(&qemu_mutex);

    kvm_qemu_init_env(env);
    env->ready_for_interrupt_injection = 1;
    kvm_tpr_vcpu_start(env);

    cpu_single_env = env;
    while (1) {
        while (!has_work(env))
            kvm_main_loop_wait(env, 10);
        if (env->interrupt_request & CPU_INTERRUPT_HARD)
            env->hflags &= ~HF_HALTED_MASK;
        if (!kvm_irqchip_in_kernel(kvm_context) && info->sipi_needed)
            update_regs_for_sipi(env);
        if (!kvm_irqchip_in_kernel(kvm_context) && info->init)
            update_regs_for_init(env);
        if (!(env->hflags & HF_HALTED_MASK) && !info->init)
            kvm_cpu_exec(env);
        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
        kvm_main_loop_wait(env, 0);
        if (info->reload_regs) {
            info->reload_regs = 0;
            if (env->cpu_index == 0) /* ap needs to be placed in INIT */
                kvm_arch_load_regs(env);
        }
    }
    pthread_mutex_unlock(&qemu_mutex);
    return 0;
}
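/*
 * Thread start routine for each vcpu: block all signals (they are only
 * taken via sigtimedwait() and the KVM_RUN signal mask), create the
 * kernel vcpu, then enter the execution loop.
 */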
static void *ap_main_loop(void *_env)
{
    CPUState *env = _env;
    sigset_t signals;

    vcpu = &vcpu_info[env->cpu_index];
    vcpu->env = env;
    vcpu->env->thread_id = kvm_get_thread_id();
    sigfillset(&signals);
    sigprocmask(SIG_BLOCK, &signals, NULL);
    kvm_create_vcpu(kvm_context, env->cpu_index);
    kvm_qemu_init_env(env);
    if (kvm_irqchip_in_kernel(kvm_context))
        env->hflags &= ~HF_HALTED_MASK;
    kvm_main_loop_cpu(env);
    return NULL;
}
static void qemu_kvm_init_signal_table(struct qemu_kvm_signal_table *sigtab)
{
    sigemptyset(&sigtab->sigset);
    sigfillset(&sigtab->negsigset);
}
static void kvm_add_signal(struct qemu_kvm_signal_table *sigtab, int signum)
{
    sigaddset(&sigtab->sigset, signum);
    sigdelset(&sigtab->negsigset, signum);
}
void kvm_init_new_ap(int cpu, CPUState *env)
{
    pthread_create(&vcpu_info[cpu].thread, NULL, ap_main_loop, env);
}
static void qemu_kvm_init_signal_tables(void)
{
    qemu_kvm_init_signal_table(&io_signal_table);
    qemu_kvm_init_signal_table(&vcpu_signal_table);

    kvm_add_signal(&io_signal_table, SIGIO);
    kvm_add_signal(&io_signal_table, SIGALRM);
    kvm_add_signal(&io_signal_table, SIGUSR1);
    kvm_add_signal(&io_signal_table, SIGUSR2);

    kvm_add_signal(&vcpu_signal_table, SIG_IPI);

    sigprocmask(SIG_BLOCK, &io_signal_table.sigset, NULL);
}
int kvm_init_ap(void)
{
    CPUState *env = first_cpu;
    int i;

    qemu_add_vm_change_state_handler(kvm_vm_state_change_handler, NULL);
    qemu_kvm_init_signal_tables();

    signal(SIG_IPI, sig_ipi_handler);
    for (i = 0; i < smp_cpus; ++i) {
        kvm_init_new_ap(i, env);
        env = env->next_cpu;
    }
    return 0;
}
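/*
 * Knock the io thread out of its sigtimedwait() so it re-examines the
 * main loop state without waiting for the 1s timeout to expire.
 */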
void qemu_kvm_notify_work(void)
{
    if (io_thread)
        pthread_kill(io_thread, SIGUSR1);
}
/*
 * The IO thread has all signals that inform machine events
 * blocked (io_signal_table), so it won't get interrupted
 * while processing in main_loop_wait().
 */
int kvm_main_loop(void)
{
    io_thread = pthread_self();
    pthread_mutex_unlock(&qemu_mutex);
    while (1) {
        kvm_eat_signal(&io_signal_table, NULL, 1000);
        pthread_mutex_lock(&qemu_mutex);
        cpu_single_env = NULL;
        main_loop_wait(0);
        if (qemu_shutdown_requested())
            break;
        else if (qemu_powerdown_requested())
            qemu_system_powerdown();
        else if (qemu_reset_requested()) {
            pthread_kill(vcpu_info[0].thread, SIG_IPI);
            qemu_kvm_reset_requested = 1;
        }
        pthread_mutex_unlock(&qemu_mutex);
    }
    pause_all_threads();
    pthread_mutex_unlock(&qemu_mutex);

    return 0;
}
static int kvm_debug(void *opaque, int vcpu)
{
    CPUState *env = cpu_single_env;

    env->exception_index = EXCP_DEBUG;
    return 1;
}
static int kvm_inb(void *opaque, uint16_t addr, uint8_t *data)
{
    *data = cpu_inb(0, addr);
    return 0;
}

static int kvm_inw(void *opaque, uint16_t addr, uint16_t *data)
{
    *data = cpu_inw(0, addr);
    return 0;
}

static int kvm_inl(void *opaque, uint16_t addr, uint32_t *data)
{
    *data = cpu_inl(0, addr);
    return 0;
}
#define PM_IO_BASE 0xb000
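/*
 * Port 0xb2 is the APM control port; a write raises an SMI on real
 * hardware. Lacking SMM support, the ACPI enable/disable commands are
 * emulated below by toggling the SCI_EN bit in PMCNTRL (PM_IO_BASE + 4).
 */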
static int kvm_outb(void *opaque, uint16_t addr, uint8_t data)
{
    if (addr == 0xb2) {
        switch (data) {
        case 0:
            cpu_outb(0, 0xb3, 0);
            break;
        case 1: {
            uint16_t x;

            x = cpu_inw(0, PM_IO_BASE + 4);
            x &= ~1;                        /* clear SCI_EN */
            cpu_outw(0, PM_IO_BASE + 4, x);
            break;
        }
        case 2: {
            uint16_t x;

            x = cpu_inw(0, PM_IO_BASE + 4);
            x |= 1;                         /* set SCI_EN */
            cpu_outw(0, PM_IO_BASE + 4, x);
            break;
        }
        default:
            break;
        }
        return 0;
    }
    cpu_outb(0, addr, data);
    return 0;
}
static int kvm_outw(void *opaque, uint16_t addr, uint16_t data)
{
    cpu_outw(0, addr, data);
    return 0;
}

static int kvm_outl(void *opaque, uint16_t addr, uint32_t data)
{
    cpu_outl(0, addr, data);
    return 0;
}
static int kvm_mmio_read(void *opaque, uint64_t addr, uint8_t *data, int len)
{
    cpu_physical_memory_rw(addr, data, len, 0);
    return 0;
}

static int kvm_mmio_write(void *opaque, uint64_t addr, uint8_t *data, int len)
{
    cpu_physical_memory_rw(addr, data, len, 1);
    return 0;
}
static int kvm_io_window(void *opaque)
{
    return 1;
}
static int kvm_halt(void *opaque, int vcpu)
{
    return kvm_arch_halt(opaque, vcpu);
}
static int kvm_shutdown(void *opaque, int vcpu)
{
    qemu_system_reset_request();
    return 1;
}
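/*
 * Callback table handed to libkvm: guest exits (pio, mmio, halt,
 * shutdown, ...) are routed back through the handlers above.
 */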
static struct kvm_callbacks qemu_kvm_ops = {
    .debug = kvm_debug,
    .inb   = kvm_inb,
    .inw   = kvm_inw,
    .inl   = kvm_inl,
    .outb  = kvm_outb,
    .outw  = kvm_outw,
    .outl  = kvm_outl,
    .mmio_read = kvm_mmio_read,
    .mmio_write = kvm_mmio_write,
    .halt  = kvm_halt,
    .shutdown = kvm_shutdown,
    .io_window = kvm_io_window,
    .try_push_interrupts = try_push_interrupts,
    .post_kvm_run = post_kvm_run,
    .pre_kvm_run = pre_kvm_run,
#ifdef TARGET_I386
    .tpr_access = handle_tpr_access,
#endif
#ifdef TARGET_PPC
    .powerpc_dcr_read = handle_powerpc_dcr_read,
    .powerpc_dcr_write = handle_powerpc_dcr_write,
#endif
};
int kvm_qemu_init(void)
{
    /* Try to initialize kvm */
    kvm_context = kvm_init(&qemu_kvm_ops, cpu_single_env);
    if (!kvm_context)
        return -1;
    pthread_mutex_lock(&qemu_mutex);

    return 0;
}
int kvm_qemu_create_context(void)
{
    int r;

    /* honour the -no-kvm-irqchip / -no-kvm-pit switches */
    if (!kvm_irqchip)
        kvm_disable_irqchip_creation(kvm_context);
    if (!kvm_pit)
        kvm_disable_pit_creation(kvm_context);
    if (kvm_create(kvm_context, phys_ram_size, (void **)&phys_ram_base) < 0) {
        kvm_qemu_destroy();
        return -1;
    }
    r = kvm_arch_qemu_create_context();
    if (r < 0)
        kvm_qemu_destroy();
    return 0;
}
void kvm_qemu_destroy(void)
{
    kvm_finalize(kvm_context);
}
void kvm_cpu_register_physical_memory(target_phys_addr_t start_addr,
                                      unsigned long size,
                                      unsigned long phys_offset)
{
#ifdef KVM_CAP_USER_MEMORY
    int r;

    r = kvm_check_extension(kvm_context, KVM_CAP_USER_MEMORY);
    if (r) {
        if (!(phys_offset & ~TARGET_PAGE_MASK)) {
            /* plain RAM: register it with the kernel unless it already is */
            r = kvm_is_allocated_mem(kvm_context, start_addr, size);
            if (r)
                return;
            r = kvm_is_intersecting_mem(kvm_context, start_addr);
            if (r)
                kvm_create_mem_hole(kvm_context, start_addr, size);
            r = kvm_register_userspace_phys_mem(kvm_context, start_addr,
                                                phys_ram_base + phys_offset,
                                                size, 0);
        }
        if (phys_offset & IO_MEM_ROM) {
            phys_offset &= ~IO_MEM_ROM;
            r = kvm_is_intersecting_mem(kvm_context, start_addr);
            if (r)
                kvm_create_mem_hole(kvm_context, start_addr, size);
            r = kvm_register_userspace_phys_mem(kvm_context, start_addr,
                                                phys_ram_base + phys_offset,
                                                size, 0);
        }
        if (r < 0) {
            printf("kvm_cpu_register_physical_memory: failed\n");
            exit(1);
        }
        return;
    }
#endif
    if (phys_offset & IO_MEM_ROM) {
        phys_offset &= ~IO_MEM_ROM;
        memcpy(phys_ram_base + start_addr, phys_ram_base + phys_offset, size);
    }
}
int kvm_qemu_check_extension(int ext)
{
    return kvm_check_extension(kvm_context, ext);
}
int kvm_qemu_init_env(CPUState *cenv)
{
    return kvm_arch_qemu_init_env(cenv);
}
int kvm_update_debugger(CPUState *env)
{
    struct kvm_debug_guest dbg;
    int i;

    memset(&dbg, 0, sizeof dbg);
    if (env->nb_breakpoints || env->singlestep_enabled) {
        dbg.enabled = 1;
        for (i = 0; i < 4 && i < env->nb_breakpoints; ++i) {
            dbg.breakpoints[i].enabled = 1;
            dbg.breakpoints[i].address = env->breakpoints[i];
        }
        dbg.singlestep = env->singlestep_enabled;
    }
    return kvm_guest_debug(kvm_context, env->cpu_index, &dbg);
}
/*
 * dirty pages logging
 */
/* FIXME: use unsigned long pointer instead of unsigned char */
unsigned char *kvm_dirty_bitmap = NULL;
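/* One dirty bit per target page, rounded up to whole host long words
 * (assumed definitions for the two helpers used below). */
#define ALIGN(x, y)    (((x) + (y) - 1) & ~((y) - 1))
#define BITMAP_SIZE(m) (ALIGN(((m) >> TARGET_PAGE_BITS), HOST_LONG_BITS) >> 3)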
int kvm_physical_memory_set_dirty_tracking(int enable)
{
    int r = 0;

    if (enable) {
        if (!kvm_dirty_bitmap) {
            unsigned bitmap_size = BITMAP_SIZE(phys_ram_size);
            kvm_dirty_bitmap = qemu_malloc(bitmap_size);
            if (kvm_dirty_bitmap == NULL) {
                perror("Failed to allocate dirty pages bitmap");
                r = -1;
            } else {
                r = kvm_dirty_pages_log_enable_all(kvm_context);
            }
        }
    } else {
        if (kvm_dirty_bitmap) {
            r = kvm_dirty_pages_log_reset(kvm_context);
            qemu_free(kvm_dirty_bitmap);
            kvm_dirty_bitmap = NULL;
        }
    }
    return r;
}
/* get kvm's dirty pages bitmap and update qemu's */
int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                                  unsigned char *bitmap,
                                  unsigned long offset,
                                  unsigned long mem_size)
{
    unsigned int i, j, n = 0;
    unsigned char c;
    unsigned page_number, addr, addr1;
    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + 7) / 8;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        c = bitmap[i];
        while (c > 0) {
            /* peel off the lowest set bit; each bit is one dirty page */
            j = ffsl(c) - 1;
            c &= ~(1u << j);
            page_number = i * 8 + j;
            addr1 = page_number * TARGET_PAGE_SIZE;
            addr = offset + addr1;
            cpu_physical_memory_set_dirty(addr);
            n++;
        }
    }
    return 0;
}
int kvm_get_dirty_bitmap_cb(unsigned long start, unsigned long len,
                            void *bitmap, void *opaque)
{
    return kvm_get_dirty_pages_log_range(start, bitmap, start, len);
}
/*
 * get kvm's dirty pages bitmap and update qemu's
 * we only care about physical ram, which resides in slots 0 and 3
 */
int kvm_update_dirty_pages_log(void)
{
    int r;

    r = kvm_get_dirty_pages_range(kvm_context, 0, phys_ram_size,
                                  kvm_dirty_bitmap, NULL,
                                  kvm_get_dirty_bitmap_cb);
    return r;
}
int kvm_get_phys_ram_page_bitmap(unsigned char *bitmap)
{
    unsigned int bsize = BITMAP_SIZE(phys_ram_size);
    unsigned int brsize = BITMAP_SIZE(ram_size);
    unsigned int extra_pages = (phys_ram_size - ram_size) / TARGET_PAGE_SIZE;
    unsigned int extra_bytes = (extra_pages + 7) / 8;
    unsigned int hole_start = BITMAP_SIZE(0xa0000);
    unsigned int hole_end = BITMAP_SIZE(0xc0000);

    /* mark all ram present, then punch out the 0xa0000-0xc0000 VGA hole
     * and the unused tail beyond the extra (non-ram) pages */
    memset(bitmap, 0xFF, brsize + extra_bytes);
    memset(bitmap + hole_start, 0, hole_end - hole_start);
    memset(bitmap + brsize + extra_bytes, 0, bsize - brsize - extra_bytes);

    return 0;
}
#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq(int irq, int level)
{
    return kvm_set_irq_level(kvm_context, irq, level);
}

#endif
void qemu_kvm_aio_wait_start(void)
{
}

void qemu_kvm_aio_wait(void)
{
    CPUState *cpu_single = cpu_single_env;

    if (!cpu_single_env) {
        /* io thread: keep polling for machine events while waiting */
        pthread_mutex_unlock(&qemu_mutex);
        kvm_eat_signal(&io_signal_table, NULL, 1000);
        pthread_mutex_lock(&qemu_mutex);
        cpu_single_env = NULL;
    } else {
        /* vcpu thread: sleep until an aio completion signals the cond */
        pthread_cond_wait(&qemu_aio_cond, &qemu_mutex);
        cpu_single_env = cpu_single;
    }
}

void qemu_kvm_aio_wait_end(void)
{
}
int qemu_kvm_get_dirty_pages(unsigned long phys_addr, void *buf)
{
    return kvm_get_dirty_pages(kvm_context, phys_addr, buf);
}
void *kvm_cpu_create_phys_mem(target_phys_addr_t start_addr,
                              unsigned long size, int log, int writable)
{
    return kvm_create_phys_mem(kvm_context, start_addr, size, log, writable);
}
void kvm_cpu_destroy_phys_mem(target_phys_addr_t start_addr,
                              unsigned long size)
{
    kvm_destroy_phys_mem(kvm_context, start_addr, size);
}