/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */
#include "config-host.h"
#include "qemu-common.h"

#include <sys/utsname.h>
#include <sys/syscall.h>
int kvm_pit_reinject = 1;

kvm_context_t kvm_context;

pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
__thread struct CPUState *current_env;

static int qemu_system_ready;

#define SIG_IPI (SIGRTMIN+4)
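/*
 * SIG_IPI is the cross-thread "kick" used below to yank a VCPU thread
 * out of KVM_RUN or out of its sigtimedwait() idle loop.  A POSIX
 * real-time signal is used because real-time signals queue instead of
 * coalescing, so back-to-back kicks are not lost; the particular
 * SIGRTMIN+4 slot is simply one this process does not use elsewhere.
 */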
pthread_t io_thread;            /* the main (IO) thread; set in kvm_main_loop() */
static int io_thread_fd = -1;
static int io_thread_sigfd = -1;

static CPUState *kvm_debug_cpu_requested;

/* The list of ioperm_data */
static LIST_HEAD(, ioperm_data) ioperm_head;
static inline unsigned long kvm_get_thread_id(void)
{
    return syscall(SYS_gettid);
}
static void qemu_cond_wait(pthread_cond_t *cond)
{
    CPUState *env = cpu_single_env;
    static const struct timespec ts = {
        .tv_sec = 0,
        .tv_nsec = 100000,      /* an absolute time in the past: a short poll */
    };

    pthread_cond_timedwait(cond, &qemu_mutex, &ts);
    cpu_single_env = env;
}
static void sig_ipi_handler(int n)
{
    /* empty: SIG_IPI only needs to interrupt the blocking syscall */
}
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (env == current_env) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->kvm_cpu_state.queued_work_first)
        env->kvm_cpu_state.queued_work_first = &wi;
    else
        env->kvm_cpu_state.queued_work_last->next = &wi;
    env->kvm_cpu_state.queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
    while (!wi.done)
        qemu_cond_wait(&qemu_work_cond);
}
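/*
 * on_vcpu() is the cross-thread call primitive used throughout this
 * file: work that must run in the context of a particular VCPU thread
 * (register sync, guest-debug ioctls, ioperm) is queued on that CPU,
 * the thread is kicked with SIG_IPI, and the caller sleeps on
 * qemu_work_cond until flush_queued_work() has run the item.  When the
 * caller already *is* the target VCPU thread the function is invoked
 * directly, so on_vcpu() can be used without knowing the current
 * context.
 */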
static void inject_interrupt(void *data)
{
    cpu_interrupt(current_env, (int)data);
}
void kvm_inject_interrupt(CPUState *env, int mask)
{
    on_vcpu(env, inject_interrupt, (void *)mask);
}
void kvm_update_interrupt_request(CPUState *env)
{
    int signal = 0;

    if (env) {
        if (!current_env || !current_env->kvm_cpu_state.created)
            signal = 1;
        /*
         * Testing for created here is really redundant
         */
        if (current_env && current_env->kvm_cpu_state.created &&
            env != current_env && !env->kvm_cpu_state.signalled)
            signal = 1;

        if (signal) {
            env->kvm_cpu_state.signalled = 1;
            if (env->kvm_cpu_state.thread)
                pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
        }
    }
}
void kvm_update_after_sipi(CPUState *env)
{
    env->kvm_cpu_state.sipi_needed = 1;
    kvm_update_interrupt_request(env);
}
void kvm_apic_init(CPUState *env)
{
    if (env->cpu_index != 0)
        env->kvm_cpu_state.init = 1;
    kvm_update_interrupt_request(env);
}
static int try_push_interrupts(void *opaque)
{
    return kvm_arch_try_push_interrupts(opaque);
}
static void post_kvm_run(void *opaque, void *data)
{
    CPUState *env = (CPUState *)data;

    pthread_mutex_lock(&qemu_mutex);
    kvm_arch_post_kvm_run(opaque, env);
}
static int pre_kvm_run(void *opaque, void *data)
{
    CPUState *env = (CPUState *)data;

    kvm_arch_pre_kvm_run(opaque, env);

    if (env->interrupt_request & CPU_INTERRUPT_EXIT)
        return 1;
    pthread_mutex_unlock(&qemu_mutex);
    return 0;
}
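/*
 * Note the locking asymmetry around guest execution: pre_kvm_run()
 * drops qemu_mutex just before the thread enters KVM_RUN and
 * post_kvm_run() re-acquires it on exit, so the guest itself runs
 * without the global lock held and only device emulation and shared
 * QEMU state are serialized.
 */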
static void kvm_do_load_registers(void *_env)
{
    CPUState *env = _env;

    kvm_arch_load_regs(env);
}
void kvm_load_registers(CPUState *env)
{
    if (kvm_enabled() && qemu_system_ready)
        on_vcpu(env, kvm_do_load_registers, env);
}
static void kvm_do_save_registers(void *_env)
{
    CPUState *env = _env;

    kvm_arch_save_regs(env);
}
void kvm_save_registers(CPUState *env)
{
    if (kvm_enabled())
        on_vcpu(env, kvm_do_save_registers, env);
}
int kvm_cpu_exec(CPUState *env)
{
    int r;

    r = kvm_run(kvm_context, env->cpu_index, env);
    if (r < 0) {
        printf("kvm_run returned %d\n", r);
        exit(1);
    }

    return 0;
}
static int has_work(CPUState *env)
{
    if (!vm_running || (env && env->kvm_cpu_state.stopped))
        return 0;
    if (!env || !env->halted)
        return 1;
    return kvm_arch_has_work(env);
}
static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->kvm_cpu_state.queued_work_first)
        return;

    while ((wi = env->kvm_cpu_state.queued_work_first)) {
        env->kvm_cpu_state.queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->kvm_cpu_state.queued_work_last = NULL;
    pthread_cond_broadcast(&qemu_work_cond);
}
static void kvm_main_loop_wait(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;

    pthread_mutex_unlock(&qemu_mutex);

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;
    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    r = sigtimedwait(&waitset, &siginfo, &ts);
    e = errno;

    pthread_mutex_lock(&qemu_mutex);

    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
        printf("sigtimedwait: %s\n", strerror(e));
        exit(1);
    }

    cpu_single_env = env;
    flush_queued_work(env);

    if (env->kvm_cpu_state.stop) {
        env->kvm_cpu_state.stop = 0;
        env->kvm_cpu_state.stopped = 1;
        pthread_cond_signal(&qemu_pause_cond);
    }

    env->kvm_cpu_state.signalled = 0;
}
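/*
 * This is the idle/housekeeping step of every VCPU thread: drop the
 * global lock, wait up to `timeout' milliseconds for a SIG_IPI kick,
 * then re-take the lock and service whatever the kick announced --
 * queued work items, a pause request from the IO thread, or an
 * interrupt condition that the caller re-checks via has_work().
 */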
static int all_threads_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (penv->kvm_cpu_state.stop)
            return 0;
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}
static void pause_all_threads(void)
{
    CPUState *penv = first_cpu;

    assert(!cpu_single_env);

    while (penv) {
        penv->kvm_cpu_state.stop = 1;
        pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_threads_paused())
        qemu_cond_wait(&qemu_pause_cond);
}
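/*
 * pause_all_threads()/resume_all_threads() bracket operations that
 * need a quiescent guest, such as qemu_kvm_system_reset() below.  The
 * assert(!cpu_single_env) documents that only the IO thread may
 * initiate a pause: a VCPU thread waiting for itself to report
 * "stopped" would deadlock.
 */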
static void resume_all_threads(void)
{
    CPUState *penv = first_cpu;

    assert(!cpu_single_env);

    while (penv) {
        penv->kvm_cpu_state.stop = 0;
        penv->kvm_cpu_state.stopped = 0;
        pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
        penv = (CPUState *)penv->next_cpu;
    }
}
static void kvm_vm_state_change_handler(void *context, int running)
{
    if (running)
        resume_all_threads();
    else
        pause_all_threads();
}
static void update_regs_for_sipi(CPUState *env)
{
    kvm_arch_update_regs_for_sipi(env);
    env->kvm_cpu_state.sipi_needed = 0;
}
static void update_regs_for_init(CPUState *env)
{
    SegmentCache cs = env->segs[R_CS];

    cpu_reset(env);

    /* restore SIPI vector */
    if (env->kvm_cpu_state.sipi_needed)
        env->segs[R_CS] = cs;

    env->kvm_cpu_state.init = 0;
    kvm_arch_load_regs(env);
}
static void setup_kernel_sigmask(CPUState *env)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigprocmask(SIG_BLOCK, &set, NULL);

    sigprocmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    kvm_set_signal_mask(kvm_context, env->cpu_index, &set);
}
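/*
 * Two-step mask dance: SIGUSR2/SIGIO/SIGALRM are blocked in the VCPU
 * thread so that only the IO thread services them, and the thread's
 * full blocked mask minus SIG_IPI is then installed as the in-kernel
 * KVM_RUN signal mask.  The net effect is that SIG_IPI becomes
 * deliverable only while the VCPU is inside the guest -- exactly when
 * a kick must force an exit -- and is consumed by sigtimedwait() the
 * rest of the time.
 */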
void qemu_kvm_system_reset(void)
{
    CPUState *penv = first_cpu;

    pause_all_threads();

    qemu_system_reset();

    while (penv) {
        kvm_arch_cpu_reset(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    resume_all_threads();
}
static int kvm_main_loop_cpu(CPUState *env)
{
    setup_kernel_sigmask(env);

    pthread_mutex_lock(&qemu_mutex);
    if (kvm_irqchip_in_kernel(kvm_context))
        env->halted = 0;

    kvm_qemu_init_env(env);
    kvm_tpr_vcpu_start(env);

    cpu_single_env = env;
    kvm_load_registers(env);

    while (1) {
        while (!has_work(env))
            kvm_main_loop_wait(env, 1000);
        if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI))
            env->halted = 0;
        if (!kvm_irqchip_in_kernel(kvm_context)) {
            if (env->kvm_cpu_state.init)
                update_regs_for_init(env);
            if (env->kvm_cpu_state.sipi_needed)
                update_regs_for_sipi(env);
        }
        if (!env->halted && !env->kvm_cpu_state.init)
            kvm_cpu_exec(env);
        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
        kvm_main_loop_wait(env, 0);
    }
    pthread_mutex_unlock(&qemu_mutex);
    return 0;
}
static void *ap_main_loop(void *_env)
{
    CPUState *env = _env;
    sigset_t signals;
    struct ioperm_data *data = NULL;

    current_env = env;
    env->thread_id = kvm_get_thread_id();
    sigfillset(&signals);
    sigprocmask(SIG_BLOCK, &signals, NULL);
    kvm_create_vcpu(kvm_context, env->cpu_index);
    kvm_qemu_init_env(env);

#ifdef USE_KVM_DEVICE_ASSIGNMENT
    /* do ioperm for io ports of assigned devices */
    LIST_FOREACH(data, &ioperm_head, entries)
        on_vcpu(env, kvm_arch_do_ioperm, data);
#endif

    /* signal VCPU creation */
    pthread_mutex_lock(&qemu_mutex);
    current_env->kvm_cpu_state.created = 1;
    pthread_cond_signal(&qemu_vcpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_wait(&qemu_system_cond);
    pthread_mutex_unlock(&qemu_mutex);

    kvm_main_loop_cpu(env);
    return NULL;
}
void kvm_init_vcpu(CPUState *env)
{
    int cpu = env->cpu_index;

    pthread_create(&env->kvm_cpu_state.thread, NULL, ap_main_loop, env);

    while (env->kvm_cpu_state.created == 0)
        qemu_cond_wait(&qemu_vcpu_cond);
}
int kvm_init_ap(void)
{
    qemu_add_vm_change_state_handler(kvm_vm_state_change_handler, NULL);

    signal(SIG_IPI, sig_ipi_handler);
    return 0;
}
void qemu_kvm_notify_work(void)
{
    uint64_t value = 1;
    char buffer[8];
    size_t offset = 0;

    if (io_thread_fd == -1)
        return;

    memcpy(buffer, &value, sizeof(value));

    while (offset < 8) {
        ssize_t len;

        len = write(io_thread_fd, buffer + offset, 8 - offset);
        if (len == -1 && errno == EINTR)
            continue;

        if (len <= 0)
            break;

        offset += len;
    }

    if (offset != 8)
        fprintf(stderr, "failed to notify io thread\n");
}
/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (unsigned long)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN)
            break;

        if (len != sizeof(info)) {
            printf("read from sigfd returned %ld: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if (action.sa_handler)
            action.sa_handler(info.ssi_signo);
    }
}
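/*
 * Since every signal of interest is blocked and routed through the
 * signalfd, handlers registered elsewhere in QEMU never run
 * asynchronously; this function re-dispatches them synchronously from
 * the IO thread's select() loop, looking each one up with sigaction()
 * and calling it by hand.
 */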
/* Used to break IO thread out of select */
static void io_thread_wakeup(void *opaque)
{
    int fd = (unsigned long)opaque;
    char buffer[8];
    size_t offset = 0;

    while (offset < 8) {
        ssize_t len;

        len = read(fd, buffer + offset, 8 - offset);
        if (len == -1 && errno == EINTR)
            continue;

        if (len <= 0)
            break;

        offset += len;
    }
}
int kvm_main_loop(void)
{
    int fds[2];
    sigset_t mask;
    int sigfd;

    io_thread = pthread_self();
    qemu_system_ready = 1;

    if (qemu_eventfd(fds) == -1) {
        fprintf(stderr, "failed to create eventfd\n");
        return -errno;
    }

    qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];

    sigemptyset(&mask);
    sigaddset(&mask, SIGIO);
    sigaddset(&mask, SIGALRM);
    sigprocmask(SIG_BLOCK, &mask, NULL);

    sigfd = qemu_signalfd(&mask);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl(sigfd, F_SETFL, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(unsigned long)sigfd);

    pthread_cond_broadcast(&qemu_system_cond);

    io_thread_sigfd = sigfd;
    cpu_single_env = NULL;

    while (1) {
        main_loop_wait(1000);
        if (qemu_shutdown_requested())
            break;
        else if (qemu_powerdown_requested())
            qemu_system_powerdown();
        else if (qemu_reset_requested())
            qemu_kvm_system_reset();
#ifdef CONFIG_GDBSTUB
        else if (kvm_debug_cpu_requested) {
            gdb_set_stop_cpu(kvm_debug_cpu_requested);
            vm_stop(EXCP_DEBUG);
            kvm_debug_cpu_requested = NULL;
        }
#endif
    }

    pause_all_threads();
    pthread_mutex_unlock(&qemu_mutex);

    return 0;
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_debug(void *opaque, void *data, struct kvm_debug_exit_arch *arch_info)
{
    int handle = kvm_arch_debug(arch_info);
    struct CPUState *env = data;

    if (handle) {
        kvm_debug_cpu_requested = env;
        env->kvm_cpu_state.stopped = 1;
    }
    return handle;
}
#endif
static int kvm_inb(void *opaque, uint16_t addr, uint8_t *data)
{
    *data = cpu_inb(0, addr);
    return 0;
}

static int kvm_inw(void *opaque, uint16_t addr, uint16_t *data)
{
    *data = cpu_inw(0, addr);
    return 0;
}

static int kvm_inl(void *opaque, uint16_t addr, uint32_t *data)
{
    *data = cpu_inl(0, addr);
    return 0;
}
#define PM_IO_BASE 0xb000

static int kvm_outb(void *opaque, uint16_t addr, uint8_t data)
{
    if (addr == 0xb2) {
        switch (data) {
        case 0: {
            cpu_outb(0, 0xb3, 0);
            break;
        }
        case 0xf0: {
            unsigned x;

            /* disable acpi */
            x = cpu_inw(0, PM_IO_BASE + 4);
            x &= ~1;
            cpu_outw(0, PM_IO_BASE + 4, x);
            break;
        }
        case 0xf1: {
            unsigned x;

            /* enable acpi */
            x = cpu_inw(0, PM_IO_BASE + 4);
            x |= 1;
            cpu_outw(0, PM_IO_BASE + 4, x);
            break;
        }
        default:
            break;
        }
        return 0;
    }
    cpu_outb(0, addr, data);
    return 0;
}
static int kvm_outw(void *opaque, uint16_t addr, uint16_t data)
{
    cpu_outw(0, addr, data);
    return 0;
}

static int kvm_outl(void *opaque, uint16_t addr, uint32_t data)
{
    cpu_outl(0, addr, data);
    return 0;
}
static int kvm_mmio_read(void *opaque, uint64_t addr, uint8_t *data, int len)
{
    cpu_physical_memory_rw(addr, data, len, 0);
    return 0;
}

static int kvm_mmio_write(void *opaque, uint64_t addr, uint8_t *data, int len)
{
    cpu_physical_memory_rw(addr, data, len, 1);
    return 0;
}
static int kvm_io_window(void *opaque)
{
    return 1;
}
static int kvm_halt(void *opaque, int vcpu)
{
    return kvm_arch_halt(opaque, vcpu);
}
static int kvm_shutdown(void *opaque, void *data)
{
    struct CPUState *env = (struct CPUState *)data;

    /* stop the current vcpu from going back to guest mode */
    env->kvm_cpu_state.stopped = 1;

    qemu_system_reset_request();
    return 1;
}
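/*
 * A shutdown (triple-fault) exit is converted into a reset request:
 * the faulting VCPU marks itself stopped so it stays out of guest
 * mode, and the IO thread's main loop later observes
 * qemu_reset_requested() and performs qemu_kvm_system_reset() with
 * all VCPUs paused.
 */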
static struct kvm_callbacks qemu_kvm_ops = {
#ifdef KVM_CAP_SET_GUEST_DEBUG
    .debug = kvm_debug,
#endif
    .inb   = kvm_inb,
    .inw   = kvm_inw,
    .inl   = kvm_inl,
    .outb  = kvm_outb,
    .outw  = kvm_outw,
    .outl  = kvm_outl,
    .mmio_read = kvm_mmio_read,
    .mmio_write = kvm_mmio_write,
    .halt  = kvm_halt,
    .shutdown = kvm_shutdown,
    .io_window = kvm_io_window,
    .try_push_interrupts = try_push_interrupts,
#ifdef KVM_CAP_USER_NMI
    .push_nmi = kvm_arch_push_nmi,
#endif
    .post_kvm_run = post_kvm_run,
    .pre_kvm_run = pre_kvm_run,
#ifdef TARGET_I386
    .tpr_access = handle_tpr_access,
#endif
#ifdef TARGET_PPC
    .powerpc_dcr_read = handle_powerpc_dcr_read,
    .powerpc_dcr_write = handle_powerpc_dcr_write,
#endif
};
int kvm_qemu_init(void)
{
    /* Try to initialize kvm */
    kvm_context = kvm_init(&qemu_kvm_ops, cpu_single_env);
    if (!kvm_context)
        return -1;
    pthread_mutex_lock(&qemu_mutex);

    return 0;
}

static int destroy_region_works = 0;
int kvm_qemu_create_context(void)
{
    int r;

    if (!kvm_irqchip) {
        kvm_disable_irqchip_creation(kvm_context);
    }
    if (!kvm_pit) {
        kvm_disable_pit_creation(kvm_context);
    }
    if (kvm_create(kvm_context, phys_ram_size, (void **)&phys_ram_base) < 0) {
        kvm_qemu_destroy();
        return -1;
    }
    r = kvm_arch_qemu_create_context();
    if (r < 0)
        kvm_qemu_destroy();
    if (kvm_pit && !kvm_pit_reinject) {
        if (kvm_reinject_control(kvm_context, 0)) {
            fprintf(stderr, "failure to disable in-kernel PIT reinjection\n");
            return -1;
        }
    }

    destroy_region_works = kvm_destroy_memory_region_works(kvm_context);

    return 0;
}
void kvm_qemu_destroy(void)
{
    kvm_finalize(kvm_context);
}
static int must_use_aliases_source(target_phys_addr_t addr)
{
    if (destroy_region_works)
        return false;
    if (addr == 0xa0000 || addr == 0xa8000)
        return true;
    return false;
}

static int must_use_aliases_target(target_phys_addr_t addr)
{
    if (destroy_region_works)
        return false;
    if (addr >= 0xe0000000 && addr < 0x100000000ull)
        return true;
    return false;
}
static struct mapping {
    target_phys_addr_t phys;
    ram_addr_t ram;
    ram_addr_t len;
} mappings[50];                 /* fixed pool; exact bound elided in this listing */
static int nr_mappings;
static struct mapping *find_ram_mapping(ram_addr_t ram_addr)
{
    struct mapping *p;

    for (p = mappings; p < mappings + nr_mappings; ++p) {
        if (p->ram <= ram_addr && ram_addr < p->ram + p->len) {
            return p;
        }
    }
    return NULL;
}
static struct mapping *find_mapping(target_phys_addr_t start_addr)
{
    struct mapping *p;

    for (p = mappings; p < mappings + nr_mappings; ++p) {
        if (p->phys <= start_addr && start_addr < p->phys + p->len) {
            return p;
        }
    }
    return NULL;
}
static void drop_mapping(target_phys_addr_t start_addr)
{
    struct mapping *p = find_mapping(start_addr);

    if (p)
        *p = mappings[--nr_mappings];
}
void kvm_cpu_register_physical_memory(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      unsigned long phys_offset)
{
    int r = 0;
    unsigned long area_flags = phys_offset & ~TARGET_PAGE_MASK;
    struct mapping *p;

    phys_offset &= ~IO_MEM_ROM;

    if (area_flags == IO_MEM_UNASSIGNED) {
        if (must_use_aliases_source(start_addr)) {
            kvm_destroy_memory_alias(kvm_context, start_addr);
            return;
        }
        if (must_use_aliases_target(start_addr))
            return;
        drop_mapping(start_addr);
        kvm_unregister_memory_area(kvm_context, start_addr, size);
        return;
    }

    r = kvm_is_containing_region(kvm_context, start_addr, size);
    if (r)
        return;

    if (area_flags >= TLB_MMIO)
        return;

    if (must_use_aliases_source(start_addr)) {
        p = find_ram_mapping(phys_offset);
        if (p) {
            kvm_create_memory_alias(kvm_context, start_addr, size,
                                    p->phys + (phys_offset - p->ram));
        }
        return;
    }

    r = kvm_register_phys_mem(kvm_context, start_addr,
                              phys_ram_base + phys_offset,
                              size, 0);
    if (r < 0) {
        printf("kvm_cpu_register_physical_memory: failed\n");
        exit(1);
    }

    drop_mapping(start_addr);
    p = &mappings[nr_mappings++];
    p->phys = start_addr;
    p->ram = phys_offset;
    p->len = size;
}
void kvm_cpu_unregister_physical_memory(target_phys_addr_t start_addr,
                                        target_phys_addr_t size,
                                        unsigned long phys_offset)
{
    kvm_unregister_memory_area(kvm_context, start_addr, size);
}
int kvm_setup_guest_memory(void *area, unsigned long size)
{
    int ret = 0;

    if (kvm_enabled() && !kvm_has_sync_mmu())
        ret = madvise(area, size, MADV_DONTFORK);

    if (ret)
        perror("madvise");

    return ret;
}
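/*
 * Rationale: without a sync-MMU-capable kernel, KVM keeps references
 * to the guest's pages, and a fork() in QEMU followed by copy-on-write
 * would leave those references pointing at stale pages.  MADV_DONTFORK
 * keeps guest RAM out of child processes entirely, which is the safe
 * fallback.
 */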
int kvm_qemu_check_extension(int ext)
{
    return kvm_check_extension(kvm_context, ext);
}
int kvm_qemu_init_env(CPUState *cenv)
{
    return kvm_arch_qemu_init_env(cenv);
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint_head kvm_sw_breakpoints =
    TAILQ_HEAD_INITIALIZER(kvm_sw_breakpoints);

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    TAILQ_FOREACH(bp, &kvm_sw_breakpoints, entry) {
        if (bp->pc == pc)
            return bp;
    }
    return NULL;
}
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    int err;
};
void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;

    dbg_data->err = kvm_set_guest_debug(kvm_context, cpu_single_env->cpu_index,
                                        &dbg_data->dbg);
}
*env
, unsigned long reinject_trap
)
992 struct kvm_set_guest_debug_data data
;
994 data
.dbg
.control
= 0;
995 if (env
->singlestep_enabled
)
996 data
.dbg
.control
= KVM_GUESTDBG_ENABLE
| KVM_GUESTDBG_SINGLESTEP
;
998 kvm_arch_update_guest_debug(env
, &data
.dbg
);
999 data
.dbg
.control
|= reinject_trap
;
1001 on_vcpu(env
, kvm_invoke_set_guest_debug
, &data
);
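/*
 * The debug-control update is shipped to the VCPU thread with
 * on_vcpu() because KVM's per-VCPU ioctls are issued from the thread
 * that runs the VCPU.  Passing the stack-allocated
 * kvm_set_guest_debug_data is safe since on_vcpu() blocks until the
 * work item has completed; err carries the result back to the caller.
 */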
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp)
            return -ENOMEM;

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            qemu_free(bp);
            return err;
        }

        TAILQ_INSERT_HEAD(&kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}
int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(addr);
        if (!bp)
            return -ENOENT;

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err)
            return err;

        TAILQ_REMOVE(&kvm_sw_breakpoints, bp, entry);
        qemu_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}
void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    CPUState *env;

    TAILQ_FOREACH_SAFE(bp, &kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
                    break;
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        kvm_update_guest_debug(env, 0);
}
#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}

#endif /* !KVM_CAP_SET_GUEST_DEBUG */
/*
 * dirty pages logging
 */
/* FIXME: use unsigned long pointer instead of unsigned char */
unsigned char *kvm_dirty_bitmap = NULL;
)
1140 if (!kvm_dirty_bitmap
) {
1141 unsigned bitmap_size
= BITMAP_SIZE(phys_ram_size
);
1142 kvm_dirty_bitmap
= qemu_malloc(bitmap_size
);
1143 if (kvm_dirty_bitmap
== NULL
) {
1144 perror("Failed to allocate dirty pages bitmap");
1148 r
= kvm_dirty_pages_log_enable_all(kvm_context
);
1153 if (kvm_dirty_bitmap
) {
1154 r
= kvm_dirty_pages_log_reset(kvm_context
);
1155 qemu_free(kvm_dirty_bitmap
);
1156 kvm_dirty_bitmap
= NULL
;
/* get kvm's dirty pages bitmap and update qemu's */
int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                                  unsigned char *bitmap,
                                  unsigned int offset,
                                  unsigned long mem_size)
{
    unsigned int i, j, n = 0;
    unsigned char c;
    unsigned long page_number, addr, addr1;
    ram_addr_t ram_addr;
    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + 7) / 8;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        c = bitmap[i];
        while (c > 0) {
            j = ffsl(c) - 1;
            c &= ~(1u << j);
            page_number = i * 8 + j;
            addr1 = page_number * TARGET_PAGE_SIZE;
            addr = offset + addr1;
            ram_addr = cpu_get_physical_page_desc(addr);
            cpu_physical_memory_set_dirty(ram_addr);
            n++;
        }
    }
    return 0;
}
int kvm_get_dirty_bitmap_cb(unsigned long start, unsigned long len,
                            void *bitmap, void *opaque)
{
    return kvm_get_dirty_pages_log_range(start, bitmap, start, len);
}
/*
 * get kvm's dirty pages bitmap and update qemu's
 * we only care about physical ram, which resides in slots 0 and 3
 */
int kvm_update_dirty_pages_log(void)
{
    int r;

    r = kvm_get_dirty_pages_range(kvm_context, 0, phys_ram_size,
                                  kvm_dirty_bitmap, NULL,
                                  kvm_get_dirty_bitmap_cb);
    return r;
}
void kvm_qemu_log_memory(target_phys_addr_t start, target_phys_addr_t size,
                         int log)
{
    if (log)
        kvm_dirty_pages_log_enable_slot(kvm_context, start, size);
    else {
        if (must_use_aliases_target(start))
            return;
        kvm_dirty_pages_log_disable_slot(kvm_context, start, size);
    }
}
int kvm_get_phys_ram_page_bitmap(unsigned char *bitmap)
{
    unsigned int bsize = BITMAP_SIZE(phys_ram_size);
    unsigned int brsize = BITMAP_SIZE(ram_size);
    unsigned int extra_pages = (phys_ram_size - ram_size) / TARGET_PAGE_SIZE;
    unsigned int extra_bytes = (extra_pages + 7) / 8;
    unsigned int hole_start = BITMAP_SIZE(0xa0000);
    unsigned int hole_end = BITMAP_SIZE(0xc0000);

    memset(bitmap, 0xFF, brsize + extra_bytes);
    memset(bitmap + hole_start, 0, hole_end - hole_start);
    memset(bitmap + brsize + extra_bytes, 0, bsize - brsize - extra_bytes);

    return 0;
}
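/*
 * The bitmap is primed as "all pages present": every byte covering
 * ram_size plus the extra (VGA/BIOS) pages is set, then the ISA video
 * hole at 0xa0000-0xc0000 and the unused tail of the bitmap are
 * cleared, since no RAM backs those ranges.
 */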
#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq(int irq, int level)
{
    return kvm_set_irq_level(kvm_context, irq, level);
}

#endif
int qemu_kvm_get_dirty_pages(unsigned long phys_addr, void *buf)
{
    return kvm_get_dirty_pages(kvm_context, phys_addr, buf);
}
void *kvm_cpu_create_phys_mem(target_phys_addr_t start_addr,
                              unsigned long size, int log, int writable)
{
    return kvm_create_phys_mem(kvm_context, start_addr, size, log, writable);
}
void kvm_cpu_destroy_phys_mem(target_phys_addr_t start_addr,
                              unsigned long size)
{
    kvm_destroy_phys_mem(kvm_context, start_addr, size);
}
void kvm_mutex_unlock(void)
{
    assert(!cpu_single_env);
    pthread_mutex_unlock(&qemu_mutex);
}

void kvm_mutex_lock(void)
{
    pthread_mutex_lock(&qemu_mutex);
    cpu_single_env = NULL;
}
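/*
 * cpu_single_env doubles as a record of who holds qemu_mutex: VCPU
 * threads set it to their env after re-locking in
 * kvm_main_loop_wait(), and the IO thread clears it here.  The assert
 * in kvm_mutex_unlock() (as in pause_all_threads()) catches a VCPU
 * thread straying into an IO-thread-only path.
 */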
int qemu_kvm_register_coalesced_mmio(target_phys_addr_t addr, unsigned int size)
{
    return kvm_register_coalesced_mmio(kvm_context, addr, size);
}

int qemu_kvm_unregister_coalesced_mmio(target_phys_addr_t addr,
                                       unsigned int size)
{
    return kvm_unregister_coalesced_mmio(kvm_context, addr, size);
}

int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    return kvm_register_coalesced_mmio(kvm_context, start, size);
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    return kvm_unregister_coalesced_mmio(kvm_context, start, size);
}
#ifdef USE_KVM_DEVICE_ASSIGNMENT
void kvm_add_ioperm_data(struct ioperm_data *data)
{
    LIST_INSERT_HEAD(&ioperm_head, data, entries);
}

void kvm_ioperm(CPUState *env, void *data)
{
    if (kvm_enabled() && qemu_system_ready)
        on_vcpu(env, kvm_arch_do_ioperm, data);
}
#endif
void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
{
    void *buf;

    if (must_use_aliases_source(start_addr))
        return;

    buf = qemu_malloc((end_addr - start_addr) / 8 + 2);
    kvm_get_dirty_pages_range(kvm_context, start_addr, end_addr - start_addr,
                              buf, NULL, kvm_get_dirty_bitmap_cb);
    qemu_free(buf);
}
int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len)
{
    if (must_use_aliases_source(phys_addr))
        return 0;
    kvm_qemu_log_memory(phys_addr, len, 1);
    return 0;
}

int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len)
{
    if (must_use_aliases_source(phys_addr))
        return 0;
    kvm_qemu_log_memory(phys_addr, len, 0);
    return 0;
}
/* hack: both libkvm and upstream qemu define kvm_has_sync_mmu(), differently */
#undef kvm_has_sync_mmu
int qemu_kvm_has_sync_mmu(void)
{
    return kvm_has_sync_mmu(kvm_context);
}
void qemu_kvm_cpu_stop(CPUState *env)
{
    env->kvm_cpu_state.stopped = 1;
}