/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */

#include "config-host.h"

#include "qemu-common.h"

#include <sys/utsname.h>
#include <sys/syscall.h>

extern void perror(const char *s);

kvm_context_t kvm_context;

pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
__thread struct CPUState *current_env;

static int qemu_system_ready;

#define SIG_IPI (SIGRTMIN+4)

static int io_thread_fd = -1;
static int io_thread_sigfd = -1;

static CPUState *kvm_debug_cpu_requested;

/* The list of ioperm_data */
static LIST_HEAD(, ioperm_data) ioperm_head;

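/*
 * Threading model: each vcpu runs in its own pthread (ap_main_loop below),
 * while the io thread runs the regular qemu main loop.  qemu_mutex is the
 * global lock protecting qemu state; the condition variables above are used
 * to hand-shake vcpu creation, system readiness, pausing and queued work.
 * SIG_IPI is the signal used to kick a vcpu thread out of guest mode or out
 * of a blocking wait.
 */
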
static inline unsigned long kvm_get_thread_id(void)
{
    return syscall(SYS_gettid);
}

static void qemu_cond_wait(pthread_cond_t *cond)
{
    CPUState *env = cpu_single_env;
    static const struct timespec ts = {
        .tv_sec = 0,
        .tv_nsec = 100000,
    };

    pthread_cond_timedwait(cond, &qemu_mutex, &ts);
    cpu_single_env = env;
}

static void sig_ipi_handler(int n)
{
}

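/*
 * Run func(data) on the vcpu thread that owns env.  If the caller is already
 * that thread, the function is invoked directly; otherwise a work item is
 * queued on the target vcpu, the vcpu is kicked with SIG_IPI, and the caller
 * waits on qemu_work_cond until flush_queued_work() has run the item.
 */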
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (env == current_env) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->kvm_cpu_state.queued_work_first)
        env->kvm_cpu_state.queued_work_first = &wi;
    else
        env->kvm_cpu_state.queued_work_last->next = &wi;
    env->kvm_cpu_state.queued_work_last = &wi;
    wi.next = NULL;
    wi.done = 0;

    pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
    while (!wi.done)
        qemu_cond_wait(&qemu_work_cond);
}

static void inject_interrupt(void *data)
{
    cpu_interrupt(current_env, (int)data);
}

void kvm_inject_interrupt(CPUState *env, int mask)
{
    on_vcpu(env, inject_interrupt, (void *)mask);
}

void kvm_update_interrupt_request(CPUState *env)
{
    int signal = 0;

    if (env) {
        if (!current_env || !current_env->kvm_cpu_state.created)
            signal = 1;
        /*
         * Testing for created here is really redundant
         */
        if (current_env && current_env->kvm_cpu_state.created &&
            env != current_env && !env->kvm_cpu_state.signalled)
            signal = 1;

        if (signal) {
            env->kvm_cpu_state.signalled = 1;
            if (env->kvm_cpu_state.thread)
                pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
        }
    }
}

void kvm_update_after_sipi(CPUState *env)
{
    env->kvm_cpu_state.sipi_needed = 1;
    kvm_update_interrupt_request(env);
}

void kvm_apic_init(CPUState *env)
{
    if (env->cpu_index != 0)
        env->kvm_cpu_state.init = 1;
    kvm_update_interrupt_request(env);
}

static int try_push_interrupts(void *opaque)
{
    return kvm_arch_try_push_interrupts(opaque);
}

static void post_kvm_run(void *opaque, void *data)
{
    CPUState *env = (CPUState *)data;

    pthread_mutex_lock(&qemu_mutex);
    kvm_arch_post_kvm_run(opaque, env);
}

static int pre_kvm_run(void *opaque, void *data)
{
    CPUState *env = (CPUState *)data;

    kvm_arch_pre_kvm_run(opaque, env);

    if (env->interrupt_request & CPU_INTERRUPT_EXIT)
        return 1;
    pthread_mutex_unlock(&qemu_mutex);
    return 0;
}

static void kvm_do_load_registers(void *_env)
{
    CPUState *env = _env;

    kvm_arch_load_regs(env);
}

void kvm_load_registers(CPUState *env)
{
    if (kvm_enabled() && qemu_system_ready)
        on_vcpu(env, kvm_do_load_registers, env);
}

static void kvm_do_save_registers(void *_env)
{
    CPUState *env = _env;

    kvm_arch_save_regs(env);
}

void kvm_save_registers(CPUState *env)
{
    if (kvm_enabled())
        on_vcpu(env, kvm_do_save_registers, env);
}

int kvm_cpu_exec(CPUState *env)
{
    int r;

    r = kvm_run(kvm_context, env->cpu_index, env);
    if (r < 0) {
        printf("kvm_run returned %d\n", r);
        exit(1);
    }

    return 0;
}

extern int vm_running;

static int has_work(CPUState *env)
{
    if (!vm_running || (env && env->kvm_cpu_state.stopped))
        return 0;
    if (!env || !env->halted)
        return 1;
    return kvm_arch_has_work(env);
}

static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->kvm_cpu_state.queued_work_first)
        return;

    while ((wi = env->kvm_cpu_state.queued_work_first)) {
        env->kvm_cpu_state.queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = 1;
    }
    env->kvm_cpu_state.queued_work_last = NULL;
    pthread_cond_broadcast(&qemu_work_cond);
}

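/*
 * Wait for up to 'timeout' milliseconds for a SIG_IPI while the global lock
 * is dropped, then retake the lock and process queued work and any pending
 * stop request for this vcpu.
 */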
static void kvm_main_loop_wait(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;

    pthread_mutex_unlock(&qemu_mutex);

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;
    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    r = sigtimedwait(&waitset, &siginfo, &ts);
    e = errno;

    pthread_mutex_lock(&qemu_mutex);

    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
        printf("sigtimedwait: %s\n", strerror(e));
        exit(1);
    }

    cpu_single_env = env;
    flush_queued_work(env);

    if (env->kvm_cpu_state.stop) {
        env->kvm_cpu_state.stop = 0;
        env->kvm_cpu_state.stopped = 1;
        pthread_cond_signal(&qemu_pause_cond);
    }

    env->kvm_cpu_state.signalled = 0;
}

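/*
 * Pause/resume protocol: the caller sets kvm_cpu_state.stop on every vcpu
 * and kicks it with SIG_IPI; each vcpu notices the flag in
 * kvm_main_loop_wait(), marks itself stopped and signals qemu_pause_cond.
 * all_threads_paused() below polls for completion.
 */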
static int all_threads_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (penv->kvm_cpu_state.stop)
            return 0;
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

static void pause_all_threads(void)
{
    CPUState *penv = first_cpu;

    assert(!cpu_single_env);

    while (penv) {
        penv->kvm_cpu_state.stop = 1;
        pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_threads_paused())
        qemu_cond_wait(&qemu_pause_cond);
}

static void resume_all_threads(void)
{
    CPUState *penv = first_cpu;

    assert(!cpu_single_env);

    while (penv) {
        penv->kvm_cpu_state.stop = 0;
        penv->kvm_cpu_state.stopped = 0;
        pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void kvm_vm_state_change_handler(void *context, int running)
{
    if (running)
        resume_all_threads();
    else
        pause_all_threads();
}

static void update_regs_for_sipi(CPUState *env)
{
    kvm_arch_update_regs_for_sipi(env);
    env->kvm_cpu_state.sipi_needed = 0;
}

static void update_regs_for_init(CPUState *env)
{
#ifdef TARGET_I386
    SegmentCache cs = env->segs[R_CS];
#endif

    cpu_reset(env);

#ifdef TARGET_I386
    /* restore SIPI vector */
    if (env->kvm_cpu_state.sipi_needed)
        env->segs[R_CS] = cs;
#endif

    env->kvm_cpu_state.init = 0;
    kvm_arch_load_regs(env);
}

static void setup_kernel_sigmask(CPUState *env)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigprocmask(SIG_BLOCK, &set, NULL);

    sigprocmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    kvm_set_signal_mask(kvm_context, env->cpu_index, &set);
}

void qemu_kvm_system_reset(void)
{
    CPUState *penv = first_cpu;

    pause_all_threads();

    qemu_system_reset();

    while (penv) {
        kvm_arch_cpu_reset(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    resume_all_threads();
}

static int kvm_main_loop_cpu(CPUState *env)
{
    setup_kernel_sigmask(env);

    pthread_mutex_lock(&qemu_mutex);
    if (kvm_irqchip_in_kernel(kvm_context))
        env->halted = 0;

    kvm_qemu_init_env(env);
#ifdef TARGET_I386
    kvm_tpr_vcpu_start(env);
#endif

    cpu_single_env = env;
    kvm_load_registers(env);

    while (1) {
        while (!has_work(env))
            kvm_main_loop_wait(env, 1000);
        if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI))
            env->halted = 0;
        if (!kvm_irqchip_in_kernel(kvm_context)) {
            if (env->kvm_cpu_state.init)
                update_regs_for_init(env);
            if (env->kvm_cpu_state.sipi_needed)
                update_regs_for_sipi(env);
        }
        if (!env->halted && !env->kvm_cpu_state.init)
            kvm_cpu_exec(env);
        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
        kvm_main_loop_wait(env, 0);
    }
    pthread_mutex_unlock(&qemu_mutex);
    return 0;
}

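/*
 * Per-vcpu thread start routine: blocks all signals (the kvm signal mask is
 * set up later via setup_kernel_sigmask()), creates the vcpu, performs any
 * pending ioperm requests for assigned devices, signals the creator that the
 * vcpu exists, waits for machine initialization to finish and then enters
 * kvm_main_loop_cpu().
 */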
static void *ap_main_loop(void *_env)
{
    CPUState *env = _env;
    sigset_t signals;
    struct ioperm_data *data = NULL;

    current_env = env;
    env->thread_id = kvm_get_thread_id();
    sigfillset(&signals);
    sigprocmask(SIG_BLOCK, &signals, NULL);
    kvm_create_vcpu(kvm_context, env->cpu_index);
    kvm_qemu_init_env(env);

#ifdef USE_KVM_DEVICE_ASSIGNMENT
    /* do ioperm for io ports of assigned devices */
    LIST_FOREACH(data, &ioperm_head, entries)
        on_vcpu(env, kvm_arch_do_ioperm, data);
#endif

    /* signal VCPU creation */
    pthread_mutex_lock(&qemu_mutex);
    current_env->kvm_cpu_state.created = 1;
    pthread_cond_signal(&qemu_vcpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_wait(&qemu_system_cond);
    pthread_mutex_unlock(&qemu_mutex);

    kvm_main_loop_cpu(env);
    return NULL;
}

void kvm_init_vcpu(CPUState *env)
{
    int cpu = env->cpu_index;

    pthread_create(&env->kvm_cpu_state.thread, NULL, ap_main_loop, env);

    while (env->kvm_cpu_state.created == 0)
        qemu_cond_wait(&qemu_vcpu_cond);
}

int kvm_init_ap(void)
{
    qemu_add_vm_change_state_handler(kvm_vm_state_change_handler, NULL);

    signal(SIG_IPI, sig_ipi_handler);
    return 0;
}

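/*
 * Wake up the io thread by writing an 8-byte value to the descriptor pair
 * set up in kvm_main_loop(); io_thread_wakeup() drains it on the other side.
 * Short writes and EINTR are retried until all 8 bytes have been written.
 */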
void qemu_kvm_notify_work(void)
{
    uint64_t value = 1;
    char buffer[8];
    size_t offset = 0;

    if (io_thread_fd == -1)
        return;

    memcpy(buffer, &value, sizeof(value));

    while (offset < 8) {
        ssize_t len;

        len = write(io_thread_fd, buffer + offset, 8 - offset);
        if (len == -1 && errno == EINTR)
            continue;

        if (len <= 0)
            break;

        offset += len;
    }

    if (offset != 8)
        fprintf(stderr, "failed to notify io thread\n");
}

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (unsigned long)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN)
            break;

        if (len != sizeof(info)) {
            printf("read from sigfd returned %ld: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if (action.sa_handler)
            action.sa_handler(info.ssi_signo);
    }
}

/* Used to break IO thread out of select */
static void io_thread_wakeup(void *opaque)
{
    int fd = (unsigned long)opaque;
    char buffer[8];
    size_t offset = 0;

    while (offset < 8) {
        ssize_t len;

        len = read(fd, buffer + offset, 8 - offset);
        if (len == -1 && errno == EINTR)
            continue;

        if (len <= 0)
            break;

        offset += len;
    }
}

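/*
 * Main loop of the io thread: sets up the wakeup descriptor pair and the
 * signalfd, announces to the waiting vcpu threads that the system is ready,
 * and then services the qemu main loop until shutdown, reset, powerdown or a
 * debug stop is requested.
 */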
int kvm_main_loop(void)
{
    int fds[2];
    sigset_t mask;
    int sigfd;

    io_thread = pthread_self();
    qemu_system_ready = 1;

    if (qemu_eventfd(fds) == -1) {
        fprintf(stderr, "failed to create eventfd\n");
        return -errno;
    }

    qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];

    sigemptyset(&mask);
    sigaddset(&mask, SIGIO);
    sigaddset(&mask, SIGALRM);
    sigprocmask(SIG_BLOCK, &mask, NULL);

    sigfd = qemu_signalfd(&mask);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl(sigfd, F_SETFL, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(unsigned long)sigfd);

    pthread_cond_broadcast(&qemu_system_cond);

    io_thread_sigfd = sigfd;
    cpu_single_env = NULL;

    while (1) {
        main_loop_wait(1000);
        if (qemu_shutdown_requested())
            break;
        else if (qemu_powerdown_requested())
            qemu_system_powerdown();
        else if (qemu_reset_requested())
            qemu_kvm_system_reset();
#ifdef CONFIG_GDBSTUB
        else if (kvm_debug_cpu_requested) {
            gdb_set_stop_cpu(kvm_debug_cpu_requested);
            vm_stop(EXCP_DEBUG);
            kvm_debug_cpu_requested = NULL;
        }
#endif
    }

    pause_all_threads();
    pthread_mutex_unlock(&qemu_mutex);

    return 0;
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_debug(void *opaque, void *data, struct kvm_debug_exit_arch *arch_info)
{
    int handle = kvm_arch_debug(arch_info);
    struct CPUState *env = data;

    if (handle) {
        kvm_debug_cpu_requested = env;
        env->kvm_cpu_state.stopped = 1;
    }
    return handle;
}
#endif

static int kvm_inb(void *opaque, uint16_t addr, uint8_t *data)
{
    *data = cpu_inb(0, addr);
    return 0;
}

static int kvm_inw(void *opaque, uint16_t addr, uint16_t *data)
{
    *data = cpu_inw(0, addr);
    return 0;
}

static int kvm_inl(void *opaque, uint16_t addr, uint32_t *data)
{
    *data = cpu_inl(0, addr);
    return 0;
}

#define PM_IO_BASE 0xb000

static int kvm_outb(void *opaque, uint16_t addr, uint8_t data)
{
    if (addr == 0xb2) {
        switch (data) {
        case 0: {
            cpu_outb(0, 0xb3, 0);
            break;
        }
        case 0xf0: {
            unsigned x;

            /* enable acpi */
            x = cpu_inw(0, PM_IO_BASE + 4);
            x &= ~1;
            cpu_outw(0, PM_IO_BASE + 4, x);
            break;
        }
        case 0xf1: {
            unsigned x;

            /* enable acpi */
            x = cpu_inw(0, PM_IO_BASE + 4);
            x |= 1;
            cpu_outw(0, PM_IO_BASE + 4, x);
            break;
        }
        default:
            break;
        }
        return 0;
    }
    cpu_outb(0, addr, data);
    return 0;
}

static int kvm_outw(void *opaque, uint16_t addr, uint16_t data)
{
    cpu_outw(0, addr, data);
    return 0;
}

static int kvm_outl(void *opaque, uint16_t addr, uint32_t data)
{
    cpu_outl(0, addr, data);
    return 0;
}

static int kvm_mmio_read(void *opaque, uint64_t addr, uint8_t *data, int len)
{
    cpu_physical_memory_rw(addr, data, len, 0);
    return 0;
}

static int kvm_mmio_write(void *opaque, uint64_t addr, uint8_t *data, int len)
{
    cpu_physical_memory_rw(addr, data, len, 1);
    return 0;
}

static int kvm_io_window(void *opaque)
{
    return 1;
}

static int kvm_halt(void *opaque, int vcpu)
{
    return kvm_arch_halt(opaque, vcpu);
}

static int kvm_shutdown(void *opaque, void *data)
{
    struct CPUState *env = (struct CPUState *)data;

    /* stop the current vcpu from going back to guest mode */
    env->kvm_cpu_state.stopped = 1;

    qemu_system_reset_request();
    return 1;
}

static struct kvm_callbacks qemu_kvm_ops = {
#ifdef KVM_CAP_SET_GUEST_DEBUG
    .debug = kvm_debug,
#endif
    .inb   = kvm_inb,
    .inw   = kvm_inw,
    .inl   = kvm_inl,
    .outb  = kvm_outb,
    .outw  = kvm_outw,
    .outl  = kvm_outl,
    .mmio_read = kvm_mmio_read,
    .mmio_write = kvm_mmio_write,
    .halt  = kvm_halt,
    .shutdown = kvm_shutdown,
    .io_window = kvm_io_window,
    .try_push_interrupts = try_push_interrupts,
#ifdef KVM_CAP_USER_NMI
    .push_nmi = kvm_arch_push_nmi,
#endif
    .post_kvm_run = post_kvm_run,
    .pre_kvm_run = pre_kvm_run,
#ifdef TARGET_I386
    .tpr_access = handle_tpr_access,
#endif
#ifdef TARGET_PPC
    .powerpc_dcr_read = handle_powerpc_dcr_read,
    .powerpc_dcr_write = handle_powerpc_dcr_write,
#endif
};

int kvm_qemu_init(void)
{
    /* Try to initialize kvm */
    kvm_context = kvm_init(&qemu_kvm_ops, cpu_single_env);
    if (!kvm_context)
        return -1;

    pthread_mutex_lock(&qemu_mutex);

    return 0;
}

static int destroy_region_works = 0;

int kvm_qemu_create_context(void)
{
    int r;

    if (!kvm_irqchip) {
        kvm_disable_irqchip_creation(kvm_context);
    }
    if (!kvm_pit) {
        kvm_disable_pit_creation(kvm_context);
    }
    if (kvm_create(kvm_context, phys_ram_size, (void **)&phys_ram_base) < 0) {
        kvm_qemu_destroy();
        return -1;
    }
    r = kvm_arch_qemu_create_context();
    if (r < 0)
        kvm_qemu_destroy();

    destroy_region_works = kvm_destroy_memory_region_works(kvm_context);

    return 0;
}

void kvm_qemu_destroy(void)
{
    kvm_finalize(kvm_context);
}

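/*
 * On kernels where kvm memory regions cannot be destroyed
 * (destroy_region_works == 0), the VGA windows at 0xa0000/0xa8000 and the
 * PCI hole above 0xe0000000 are handled with memory aliases instead of being
 * registered and unregistered directly.
 */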
static int must_use_aliases_source(target_phys_addr_t addr)
{
    if (destroy_region_works)
        return false;
    if (addr == 0xa0000 || addr == 0xa8000)
        return true;
    return false;
}

static int must_use_aliases_target(target_phys_addr_t addr)
{
    if (destroy_region_works)
        return false;
    if (addr >= 0xe0000000 && addr < 0x100000000ull)
        return true;
    return false;
}

static struct mapping {
    target_phys_addr_t phys;
    ram_addr_t ram;
    ram_addr_t len;
} mappings[50];
static int nr_mappings;

static struct mapping *find_ram_mapping(ram_addr_t ram_addr)
{
    struct mapping *p;

    for (p = mappings; p < mappings + nr_mappings; ++p) {
        if (p->ram <= ram_addr && ram_addr < p->ram + p->len) {
            return p;
        }
    }
    return NULL;
}

static struct mapping *find_mapping(target_phys_addr_t start_addr)
{
    struct mapping *p;

    for (p = mappings; p < mappings + nr_mappings; ++p) {
        if (p->phys <= start_addr && start_addr < p->phys + p->len) {
            return p;
        }
    }
    return NULL;
}

static void drop_mapping(target_phys_addr_t start_addr)
{
    struct mapping *p = find_mapping(start_addr);

    if (p)
        *p = mappings[--nr_mappings];
}

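/*
 * Called whenever qemu (un)maps guest physical memory.  Unassigned areas are
 * unregistered (or turned back into aliases), MMIO is skipped, and RAM is
 * registered with kvm; on TARGET_I386 the mappings[] table above records the
 * phys->ram translations needed to build aliases later.
 */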
void kvm_cpu_register_physical_memory(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      unsigned long phys_offset)
{
    int r = 0;
    unsigned long area_flags = phys_offset & ~TARGET_PAGE_MASK;
#ifdef TARGET_I386
    struct mapping *p;
#endif

    phys_offset &= ~IO_MEM_ROM;

    if (area_flags == IO_MEM_UNASSIGNED) {
#ifdef TARGET_I386
        if (must_use_aliases_source(start_addr)) {
            kvm_destroy_memory_alias(kvm_context, start_addr);
            return;
        }
        if (must_use_aliases_target(start_addr))
            return;
#endif
        kvm_unregister_memory_area(kvm_context, start_addr, size);
        return;
    }

    r = kvm_is_containing_region(kvm_context, start_addr, size);
    if (r)
        return;

    if (area_flags >= TLB_MMIO)
        return;

#ifdef TARGET_I386
    if (must_use_aliases_source(start_addr)) {
        p = find_ram_mapping(phys_offset);
        if (p) {
            kvm_create_memory_alias(kvm_context, start_addr, size,
                                    p->phys + (phys_offset - p->ram));
        }
        return;
    }
#endif

    r = kvm_register_phys_mem(kvm_context, start_addr,
                              phys_ram_base + phys_offset, size, 0);
    if (r < 0) {
        printf("kvm_cpu_register_physical_memory: failed\n");
        exit(1);
    }

#ifdef TARGET_I386
    drop_mapping(start_addr);
    p = &mappings[nr_mappings++];
    p->phys = start_addr;
    p->ram = phys_offset;
    p->len = size;
#endif
}

void kvm_cpu_unregister_physical_memory(target_phys_addr_t start_addr,
                                        target_phys_addr_t size,
                                        unsigned long phys_offset)
{
    kvm_unregister_memory_area(kvm_context, start_addr, size);
}

int kvm_setup_guest_memory(void *area, unsigned long size)
{
    int ret = 0;

#ifdef MADV_DONTFORK
    if (kvm_enabled() && !kvm_has_sync_mmu())
        ret = madvise(area, size, MADV_DONTFORK);
#endif

    if (ret)
        perror("madvise");

    return ret;
}

int kvm_qemu_check_extension(int ext)
{
    return kvm_check_extension(kvm_context, ext);
}

int kvm_qemu_init_env(CPUState *cenv)
{
    return kvm_arch_qemu_init_env(cenv);
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint_head kvm_sw_breakpoints =
    TAILQ_HEAD_INITIALIZER(kvm_sw_breakpoints);

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    TAILQ_FOREACH(bp, &kvm_sw_breakpoints, entry) {
        if (bp->pc == pc)
            return bp;
    }
    return NULL;
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    int err;
};

void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;

    dbg_data->err = kvm_set_guest_debug(kvm_context, cpu_single_env->cpu_index,
                                        &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = 0;
    if (env->singlestep_enabled)
        data.dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

    kvm_arch_update_guest_debug(env, &data.dbg);
    data.dbg.control |= reinject_trap;

    on_vcpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp)
            return -ENOMEM;

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            qemu_free(bp);
            return err;
        }

        TAILQ_INSERT_HEAD(&kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

*current_env
, target_ulong addr
,
1047 target_ulong len
, int type
)
1049 struct kvm_sw_breakpoint
*bp
;
1053 if (type
== GDB_BREAKPOINT_SW
) {
1054 bp
= kvm_find_sw_breakpoint(addr
);
1058 if (bp
->use_count
> 1) {
1063 err
= kvm_arch_remove_sw_breakpoint(current_env
, bp
);
1067 TAILQ_REMOVE(&kvm_sw_breakpoints
, bp
, entry
);
1070 err
= kvm_arch_remove_hw_breakpoint(addr
, len
, type
);
1075 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1076 err
= kvm_update_guest_debug(env
, 0);
1083 void kvm_remove_all_breakpoints(CPUState
*current_env
)
1085 struct kvm_sw_breakpoint
*bp
, *next
;
1088 TAILQ_FOREACH_SAFE(bp
, &kvm_sw_breakpoints
, entry
, next
) {
1089 if (kvm_arch_remove_sw_breakpoint(current_env
, bp
) != 0) {
1090 /* Try harder to find a CPU that currently sees the breakpoint. */
1091 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1092 if (kvm_arch_remove_sw_breakpoint(env
, bp
) == 0)
1097 kvm_arch_remove_all_hw_breakpoints();
1099 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
)
1100 kvm_update_guest_debug(env
, 0);
#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}

#endif /* !KVM_CAP_SET_GUEST_DEBUG */

/*
 * dirty pages logging
 */
/* FIXME: use unsigned long pointer instead of unsigned char */
unsigned char *kvm_dirty_bitmap = NULL;
int kvm_physical_memory_set_dirty_tracking(int enable)
{
    int r = 0;

    if (!kvm_enabled())
        return 0;

    if (enable) {
        if (!kvm_dirty_bitmap) {
            unsigned bitmap_size = BITMAP_SIZE(phys_ram_size);
            kvm_dirty_bitmap = qemu_malloc(bitmap_size);
            if (kvm_dirty_bitmap == NULL) {
                perror("Failed to allocate dirty pages bitmap");
                r = -1;
            } else {
                r = kvm_dirty_pages_log_enable_all(kvm_context);
            }
        }
    } else {
        if (kvm_dirty_bitmap) {
            r = kvm_dirty_pages_log_reset(kvm_context);
            qemu_free(kvm_dirty_bitmap);
            kvm_dirty_bitmap = NULL;
        }
    }
    return r;
}

/* get kvm's dirty pages bitmap and update qemu's */
int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                                  unsigned char *bitmap,
                                  unsigned int offset,
                                  unsigned long mem_size)
{
    unsigned int i, j, n = 0;
    unsigned char c;
    unsigned long page_number, addr, addr1;
    ram_addr_t ram_addr;
    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + 7) / 8;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        c = bitmap[i];
        while (c > 0) {
            j = ffsl(c) - 1;
            c &= ~(1u << j);
            page_number = i * 8 + j;
            addr1 = page_number * TARGET_PAGE_SIZE;
            addr = offset + addr1;
            ram_addr = cpu_get_physical_page_desc(addr);
            cpu_physical_memory_set_dirty(ram_addr);
            n++;
        }
    }
    return 0;
}

int kvm_get_dirty_bitmap_cb(unsigned long start, unsigned long len,
                            void *bitmap, void *opaque)
{
    return kvm_get_dirty_pages_log_range(start, bitmap, start, len);
}

/*
 * get kvm's dirty pages bitmap and update qemu's
 * we only care about physical ram, which resides in slots 0 and 3
 */
int kvm_update_dirty_pages_log(void)
{
    int r = 0;

    r = kvm_get_dirty_pages_range(kvm_context, 0, phys_ram_size,
                                  kvm_dirty_bitmap, NULL,
                                  kvm_get_dirty_bitmap_cb);
    return r;
}

, target_phys_addr_t size
,
1218 kvm_dirty_pages_log_enable_slot(kvm_context
, start
, size
);
1221 if (must_use_aliases_target(start
))
1224 kvm_dirty_pages_log_disable_slot(kvm_context
, start
, size
);
int kvm_get_phys_ram_page_bitmap(unsigned char *bitmap)
{
    unsigned int bsize = BITMAP_SIZE(phys_ram_size);
    unsigned int brsize = BITMAP_SIZE(ram_size);
    unsigned int extra_pages = (phys_ram_size - ram_size) / TARGET_PAGE_SIZE;
    unsigned int extra_bytes = (extra_pages + 7) / 8;
    unsigned int hole_start = BITMAP_SIZE(0xa0000);
    unsigned int hole_end = BITMAP_SIZE(0xc0000);

    memset(bitmap, 0xFF, brsize + extra_bytes);
    memset(bitmap + hole_start, 0, hole_end - hole_start);
    memset(bitmap + brsize + extra_bytes, 0, bsize - brsize - extra_bytes);

    return 0;
}

#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq(int irq, int level)
{
    return kvm_set_irq_level(kvm_context, irq, level);
}

#endif

int qemu_kvm_get_dirty_pages(unsigned long phys_addr, void *buf)
{
    return kvm_get_dirty_pages(kvm_context, phys_addr, buf);
}

void *kvm_cpu_create_phys_mem(target_phys_addr_t start_addr,
                              unsigned long size, int log, int writable)
{
    return kvm_create_phys_mem(kvm_context, start_addr, size, log, writable);
}

void kvm_cpu_destroy_phys_mem(target_phys_addr_t start_addr,
                              unsigned long size)
{
    kvm_destroy_phys_mem(kvm_context, start_addr, size);
}

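/*
 * kvm_mutex_lock()/kvm_mutex_unlock() let non-vcpu threads take and release
 * the global qemu_mutex; cpu_single_env is cleared on lock because the
 * caller is not a vcpu thread.
 */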
void kvm_mutex_unlock(void)
{
    assert(!cpu_single_env);
    pthread_mutex_unlock(&qemu_mutex);
}

void kvm_mutex_lock(void)
{
    pthread_mutex_lock(&qemu_mutex);
    cpu_single_env = NULL;
}

int qemu_kvm_register_coalesced_mmio(target_phys_addr_t addr, unsigned int size)
{
    return kvm_register_coalesced_mmio(kvm_context, addr, size);
}

int qemu_kvm_unregister_coalesced_mmio(target_phys_addr_t addr,
                                       unsigned int size)
{
    return kvm_unregister_coalesced_mmio(kvm_context, addr, size);
}

int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    return kvm_register_coalesced_mmio(kvm_context, start, size);
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    return kvm_unregister_coalesced_mmio(kvm_context, start, size);
}

#ifdef USE_KVM_DEVICE_ASSIGNMENT
void kvm_add_ioperm_data(struct ioperm_data *data)
{
    LIST_INSERT_HEAD(&ioperm_head, data, entries);
}

void kvm_ioperm(CPUState *env, void *data)
{
    if (kvm_enabled() && qemu_system_ready)
        on_vcpu(env, kvm_arch_do_ioperm, data);
}
#endif

void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
{
    void *buf;

#ifdef TARGET_I386
    if (must_use_aliases_source(start_addr))
        return;
#endif

    buf = qemu_malloc((end_addr - start_addr) / 8 + 2);
    kvm_get_dirty_pages_range(kvm_context, start_addr, end_addr - start_addr,
                              buf, NULL, kvm_get_dirty_bitmap_cb);
    qemu_free(buf);
}

int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len)
{
#ifdef TARGET_I386
    if (must_use_aliases_source(phys_addr))
        return 0;
#endif
    kvm_qemu_log_memory(phys_addr, len, 1);
    return 0;
}

int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len)
{
#ifdef TARGET_I386
    if (must_use_aliases_source(phys_addr))
        return 0;
#endif
    kvm_qemu_log_memory(phys_addr, len, 0);
    return 0;
}

/* hack: both libkvm and upstream qemu define kvm_has_sync_mmu(), differently */
#undef kvm_has_sync_mmu
int qemu_kvm_has_sync_mmu(void)
{
    return kvm_has_sync_mmu(kvm_context);
}