/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "qmp-commands.h"

#include "qemu-thread.h"
#include "cpus.h"
#include "main-loop.h"

#ifndef _WIN32
#include "compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed.  */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

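/* Bookkeeping note: in icount mode the virtual clock is derived as
 *   vm_clock time = qemu_icount_bias + (qemu_icount << icount_time_shift)
 * where qemu_icount counts executed guest instructions and
 * icount_time_shift scales instructions into clock ticks (see
 * cpu_get_icount() below).  The adjustment code only ever changes the
 * shift and the bias, never the instruction count itself. */
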
typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that.  */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

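/* For example, assuming the usual nanosecond tick resolution
 * (get_ticks_per_sec() == 10^9), the wobble tolerance above allows
 * roughly 100ms of drift between real and virtual time before
 * icount_adjust() below reacts. */
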
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }
    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock_ns(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

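/* Example of the shift semantics: with icount_time_shift == 3, each
 * completed instruction advances the virtual clock by 2^3 = 8 ns, so
 * the guest appears to run at 125 MIPS (the initial guess made in
 * configure_icount() below).  Decrementing the shift makes each
 * instruction worth fewer ticks, slowing virtual time relative to
 * execution; incrementing it does the opposite.  The bias is rebased
 * on every change so the virtual clock never jumps. */
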
static void icount_adjust_rt(void *opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

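/* qemu_icount_round() is a ceiling division: it converts a vm_clock
 * deadline in ticks into the smallest instruction budget that covers
 * it.  E.g. with icount_time_shift == 3, a 20-tick deadline rounds up
 * to (20 + 7) >> 3 == 3 instructions. */
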
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let the vm_clock run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(vm_clock)) {
            qemu_notify_event();
        }
    }
    vm_clock_warp_start = -1;
}

void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_clock_deadline(vm_clock);
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time,
         * (related to the time left until the next event) has passed.  This
         * rt_clock timer will do this.  This avoids that the warps are too
         * visible externally---for example, you will not be sending network
         * packets continuously instead of every 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else {
        qemu_notify_event();
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}

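/* Usage note: this is driven by the "-icount [N|auto]" command line
 * option.  A numeric argument pins icount_time_shift and sets
 * use_icount == 1 (fixed mode); "auto" sets use_icount == 2 and lets
 * the two timers above retune the shift adaptively. */
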
/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

int cpu_is_stopped(CPUState *env)
{
    return !runstate_is_running() || env->stopped;
}

static void do_vm_stop(RunState state)
{
    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        bdrv_drain_all();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static int cpu_can_run(CPUState *env)
{
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !runstate_is_running()) {
        return 0;
    }
    return 1;
}

static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !runstate_is_running()) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env) ||
        (kvm_enabled() && kvm_irqchip_in_kernel())) {
        return false;
    }
    return true;
}

bool all_cpu_threads_idle(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *env)
{
    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
    env->stopped = 1;
}

static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *env)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

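/* Note that the work item above lives on run_on_cpu()'s stack.  That is
 * only safe because the caller blocks on qemu_work_cond until the target
 * vCPU thread has executed the function and set wi.done (see
 * flush_queued_work() below), so the item cannot outlive the frame. */
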
static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);
    env->thread_id = qemu_get_thread_id();
    cpu_single_env = env;

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->thread_id = qemu_get_thread_id();
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(env);
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

static void qemu_cpu_kick_thread(CPUState *env)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(env->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(env)) {
        SuspendThread(env->hThread);
        cpu_signal(0);
        ResumeThread(env->hThread);
    }
#endif
}

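/* The two halves above implement the same "kick" differently: on POSIX
 * hosts a SIG_IPI signal interrupts the vCPU thread (KVM_RUN returns
 * early thanks to the signal mask set up in qemu_kvm_init_cpu_signals(),
 * and cpu_signal() requests a TCG exit), while on Windows the caller
 * briefly suspends the target thread so cpu_signal() can set the exit
 * request on its behalf. */
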
void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (kvm_enabled() && !env->thread_kicked) {
        qemu_cpu_kick_thread(env);
        env->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_env);
        cpu_single_env->thread_kicked = true;
    }
#else
    abort();
#endif
}

int qemu_cpu_is_self(void *_env)
{
    CPUState *env = _env;

    return qemu_thread_is_self(env->thread);
}

void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped) {
            return 0;
        }
        penv = penv->next_cpu;
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }

    if (!qemu_thread_is_self(&io_thread)) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            /* penv was walked to NULL by the loop above; restart from the
               head so the CPUs are actually marked as stopped. */
            penv = first_cpu;
            while (penv) {
                penv->stop = 0;
                penv->stopped = 1;
                penv = penv->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    qemu_clock_enable(vm_clock, true);
    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }
}

static void qemu_tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = g_malloc0(sizeof(QemuThread));
        env->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        tcg_halt_cond = env->halt_cond;
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        env->hThread = qemu_thread_get_handle(env->thread);
#endif
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    env->stopped = 1;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else {
        qemu_tcg_init_vcpu(env);
    }
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stop = 0;
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

void vm_stop(RunState state)
{
    if (!qemu_thread_is_self(&io_thread)) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
void vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        vm_stop(state);
    } else {
        runstate_set(state);
    }
}

static int tcg_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_clock_deadline(vm_clock));
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

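/* The instruction budget above is split between icount_decr.u16.low and
 * icount_extra because the TCG fast path only decrements a 16-bit
 * counter; budgets larger than 0xffff are parked in icount_extra and
 * refill the low half when it reaches zero.  Afterwards, whatever was
 * not executed is folded back out of qemu_icount so the virtual clock
 * reflects only completed instructions. */
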
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(env)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

void set_cpu_log_filename(const char *optarg)
{
    cpu_set_log_filename(optarg);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        CpuInfoList *info;

        cpu_synchronize_state(env);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = env->cpu_index;
        info->value->current = (env == first_cpu);
        info->value->halted = env->halted;
        info->value->thread_id = env->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *env;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    for (env = first_cpu; env; env = env->next_cpu) {
        if (cpu_index == env->cpu_index) {
            break;
        }
    }

    if (env == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_set(errp, QERR_OPEN_FILE_FAILED, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_memory_rw_debug(env, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_set(errp, QERR_OPEN_FILE_FAILED, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!env->apic_state) {
            cpu_interrupt(env, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}