/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "hw/boards.h"

#include "tcg-cpus.h"

/* Kick all RR vCPUs */
static void qemu_cpu_kick_rr_cpus(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_exit(cpu);
    }
}
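
/*
 * Kick a vCPU out of its execution loop: in MTTCG mode only the target
 * vCPU needs to exit, while in round-robin mode every vCPU is kicked so
 * the single thread can re-evaluate which vCPU to run next.
 */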
static void tcg_kick_vcpu_thread(CPUState *cpu)
{
    if (qemu_tcg_mttcg_enabled()) {
        cpu_exit(cpu);
    } else {
        qemu_cpu_kick_rr_cpus();
    }
}

/*
 * TCG vCPU kick timer
 *
 * The kick timer is responsible for moving single threaded vCPU
 * emulation on to the next vCPU. If more than one vCPU is running, a
 * timer event will force a cpu->exit so the next vCPU can get
 * scheduled.
 *
 * The timer is removed while all vCPUs are idle and restarted again
 * once any vCPU becomes runnable.
 */

static QEMUTimer *tcg_kick_vcpu_timer;
static CPUState *tcg_current_rr_cpu;

#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
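
/* Compute the next kick deadline: current virtual time plus one kick period */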
static inline int64_t qemu_tcg_next_kick(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}

/* Kick the currently round-robin scheduled vCPU so the next one can run */
static void qemu_cpu_kick_rr_next_cpu(void)
{
    CPUState *cpu;
    do {
        cpu = qatomic_mb_read(&tcg_current_rr_cpu);
        if (cpu) {
            cpu_exit(cpu);
        }
    } while (cpu != qatomic_mb_read(&tcg_current_rr_cpu));
}
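
/* Timer callback: re-arm the kick timer and move RR execution on */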
static void kick_tcg_thread(void *opaque)
{
    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    qemu_cpu_kick_rr_next_cpu();
}
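
/*
 * The kick timer is only created when there is more than one vCPU to
 * schedule (CPU_NEXT(first_cpu) is non-NULL); with a single vCPU there
 * is nothing to round-robin between.
 */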
static void start_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           kick_tcg_thread, NULL);
    }
    if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    }
}

static void stop_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
        timer_del(tcg_kick_vcpu_timer);
    }
}
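
/*
 * No-op: TCG holds no per-vCPU accelerator state that needs freeing;
 * kept so the unplug paths below have a destroy hook to call.
 */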
static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}
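
/*
 * In RR mode a single thread services every vCPU, so it stops the kick
 * timer while all vCPUs are idle and processes pending work for each
 * vCPU once it wakes up.
 */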
static void qemu_tcg_rr_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        stop_tcg_kick_timer();
        qemu_cond_wait_iothread(first_cpu->halt_cond);
    }

    start_tcg_kick_timer();

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}
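
/*
 * Work out how many instructions the vCPU may execute before the next
 * timer deadline. In record/replay playback mode the budget comes from
 * the replay log instead of the timer subsystem.
 */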
static int64_t tcg_get_icount_limit(void)
{
    int64_t deadline;

    if (replay_mode != REPLAY_MODE_PLAY) {
        /*
         * Include all the timers, because they may need attention.
         * Overly long CPU execution may create unnecessary delay in UI.
         */
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                              QEMU_TIMER_ATTR_ALL);
        /* Check realtime timers, because they help with input processing */
        deadline = qemu_soonest_timeout(deadline,
                qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME,
                                           QEMU_TIMER_ATTR_ALL));

        /*
         * Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more
         * than INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        return icount_round(deadline);
    } else {
        return replay_get_instructions();
    }
}

static void notify_aio_contexts(void)
{
    /* Wake up other AioContexts. */
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
}

static void handle_icount_deadline(void)
{
    assert(qemu_in_vcpu_thread());
    if (icount_enabled()) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                                      QEMU_TIMER_ATTR_ALL);

        if (deadline == 0) {
            notify_aio_contexts();
        }
    }
}
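
/*
 * The instruction budget is split between the 16-bit icount_decr.u16.low
 * counter decremented by generated code and icount_extra for the
 * remainder; see process_icount_data() for the accounting on the way out.
 */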
static void prepare_icount_for_run(CPUState *cpu)
{
    if (icount_enabled()) {
        int insns_left;

        /*
         * These should always be cleared by process_icount_data after
         * each vCPU execution. However u16.high can be raised
         * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt.
         */
        g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
        g_assert(cpu->icount_extra == 0);

        cpu->icount_budget = tcg_get_icount_limit();
        insns_left = MIN(0xffff, cpu->icount_budget);
        cpu_neg(cpu)->icount_decr.u16.low = insns_left;
        cpu->icount_extra = cpu->icount_budget - insns_left;

        replay_mutex_lock();

        if (cpu->icount_budget == 0 && replay_has_checkpoint()) {
            notify_aio_contexts();
        }
    }
}
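
/*
 * Fold the executed instruction count back into the icount clock and
 * release the replay lock taken in prepare_icount_for_run().
 */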
static void process_icount_data(CPUState *cpu)
{
    if (icount_enabled()) {
        /* Account for executed instructions */
        icount_update(cpu);

        /* Reset the counters */
        cpu_neg(cpu)->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        cpu->icount_budget = 0;

        replay_account_executed_instructions();

        replay_mutex_unlock();
    }
}
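
/*
 * Run guest code on one vCPU until it leaves the execution loop,
 * optionally accumulating the time spent into the TCG profiler.
 */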
static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    assert(tcg_enabled());
#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
    qatomic_set(&tcg_ctx->prof.cpu_exec_time,
                tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
    return ret;
}

/*
 * Destroy any remaining vCPUs which have been unplugged and have
 * finished running
 */
static void deal_with_unplugged_cpus(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->unplug && !cpu_can_run(cpu)) {
            qemu_tcg_destroy_vcpu(cpu);
            cpu_thread_signal_destroyed(cpu);
            break;
        }
    }
}

/*
 * Single-threaded TCG
 *
 * In the single-threaded case each vCPU is simulated in turn. If
 * there is more than a single vCPU we create a simple timer to kick
 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
 * This is done explicitly rather than relying on side-effects
 * elsewhere.
 */

static void *tcg_rr_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait_iothread(first_cpu->halt_cond);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            current_cpu = cpu;
            qemu_wait_io_event_common(cpu);
        }
    }

    start_tcg_kick_timer();

    cpu = first_cpu;

    /* process any pending work */
    cpu->exit_request = 1;

    while (1) {
        qemu_mutex_unlock_iothread();
        replay_mutex_lock();
        qemu_mutex_lock_iothread();
        /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
        icount_account_warp_timer();

        /*
         * Run the timers here. This is much more efficient than
         * waking up the I/O thread and waiting for completion.
         */
        handle_icount_deadline();

        replay_mutex_unlock();

        if (!cpu) {
            cpu = first_cpu;
        }

        while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {

            qatomic_mb_set(&tcg_current_rr_cpu, cpu);
            current_cpu = cpu;

            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

            if (cpu_can_run(cpu)) {
                int r;

                qemu_mutex_unlock_iothread();
                prepare_icount_for_run(cpu);

                r = tcg_cpu_exec(cpu);

                process_icount_data(cpu);
                qemu_mutex_lock_iothread();

                if (r == EXCP_DEBUG) {
                    cpu_handle_guest_debug(cpu);
                    break;
                } else if (r == EXCP_ATOMIC) {
                    qemu_mutex_unlock_iothread();
                    cpu_exec_step_atomic(cpu);
                    qemu_mutex_lock_iothread();
                    break;
                }
            } else if (cpu->stop) {
                if (cpu->unplug) {
                    cpu = CPU_NEXT(cpu);
                }
                break;
            }

            cpu = CPU_NEXT(cpu);
        } /* while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) */

        /* Does not need qatomic_mb_set because a spurious wakeup is okay. */
        qatomic_set(&tcg_current_rr_cpu, NULL);

        if (cpu && cpu->exit_request) {
            qatomic_mb_set(&cpu->exit_request, 0);
        }

        if (icount_enabled() && all_cpu_threads_idle()) {
            /*
             * When all cpus are sleeping (e.g. in WFI), to avoid a deadlock
             * in the main_loop, wake it up in order to start the warp timer.
             */
            qemu_notify_event();
        }

        qemu_tcg_rr_wait_io_event();
        deal_with_unplugged_cpus();
    }

    rcu_unregister_thread();
    return NULL;
}

/*
 * Multi-threaded TCG
 *
 * In the multi-threaded case each vCPU has its own thread. The TLS
 * variable current_cpu can be used deep in the code to find the
 * current CPUState for a given thread.
 */

static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    g_assert(!icount_enabled());

    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    /* process any pending work */
    cpu->exit_request = 1;

    do {
        if (cpu_can_run(cpu)) {
            int r;
            qemu_mutex_unlock_iothread();
            r = tcg_cpu_exec(cpu);
            qemu_mutex_lock_iothread();
            switch (r) {
            case EXCP_DEBUG:
                cpu_handle_guest_debug(cpu);
                break;
            case EXCP_HALTED:
                /*
                 * During start-up the vCPU is reset and the thread is
                 * kicked several times. If we don't ensure we go back
                 * to sleep in the halted state we won't cleanly
                 * start up when the vCPU is enabled.
                 *
                 * cpu->halted should ensure we sleep in wait_io_event
                 */
                g_assert(cpu->halted);
                break;
            case EXCP_ATOMIC:
                qemu_mutex_unlock_iothread();
                cpu_exec_step_atomic(cpu);
                qemu_mutex_lock_iothread();
                /* fall through */
            default:
                /* Ignore everything else? */
                break;
            }
        }

        qatomic_mb_set(&cpu->exit_request, 0);
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_tcg_destroy_vcpu(cpu);
    cpu_thread_signal_destroyed(cpu);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
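
/*
 * In MTTCG mode every vCPU gets its own thread; in round-robin mode the
 * first vCPU creates the shared thread and later vCPUs merely attach to
 * its thread and halt condition variable.
 */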
static void tcg_start_vcpu_thread(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuCond *single_tcg_halt_cond;
    static QemuThread *single_tcg_cpu_thread;
    static int tcg_region_inited;

    assert(tcg_enabled());
    /*
     * Initialize TCG regions--once. Now is a good time, because:
     * (1) TCG's init context, prologue and target globals have been set up.
     * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
     *     -accel flag is processed, so the check doesn't work then).
     */
    if (!tcg_region_inited) {
        tcg_region_inited = 1;
        tcg_region_init();
        parallel_cpus = qemu_tcg_mttcg_enabled() &&
                        current_machine->smp.max_cpus > 1;
    }

    if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);

        if (qemu_tcg_mttcg_enabled()) {
            /* create a thread per vCPU with TCG (MTTCG) */
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                     cpu->cpu_index);

            qemu_thread_create(cpu->thread, thread_name, tcg_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

        } else {
            /* share a single thread for all cpus with TCG */
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
            qemu_thread_create(cpu->thread, thread_name,
                               tcg_rr_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

            single_tcg_halt_cond = cpu->halt_cond;
            single_tcg_cpu_thread = cpu->thread;
        }
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
    } else {
        /* For non-MTTCG cases we share the thread */
        cpu->thread = single_tcg_cpu_thread;
        cpu->halt_cond = single_tcg_halt_cond;
        cpu->thread_id = first_cpu->thread_id;
        cpu->can_do_io = 1;
        cpu->created = true;
    }
}
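
/*
 * Clock hooks for the TCG accelerator: when icount is enabled both the
 * virtual clock and the elapsed ticks are derived from the instruction
 * counter, otherwise they come from the host clock helpers.
 */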
static int64_t tcg_get_virtual_clock(void)
{
    if (icount_enabled()) {
        return icount_get();
    }
    return cpu_get_clock();
}

static int64_t tcg_get_elapsed_ticks(void)
{
    if (icount_enabled()) {
        return icount_get();
    }
    return cpu_get_ticks();
}

/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;
    g_assert(qemu_mutex_iothread_locked());

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    } else {
        qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
        if (icount_enabled() &&
            !cpu->can_do_io
            && (mask & ~old_mask) != 0) {
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    }
}
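
/* Hook table registering TCG's vCPU handling with the common cpus layer */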
const CpusAccel tcg_cpus = {
    .create_vcpu_thread = tcg_start_vcpu_thread,
    .kick_vcpu_thread = tcg_kick_vcpu_thread,

    .handle_interrupt = tcg_handle_interrupt,

    .get_virtual_clock = tcg_get_virtual_clock,
    .get_elapsed_ticks = tcg_get_elapsed_ticks,
};