/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/cutils.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/cpus.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/seqlock.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "hw/core/cpu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/cpu-throttle.h"
#include "timers-state.h"

static int64_t cpu_get_ticks_locked(void)
{
    int64_t ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Non-increasing ticks may happen if the host uses software suspend. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

/*
 * Return the time elapsed in the VM between vm_start and vm_stop.
 * cpu_get_ticks() uses units of the host CPU cycle counter.
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    qemu_spin_lock(&timers_state.vm_clock_lock);
    ticks = cpu_get_ticks_locked();
    qemu_spin_unlock(&timers_state.vm_clock_lock);
    return ticks;
}
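
/*
 * Usage sketch (illustrative): a hypothetical caller measuring how many
 * host cycles elapsed while the VM was running.  cpu_get_ticks() takes
 * vm_clock_lock internally, so the caller needs no locking of its own;
 * do_guest_work() below is a made-up placeholder.
 *
 *     int64_t before = cpu_get_ticks();
 *     do_guest_work();
 *     int64_t cycles = cpu_get_ticks() - before;
 */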

int64_t cpu_get_clock_locked(void)
{
    int64_t time;

    time = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        time += get_clock();
    }

    return time;
}

/*
 * Return the monotonic time elapsed in the VM, i.e.,
 * the time between vm_start and vm_stop.
 */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}
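
/*
 * Reader-side sketch (illustrative): any lock-free reader of timers_state
 * is expected to follow the same seqlock pattern as cpu_get_clock() above:
 * sample the sequence, read, and retry if a writer raced with us.  Writers
 * take both vm_clock_seqlock and vm_clock_lock (see cpu_enable_ticks()
 * below).
 *
 *     int64_t offset;
 *     unsigned seq;
 *     do {
 *         seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
 *         offset = timers_state.cpu_clock_offset;
 *     } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
 */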

/*
 * Enable cpu_get_ticks().
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/*
 * Disable cpu_get_ticks(): the clock is stopped.  You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}
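
/*
 * Caller-side sketch (illustrative): the enable/disable pair above is
 * meant to bracket the running phase of the VM, with the BQL held by the
 * caller.  Ticks and clock freeze at cpu_disable_ticks() and resume
 * without a jump at the next cpu_enable_ticks():
 *
 *     cpu_disable_ticks();    // e.g. on the vm_stop path
 *     ... while stopped, cpu_get_clock() stays constant ...
 *     cpu_enable_ticks();     // e.g. on the vm_start path
 */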

static bool icount_state_needed(void *opaque)
{
    return icount_enabled();
}

static bool warp_timer_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_warp_timer != NULL;
}

static bool adjust_timers_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_rt_timer != NULL;
}

static bool icount_shift_state_needed(void *opaque)
{
    return icount_enabled() == 2;
}

/*
 * Subsection for warp timer migration is optional, because it may not be
 * created.
 */
static const VMStateDescription icount_vmstate_warp_timer = {
    .name = "timer/icount/warp_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = warp_timer_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(vm_clock_warp_start, TimersState),
        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};
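
/*
 * Pattern note (illustrative): an optional subsection like the one above
 * is only emitted when its .needed callback returns true at save time, so
 * a source that never created the warp timer still produces a stream the
 * destination can accept.  A hypothetical subsection for some other
 * optional piece of TimersState would follow the same shape:
 *
 *     static const VMStateDescription example_subsection = {
 *         .name = "timer/icount/example",     // hypothetical name
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .needed = example_state_needed,     // hypothetical predicate
 *         .fields = (VMStateField[]) {
 *             VMSTATE_INT64(qemu_icount_bias, TimersState),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */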

static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription icount_vmstate_shift = {
    .name = "timer/icount/shift",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = icount_shift_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT16(icount_time_shift, TimersState),
        VMSTATE_INT64(last_delta, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &icount_vmstate_warp_timer,
        &icount_vmstate_adjust_timers,
        &icount_vmstate_shift,
        NULL
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_UNUSED(8),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &icount_vmstate_timers,
        NULL
    }
};

static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}

void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    if (!icount_enabled() || type != QEMU_CLOCK_VIRTUAL) {
        qemu_notify_event();
        return;
    }

    if (qemu_in_vcpu_thread()) {
        /*
         * A CPU is currently running; kick it back out to the
         * tcg_cpu_exec() loop so it will recalculate its
         * icount deadline immediately.
         */
        qemu_cpu_kick(current_cpu);
    } else if (first_cpu) {
        /*
         * qemu_cpu_kick is not enough to kick a halted CPU out of
         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
         * causes cpu_thread_is_idle to return false.  This way,
         * handle_icount_deadline can run.
         * If we have no CPUs at all for some reason, we don't
         * need to do anything.
         */
        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}
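
/*
 * Wiring note (illustrative): this callback is installed as the clock
 * notify hook during main-loop setup, outside this file -- presumably
 * something along the lines of:
 *
 *     init_clocks(qemu_timer_notify_cb);
 *
 * After that, re-arming a QEMU_CLOCK_VIRTUAL timer ends up here and, when
 * icount is active, kicks a vCPU so it recomputes its icount deadline.
 */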

TimersState timers_state;

/* Initialize timers state and the cpu throttle for convenience. */
void cpu_timers_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    qemu_spin_init(&timers_state.vm_clock_lock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);

    cpu_throttle_init();
}
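
/*
 * Usage sketch (illustrative): cpu_timers_init() is a one-shot setup call,
 * expected early in machine bring-up, before the first cpu_enable_ticks()
 * and before any migration can reference the vmstate registered above:
 *
 *     cpu_timers_init();
 *     ...
 *     cpu_enable_ticks();     // once the VM actually starts running
 */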