/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>

#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

#define XEN_SHIFT 22

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000
#define NS_PER_TICK	(1000000000LL / HZ)
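
/* For example, with HZ == 250 a tick is 4ms, so NS_PER_TICK works out
   to 4,000,000 ns; with HZ == 1000 it is 1,000,000 ns per tick. */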

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);

/* unused ns of stolen and blocked time */
static DEFINE_PER_CPU(u64, xen_residual_stolen);
static DEFINE_PER_CPU(u64, xen_residual_blocked);

/* return a consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = p32[1];
			barrier();
			l = p32[0];
			barrier();
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}
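
/*
 * Worked example of the torn read the loop above guards against on a
 * 32-bit kernel: if the value advances from 0x00000000ffffffff to
 * 0x0000000100000000 between the two halves of a naive two-word read,
 * the result could be 0x00000001ffffffff or 0x0000000000000000, both
 * values the counter never held.  Re-checking the high word detects
 * the carry and retries.
 */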

static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = &__get_cpu_var(xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}
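
/*
 * Illustrative sketch of how a snapshot is consumed (this is what
 * do_stolen_accounting() below does): with preemption disabled, copy
 * the current runstate and diff its per-state nanosecond counters
 * against an earlier snapshot "snap":
 *
 *	struct vcpu_runstate_info state;
 *	get_runstate_snapshot(&state);
 *	stolen_ns = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
 */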

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

void xen_setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(xen_runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}
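
/*
 * After registration the hypervisor updates the per-cpu xen_runstate
 * structure directly on every runstate change of that VCPU; the guest
 * only ever reads it (see get_runstate_snapshot() above).
 */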

static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 blocked, runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(xen_runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing* */
	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time. */
	stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__get_cpu_var(xen_residual_stolen) = stolen;
	account_steal_ticks(ticks);

	/* Add the appropriate number of ticks of blocked time,
	   including any left-overs from last time. */
	blocked += __get_cpu_var(xen_residual_blocked);

	if (blocked < 0)
		blocked = 0;

	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
	__get_cpu_var(xen_residual_blocked) = blocked;
	account_idle_ticks(ticks);
}
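
/*
 * Worked example of the residual handling, assuming HZ == 250 (so
 * NS_PER_TICK == 4,000,000): if 10,500,000 ns were stolen since the
 * last snapshot, iter_div_u64_rem() yields 2 ticks and leaves
 * stolen == 2,500,000 ns, which is parked in xen_residual_stolen and
 * added in on the next pass instead of being dropped.
 */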

/*
 * Xen sched_clock implementation.  Returns the number of unstolen
 * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
 * states.
 */
unsigned long long xen_sched_clock(void)
{
	struct vcpu_runstate_info state;
	cycle_t now;
	u64 ret;
	s64 offset;

	/*
	 * Ideally sched_clock should be called on a per-cpu basis
	 * anyway, so preempt should already be disabled, but that's
	 * not current practice at the moment.
	 */
	preempt_disable();

	now = xen_clocksource_read();

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	offset = now - state.state_entry_time;
	if (offset < 0)
		offset = 0;

	ret = state.time[RUNSTATE_blocked] +
		state.time[RUNSTATE_running] +
		offset;

	preempt_enable();

	return ret;
}
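
/*
 * Example: a VCPU that has accumulated 5,000,000,000 ns RUNNING and
 * 2,000,000,000 ns BLOCKED, and entered its current RUNNING state
 * 1,000,000 ns ago, reads 7,001,000,000 ns here; time spent runnable
 * or offline (i.e. stolen) never advances this clock.
 */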

/* Get the TSC speed from Xen */
unsigned long xen_tsc_khz(void)
{
	struct pvclock_vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	return pvclock_tsc_khz(info);
}

cycle_t xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	cycle_t ret;

	src = &get_cpu_var(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	put_cpu_var(xen_vcpu);
	return ret;
}

static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}

static void xen_read_wallclock(struct timespec *ts)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
	struct pvclock_vcpu_time_info *vcpu_time;

	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
}

unsigned long xen_get_wallclock(void)
{
	struct timespec ts;

	xen_read_wallclock(&ts);
	return ts.tv_sec;
}

int xen_set_wallclock(unsigned long now)
{
	/* do nothing for domU */
	return -1;
}

static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_get_cycles,
	.mask = ~0,
	.mult = 1<<XEN_SHIFT,		/* time directly in nanoseconds */
	.shift = XEN_SHIFT,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
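
/*
 * Note on the mult/shift pair: the clocksource core converts cycles
 * to ns as (cycles * mult) >> shift, so with mult == 1 << XEN_SHIFT
 * and shift == XEN_SHIFT the conversion is the identity -- the
 * pvclock "cycles" are already nanoseconds.
 */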

/*
   Xen clockevent implementation

   Xen has two clockevent implementations:

   The old timer_op one works with all released versions of Xen prior
   to version 3.0.4.  This version of the hypervisor provides a
   single-shot timer with nanosecond resolution.  However, sharing the
   same event channel is a 100Hz tick which is delivered while the
   vcpu is running.  We don't care about or use this tick, but it will
   cause the core time code to think the timer fired too soon, and
   will end up resetting it each time.  It could be filtered, but
   doing so has complications when the ktime clocksource is not yet
   the xen clocksource (ie, at boot time).

   The new vcpu_op-based timer interface allows the tick timer period
   to be changed or turned off.  The tick timer is not useful as a
   periodic timer because events are only delivered to running vcpus.
   The one-shot timer can report when a timeout is in the past, so
   set_next_event is capable of returning -ETIME when appropriate.
   This interface is used when available.
*/

/*
  Get a hypervisor absolute time.  In theory we could maintain an
  offset between the kernel's time and the hypervisor's time, and
  apply that to a kernel's absolute timeout.  Unfortunately the
  hypervisor and kernel times can drift even if the kernel is using
  the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}
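
/*
 * For example, arming a timeout 1ms out is just
 * HYPERVISOR_set_timer_op(get_abs_timeout(1000000)): the relative
 * delta is converted to an absolute Xen system time by adding it to
 * the current clocksource reading.
 */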

static void xen_timerop_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* unsupported */
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_RESUME:
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		HYPERVISOR_set_timer_op(0);  /* cancel timeout */
		break;
	}
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}

static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_timerop_set_mode,
	.set_next_event = xen_timerop_set_next_event,
};

static void xen_vcpuop_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);	/* unsupported */
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	single.timeout_abs_ns = get_abs_timeout(delta);
	single.flags = VCPU_SSHOTTMR_future;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

	BUG_ON(ret != 0 && ret != -ETIME);

	return ret;
}
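
/*
 * Because VCPU_SSHOTTMR_future is set, Xen rejects a timeout that is
 * already in the past with -ETIME instead of silently never firing;
 * returning that to the clockevents core lets it reprogram with a
 * later expiry (see the interface comment above).
 */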

static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_vcpuop_set_mode,
	.set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;
static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);

static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	do_stolen_accounting();

	return ret;
}
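
/*
 * Note the ordering above: the clockevent handler runs first (and may
 * re-arm the timer via set_next_event), then stolen/blocked time is
 * folded into the scheduler statistics on every timer interrupt.
 */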

void xen_setup_timer(int cpu)
{
	const char *name;
	struct clock_event_device *evt;
	int irq;

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	name = kasprintf(GFP_KERNEL, "timer%d", cpu);
	if (!name)
		name = "<timer kasprintf failed>";

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
				      name, NULL);

	evt = &per_cpu(xen_clock_events, cpu);
	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of(cpu);
	evt->irq = irq;
}

void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;
	BUG_ON(cpu == 0);
	evt = &per_cpu(xen_clock_events, cpu);
	unbind_from_irqhandler(evt->irq, NULL);
}

void xen_setup_cpu_clockevents(void)
{
	BUG_ON(preemptible());

	clockevents_register_device(&__get_cpu_var(xen_clock_events));
}

void xen_timer_resume(void)
{
	int cpu;

	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
	}
}

__init void xen_time_init(void)
{
	int cpu = smp_processor_id();
	struct timespec tp;

	clocksource_register(&xen_clocksource);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
		/* Successfully turned off 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();
}