/*
 * Xen stolen ticks accounting.
 */
4 #include <linux/kernel.h>
5 #include <linux/kernel_stat.h>
6 #include <linux/math64.h>
9 #include <asm/xen/hypervisor.h>
10 #include <asm/xen/hypercall.h>
12 #include <xen/events.h>
13 #include <xen/features.h>
14 #include <xen/interface/xen.h>
15 #include <xen/interface/vcpu.h>
16 #include <xen/xen-ops.h>
/*
 * Per-CPU runstate info. This memory is registered with the hypervisor
 * (see xen_setup_runstate_info()), after which Xen itself updates it
 * whenever the vCPU changes run state.
 */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
/* Return a consistent snapshot of a 64-bit time/counter value. */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		/*
		 * A 32-bit build cannot load the value in a single
		 * access, so read the two halves separately and retry
		 * if a concurrent update tore the read.
		 */
		u32 *p32 = (u32 *)p;
		u32 h, l, h2;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = READ_ONCE(p32[1]);
			l = READ_ONCE(p32[0]);
			h2 = READ_ONCE(p32[1]);
		} while (h2 != h);

		ret = (((u64)h) << 32) | l;
	} else
		/* 64-bit loads are atomic here; one READ_ONCE suffices. */
		ret = READ_ONCE(*p);

	return ret;
}
/*
 * Copy a tear-free snapshot of this CPU's runstate info into @res.
 *
 * Must run with preemption disabled (enforced by the BUG_ON below),
 * since it reads the current CPU's per-CPU runstate area. Uses a
 * seqlock-style retry: state_entry_time is sampled before and after the
 * copy, and the copy is redone if the hypervisor updated the record
 * in between.
 */
void xen_get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = this_cpu_ptr(&xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		*res = READ_ONCE(*state);
	} while (get64(&state->state_entry_time) != state_time);
}
72 /* return true when a vcpu could run but has no real cpu to run on */
73 bool xen_vcpu_stolen(int vcpu
)
75 return per_cpu(xen_runstate
, vcpu
).state
== RUNSTATE_runnable
;
78 void xen_setup_runstate_info(int cpu
)
80 struct vcpu_register_runstate_memory_area area
;
82 area
.addr
.v
= &per_cpu(xen_runstate
, cpu
);
84 if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area
,