// SPDX-License-Identifier: GPL-2.0

/*
 * Clocksource driver for the synthetic counter and timers
 * provided by the Hyper-V hypervisor to guest VMs, as described
 * in the Hyper-V Top Level Functional Spec (TLFS). This driver
 * is instruction set architecture independent.
 *
 * Copyright (C) 2019, Microsoft, Inc.
 *
 * Author:  Michael Kelley <mikelley@microsoft.com>
 */

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <clocksource/hyperv_timer.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>

static struct clock_event_device __percpu *hv_clock_event;

/*
 * If false, we're using the old mechanism for stimer0 interrupts
 * where it sends a VMbus message when it expires. The old
 * mechanism is used when running on older versions of Hyper-V
 * that don't support Direct Mode. While Hyper-V provides
 * four stimers per CPU, Linux uses only stimer0.
 */
static bool direct_mode_enabled;

static int stimer0_irq;
static int stimer0_vector;
static int stimer0_message_sint;

/*
 * ISR for when stimer0 is operating in Direct Mode. Direct Mode
 * does not use VMbus or any VMbus messages, so process here and not
 * in the VMbus driver code.
 */
void hv_stimer0_isr(void)
{
	struct clock_event_device *ce;

	ce = this_cpu_ptr(hv_clock_event);
	ce->event_handler(ce);
}
EXPORT_SYMBOL_GPL(hv_stimer0_isr);

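/*
 * Note: the synthetic timer is programmed with an absolute expiration
 * time expressed in the same 100ns units as the partition reference
 * counter, so hv_ce_set_next_event() below converts the relative delta
 * it is given into an absolute tick count by adding it to the current
 * counter value.
 */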
static int hv_ce_set_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	u64 current_tick;

	current_tick = hyperv_cs->read(NULL);
	current_tick += delta;
	hv_init_timer(0, current_tick);
	return 0;
}

static int hv_ce_shutdown(struct clock_event_device *evt)
{
	hv_init_timer(0, 0);
	hv_init_timer_config(0, 0);
	if (direct_mode_enabled)
		hv_disable_stimer0_percpu_irq(stimer0_irq);

	return 0;
}

static int hv_ce_set_oneshot(struct clock_event_device *evt)
{
	union hv_stimer_config timer_cfg;

	timer_cfg.as_uint64 = 0;
	timer_cfg.enable = 1;
	timer_cfg.auto_enable = 1;
	if (direct_mode_enabled) {
		/*
		 * When it expires, the timer will directly interrupt
		 * on the specified hardware vector/IRQ.
		 */
		timer_cfg.direct_mode = 1;
		timer_cfg.apic_vector = stimer0_vector;
		hv_enable_stimer0_percpu_irq(stimer0_irq);
	} else {
		/*
		 * When it expires, the timer will generate a VMbus message,
		 * to be handled by the normal VMbus interrupt handler.
		 */
		timer_cfg.direct_mode = 0;
		timer_cfg.sintx = stimer0_message_sint;
	}
	hv_init_timer_config(0, timer_cfg.as_uint64);
	return 0;
}

/*
 * hv_stimer_init - Per-cpu initialization of the clockevent
 */
void hv_stimer_init(unsigned int cpu)
{
	struct clock_event_device *ce;

	/*
	 * Synthetic timers are always available except on old versions of
	 * Hyper-V on x86. In that case, just return as Linux will use a
	 * clocksource based on emulated PIT or LAPIC timer hardware.
	 */
	if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
		return;

	ce = per_cpu_ptr(hv_clock_event, cpu);
	ce->name = "Hyper-V clockevent";
	ce->features = CLOCK_EVT_FEAT_ONESHOT;
	ce->cpumask = cpumask_of(cpu);
	ce->rating = 1000;
	ce->set_state_shutdown = hv_ce_shutdown;
	ce->set_state_oneshot = hv_ce_set_oneshot;
	ce->set_next_event = hv_ce_set_next_event;

	clockevents_config_and_register(ce,
					HV_CLOCK_HZ,
					HV_MIN_DELTA_TICKS,
					HV_MAX_MAX_DELTA_TICKS);
}
EXPORT_SYMBOL_GPL(hv_stimer_init);

/*
 * hv_stimer_cleanup - Per-cpu cleanup of the clockevent
 */
void hv_stimer_cleanup(unsigned int cpu)
{
	struct clock_event_device *ce;

	/* Turn off clockevent device */
	if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
		ce = per_cpu_ptr(hv_clock_event, cpu);
		hv_ce_shutdown(ce);
	}
}
EXPORT_SYMBOL_GPL(hv_stimer_cleanup);

/* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
int hv_stimer_alloc(int sint)
{
	int ret;

	hv_clock_event = alloc_percpu(struct clock_event_device);
	if (!hv_clock_event)
		return -ENOMEM;

	direct_mode_enabled = ms_hyperv.misc_features &
			HV_STIMER_DIRECT_MODE_AVAILABLE;
	if (direct_mode_enabled) {
		ret = hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
				hv_stimer0_isr);
		if (ret) {
			free_percpu(hv_clock_event);
			hv_clock_event = NULL;
			return ret;
		}
	}

	stimer0_message_sint = sint;
	return 0;
}
EXPORT_SYMBOL_GPL(hv_stimer_alloc);

/* hv_stimer_free - Free global resources allocated by hv_stimer_alloc() */
void hv_stimer_free(void)
{
	if (direct_mode_enabled && (stimer0_irq != 0)) {
		hv_remove_stimer0_irq(stimer0_irq);
		stimer0_irq = 0;
	}
	free_percpu(hv_clock_event);
	hv_clock_event = NULL;
}
EXPORT_SYMBOL_GPL(hv_stimer_free);

/*
 * Do a global cleanup of clockevents for the cases of kexec and
 * vmbus exit
 */
void hv_stimer_global_cleanup(void)
{
	int	cpu;
	struct clock_event_device *ce;

	if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
		for_each_present_cpu(cpu) {
			ce = per_cpu_ptr(hv_clock_event, cpu);
			clockevents_unbind_device(ce, cpu);
		}
	}
	hv_stimer_free();
}
EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);

/*
 * Code and definitions for the Hyper-V clocksources. Two
 * clocksources are defined: one that reads the Hyper-V defined MSR, and
 * the other that uses the TSC reference page feature as defined in the
 * TLFS. The MSR version is for compatibility with old versions of
 * Hyper-V and 32-bit x86. The TSC reference page version is preferred.
 */

struct clocksource *hyperv_cs;
EXPORT_SYMBOL_GPL(hyperv_cs);

#ifdef CONFIG_HYPERV_TSCPAGE

static struct ms_hyperv_tsc_page *tsc_pg;

struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
	return tsc_pg;
}
EXPORT_SYMBOL_GPL(hv_get_tsc_page);

static u64 notrace read_hv_sched_clock_tsc(void)
{
	u64 current_tick = hv_read_tsc_page(tsc_pg);

	if (current_tick == U64_MAX)
		hv_get_time_ref_count(current_tick);

	return current_tick;
}

static u64 read_hv_clock_tsc(struct clocksource *arg)
{
	return read_hv_sched_clock_tsc();
}

static struct clocksource hyperv_cs_tsc = {
	.name	= "hyperv_clocksource_tsc_page",
	.rating	= 400,
	.read	= read_hv_clock_tsc,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};
#endif

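/*
 * The MSR-based path below reads the partition reference counter
 * register on every call. Each read traps to the hypervisor, so it is
 * slower than the TSC page, but it works on 32-bit x86 and on older
 * versions of Hyper-V that don't provide the TSC reference page.
 */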
static u64 notrace read_hv_sched_clock_msr(void)
{
	u64 current_tick;

	/*
	 * Read the partition counter to get the current tick count. This count
	 * is set to 0 when the partition is created and is incremented in
	 * 100 nanosecond units.
	 */
	hv_get_time_ref_count(current_tick);
	return current_tick;
}

static u64 read_hv_clock_msr(struct clocksource *arg)
{
	return read_hv_sched_clock_msr();
}

static struct clocksource hyperv_cs_msr = {
	.name	= "hyperv_clocksource_msr",
	.rating	= 400,
	.read	= read_hv_clock_msr,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

#ifdef CONFIG_HYPERV_TSCPAGE
static bool __init hv_init_tsc_clocksource(void)
{
	u64		tsc_msr;
	phys_addr_t	phys_addr;

	if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
		return false;

	tsc_pg = vmalloc(PAGE_SIZE);
	if (!tsc_pg)
		return false;

	hyperv_cs = &hyperv_cs_tsc;
	phys_addr = page_to_phys(vmalloc_to_page(tsc_pg));

	/*
	 * The Hyper-V TLFS specifies to preserve the value of reserved
	 * bits in registers. So read the existing value, preserve the
	 * low order 12 bits, and add in the guest physical address
	 * (which already has at least the low 12 bits set to zero since
	 * it is page aligned). Also set the "enable" bit, which is bit 0.
	 */
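	/*
	 * Reference TSC MSR layout per the TLFS: bit 0 is the enable bit,
	 * bits 11:1 are reserved, and bits 63:12 hold the guest physical
	 * page number of the TSC page.
	 */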
	hv_get_reference_tsc(tsc_msr);
	tsc_msr &= GENMASK_ULL(11, 0);
	tsc_msr = tsc_msr | 0x1 | (u64)phys_addr;
	hv_set_reference_tsc(tsc_msr);

	hv_set_clocksource_vdso(hyperv_cs_tsc);
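	/*
	 * The reference counter ticks every 100ns, so the clocksource
	 * registers at NSEC_PER_SEC/100 = 10 MHz; HV_CLOCK_HZ used for
	 * sched_clock_register() below expresses the same rate.
	 */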
	clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);

	/* sched_clock_register is needed on ARM64 but is a no-op on x86 */
	sched_clock_register(read_hv_sched_clock_tsc, 64, HV_CLOCK_HZ);

	return true;
}
#else
static bool __init hv_init_tsc_clocksource(void)
{
	return false;
}
#endif

void __init hv_init_clocksource(void)
{
	/*
	 * Try to set up the TSC page clocksource. If it succeeds, we're
	 * done. Otherwise, set up the MSR clocksource. At least one of
	 * these will always be available except on very old versions of
	 * Hyper-V on x86. In that case we won't have a Hyper-V
	 * clocksource, but Linux will still run with a clocksource based
	 * on the emulated PIT or LAPIC timer.
	 */
	if (hv_init_tsc_clocksource())
		return;

	if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
		return;

	hyperv_cs = &hyperv_cs_msr;
	clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);

	/* sched_clock_register is needed on ARM64 but is a no-op on x86 */
	sched_clock_register(read_hv_sched_clock_msr, 64, HV_CLOCK_HZ);
}
EXPORT_SYMBOL_GPL(hv_init_clocksource);