/* virt/kvm/arm/arch_timer.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

static const struct kvm_irq_level default_ptimer_irq = {
        .irq    = 30,
        .level  = 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
        .irq    = 27,
        .level  = 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
                                 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
                                struct arch_timer_context *timer,
                                enum kvm_arch_timer_regs treg,
                                u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
                              struct arch_timer_context *timer,
                              enum kvm_arch_timer_regs treg);

u64 kvm_phys_timer_read(void)
{
        return timecounter->cc->read(timecounter->cc);
}
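/*
 * Map out which timers are handled directly in hardware and which are
 * emulated: with VHE both EL1 timers are direct, without VHE only the
 * virtual timer is direct and the EL1 physical timer is emulated.
 */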
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
        if (has_vhe()) {
                map->direct_vtimer = vcpu_vtimer(vcpu);
                map->direct_ptimer = vcpu_ptimer(vcpu);
                map->emul_ptimer = NULL;
        } else {
                map->direct_vtimer = vcpu_vtimer(vcpu);
                map->direct_ptimer = NULL;
                map->emul_ptimer = vcpu_ptimer(vcpu);
        }

        trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
        return static_branch_unlikely(&userspace_irqchip_in_use) &&
                unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
        hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
                      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
        hrtimer_cancel(hrt);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
        struct arch_timer_context *ctx;
        struct timer_map map;

        /*
         * We may see a timer interrupt after vcpu_put() has been called which
         * sets the CPU's vcpu pointer to NULL, because even though the timer
         * has been disabled in timer_save_state(), the hardware interrupt
         * signal may not have been retired from the interrupt controller yet.
         */
        if (!vcpu)
                return IRQ_HANDLED;

        get_timer_map(vcpu, &map);

        if (irq == host_vtimer_irq)
                ctx = map.direct_vtimer;
        else
                ctx = map.direct_ptimer;

        if (kvm_timer_should_fire(ctx))
                kvm_timer_update_irq(vcpu, true, ctx);

        if (userspace_irqchip(vcpu->kvm) &&
            !static_branch_unlikely(&has_gic_active_state))
                disable_percpu_irq(host_vtimer_irq);

        return IRQ_HANDLED;
}
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
        u64 cval, now;

        cval = timer_ctx->cnt_cval;
        now = kvm_phys_timer_read() - timer_ctx->cntvoff;

        if (now < cval) {
                u64 ns;

                ns = cyclecounter_cyc2ns(timecounter->cc,
                                         cval - now,
                                         timecounter->mask,
                                         &timecounter->frac);
                return ns;
        }

        return 0;
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
        WARN_ON(timer_ctx && timer_ctx->loaded);
        return timer_ctx &&
                !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
                (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
        u64 min_delta = ULLONG_MAX;
        int i;

        for (i = 0; i < NR_KVM_TIMERS; i++) {
                struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

                WARN(ctx->loaded, "timer %d loaded\n", i);
                if (kvm_timer_irq_can_fire(ctx))
                        min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
        }

        /* If none of the timers can fire, then return 0 */
        if (min_delta == ULLONG_MAX)
                return 0;

        return min_delta;
}
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_cpu *timer;
        struct kvm_vcpu *vcpu;
        u64 ns;

        timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
        vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

        /*
         * Check that the timer has really expired from the guest's
         * PoV (NTP on the host may have forced it to expire
         * early). If we should have slept longer, restart it.
         */
        ns = kvm_timer_earliest_exp(vcpu);
        if (unlikely(ns)) {
                hrtimer_forward_now(hrt, ns_to_ktime(ns));
                return HRTIMER_RESTART;
        }

        kvm_vcpu_wake_up(vcpu);
        return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
        struct arch_timer_context *ctx;
        struct kvm_vcpu *vcpu;
        u64 ns;

        ctx = container_of(hrt, struct arch_timer_context, hrtimer);
        vcpu = ctx->vcpu;

        trace_kvm_timer_hrtimer_expire(ctx);

        /*
         * Check that the timer has really expired from the guest's
         * PoV (NTP on the host may have forced it to expire
         * early). If not ready, schedule for a later time.
         */
        ns = kvm_timer_compute_delta(ctx);
        if (unlikely(ns)) {
                hrtimer_forward_now(hrt, ns_to_ktime(ns));
                return HRTIMER_RESTART;
        }

        kvm_timer_update_irq(vcpu, true, ctx);
        return HRTIMER_NORESTART;
}
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
        enum kvm_arch_timers index;
        u64 cval, now;

        if (!timer_ctx)
                return false;

        index = arch_timer_ctx_index(timer_ctx);

        if (timer_ctx->loaded) {
                u32 cnt_ctl = 0;

                switch (index) {
                case TIMER_VTIMER:
                        cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
                        break;
                case TIMER_PTIMER:
                        cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
                        break;
                case NR_KVM_TIMERS:
                        /* GCC is braindead */
                        cnt_ctl = 0;
                        break;
                }

                return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
                        (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
                       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
        }

        if (!kvm_timer_irq_can_fire(timer_ctx))
                return false;

        cval = timer_ctx->cnt_cval;
        now = kvm_phys_timer_read() - timer_ctx->cntvoff;

        return cval <= now;
}

bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
{
        struct timer_map map;

        get_timer_map(vcpu, &map);

        return kvm_timer_should_fire(map.direct_vtimer) ||
               kvm_timer_should_fire(map.direct_ptimer) ||
               kvm_timer_should_fire(map.emul_ptimer);
}
/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the device bitmap with the timer states */
        regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
                                    KVM_ARM_DEV_EL1_PTIMER);
        if (kvm_timer_should_fire(vtimer))
                regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
        if (kvm_timer_should_fire(ptimer))
                regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
                                 struct arch_timer_context *timer_ctx)
{
        int ret;

        timer_ctx->irq.level = new_level;
        trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
                                   timer_ctx->irq.level);

        if (!userspace_irqchip(vcpu->kvm)) {
                ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                          timer_ctx->irq.irq,
                                          timer_ctx->irq.level,
                                          timer_ctx);
                WARN_ON(ret);
        }
}

/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
        bool should_fire = kvm_timer_should_fire(ctx);

        trace_kvm_timer_emulate(ctx, should_fire);

        if (should_fire != ctx->irq.level) {
                kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
                return;
        }

        /*
         * If the timer can fire now, we don't need to have a soft timer
         * scheduled for the future. If the timer cannot fire at all,
         * then we also don't need a soft timer.
         */
        if (!kvm_timer_irq_can_fire(ctx)) {
                soft_timer_cancel(&ctx->hrtimer);
                return;
        }

        soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}
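/*
 * Save the guest's view of a directly-used timer (CTL/CVAL) into its
 * context and disable the hardware timer. Runs with interrupts disabled
 * and does nothing if the context is not currently loaded.
 */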
static void timer_save_state(struct arch_timer_context *ctx)
{
        struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
        enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
        unsigned long flags;

        if (!timer->enabled)
                return;

        local_irq_save(flags);

        if (!ctx->loaded)
                goto out;

        switch (index) {
        case TIMER_VTIMER:
                ctx->cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
                ctx->cnt_cval = read_sysreg_el0(SYS_CNTV_CVAL);

                /* Disable the timer */
                write_sysreg_el0(0, SYS_CNTV_CTL);
                isb();

                break;
        case TIMER_PTIMER:
                ctx->cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
                ctx->cnt_cval = read_sysreg_el0(SYS_CNTP_CVAL);

                /* Disable the timer */
                write_sysreg_el0(0, SYS_CNTP_CTL);
                isb();

                break;
        case NR_KVM_TIMERS:
                BUG();
        }

        trace_kvm_timer_save_state(ctx);

        ctx->loaded = false;
out:
        local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);
        struct timer_map map;

        get_timer_map(vcpu, &map);

        /*
         * If no timers are capable of raising interrupts (disabled or
         * masked), then there's no more work for us to do.
         */
        if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
            !kvm_timer_irq_can_fire(map.direct_ptimer) &&
            !kvm_timer_irq_can_fire(map.emul_ptimer))
                return;

        /*
         * At least one guest timer will expire. Schedule a background timer.
         * Set the earliest expiration time among the guest timers.
         */
        soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);

        soft_timer_cancel(&timer->bg_timer);
}
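/*
 * Restore the saved CVAL/CTL values of a directly-used timer into the
 * hardware registers. Runs with interrupts disabled and does nothing if
 * the context is already loaded.
 */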
static void timer_restore_state(struct arch_timer_context *ctx)
{
        struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
        enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
        unsigned long flags;

        if (!timer->enabled)
                return;

        local_irq_save(flags);

        if (ctx->loaded)
                goto out;

        switch (index) {
        case TIMER_VTIMER:
                write_sysreg_el0(ctx->cnt_cval, SYS_CNTV_CVAL);
                isb();
                write_sysreg_el0(ctx->cnt_ctl, SYS_CNTV_CTL);
                break;
        case TIMER_PTIMER:
                write_sysreg_el0(ctx->cnt_cval, SYS_CNTP_CVAL);
                isb();
                write_sysreg_el0(ctx->cnt_ctl, SYS_CNTP_CTL);
                break;
        case NR_KVM_TIMERS:
                BUG();
        }

        trace_kvm_timer_restore_state(ctx);

        ctx->loaded = true;
out:
        local_irq_restore(flags);
}

static void set_cntvoff(u64 cntvoff)
{
        u32 low = lower_32_bits(cntvoff);
        u32 high = upper_32_bits(cntvoff);

        /*
         * Since kvm_call_hyp doesn't fully support the ARM PCS especially on
         * 32-bit systems, but rather passes register by register shifted one
         * place (we put the function address in r0/x0), we cannot simply pass
         * a 64-bit value as an argument, but have to split the value in two
         * 32-bit halves.
         */
        kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
}
static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
        int r;
        r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
        WARN_ON(r);
}

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
        struct kvm_vcpu *vcpu = ctx->vcpu;
        bool phys_active = false;

        /*
         * Update the timer output so that it is likely to match the
         * state we're about to restore. If the timer expires between
         * this point and the register restoration, we'll take the
         * interrupt anyway.
         */
        kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

        if (irqchip_in_kernel(vcpu->kvm))
                phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);

        phys_active |= ctx->irq.level;

        set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        /*
         * Update the timer output so that it is likely to match the
         * state we're about to restore. If the timer expires between
         * this point and the register restoration, we'll take the
         * interrupt anyway.
         */
        kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

        /*
         * When using a userspace irqchip with the architected timers and a
         * host interrupt controller that doesn't support an active state, we
         * must still prevent continuously exiting from the guest, and
         * therefore mask the physical interrupt by disabling it on the host
         * interrupt controller when the virtual level is high, such that the
         * guest can make forward progress. Once we detect the output level
         * being de-asserted, we unmask the interrupt again so that we exit
         * from the guest when the timer fires.
         */
        if (vtimer->irq.level)
                disable_percpu_irq(host_vtimer_irq);
        else
                enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
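/*
 * Load the timer state on vcpu_load(): sync the interrupt state with the
 * GIC (or mask/unmask the host IRQ without one), program CNTVOFF, cancel
 * the background timer, restore the direct timers into hardware and kick
 * the emulated ptimer if there is one.
 */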
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);
        struct timer_map map;

        if (unlikely(!timer->enabled))
                return;

        get_timer_map(vcpu, &map);

        if (static_branch_likely(&has_gic_active_state)) {
                kvm_timer_vcpu_load_gic(map.direct_vtimer);
                if (map.direct_ptimer)
                        kvm_timer_vcpu_load_gic(map.direct_ptimer);
        } else {
                kvm_timer_vcpu_load_nogic(vcpu);
        }

        set_cntvoff(map.direct_vtimer->cntvoff);

        kvm_timer_unblocking(vcpu);

        timer_restore_state(map.direct_vtimer);
        if (map.direct_ptimer)
                timer_restore_state(map.direct_ptimer);

        if (map.emul_ptimer)
                timer_emulate(map.emul_ptimer);
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool vlevel, plevel;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
        plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

        return kvm_timer_should_fire(vtimer) != vlevel ||
               kvm_timer_should_fire(ptimer) != plevel;
}
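/*
 * Put the timer state on vcpu_put(): save the direct timers back to memory,
 * cancel the emulated ptimer's hrtimer (a blocking vcpu is covered by the
 * bg_timer instead) and reset CNTVOFF for the host.
 */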
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);
        struct timer_map map;

        if (unlikely(!timer->enabled))
                return;

        get_timer_map(vcpu, &map);

        timer_save_state(map.direct_vtimer);
        if (map.direct_ptimer)
                timer_save_state(map.direct_ptimer);

        /*
         * Cancel soft timer emulation, because the only case where we
         * need it after a vcpu_put is in the context of a sleeping VCPU, and
         * in that case we already factor in the deadline for the physical
         * timer when scheduling the bg_timer.
         *
         * In any case, we re-schedule the hrtimer for the physical timer when
         * coming back to the VCPU thread in kvm_timer_vcpu_load().
         */
        if (map.emul_ptimer)
                soft_timer_cancel(&map.emul_ptimer->hrtimer);

        if (swait_active(kvm_arch_vcpu_wq(vcpu)))
                kvm_timer_blocking(vcpu);

        /*
         * The kernel may decide to run userspace after calling vcpu_put, so
         * we reset cntvoff to 0 to ensure a consistent read between user
         * accesses to the virtual counter and kernel access to the physical
         * counter of non-VHE case. For VHE, the virtual counter uses a fixed
         * virtual offset of zero, so no need to zero CNTVOFF_EL2 register.
         */
        set_cntvoff(0);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        if (!kvm_timer_should_fire(vtimer)) {
                kvm_timer_update_irq(vcpu, false, vtimer);
                if (static_branch_likely(&has_gic_active_state))
                        set_timer_irq_phys_active(vtimer, false);
                else
                        enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
        }
}

void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);

        if (unlikely(!timer->enabled))
                return;

        if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
                unmask_vtimer_irq_user(vcpu);
}
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);
        struct timer_map map;

        get_timer_map(vcpu, &map);

        /*
         * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
         * and to 0 for ARMv7. We provide an implementation that always
         * resets the timer to be disabled and unmasked and is compliant with
         * the ARMv7 architecture.
         */
        vcpu_vtimer(vcpu)->cnt_ctl = 0;
        vcpu_ptimer(vcpu)->cnt_ctl = 0;

        if (timer->enabled) {
                kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
                kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));

                if (irqchip_in_kernel(vcpu->kvm)) {
                        kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
                        if (map.direct_ptimer)
                                kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
                }
        }

        if (map.emul_ptimer)
                soft_timer_cancel(&map.emul_ptimer->hrtimer);

        return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
        int i;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tmp;

        mutex_lock(&kvm->lock);
        kvm_for_each_vcpu(i, tmp, kvm)
                vcpu_vtimer(tmp)->cntvoff = cntvoff;

        /*
         * When called from the vcpu create path, the CPU being created is not
         * included in the loop above, so we just set it here as well.
         */
        vcpu_vtimer(vcpu)->cntvoff = cntvoff;
        mutex_unlock(&kvm->lock);
}
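/*
 * Per-vcpu timer setup: synchronize CNTVOFF across all vtimers, initialize
 * the background and per-context hrtimers, and assign the default PPIs and
 * host interrupt configuration.
 */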
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        /* Synchronize cntvoff across all vtimers of a VM. */
        update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
        ptimer->cntvoff = 0;

        hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
        timer->bg_timer.function = kvm_bg_timer_expire;

        hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
        hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
        vtimer->hrtimer.function = kvm_hrtimer_expire;
        ptimer->hrtimer.function = kvm_hrtimer_expire;

        vtimer->irq.irq = default_vtimer_irq.irq;
        ptimer->irq.irq = default_ptimer_irq.irq;

        vtimer->host_timer_irq = host_vtimer_irq;
        ptimer->host_timer_irq = host_ptimer_irq;

        vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
        ptimer->host_timer_irq_flags = host_ptimer_irq_flags;

        vtimer->vcpu = vcpu;
        ptimer->vcpu = vcpu;
}

static void kvm_timer_init_interrupt(void *info)
{
        enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
        enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_context *timer;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                timer = vcpu_vtimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
                break;
        case KVM_REG_ARM_TIMER_CNT:
                timer = vcpu_vtimer(vcpu);
                update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                timer = vcpu_vtimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
                break;
        case KVM_REG_ARM_PTIMER_CTL:
                timer = vcpu_ptimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
                break;
        case KVM_REG_ARM_PTIMER_CVAL:
                timer = vcpu_ptimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
                break;

        default:
                return -1;
        }

        return 0;
}
static u64 read_timer_ctl(struct arch_timer_context *timer)
{
        /*
         * Set ISTATUS bit if it's expired.
         * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
         * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
         * regardless of ENABLE bit for our implementation convenience.
         */
        if (!kvm_timer_compute_delta(timer))
                return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;
        else
                return timer->cnt_ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                return kvm_arm_timer_read(vcpu,
                                          vcpu_vtimer(vcpu), TIMER_REG_CTL);
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_arm_timer_read(vcpu,
                                          vcpu_vtimer(vcpu), TIMER_REG_CNT);
        case KVM_REG_ARM_TIMER_CVAL:
                return kvm_arm_timer_read(vcpu,
                                          vcpu_vtimer(vcpu), TIMER_REG_CVAL);
        case KVM_REG_ARM_PTIMER_CTL:
                return kvm_arm_timer_read(vcpu,
                                          vcpu_ptimer(vcpu), TIMER_REG_CTL);
        case KVM_REG_ARM_PTIMER_CNT:
                return kvm_arm_timer_read(vcpu,
                                          vcpu_vtimer(vcpu), TIMER_REG_CNT);
        case KVM_REG_ARM_PTIMER_CVAL:
                return kvm_arm_timer_read(vcpu,
                                          vcpu_ptimer(vcpu), TIMER_REG_CVAL);
        }
        return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
                              struct arch_timer_context *timer,
                              enum kvm_arch_timer_regs treg)
{
        u64 val;

        switch (treg) {
        case TIMER_REG_TVAL:
                val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
                break;

        case TIMER_REG_CTL:
                val = read_timer_ctl(timer);
                break;

        case TIMER_REG_CVAL:
                val = timer->cnt_cval;
                break;

        case TIMER_REG_CNT:
                val = kvm_phys_timer_read() - timer->cntvoff;
                break;

        default:
                BUG();
        }

        return val;
}
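/*
 * Read a timer register with the vcpu's timer state temporarily put (saved
 * to memory) and reloaded afterwards, so that the in-memory context is the
 * single view being accessed.
 */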
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
                              enum kvm_arch_timers tmr,
                              enum kvm_arch_timer_regs treg)
{
        u64 val;

        preempt_disable();
        kvm_timer_vcpu_put(vcpu);

        val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);

        kvm_timer_vcpu_load(vcpu);
        preempt_enable();

        return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
                                struct arch_timer_context *timer,
                                enum kvm_arch_timer_regs treg,
                                u64 val)
{
        switch (treg) {
        case TIMER_REG_TVAL:
                timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
                break;

        case TIMER_REG_CTL:
                timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
                break;

        case TIMER_REG_CVAL:
                timer->cnt_cval = val;
                break;

        default:
                BUG();
        }
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
                                enum kvm_arch_timers tmr,
                                enum kvm_arch_timer_regs treg,
                                u64 val)
{
        preempt_disable();
        kvm_timer_vcpu_put(vcpu);

        kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);

        kvm_timer_vcpu_load(vcpu);
        preempt_enable();
}

static int kvm_timer_starting_cpu(unsigned int cpu)
{
        kvm_timer_init_interrupt(NULL);
        return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
        disable_percpu_irq(host_vtimer_irq);
        return 0;
}
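/*
 * One-time hypervisor init: take the timecounter and the EL1 virtual (and,
 * if available, physical) timer PPIs from the arch timer driver, request
 * them as per-CPU interrupts and register the CPU hotplug callbacks.
 */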
int kvm_timer_hyp_init(bool has_gic)
{
        struct arch_timer_kvm_info *info;
        int err;

        info = arch_timer_get_kvm_info();
        timecounter = &info->timecounter;

        if (!timecounter->cc) {
                kvm_err("kvm_arch_timer: uninitialized timecounter\n");
                return -ENODEV;
        }

        /* First, do the virtual EL1 timer irq */

        if (info->virtual_irq <= 0) {
                kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
                        info->virtual_irq);
                return -ENODEV;
        }
        host_vtimer_irq = info->virtual_irq;

        host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
        if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
            host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
                kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
                        host_vtimer_irq);
                host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
        }

        err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
                                 "kvm guest vtimer", kvm_get_running_vcpus());
        if (err) {
                kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
                        host_vtimer_irq, err);
                return err;
        }

        if (has_gic) {
                err = irq_set_vcpu_affinity(host_vtimer_irq,
                                            kvm_get_running_vcpus());
                if (err) {
                        kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
                        goto out_free_irq;
                }

                static_branch_enable(&has_gic_active_state);
        }

        kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

        /* Now let's do the physical EL1 timer irq */

        if (info->physical_irq > 0) {
                host_ptimer_irq = info->physical_irq;
                host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq);
                if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH &&
                    host_ptimer_irq_flags != IRQF_TRIGGER_LOW) {
                        kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n",
                                host_ptimer_irq);
                        host_ptimer_irq_flags = IRQF_TRIGGER_LOW;
                }

                err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
                                         "kvm guest ptimer", kvm_get_running_vcpus());
                if (err) {
                        kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
                                host_ptimer_irq, err);
                        return err;
                }

                if (has_gic) {
                        err = irq_set_vcpu_affinity(host_ptimer_irq,
                                                    kvm_get_running_vcpus());
                        if (err) {
                                kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
                                goto out_free_irq;
                        }
                }

                kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
        } else if (has_vhe()) {
                kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
                        info->physical_irq);
                err = -ENODEV;
                goto out_free_irq;
        }

        cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
                          "kvm/arm/timer:starting", kvm_timer_starting_cpu,
                          kvm_timer_dying_cpu);
        return 0;
out_free_irq:
        free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
        return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);

        soft_timer_cancel(&timer->bg_timer);
}
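/*
 * Claim ownership of the configured timer PPIs in the vgic and check that
 * every vcpu agrees on the same vtimer/ptimer interrupt numbers.
 */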
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
        int vtimer_irq, ptimer_irq;
        int i, ret;

        vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
        ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
        if (ret)
                return false;

        ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
        ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
        if (ret)
                return false;

        kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
                if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
                    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
                        return false;
        }

        return true;
}

bool kvm_arch_timer_get_input_level(int vintid)
{
        struct kvm_vcpu *vcpu = kvm_arm_get_running_vcpu();
        struct arch_timer_context *timer;

        if (vintid == vcpu_vtimer(vcpu)->irq.irq)
                timer = vcpu_vtimer(vcpu);
        else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
                timer = vcpu_ptimer(vcpu);
        else
                BUG();

        return kvm_timer_should_fire(timer);
}
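/*
 * Finalize the timer setup for a vcpu: with an in-kernel irqchip, map the
 * host interrupts of the direct timers to the corresponding guest PPIs.
 */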
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);
        struct timer_map map;
        int ret;

        if (timer->enabled)
                return 0;

        /* Without a VGIC we do not map virtual IRQs to physical IRQs */
        if (!irqchip_in_kernel(vcpu->kvm))
                goto no_vgic;

        if (!vgic_initialized(vcpu->kvm))
                return -ENODEV;

        if (!timer_irqs_are_valid(vcpu)) {
                kvm_debug("incorrectly configured timer irqs\n");
                return -EINVAL;
        }

        get_timer_map(vcpu, &map);

        ret = kvm_vgic_map_phys_irq(vcpu,
                                    map.direct_vtimer->host_timer_irq,
                                    map.direct_vtimer->irq.irq,
                                    kvm_arch_timer_get_input_level);
        if (ret)
                return ret;

        if (map.direct_ptimer) {
                ret = kvm_vgic_map_phys_irq(vcpu,
                                            map.direct_ptimer->host_timer_irq,
                                            map.direct_ptimer->irq.irq,
                                            kvm_arch_timer_get_input_level);
        }

        if (ret)
                return ret;

no_vgic:
        timer->enabled = 1;
        return 0;
}
/*
 * On a VHE system, we only need to configure the EL2 timer trap register once,
 * not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
 * and this makes those bits have no effect for the host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
        /* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
        u32 cnthctl_shift = 10;
        u64 val;

        /*
         * VHE systems allow the guest direct access to the EL1 physical
         * timer/counter.
         */
        val = read_sysreg(cnthctl_el2);
        val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
        val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
        write_sysreg(val, cnthctl_el2);
}
static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
        struct kvm_vcpu *vcpu;
        int i;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
                vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
        }
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        int __user *uaddr = (int __user *)(long)attr->addr;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        int irq;

        if (!irqchip_in_kernel(vcpu->kvm))
                return -EINVAL;

        if (get_user(irq, uaddr))
                return -EFAULT;

        if (!(irq_is_ppi(irq)))
                return -EINVAL;

        if (vcpu->arch.timer_cpu.enabled)
                return -EBUSY;

        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
                set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
                break;
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
                break;
        default:
                return -ENXIO;
        }

        return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        int __user *uaddr = (int __user *)(long)attr->addr;
        struct arch_timer_context *timer;
        int irq;

        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
                timer = vcpu_vtimer(vcpu);
                break;
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                timer = vcpu_ptimer(vcpu);
                break;
        default:
                return -ENXIO;
        }

        irq = timer->irq.irq;
        return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                return 0;
        }

        return -ENXIO;
}