// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
/*
 * ktime_to_tick() - Scale a ktime_t value to a timer tick count.
 */
static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
{
	u64 delta;

	delta = ktime_to_ns(now);
	return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
}

static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
{
	return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
}
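/*
 * Worked example (illustrative, assuming MNSEC_PER_SEC is NSEC_PER_SEC >> 20):
 * for a 100 MHz stable timer, timer_mhz = 100000000 >> 20 = 95 and
 * MNSEC_PER_SEC = 1000000000 >> 20 = 953, so 1 ms converts to
 * 1000000 * 95 / 953 = 99685 ticks, close to the exact 100000; the shared
 * >> 20 scaling keeps the 64-bit intermediate product small.
 */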
/* Low level hrtimer wake routine */
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
	kvm_queue_irq(vcpu, INT_TI);
	rcuwait_wake_up(&vcpu->wait);

	return HRTIMER_NORESTART;
}
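/*
 * Note: the software timer is one-shot (HRTIMER_NORESTART); for a periodic
 * guest timer the next expiry is recomputed in kvm_restore_timer() rather
 * than by restarting the hrtimer here.
 */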
/*
 * Initialise the timer to the specified frequency and zero it.
 */
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
{
	vcpu->arch.timer_mhz = timer_hz >> 20;
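
	/*
	 * >> 20 divides by 2^20 rather than 10^6, so timer_mhz is only an
	 * approximation of the frequency in MHz; assuming MNSEC_PER_SEC
	 * carries the same >> 20 scaling of NSEC_PER_SEC, the 2^20-vs-10^6
	 * factor cancels in the tick conversions above.
	 */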
	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
}
/*
 * Restore soft timer state from the saved context.
 */
void kvm_restore_timer(struct kvm_vcpu *vcpu)
{
	unsigned long cfg, estat;
	unsigned long ticks, delta, period;
	ktime_t expire, now;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Set the guest stable timer cfg CSR.
	 * Disable the timer before restoring the ESTAT CSR register to avoid
	 * taking a stale timer interrupt from the old timer cfg.
	 */
	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);

	write_gcsr_timercfg(0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	if (!(cfg & CSR_TCFG_EN)) {
		/* Guest timer is disabled, just restore the timer registers */
		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
		return;
	}
	/*
	 * Freeze the soft timer and sync the guest stable timer with it.
	 */
	if (kvm_vcpu_is_blocking(vcpu))
		hrtimer_cancel(&vcpu->arch.swtimer);

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 7.6.2:
	 * if the oneshot timer has fired, CSR TVAL will be -1. There are two
	 * cases:
	 *  1) the timer fired while exiting to the host
	 *  2) the timer fired while the VM was handling the timer irq, and it
	 *     then exited to the host; the host must not inject the timer irq
	 *     again, to avoid a spurious timer interrupt
	 */
	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
	estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) {
		/*
		 * Writing 0 to LOONGARCH_CSR_TVAL injects a timer irq and
		 * sets CSR TVAL to -1.
		 */
		write_gcsr_timertick(0);

		/*
		 * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR clears the
		 * timer interrupt while CSR TVAL stays at -1, which avoids a
		 * spurious timer interrupt.
		 */
		if (!(estat & CPU_TIMER))
			gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
		return;
	}
	/*
	 * Set the remaining tick value if the timer has not yet expired.
	 */
	delta = 0;
	now = ktime_get();
	expire = vcpu->arch.expire;
	if (ktime_before(now, expire))
		delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
	else if (cfg & CSR_TCFG_PERIOD) {
		period = cfg & CSR_TCFG_VAL;
		delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
		delta = period - (delta % period);
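		/*
		 * delta above is the tick count elapsed since the recorded
		 * expiry; period - (delta % period) is then the tick count
		 * left until the next period boundary, e.g. with period =
		 * 1000 and 2300 elapsed ticks the result is 1000 - 300 = 700.
		 */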

		/*
		 * Inject the timer irq here even though the soft timer should
		 * already have injected it asynchronously, since the soft
		 * timer may have been cancelled while that async injection
		 * was in progress.
		 */
		kvm_queue_irq(vcpu, INT_TI);
	}

	write_gcsr_timertick(delta);
}
/*
 * Save guest timer state and switch to software emulation of the guest
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_save_timer(struct kvm_vcpu *vcpu)
{
	unsigned long ticks, delta, cfg;
	ktime_t expire;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 7.6.2:
	 * if the periodic timer has fired, CSR TVAL is reloaded from CSR TCFG;
	 * if the oneshot timer has fired, CSR TVAL will be -1.
	 * A fired one-shot timer is therefore detected by TVAL being larger
	 * than TCFG.
	 */
	if (ticks < cfg)
		delta = tick_to_ns(vcpu, ticks);
	else
		delta = 0;

	expire = ktime_add_ns(ktime_get(), delta);
	vcpu->arch.expire = expire;
	if (kvm_vcpu_is_blocking(vcpu)) {
		/*
		 * HRTIMER_MODE_PINNED_HARD is suggested since the vcpu may
		 * run on the same physical cpu next time, and the timer must
		 * run in hardirq context even in the PREEMPT_RT case.
		 */
		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
	}
}
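/*
 * Note: vcpu->arch.expire recorded above is the host ktime at which the guest
 * timer is due; kvm_restore_timer() uses it to recompute the remaining (or,
 * for a periodic timer, the next) tick value when the vCPU resumes.
 */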
/*
 * Save guest timer state and switch to the soft guest timer if the hard timer
 * was in use.
 */
void kvm_save_timer(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	preempt_disable();

	/* Save hard timer state */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
	if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
		_kvm_save_timer(vcpu);

	/* Save timer-related state to the vCPU context */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);

	preempt_enable();
}
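/*
 * Note: the software hrtimer is only armed while the vCPU is blocking (see
 * _kvm_save_timer() above); a vCPU that is merely descheduled relies on the
 * saved TCFG/TVAL/ESTAT values and vcpu->arch.expire being replayed by the
 * next kvm_restore_timer() call.
 */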