/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia.h>
#include <asm/ptrace.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_IRQCHIP_NUM_PINS		1024

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I			\
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL		\
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE			\
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(6)
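
/*
 * Usage sketch (illustrative, not part of the original header): these
 * requests use the generic KVM request API. A producer raises a request
 * and kicks the target VCPU, which consumes it before guest entry:
 *
 *	kvm_make_request(KVM_REQ_FENCE_I, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
 *		kvm_riscv_fence_i_process(vcpu);
 */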

#define KVM_HEDELEG_DEFAULT		(BIT(EXC_INST_MISALIGNED) | \
					 BIT(EXC_BREAKPOINT)      | \
					 BIT(EXC_SYSCALL)         | \
					 BIT(EXC_INST_PAGE_FAULT) | \
					 BIT(EXC_LOAD_PAGE_FAULT) | \
					 BIT(EXC_STORE_PAGE_FAULT))

#define KVM_HIDELEG_DEFAULT		(BIT(IRQ_VS_SOFT)  | \
					 BIT(IRQ_VS_TIMER) | \
					 BIT(IRQ_VS_EXT))

enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held,
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
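
/*
 * Usage sketch (illustrative): readers sample vmid locklessly and rely on
 * vmid_version to detect staleness, e.g. around guest entry:
 *
 *	if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid))
 *		kvm_riscv_gstage_vmid_update(vcpu);
 *
 * Both helpers are declared later in this header.
 */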

struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;

	/* AIA Guest/VM context */
	struct kvm_aia aia;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
	struct __riscv_v_ext_state vector;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
	unsigned long senvcfg;
};

struct kvm_vcpu_config {
	u64 henvcfg;
	u64 hstateen0;
	unsigned long hedeleg;
};

struct kvm_vcpu_smstateen_csr {
	unsigned long sstateen0;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* Vendor, Arch, and Implementation details */
	unsigned long mvendorid;
	unsigned long marchid;
	unsigned long mimpid;

	/* SSCRATCH, STVEC, SCOUNTEREN, SENVCFG, and SSTATEEN0 of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;
	unsigned long host_senvcfg;
	unsigned long host_sstateen0;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU Smstateen CSR context of Guest VCPU */
	struct kvm_vcpu_smstateen_csr smstateen_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;
	spinlock_t reset_cntx_lock;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts, whereas irqs_pending_mask represents the bits
	 * that changed in irqs_pending. The approach is modeled on the
	 * multiple-producer, single-consumer problem, where the consumer is
	 * the VCPU itself.
	 */
#define KVM_RISCV_VCPU_NR_IRQS	64
	DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
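
	/*
	 * Sketch (illustrative, not the exact kernel code): a producer marks
	 * an interrupt pending and records which bit changed; the VCPU then
	 * folds each changed bit into its virtual interrupt state:
	 *
	 *	// producer, any context
	 *	set_bit(irq, vcpu->arch.irqs_pending);
	 *	smp_mb__before_atomic();
	 *	set_bit(irq, vcpu->arch.irqs_pending_mask);
	 *
	 *	// consumer, the VCPU itself
	 *	if (test_and_clear_bit(irq, vcpu->arch.irqs_pending_mask))
	 *		// apply the current irqs_pending value of bit 'irq'
	 */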

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];
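
	/*
	 * Sketch (illustrative): producers enqueue at hfence_tail under
	 * hfence_lock and raise KVM_REQ_HFENCE; kvm_riscv_hfence_process()
	 * drains entries from hfence_head on the VCPU itself:
	 *
	 *	spin_lock(&vcpu->arch.hfence_lock);
	 *	vcpu->arch.hfence_queue[vcpu->arch.hfence_tail++] = *data;
	 *	vcpu->arch.hfence_tail %= KVM_RISCV_VCPU_MAX_HFENCE;
	 *	spin_unlock(&vcpu->arch.hfence_lock);
	 *	kvm_make_request(KVM_REQ_HFENCE, vcpu);
	 */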

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_vcpu_sbi_context sbi_context;

	/* AIA VCPU context */
	struct kvm_vcpu_aia aia_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* Performance monitoring context */
	struct kvm_pmu pmu_context;

	/* 'static' configurations which are set only once */
	struct kvm_vcpu_config cfg;

	/* SBI steal-time accounting */
	struct {
		gpa_t shmem;
		u64 last_steal;
	} sta;
};

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf
 * event, arrived in guest context. For riscv, any event that arrives while
 * a vCPU is loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

static inline void kvm_arch_sync_events(struct kvm *kvm) {}

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
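
/*
 * Usage sketch (illustrative): 'order' is the log2 of the flush granule,
 * with KVM_RISCV_GSTAGE_TLB_MIN_ORDER as the smallest supported value, so
 * flushing a single 4KiB page at 'gpa' for VMID 'vmid' on the local hart
 * would look like:
 *
 *	kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, gpa, PAGE_SIZE, PAGE_SHIFT);
 */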

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);
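
/*
 * Usage sketch (illustrative; hart-mask semantics are an assumption based
 * on the SBI rfence-style calling convention): hbase/hmask select target
 * VCPUs, with bit i of hmask selecting VCPU id (hbase + i). For example,
 * fencing instruction caches on VCPUs 0-3:
 *
 *	kvm_riscv_fence_i(kvm, 0, 0xfUL);
 */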

int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void __init kvm_riscv_gstage_mode_detect(void);
unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);

void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices);
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);

void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);

#endif /* __RISCV_KVM_HOST_H__ */