/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#ifndef __KVM_RISCV_AIA_H
#define __KVM_RISCV_AIA_H

#include <linux/jump_label.h>
#include <linux/kvm_types.h>

struct kvm_aia {
	/* In-kernel irqchip created */
	bool in_kernel;

	/* In-kernel irqchip initialized */
	bool initialized;

	/* Virtualization mode (Emulation, HW Accelerated, or Auto) */
	u32 mode;

	/* Number of MSIs */
	u32 nr_ids;

	/* Number of wired IRQs */
	u32 nr_sources;

	/* Number of group bits in IMSIC address */
	u32 nr_group_bits;

	/* Position of group bits in IMSIC address */
	u32 nr_group_shift;

	/* Number of hart bits in IMSIC address */
	u32 nr_hart_bits;

	/* Number of guest bits in IMSIC address */
	u32 nr_guest_bits;

	/* Guest physical address of APLIC */
	gpa_t aplic_addr;

	/* Internal state of APLIC */
	void *aplic_state;
};
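
/*
 * Illustrative IMSIC address decomposition (an assumption pieced together
 * from the field comments above and the AIA IMSIC layout, not spelled out
 * in this header): with 4KiB IMSIC pages,
 *
 *	gpa = base
 *	    | (group_index << nr_group_shift)
 *	    | (hart_index  << (nr_guest_bits + 12))
 *	    | (guest_index << 12);
 *
 * so nr_group_bits, nr_hart_bits, and nr_guest_bits bound the index widths.
 */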

struct kvm_vcpu_aia_csr {
	unsigned long vsiselect;
	unsigned long hviprio1;
	unsigned long hviprio2;
	unsigned long vsieh;
	unsigned long hviph;
	unsigned long hviprio1h;
	unsigned long hviprio2h;
};

struct kvm_vcpu_aia {
	/* CPU AIA CSR context of Guest VCPU */
	struct kvm_vcpu_aia_csr guest_csr;

	/* CPU AIA CSR context upon Guest VCPU reset */
	struct kvm_vcpu_aia_csr guest_reset_csr;

	/* Guest physical address of IMSIC for this VCPU */
	gpa_t imsic_addr;

	/* HART index of IMSIC extracted from guest physical address */
	u32 hart_index;

	/* Internal state of IMSIC for this VCPU */
	void *imsic_state;
};

#define KVM_RISCV_AIA_UNDEF_ADDR	(-1)

#define kvm_riscv_aia_initialized(k)	((k)->arch.aia.initialized)

#define irqchip_in_kernel(k)		((k)->arch.aia.in_kernel)

extern unsigned int kvm_riscv_aia_nr_hgei;
extern unsigned int kvm_riscv_aia_max_ids;
DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
#define kvm_riscv_aia_available() \
	static_branch_unlikely(&kvm_riscv_aia_available)
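
/*
 * Illustrative use of the static key (a sketch, not from this header):
 * callers can cheaply gate all AIA handling, e.g.
 *
 *	if (kvm_riscv_aia_available())
 *		kvm_riscv_aia_enable();
 *
 * static_branch_unlikely() is patched at runtime, so the test is nearly
 * free on hosts without AIA.
 */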

extern struct kvm_device_ops kvm_riscv_aia_device_ops;

void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu);

#define KVM_RISCV_AIA_IMSIC_TOPEI	(ISELECT_MASK + 1)
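/*
 * Descriptive note (an inference from the definition, not stated here):
 * this is an out-of-range iselect value, one past ISELECT_MASK, so the
 * TOPEI register can be addressed through the same rmw helper as the
 * indirectly-accessed IMSIC registers.
 */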
int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
				 unsigned long *val, unsigned long new_val,
				 unsigned long wr_mask);
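/*
 * Read-modify-write sketch (semantics assumed from the rmw naming
 * convention): *val receives the old value, then the bits selected by
 * wr_mask are replaced with the matching bits of new_val, e.g.
 *
 *	unsigned long old;
 *	kvm_riscv_vcpu_aia_imsic_rmw(vcpu, isel, &old, 0, 0);	  // read only
 *	kvm_riscv_vcpu_aia_imsic_rmw(vcpu, isel, &old, v, -1UL); // write all
 */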
int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
				bool write, unsigned long *val);
int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type);
void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
				    u32 guest_index, u32 offset, u32 iid);
int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu);

int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v);
int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v);
int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type);
int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level);
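/*
 * Wired interrupt sketch (source number 5 is an arbitrary example within
 * the configured nr_sources): a device model asserts and deasserts a
 * level-triggered APLIC input roughly as
 *
 *	kvm_riscv_aia_aplic_inject(kvm, 5, true);
 *	kvm_riscv_aia_aplic_inject(kvm, 5, false);
 */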
int kvm_riscv_aia_aplic_init(struct kvm *kvm);
void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm);
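
/*
 * The two helpers below only do real work on 32-bit hosts, presumably
 * because the upper halves of the 64-bit guest interrupt state live in
 * separate high-half CSRs (hviph, vsieh) there; on 64-bit hosts they
 * collapse into the empty inline stubs.
 */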
#ifdef CONFIG_32BIT
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu);
#else
static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
}
static inline void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
}
#endif
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);

void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long *out_val);
int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long val);

int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
				 unsigned int csr_num,
				 unsigned long *val,
				 unsigned long new_val,
				 unsigned long wr_mask);
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);
#define KVM_RISCV_VCPU_AIA_CSR_FUNCS \
{ .base = CSR_SIREG, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
{ .base = CSR_STOPEI, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },
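/*
 * Consumption sketch (assuming KVM's trapped-CSR dispatch table, named
 * here only for illustration): the CSR emulation matches a trapped
 * csr_num against each { .base, .count } range and invokes .func, so
 * guest SIREG accesses land in kvm_riscv_vcpu_aia_rmw_ireg() and STOPEI
 * accesses in kvm_riscv_vcpu_aia_rmw_topei().
 */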

int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu);

int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
				   u32 guest_index, u32 iid);
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level);
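/*
 * MSI delivery sketch (illustrative; field names follow struct kvm_msi):
 * a VMM forwards a guest MSI write roughly as
 *
 *	struct kvm_msi msi = {
 *		.address_hi = upper_32_bits(gpa),
 *		.address_lo = lower_32_bits(gpa),
 *		.data = iid,
 *	};
 *	kvm_riscv_aia_inject_msi(kvm, &msi);
 *
 * where the address selects the target IMSIC page and .data carries the
 * interrupt identity.
 */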

void kvm_riscv_aia_init_vm(struct kvm *kvm);
void kvm_riscv_aia_destroy_vm(struct kvm *kvm);

int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
			     void __iomem **hgei_va, phys_addr_t *hgei_pa);
void kvm_riscv_aia_free_hgei(int cpu, int hgei);
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable);
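/*
 * HGEI lifecycle sketch (an assumption drawn from the names above): a
 * VCPU backed by a hardware IMSIC guest file allocates a host
 * guest-external interrupt on its target cpu, uses the returned
 * hgei_va/hgei_pa window, arms wakeups with
 * kvm_riscv_aia_wakeon_hgei(owner, true), and releases everything with
 * kvm_riscv_aia_free_hgei() on migration or teardown.
 */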

void kvm_riscv_aia_enable(void);
void kvm_riscv_aia_disable(void);
int kvm_riscv_aia_init(void);
void kvm_riscv_aia_exit(void);

#endif