// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */
7 #include <linux/module.h>
8 #include <linux/kvm_host.h>
9 #include <asm/cacheflush.h>
10 #include <asm/cpufeature.h>
11 #include <asm/kvm_csr.h>
12 #include <asm/kvm_eiointc.h>
13 #include <asm/kvm_pch_pic.h>
/* Mask of the hardware vpid (guest ID) field; probed at module init */
unsigned long vpid_mask;
/* World-switch entry points, copied into the DMW area at module init */
struct kvm_world_switch *kvm_loongarch_ops;
/* Per-CSR access flag: SW_GCSR (software emulated) or HW_GCSR (direct) */
static int gcsr_flag[CSR_MAX_NUMS];
/* Per-CPU guest context (vpid cache, last vCPU run on the CPU) */
static struct kvm_context __percpu *vmcs;
21 int get_gcsr_flag(int csr
)
23 if (csr
< CSR_MAX_NUMS
)
24 return gcsr_flag
[csr
];
29 static inline void set_gcsr_sw_flag(int csr
)
31 if (csr
< CSR_MAX_NUMS
)
32 gcsr_flag
[csr
] |= SW_GCSR
;
35 static inline void set_gcsr_hw_flag(int csr
)
37 if (csr
< CSR_MAX_NUMS
)
38 gcsr_flag
[csr
] |= HW_GCSR
;
42 * The default value of gcsr_flag[CSR] is 0, and we use this
43 * function to set the flag to 1 (SW_GCSR) or 2 (HW_GCSR) if the
44 * gcsr is software or hardware. It will be used by get/set_gcsr,
45 * if gcsr_flag is HW we should use gcsrrd/gcsrwr to access it,
46 * else use software csr to emulate it.
48 static void kvm_init_gcsr_flag(void)
50 set_gcsr_hw_flag(LOONGARCH_CSR_CRMD
);
51 set_gcsr_hw_flag(LOONGARCH_CSR_PRMD
);
52 set_gcsr_hw_flag(LOONGARCH_CSR_EUEN
);
53 set_gcsr_hw_flag(LOONGARCH_CSR_MISC
);
54 set_gcsr_hw_flag(LOONGARCH_CSR_ECFG
);
55 set_gcsr_hw_flag(LOONGARCH_CSR_ESTAT
);
56 set_gcsr_hw_flag(LOONGARCH_CSR_ERA
);
57 set_gcsr_hw_flag(LOONGARCH_CSR_BADV
);
58 set_gcsr_hw_flag(LOONGARCH_CSR_BADI
);
59 set_gcsr_hw_flag(LOONGARCH_CSR_EENTRY
);
60 set_gcsr_hw_flag(LOONGARCH_CSR_TLBIDX
);
61 set_gcsr_hw_flag(LOONGARCH_CSR_TLBEHI
);
62 set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO0
);
63 set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO1
);
64 set_gcsr_hw_flag(LOONGARCH_CSR_ASID
);
65 set_gcsr_hw_flag(LOONGARCH_CSR_PGDL
);
66 set_gcsr_hw_flag(LOONGARCH_CSR_PGDH
);
67 set_gcsr_hw_flag(LOONGARCH_CSR_PGD
);
68 set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL0
);
69 set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL1
);
70 set_gcsr_hw_flag(LOONGARCH_CSR_STLBPGSIZE
);
71 set_gcsr_hw_flag(LOONGARCH_CSR_RVACFG
);
72 set_gcsr_hw_flag(LOONGARCH_CSR_CPUID
);
73 set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG1
);
74 set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG2
);
75 set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG3
);
76 set_gcsr_hw_flag(LOONGARCH_CSR_KS0
);
77 set_gcsr_hw_flag(LOONGARCH_CSR_KS1
);
78 set_gcsr_hw_flag(LOONGARCH_CSR_KS2
);
79 set_gcsr_hw_flag(LOONGARCH_CSR_KS3
);
80 set_gcsr_hw_flag(LOONGARCH_CSR_KS4
);
81 set_gcsr_hw_flag(LOONGARCH_CSR_KS5
);
82 set_gcsr_hw_flag(LOONGARCH_CSR_KS6
);
83 set_gcsr_hw_flag(LOONGARCH_CSR_KS7
);
84 set_gcsr_hw_flag(LOONGARCH_CSR_TMID
);
85 set_gcsr_hw_flag(LOONGARCH_CSR_TCFG
);
86 set_gcsr_hw_flag(LOONGARCH_CSR_TVAL
);
87 set_gcsr_hw_flag(LOONGARCH_CSR_TINTCLR
);
88 set_gcsr_hw_flag(LOONGARCH_CSR_CNTC
);
89 set_gcsr_hw_flag(LOONGARCH_CSR_LLBCTL
);
90 set_gcsr_hw_flag(LOONGARCH_CSR_TLBRENTRY
);
91 set_gcsr_hw_flag(LOONGARCH_CSR_TLBRBADV
);
92 set_gcsr_hw_flag(LOONGARCH_CSR_TLBRERA
);
93 set_gcsr_hw_flag(LOONGARCH_CSR_TLBRSAVE
);
94 set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO0
);
95 set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO1
);
96 set_gcsr_hw_flag(LOONGARCH_CSR_TLBREHI
);
97 set_gcsr_hw_flag(LOONGARCH_CSR_TLBRPRMD
);
98 set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN0
);
99 set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN1
);
100 set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN2
);
101 set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN3
);
103 set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL1
);
104 set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL2
);
105 set_gcsr_sw_flag(LOONGARCH_CSR_MERRCTL
);
106 set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO1
);
107 set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO2
);
108 set_gcsr_sw_flag(LOONGARCH_CSR_MERRENTRY
);
109 set_gcsr_sw_flag(LOONGARCH_CSR_MERRERA
);
110 set_gcsr_sw_flag(LOONGARCH_CSR_MERRSAVE
);
111 set_gcsr_sw_flag(LOONGARCH_CSR_CTAG
);
112 set_gcsr_sw_flag(LOONGARCH_CSR_DEBUG
);
113 set_gcsr_sw_flag(LOONGARCH_CSR_DERA
);
114 set_gcsr_sw_flag(LOONGARCH_CSR_DESAVE
);
116 set_gcsr_sw_flag(LOONGARCH_CSR_FWPC
);
117 set_gcsr_sw_flag(LOONGARCH_CSR_FWPS
);
118 set_gcsr_sw_flag(LOONGARCH_CSR_MWPC
);
119 set_gcsr_sw_flag(LOONGARCH_CSR_MWPS
);
121 set_gcsr_sw_flag(LOONGARCH_CSR_DB0ADDR
);
122 set_gcsr_sw_flag(LOONGARCH_CSR_DB0MASK
);
123 set_gcsr_sw_flag(LOONGARCH_CSR_DB0CTRL
);
124 set_gcsr_sw_flag(LOONGARCH_CSR_DB0ASID
);
125 set_gcsr_sw_flag(LOONGARCH_CSR_DB1ADDR
);
126 set_gcsr_sw_flag(LOONGARCH_CSR_DB1MASK
);
127 set_gcsr_sw_flag(LOONGARCH_CSR_DB1CTRL
);
128 set_gcsr_sw_flag(LOONGARCH_CSR_DB1ASID
);
129 set_gcsr_sw_flag(LOONGARCH_CSR_DB2ADDR
);
130 set_gcsr_sw_flag(LOONGARCH_CSR_DB2MASK
);
131 set_gcsr_sw_flag(LOONGARCH_CSR_DB2CTRL
);
132 set_gcsr_sw_flag(LOONGARCH_CSR_DB2ASID
);
133 set_gcsr_sw_flag(LOONGARCH_CSR_DB3ADDR
);
134 set_gcsr_sw_flag(LOONGARCH_CSR_DB3MASK
);
135 set_gcsr_sw_flag(LOONGARCH_CSR_DB3CTRL
);
136 set_gcsr_sw_flag(LOONGARCH_CSR_DB3ASID
);
137 set_gcsr_sw_flag(LOONGARCH_CSR_DB4ADDR
);
138 set_gcsr_sw_flag(LOONGARCH_CSR_DB4MASK
);
139 set_gcsr_sw_flag(LOONGARCH_CSR_DB4CTRL
);
140 set_gcsr_sw_flag(LOONGARCH_CSR_DB4ASID
);
141 set_gcsr_sw_flag(LOONGARCH_CSR_DB5ADDR
);
142 set_gcsr_sw_flag(LOONGARCH_CSR_DB5MASK
);
143 set_gcsr_sw_flag(LOONGARCH_CSR_DB5CTRL
);
144 set_gcsr_sw_flag(LOONGARCH_CSR_DB5ASID
);
145 set_gcsr_sw_flag(LOONGARCH_CSR_DB6ADDR
);
146 set_gcsr_sw_flag(LOONGARCH_CSR_DB6MASK
);
147 set_gcsr_sw_flag(LOONGARCH_CSR_DB6CTRL
);
148 set_gcsr_sw_flag(LOONGARCH_CSR_DB6ASID
);
149 set_gcsr_sw_flag(LOONGARCH_CSR_DB7ADDR
);
150 set_gcsr_sw_flag(LOONGARCH_CSR_DB7MASK
);
151 set_gcsr_sw_flag(LOONGARCH_CSR_DB7CTRL
);
152 set_gcsr_sw_flag(LOONGARCH_CSR_DB7ASID
);
154 set_gcsr_sw_flag(LOONGARCH_CSR_IB0ADDR
);
155 set_gcsr_sw_flag(LOONGARCH_CSR_IB0MASK
);
156 set_gcsr_sw_flag(LOONGARCH_CSR_IB0CTRL
);
157 set_gcsr_sw_flag(LOONGARCH_CSR_IB0ASID
);
158 set_gcsr_sw_flag(LOONGARCH_CSR_IB1ADDR
);
159 set_gcsr_sw_flag(LOONGARCH_CSR_IB1MASK
);
160 set_gcsr_sw_flag(LOONGARCH_CSR_IB1CTRL
);
161 set_gcsr_sw_flag(LOONGARCH_CSR_IB1ASID
);
162 set_gcsr_sw_flag(LOONGARCH_CSR_IB2ADDR
);
163 set_gcsr_sw_flag(LOONGARCH_CSR_IB2MASK
);
164 set_gcsr_sw_flag(LOONGARCH_CSR_IB2CTRL
);
165 set_gcsr_sw_flag(LOONGARCH_CSR_IB2ASID
);
166 set_gcsr_sw_flag(LOONGARCH_CSR_IB3ADDR
);
167 set_gcsr_sw_flag(LOONGARCH_CSR_IB3MASK
);
168 set_gcsr_sw_flag(LOONGARCH_CSR_IB3CTRL
);
169 set_gcsr_sw_flag(LOONGARCH_CSR_IB3ASID
);
170 set_gcsr_sw_flag(LOONGARCH_CSR_IB4ADDR
);
171 set_gcsr_sw_flag(LOONGARCH_CSR_IB4MASK
);
172 set_gcsr_sw_flag(LOONGARCH_CSR_IB4CTRL
);
173 set_gcsr_sw_flag(LOONGARCH_CSR_IB4ASID
);
174 set_gcsr_sw_flag(LOONGARCH_CSR_IB5ADDR
);
175 set_gcsr_sw_flag(LOONGARCH_CSR_IB5MASK
);
176 set_gcsr_sw_flag(LOONGARCH_CSR_IB5CTRL
);
177 set_gcsr_sw_flag(LOONGARCH_CSR_IB5ASID
);
178 set_gcsr_sw_flag(LOONGARCH_CSR_IB6ADDR
);
179 set_gcsr_sw_flag(LOONGARCH_CSR_IB6MASK
);
180 set_gcsr_sw_flag(LOONGARCH_CSR_IB6CTRL
);
181 set_gcsr_sw_flag(LOONGARCH_CSR_IB6ASID
);
182 set_gcsr_sw_flag(LOONGARCH_CSR_IB7ADDR
);
183 set_gcsr_sw_flag(LOONGARCH_CSR_IB7MASK
);
184 set_gcsr_sw_flag(LOONGARCH_CSR_IB7CTRL
);
185 set_gcsr_sw_flag(LOONGARCH_CSR_IB7ASID
);
187 set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL0
);
188 set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR0
);
189 set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL1
);
190 set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR1
);
191 set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL2
);
192 set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2
);
193 set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3
);
194 set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3
);
197 static void kvm_update_vpid(struct kvm_vcpu
*vcpu
, int cpu
)
200 struct kvm_context
*context
;
202 context
= per_cpu_ptr(vcpu
->kvm
->arch
.vmcs
, cpu
);
203 vpid
= context
->vpid_cache
+ 1;
204 if (!(vpid
& vpid_mask
)) {
205 /* finish round of vpid loop */
207 vpid
= vpid_mask
+ 1;
209 ++vpid
; /* vpid 0 reserved for root */
211 /* start new vpid cycle */
215 context
->vpid_cache
= vpid
;
216 vcpu
->arch
.vpid
= vpid
;
219 void kvm_check_vpid(struct kvm_vcpu
*vcpu
)
223 unsigned long ver
, old
, vpid
;
224 struct kvm_context
*context
;
226 cpu
= smp_processor_id();
228 * Are we entering guest context on a different CPU to last time?
229 * If so, the vCPU's guest TLB state on this CPU may be stale.
231 context
= per_cpu_ptr(vcpu
->kvm
->arch
.vmcs
, cpu
);
232 migrated
= (vcpu
->cpu
!= cpu
);
235 * Check if our vpid is of an older version
237 * We also discard the stored vpid if we've executed on
238 * another CPU, as the guest mappings may have changed without
239 * hypervisor knowledge.
241 ver
= vcpu
->arch
.vpid
& ~vpid_mask
;
242 old
= context
->vpid_cache
& ~vpid_mask
;
243 if (migrated
|| (ver
!= old
)) {
244 kvm_update_vpid(vcpu
, cpu
);
245 trace_kvm_vpid_change(vcpu
, vcpu
->arch
.vpid
);
247 kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA
, vcpu
);
250 * LLBCTL is a separated guest CSR register from host, a general
251 * exception ERET instruction clears the host LLBCTL register in
252 * host mode, and clears the guest LLBCTL register in guest mode.
253 * ERET in tlb refill exception does not clear LLBCTL register.
255 * When secondary mmu mapping is changed, guest OS does not know
256 * even if the content is changed after mapping is changed.
258 * Here clear WCLLB of the guest LLBCTL register when mapping is
259 * changed. Otherwise, if mmu mapping is changed while guest is
260 * executing LL/SC pair, LL loads with the old address and set
261 * the LLBCTL flag, SC checks the LLBCTL flag and will store the
262 * new address successfully since LLBCTL_WCLLB is on, even if
263 * memory with new address is changed on other VCPUs.
265 set_gcsr_llbctl(CSR_LLBCTL_WCLLB
);
268 /* Restore GSTAT(0x50).vpid */
269 vpid
= (vcpu
->arch
.vpid
& vpid_mask
) << CSR_GSTAT_GID_SHIFT
;
270 change_csr_gstat(vpid_mask
<< CSR_GSTAT_GID_SHIFT
, vpid
);
273 void kvm_init_vmcs(struct kvm
*kvm
)
275 kvm
->arch
.vmcs
= vmcs
;
278 long kvm_arch_dev_ioctl(struct file
*filp
,
279 unsigned int ioctl
, unsigned long arg
)
284 int kvm_arch_enable_virtualization_cpu(void)
286 unsigned long env
, gcfg
= 0;
288 env
= read_csr_gcfg();
290 /* First init gcfg, gstat, gintc, gtlbc. All guest use the same config */
294 clear_csr_gtlbc(CSR_GTLBC_USETGID
| CSR_GTLBC_TOTI
);
297 * Enable virtualization features granting guest direct control of
299 * GCI=2: Trap on init or unimplement cache instruction.
300 * TORU=0: Trap on Root Unimplement.
301 * CACTRL=1: Root control cache.
302 * TOP=0: Trap on Previlege.
303 * TOE=0: Trap on Exception.
304 * TIT=0: Trap on Timer.
306 if (env
& CSR_GCFG_GCIP_ALL
)
307 gcfg
|= CSR_GCFG_GCI_SECURE
;
308 if (env
& CSR_GCFG_MATC_ROOT
)
309 gcfg
|= CSR_GCFG_MATC_ROOT
;
311 write_csr_gcfg(gcfg
);
315 /* Enable using TGID */
316 set_csr_gtlbc(CSR_GTLBC_USETGID
);
317 kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
318 read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());
323 void kvm_arch_disable_virtualization_cpu(void)
328 clear_csr_gtlbc(CSR_GTLBC_USETGID
| CSR_GTLBC_TOTI
);
330 /* Flush any remaining guest TLB entries */
334 static int kvm_loongarch_env_init(void)
338 struct kvm_context
*context
;
340 vmcs
= alloc_percpu(struct kvm_context
);
342 pr_err("kvm: failed to allocate percpu kvm_context\n");
346 kvm_loongarch_ops
= kzalloc(sizeof(*kvm_loongarch_ops
), GFP_KERNEL
);
347 if (!kvm_loongarch_ops
) {
354 * PGD register is shared between root kernel and kvm hypervisor.
355 * So world switch entry should be in DMW area rather than TLB area
356 * to avoid page fault reenter.
358 * In future if hardware pagetable walking is supported, we won't
359 * need to copy world switch code to DMW area.
361 order
= get_order(kvm_exception_size
+ kvm_enter_guest_size
);
362 addr
= (void *)__get_free_pages(GFP_KERNEL
, order
);
366 kfree(kvm_loongarch_ops
);
367 kvm_loongarch_ops
= NULL
;
371 memcpy(addr
, kvm_exc_entry
, kvm_exception_size
);
372 memcpy(addr
+ kvm_exception_size
, kvm_enter_guest
, kvm_enter_guest_size
);
373 flush_icache_range((unsigned long)addr
, (unsigned long)addr
+ kvm_exception_size
+ kvm_enter_guest_size
);
374 kvm_loongarch_ops
->exc_entry
= addr
;
375 kvm_loongarch_ops
->enter_guest
= addr
+ kvm_exception_size
;
376 kvm_loongarch_ops
->page_order
= order
;
378 vpid_mask
= read_csr_gstat();
379 vpid_mask
= (vpid_mask
& CSR_GSTAT_GIDBIT
) >> CSR_GSTAT_GIDBIT_SHIFT
;
381 vpid_mask
= GENMASK(vpid_mask
- 1, 0);
383 for_each_possible_cpu(cpu
) {
384 context
= per_cpu_ptr(vmcs
, cpu
);
385 context
->vpid_cache
= vpid_mask
+ 1;
386 context
->last_vcpu
= NULL
;
389 kvm_init_gcsr_flag();
391 /* Register LoongArch IPI interrupt controller interface. */
392 ret
= kvm_loongarch_register_ipi_device();
396 /* Register LoongArch EIOINTC interrupt controller interface. */
397 ret
= kvm_loongarch_register_eiointc_device();
401 /* Register LoongArch PCH-PIC interrupt controller interface. */
402 ret
= kvm_loongarch_register_pch_pic_device();
407 static void kvm_loongarch_env_exit(void)
414 if (kvm_loongarch_ops
) {
415 if (kvm_loongarch_ops
->exc_entry
) {
416 addr
= (unsigned long)kvm_loongarch_ops
->exc_entry
;
417 free_pages(addr
, kvm_loongarch_ops
->page_order
);
419 kfree(kvm_loongarch_ops
);
423 static int kvm_loongarch_init(void)
428 kvm_info("Hardware virtualization not available\n");
431 r
= kvm_loongarch_env_init();
435 return kvm_init(sizeof(struct kvm_vcpu
), 0, THIS_MODULE
);
/*
 * Module exit: unregister from core KVM (pairs with kvm_init() in the
 * init path) before tearing down the environment.
 */
static void kvm_loongarch_exit(void)
{
	kvm_exit();
	kvm_loongarch_env_exit();
}
/* Hook the module lifecycle into the init/exit routines above */
module_init(kvm_loongarch_init);
module_exit(kvm_loongarch_exit);
448 static const struct cpu_feature kvm_feature
[] = {
449 { .feature
= cpu_feature(LOONGARCH_LVZ
) },
452 MODULE_DEVICE_TABLE(cpu
, kvm_feature
);