arch/riscv/kvm/tlb.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
#include <asm/cpufeature.h>
#include <asm/insn-def.h>
#include <asm/kvm_nacl.h>

#define has_svinval()   riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
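/*
 * Local G-stage TLB maintenance helpers. These act on the current host
 * CPU (hart) only. Each range-based helper falls back to a full flush
 * when the range covers more than PTRS_PER_PTE pages of the given order,
 * and uses the Svinval sequence (SFENCE.W.INVAL, HINVAL.GVMA,
 * SFENCE.INVAL.IR) when that extension is available. HFENCE.GVMA and
 * HINVAL.GVMA take the guest physical address shifted right by 2.
 */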
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
                                          gpa_t gpa, gpa_t gpsz,
                                          unsigned long order)
{
        gpa_t pos;

        if (PTRS_PER_PTE < (gpsz >> order)) {
                kvm_riscv_local_hfence_gvma_vmid_all(vmid);
                return;
        }

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile (HINVAL_GVMA(%0, %1)
                                      : : "r" (pos >> 2), "r" (vmid) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile (HFENCE_GVMA(%0, %1)
                                      : : "r" (pos >> 2), "r" (vmid) : "memory");
        }
}

void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
        asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}

void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
                                     unsigned long order)
{
        gpa_t pos;

        if (PTRS_PER_PTE < (gpsz >> order)) {
                kvm_riscv_local_hfence_gvma_all();
                return;
        }

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile(HINVAL_GVMA(%0, zero)
                                     : : "r" (pos >> 2) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile(HFENCE_GVMA(%0, zero)
                                     : : "r" (pos >> 2) : "memory");
        }
}

void kvm_riscv_local_hfence_gvma_all(void)
{
        asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
}
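/*
 * Local VS-stage TLB maintenance helpers. HFENCE.VVMA and HINVAL.VVMA
 * operate on the VMID currently programmed in the hgatp CSR, so these
 * helpers temporarily swap hgatp to select the target VMID and restore
 * the previous value once the fences have been issued.
 */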
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
                                          unsigned long asid,
                                          unsigned long gva,
                                          unsigned long gvsz,
                                          unsigned long order)
{
        unsigned long pos, hgatp;

        if (PTRS_PER_PTE < (gvsz >> order)) {
                kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
                return;
        }

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HINVAL_VVMA(%0, %1)
                                     : : "r" (pos), "r" (asid) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HFENCE_VVMA(%0, %1)
                                     : : "r" (pos), "r" (asid) : "memory");
        }

        csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
                                          unsigned long asid)
{
        unsigned long hgatp;

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");

        csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
                                     unsigned long gva, unsigned long gvsz,
                                     unsigned long order)
{
        unsigned long pos, hgatp;

        if (PTRS_PER_PTE < (gvsz >> order)) {
                kvm_riscv_local_hfence_vvma_all(vmid);
                return;
        }

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HINVAL_VVMA(%0, zero)
                                     : : "r" (pos) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HFENCE_VVMA(%0, zero)
                                     : : "r" (pos) : "memory");
        }

        csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
        unsigned long hgatp;

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");

        csr_write(CSR_HGATP, hgatp);
}
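/*
 * Flush potentially stale G-stage TLB entries when a VCPU is about to
 * run on a host CPU different from the one it last exited on.
 */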
void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
        unsigned long vmid;

        if (!kvm_riscv_gstage_vmid_bits() ||
            vcpu->arch.last_exit_cpu == vcpu->cpu)
                return;

        /*
         * On RISC-V platforms with hardware VMID support, we share the same
         * VMID for all VCPUs of a particular Guest/VM. This means we might
         * have stale G-stage TLB entries on the current Host CPU due to
         * some other VCPU of the same Guest which ran previously on the
         * current Host CPU.
         *
         * To clean up stale TLB entries, we simply flush all G-stage TLB
         * entries by VMID whenever the underlying Host CPU changes for a VCPU.
         */

        vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
        kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}
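/*
 * Request processing handlers. These run in the context of the target
 * VCPU when it handles the KVM requests sent by make_xfence_request()
 * below. When the SBI nested acceleration (NACL) extension is available,
 * the fences are issued through the NACL shared memory area; otherwise
 * the local helpers above are used directly.
 */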
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
        local_flush_icache_all();
}

void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
{
        struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
        unsigned long vmid = READ_ONCE(v->vmid);

        if (kvm_riscv_nacl_available())
                nacl_hfence_gvma_vmid_all(nacl_shmem(), vmid);
        else
                kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}

void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
        struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
        unsigned long vmid = READ_ONCE(v->vmid);

        if (kvm_riscv_nacl_available())
                nacl_hfence_vvma_all(nacl_shmem(), vmid);
        else
                kvm_riscv_local_hfence_vvma_all(vmid);
}
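/*
 * Each VCPU carries a small circular queue of pending hfence requests
 * (KVM_RISCV_VCPU_MAX_HFENCE entries) protected by hfence_lock. A zero
 * 'type' field marks a free slot.
 */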
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
                                struct kvm_riscv_hfence *out_data)
{
        bool ret = false;
        struct kvm_vcpu_arch *varch = &vcpu->arch;

        spin_lock(&varch->hfence_lock);

        if (varch->hfence_queue[varch->hfence_head].type) {
                memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
                       sizeof(*out_data));
                varch->hfence_queue[varch->hfence_head].type = 0;

                varch->hfence_head++;
                if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
                        varch->hfence_head = 0;

                ret = true;
        }

        spin_unlock(&varch->hfence_lock);

        return ret;
}

static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
                                const struct kvm_riscv_hfence *data)
{
        bool ret = false;
        struct kvm_vcpu_arch *varch = &vcpu->arch;

        spin_lock(&varch->hfence_lock);

        if (!varch->hfence_queue[varch->hfence_tail].type) {
                memcpy(&varch->hfence_queue[varch->hfence_tail],
                       data, sizeof(*data));

                varch->hfence_tail++;
                if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
                        varch->hfence_tail = 0;

                ret = true;
        }

        spin_unlock(&varch->hfence_lock);

        return ret;
}
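/*
 * Drain the per-VCPU hfence queue and issue each pending fence, either
 * through the NACL shared memory interface or with the local helpers.
 */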
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
        unsigned long vmid;
        struct kvm_riscv_hfence d = { 0 };
        struct kvm_vmid *v = &vcpu->kvm->arch.vmid;

        while (vcpu_hfence_dequeue(vcpu, &d)) {
                switch (d.type) {
                case KVM_RISCV_HFENCE_UNKNOWN:
                        break;
                case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
                        vmid = READ_ONCE(v->vmid);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_gvma_vmid(nacl_shmem(), vmid,
                                                      d.addr, d.size, d.order);
                        else
                                kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr,
                                                                     d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
                        vmid = READ_ONCE(v->vmid);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid,
                                                      d.addr, d.size, d.order);
                        else
                                kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr,
                                                                     d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
                        vmid = READ_ONCE(v->vmid);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid);
                        else
                                kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid);
                        break;
                case KVM_RISCV_HFENCE_VVMA_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
                        vmid = READ_ONCE(v->vmid);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_vvma(nacl_shmem(), vmid,
                                                 d.addr, d.size, d.order);
                        else
                                kvm_riscv_local_hfence_vvma_gva(vmid, d.addr,
                                                                d.size, d.order);
                        break;
                default:
                        break;
                }
        }
}
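/*
 * Build a VCPU mask from the hart base and mask supplied by the caller
 * (hbase == -1UL selects all VCPUs), enqueue the hfence data on each
 * selected VCPU, and send the given request. If a VCPU's hfence queue is
 * full, downgrade to the more conservative fallback request instead.
 */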
static void make_xfence_request(struct kvm *kvm,
                                unsigned long hbase, unsigned long hmask,
                                unsigned int req, unsigned int fallback_req,
                                const struct kvm_riscv_hfence *data)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;
        unsigned int actual_req = req;
        DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

        bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (hbase != -1UL) {
                        if (vcpu->vcpu_id < hbase)
                                continue;
                        if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
                                continue;
                }

                bitmap_set(vcpu_mask, i, 1);

                if (!data || !data->type)
                        continue;

                /*
                 * Enqueue hfence data to VCPU hfence queue. If we don't
                 * have space in the VCPU hfence queue then fall back to
                 * a more conservative hfence request.
                 */
                if (!vcpu_hfence_enqueue(vcpu, data))
                        actual_req = fallback_req;
        }

        kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}
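/*
 * VM-wide fence entry points. They pack the fence parameters into a
 * struct kvm_riscv_hfence and hand it to make_xfence_request(), which
 * delivers the corresponding request to the selected VCPUs.
 */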
void kvm_riscv_fence_i(struct kvm *kvm,
                       unsigned long hbase, unsigned long hmask)
{
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
                            KVM_REQ_FENCE_I, NULL);
}

void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    gpa_t gpa, gpa_t gpsz,
                                    unsigned long order)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
        data.asid = 0;
        data.addr = gpa;
        data.size = gpsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
}

void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask)
{
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
                            KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
}

void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long gva, unsigned long gvsz,
                                    unsigned long order, unsigned long asid)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
        data.asid = asid;
        data.addr = gva;
        data.size = gvsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long asid)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
        data.asid = asid;
        data.addr = data.size = data.order = 0;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask,
                               unsigned long gva, unsigned long gvsz,
                               unsigned long order)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_VVMA_GVA;
        data.asid = 0;
        data.addr = gva;
        data.size = gvsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask)
{
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
                            KVM_REQ_HFENCE_VVMA_ALL, NULL);
}