// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
#include <asm/cpufeature.h>
#include <asm/insn-def.h>
#include <asm/kvm_nacl.h>
#define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
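/*
 * The local fence helpers below run on the current host hart only. When
 * the Svinval extension is available, a range flush is issued as a batched
 * SFENCE.W.INVAL / HINVAL.* per page / SFENCE.INVAL.IR sequence; otherwise
 * one HFENCE.* instruction is issued per page of the range. Note that the
 * GVMA variants expect the guest physical address shifted right by 2.
 */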
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order)
{
	gpa_t pos;

	/* For overly large ranges, a full flush by VMID is cheaper. */
	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HINVAL_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HFENCE_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
	}
}
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
	asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_all();
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HINVAL_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HFENCE_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
	}
}
void kvm_riscv_local_hfence_gvma_all(void)
{
	asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
}
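/*
 * The VVMA (VS-stage) helpers below need the target guest's VMID in
 * CSR_HGATP for the fence to apply to that guest's address space, so they
 * temporarily write the VMID into HGATP via csr_swap() and restore the
 * previous HGATP value afterwards.
 */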
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");

	csr_write(CSR_HGATP, hgatp);
}
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_all(vmid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, zero)
			: : "r" (pos) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, zero)
			: : "r" (pos) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");

	csr_write(CSR_HGATP, hgatp);
}
void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;

	if (!kvm_riscv_gstage_vmid_bits() ||
	    vcpu->arch.last_exit_cpu == vcpu->cpu)
		return;

	/*
	 * On RISC-V platforms with hardware VMID support, we share the same
	 * VMID for all VCPUs of a particular Guest/VM. This means we might
	 * have stale G-stage TLB entries on the current Host CPU due to
	 * some other VCPU of the same Guest which ran previously on the
	 * current Host CPU.
	 *
	 * To clean up stale TLB entries, we simply flush all G-stage TLB
	 * entries by VMID whenever the underlying Host CPU changes for a VCPU.
	 */

	vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
	kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}
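/*
 * The *_process() handlers below run on the target VCPU in response to
 * KVM requests raised by make_xfence_request(). kvm_riscv_fence_i_process()
 * flushes the local instruction cache, while the HFENCE handlers prefer
 * the SBI nested acceleration (NACL) shared-memory interface when it is
 * available and fall back to local fences otherwise.
 */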
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
	local_flush_icache_all();
}
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
	unsigned long vmid = READ_ONCE(v->vmid);

	if (kvm_riscv_nacl_available())
		nacl_hfence_gvma_vmid_all(nacl_shmem(), vmid);
	else
		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
	unsigned long vmid = READ_ONCE(v->vmid);

	if (kvm_riscv_nacl_available())
		nacl_hfence_vvma_all(nacl_shmem(), vmid);
	else
		kvm_riscv_local_hfence_vvma_all(vmid);
}
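/*
 * Each VCPU carries a fixed-size ring of pending hfence requests
 * (KVM_RISCV_VCPU_MAX_HFENCE entries) protected by hfence_lock. A slot
 * whose type is zero is free; enqueue fails (returns false) when the tail
 * slot is still occupied, and dequeue returns false when the head slot is
 * empty.
 */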
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
				struct kvm_riscv_hfence *out_data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (varch->hfence_queue[varch->hfence_head].type) {
		memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
		       sizeof(*out_data));
		varch->hfence_queue[varch->hfence_head].type = 0;

		varch->hfence_head++;
		if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_head = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}
static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
				const struct kvm_riscv_hfence *data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (!varch->hfence_queue[varch->hfence_tail].type) {
		memcpy(&varch->hfence_queue[varch->hfence_tail],
		       data, sizeof(*data));

		varch->hfence_tail++;
		if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_tail = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}
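/*
 * Drain the VCPU hfence queue and issue each pending fence, either through
 * the NACL shared-memory interface or as a local fence, using the current
 * VMID of the VM. SBI PMU firmware counters are bumped for the VVMA
 * variants.
 */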
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;
	struct kvm_riscv_hfence d = { 0 };
	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;

	while (vcpu_hfence_dequeue(vcpu, &d)) {
		switch (d.type) {
		case KVM_RISCV_HFENCE_UNKNOWN:
			break;
		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
			vmid = READ_ONCE(v->vmid);
			if (kvm_riscv_nacl_available())
				nacl_hfence_gvma_vmid(nacl_shmem(), vmid,
						      d.addr, d.size, d.order);
			else
				kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr,
								     d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
			vmid = READ_ONCE(v->vmid);
			if (kvm_riscv_nacl_available())
				nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid,
						      d.addr, d.size, d.order);
			else
				kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr,
								     d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
			vmid = READ_ONCE(v->vmid);
			if (kvm_riscv_nacl_available())
				nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid);
			else
				kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid);
			break;
		case KVM_RISCV_HFENCE_VVMA_GVA:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
			vmid = READ_ONCE(v->vmid);
			if (kvm_riscv_nacl_available())
				nacl_hfence_vvma(nacl_shmem(), vmid,
						 d.addr, d.size, d.order);
			else
				kvm_riscv_local_hfence_vvma_gva(vmid, d.addr,
								d.size, d.order);
			break;
		default:
			break;
		}
	}
}
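/*
 * Build a mask of target VCPUs from hbase/hmask (hbase == -1UL selects all
 * VCPUs), enqueue the hfence data on each of them, and kick them with the
 * given KVM request. If some VCPU's hfence queue is full, the request is
 * downgraded to the more conservative fallback_req for all targets.
 */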
static void make_xfence_request(struct kvm *kvm,
				unsigned long hbase, unsigned long hmask,
				unsigned int req, unsigned int fallback_req,
				const struct kvm_riscv_hfence *data)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	unsigned int actual_req = req;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

	bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (hbase != -1UL) {
			if (vcpu->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
				continue;
		}

		bitmap_set(vcpu_mask, i, 1);

		if (!data || !data->type)
			continue;

		/*
		 * Enqueue hfence data to VCPU hfence queue. If we don't
		 * have space in the VCPU hfence queue then fall back to
		 * a more conservative hfence request.
		 */
		if (!vcpu_hfence_enqueue(vcpu, data))
			actual_req = fallback_req;
	}

	kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}
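/*
 * The kvm_riscv_fence_i() and kvm_riscv_hfence_*() entry points below are
 * used by the rest of KVM RISC-V (e.g. the SBI RFENCE handling and the
 * G-stage MMU code) to broadcast fences to a set of VCPUs identified by
 * hbase/hmask.
 */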
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
			    KVM_REQ_FENCE_I, NULL);
}
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
	data.asid = 0;
	data.addr = gpa;
	data.size = gpsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
}
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
}
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
	data.asid = asid;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
	data.asid = asid;
	data.addr = data.size = data.order = 0;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_GVA;
	data.asid = 0;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
			    KVM_REQ_HFENCE_VVMA_ALL, NULL);
}