/* arch/riscv/kvm/nacl.c — KVM RISC-V Nested Acceleration (NACL) support */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2024 Ventana Micro Systems Inc.
4 */
6 #include <linux/kvm_host.h>
7 #include <linux/vmalloc.h>
8 #include <asm/kvm_nacl.h>
/* Set in kvm_riscv_nacl_init() when SBI v1.0+ implements the NACL extension. */
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_available);
/* Each key below is enabled when the matching NACL feature probe succeeds. */
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_csr_available);
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_hfence_available);
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_sret_available);
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_autoswap_csr_available);
/* Per-CPU NACL state; holds the shared-memory pointers registered with SBI. */
DEFINE_PER_CPU(struct kvm_riscv_nacl, kvm_riscv_nacl);
17 void __kvm_riscv_nacl_hfence(void *shmem,
18 unsigned long control,
19 unsigned long page_num,
20 unsigned long page_count)
22 int i, ent = -1, try_count = 5;
23 unsigned long *entp;
25 again:
26 for (i = 0; i < SBI_NACL_SHMEM_HFENCE_ENTRY_MAX; i++) {
27 entp = shmem + SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(i);
28 if (lelong_to_cpu(*entp) & SBI_NACL_SHMEM_HFENCE_CONFIG_PEND)
29 continue;
31 ent = i;
32 break;
35 if (ent < 0) {
36 if (try_count) {
37 nacl_sync_hfence(-1UL);
38 goto again;
39 } else {
40 pr_warn("KVM: No free entry in NACL shared memory\n");
41 return;
45 entp = shmem + SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(i);
46 *entp = cpu_to_lelong(control);
47 entp = shmem + SBI_NACL_SHMEM_HFENCE_ENTRY_PNUM(i);
48 *entp = cpu_to_lelong(page_num);
49 entp = shmem + SBI_NACL_SHMEM_HFENCE_ENTRY_PCOUNT(i);
50 *entp = cpu_to_lelong(page_count);
53 int kvm_riscv_nacl_enable(void)
55 int rc;
56 struct sbiret ret;
57 struct kvm_riscv_nacl *nacl;
59 if (!kvm_riscv_nacl_available())
60 return 0;
61 nacl = this_cpu_ptr(&kvm_riscv_nacl);
63 ret = sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SET_SHMEM,
64 nacl->shmem_phys, 0, 0, 0, 0, 0);
65 rc = sbi_err_map_linux_errno(ret.error);
66 if (rc)
67 return rc;
69 return 0;
72 void kvm_riscv_nacl_disable(void)
74 if (!kvm_riscv_nacl_available())
75 return;
77 sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SET_SHMEM,
78 SBI_SHMEM_DISABLE, SBI_SHMEM_DISABLE, 0, 0, 0, 0);
81 void kvm_riscv_nacl_exit(void)
83 int cpu;
84 struct kvm_riscv_nacl *nacl;
86 if (!kvm_riscv_nacl_available())
87 return;
89 /* Allocate per-CPU shared memory */
90 for_each_possible_cpu(cpu) {
91 nacl = per_cpu_ptr(&kvm_riscv_nacl, cpu);
92 if (!nacl->shmem)
93 continue;
95 free_pages((unsigned long)nacl->shmem,
96 get_order(SBI_NACL_SHMEM_SIZE));
97 nacl->shmem = NULL;
98 nacl->shmem_phys = 0;
102 static long nacl_probe_feature(long feature_id)
104 struct sbiret ret;
106 if (!kvm_riscv_nacl_available())
107 return 0;
109 ret = sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_PROBE_FEATURE,
110 feature_id, 0, 0, 0, 0, 0);
111 return ret.value;
114 int kvm_riscv_nacl_init(void)
116 int cpu;
117 struct page *shmem_page;
118 struct kvm_riscv_nacl *nacl;
120 if (sbi_spec_version < sbi_mk_version(1, 0) ||
121 sbi_probe_extension(SBI_EXT_NACL) <= 0)
122 return -ENODEV;
124 /* Enable NACL support */
125 static_branch_enable(&kvm_riscv_nacl_available);
127 /* Probe NACL features */
128 if (nacl_probe_feature(SBI_NACL_FEAT_SYNC_CSR))
129 static_branch_enable(&kvm_riscv_nacl_sync_csr_available);
130 if (nacl_probe_feature(SBI_NACL_FEAT_SYNC_HFENCE))
131 static_branch_enable(&kvm_riscv_nacl_sync_hfence_available);
132 if (nacl_probe_feature(SBI_NACL_FEAT_SYNC_SRET))
133 static_branch_enable(&kvm_riscv_nacl_sync_sret_available);
134 if (nacl_probe_feature(SBI_NACL_FEAT_AUTOSWAP_CSR))
135 static_branch_enable(&kvm_riscv_nacl_autoswap_csr_available);
137 /* Allocate per-CPU shared memory */
138 for_each_possible_cpu(cpu) {
139 nacl = per_cpu_ptr(&kvm_riscv_nacl, cpu);
141 shmem_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
142 get_order(SBI_NACL_SHMEM_SIZE));
143 if (!shmem_page) {
144 kvm_riscv_nacl_exit();
145 return -ENOMEM;
147 nacl->shmem = page_to_virt(shmem_page);
148 nacl->shmem_phys = page_to_phys(shmem_page);
151 return 0;