// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2024 Ventana Micro Systems Inc.
 */
6 #include <linux/kvm_host.h>
7 #include <linux/vmalloc.h>
8 #include <asm/kvm_nacl.h>
/* True once kvm_riscv_nacl_init() detects the SBI NACL extension. */
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_available);
/* Set when nacl_probe_feature(SBI_NACL_FEAT_SYNC_CSR) reports support. */
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_csr_available);
/* Set when nacl_probe_feature(SBI_NACL_FEAT_SYNC_HFENCE) reports support. */
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_hfence_available);
/* Set when nacl_probe_feature(SBI_NACL_FEAT_SYNC_SRET) reports support. */
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_sret_available);
/* Set when nacl_probe_feature(SBI_NACL_FEAT_AUTOSWAP_CSR) reports support. */
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_autoswap_csr_available);
/* Per-CPU NACL state; holds this CPU's shared-memory pointers (shmem/shmem_phys). */
DEFINE_PER_CPU(struct kvm_riscv_nacl, kvm_riscv_nacl);
17 void __kvm_riscv_nacl_hfence(void *shmem
,
18 unsigned long control
,
19 unsigned long page_num
,
20 unsigned long page_count
)
22 int i
, ent
= -1, try_count
= 5;
26 for (i
= 0; i
< SBI_NACL_SHMEM_HFENCE_ENTRY_MAX
; i
++) {
27 entp
= shmem
+ SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(i
);
28 if (lelong_to_cpu(*entp
) & SBI_NACL_SHMEM_HFENCE_CONFIG_PEND
)
37 nacl_sync_hfence(-1UL);
40 pr_warn("KVM: No free entry in NACL shared memory\n");
45 entp
= shmem
+ SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(i
);
46 *entp
= cpu_to_lelong(control
);
47 entp
= shmem
+ SBI_NACL_SHMEM_HFENCE_ENTRY_PNUM(i
);
48 *entp
= cpu_to_lelong(page_num
);
49 entp
= shmem
+ SBI_NACL_SHMEM_HFENCE_ENTRY_PCOUNT(i
);
50 *entp
= cpu_to_lelong(page_count
);
53 int kvm_riscv_nacl_enable(void)
57 struct kvm_riscv_nacl
*nacl
;
59 if (!kvm_riscv_nacl_available())
61 nacl
= this_cpu_ptr(&kvm_riscv_nacl
);
63 ret
= sbi_ecall(SBI_EXT_NACL
, SBI_EXT_NACL_SET_SHMEM
,
64 nacl
->shmem_phys
, 0, 0, 0, 0, 0);
65 rc
= sbi_err_map_linux_errno(ret
.error
);
72 void kvm_riscv_nacl_disable(void)
74 if (!kvm_riscv_nacl_available())
77 sbi_ecall(SBI_EXT_NACL
, SBI_EXT_NACL_SET_SHMEM
,
78 SBI_SHMEM_DISABLE
, SBI_SHMEM_DISABLE
, 0, 0, 0, 0);
81 void kvm_riscv_nacl_exit(void)
84 struct kvm_riscv_nacl
*nacl
;
86 if (!kvm_riscv_nacl_available())
89 /* Allocate per-CPU shared memory */
90 for_each_possible_cpu(cpu
) {
91 nacl
= per_cpu_ptr(&kvm_riscv_nacl
, cpu
);
95 free_pages((unsigned long)nacl
->shmem
,
96 get_order(SBI_NACL_SHMEM_SIZE
));
102 static long nacl_probe_feature(long feature_id
)
106 if (!kvm_riscv_nacl_available())
109 ret
= sbi_ecall(SBI_EXT_NACL
, SBI_EXT_NACL_PROBE_FEATURE
,
110 feature_id
, 0, 0, 0, 0, 0);
114 int kvm_riscv_nacl_init(void)
117 struct page
*shmem_page
;
118 struct kvm_riscv_nacl
*nacl
;
120 if (sbi_spec_version
< sbi_mk_version(1, 0) ||
121 sbi_probe_extension(SBI_EXT_NACL
) <= 0)
124 /* Enable NACL support */
125 static_branch_enable(&kvm_riscv_nacl_available
);
127 /* Probe NACL features */
128 if (nacl_probe_feature(SBI_NACL_FEAT_SYNC_CSR
))
129 static_branch_enable(&kvm_riscv_nacl_sync_csr_available
);
130 if (nacl_probe_feature(SBI_NACL_FEAT_SYNC_HFENCE
))
131 static_branch_enable(&kvm_riscv_nacl_sync_hfence_available
);
132 if (nacl_probe_feature(SBI_NACL_FEAT_SYNC_SRET
))
133 static_branch_enable(&kvm_riscv_nacl_sync_sret_available
);
134 if (nacl_probe_feature(SBI_NACL_FEAT_AUTOSWAP_CSR
))
135 static_branch_enable(&kvm_riscv_nacl_autoswap_csr_available
);
137 /* Allocate per-CPU shared memory */
138 for_each_possible_cpu(cpu
) {
139 nacl
= per_cpu_ptr(&kvm_riscv_nacl
, cpu
);
141 shmem_page
= alloc_pages(GFP_KERNEL
| __GFP_ZERO
,
142 get_order(SBI_NACL_SHMEM_SIZE
));
144 kvm_riscv_nacl_exit();
147 nacl
->shmem
= page_to_virt(shmem_page
);
148 nacl
->shmem_phys
= page_to_phys(shmem_page
);