// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/cpufeature.h>
#include <asm/kvm_nacl.h>
#include <asm/sbi.h>
17 long kvm_arch_dev_ioctl(struct file
*filp
,
18 unsigned int ioctl
, unsigned long arg
)
23 int kvm_arch_enable_virtualization_cpu(void)
27 rc
= kvm_riscv_nacl_enable();
31 csr_write(CSR_HEDELEG
, KVM_HEDELEG_DEFAULT
);
32 csr_write(CSR_HIDELEG
, KVM_HIDELEG_DEFAULT
);
34 /* VS should access only the time counter directly. Everything else should trap */
35 csr_write(CSR_HCOUNTEREN
, 0x02);
37 csr_write(CSR_HVIP
, 0);
39 kvm_riscv_aia_enable();
44 void kvm_arch_disable_virtualization_cpu(void)
46 kvm_riscv_aia_disable();
49 * After clearing the hideleg CSR, the host kernel will receive
50 * spurious interrupts if hvip CSR has pending interrupts and the
51 * corresponding enable bits in vsie CSR are asserted. To avoid it,
52 * hvip CSR and vsie CSR must be cleared before clearing hideleg CSR.
54 csr_write(CSR_VSIE
, 0);
55 csr_write(CSR_HVIP
, 0);
56 csr_write(CSR_HEDELEG
, 0);
57 csr_write(CSR_HIDELEG
, 0);
59 kvm_riscv_nacl_disable();
/*
 * Common module-teardown helper shared by the init failure path and
 * module exit: unwinds AIA, NACL, and the perf callbacks.
 *
 * NOTE(review): the AIA exit call was lost in the mangled source but is
 * implied by the matching kvm_riscv_aia_init() — confirm upstream.
 */
static void kvm_riscv_teardown(void)
{
	kvm_riscv_aia_exit();
	kvm_riscv_nacl_exit();
	kvm_unregister_perf_callbacks();
}
69 static int __init
riscv_kvm_init(void)
75 if (!riscv_isa_extension_available(NULL
, h
)) {
76 kvm_info("hypervisor extension not available\n");
80 if (sbi_spec_is_0_1()) {
81 kvm_info("require SBI v0.2 or higher\n");
85 if (!sbi_probe_extension(SBI_EXT_RFENCE
)) {
86 kvm_info("require SBI RFENCE extension\n");
90 rc
= kvm_riscv_nacl_init();
91 if (rc
&& rc
!= -ENODEV
)
94 kvm_riscv_gstage_mode_detect();
96 kvm_riscv_gstage_vmid_detect();
98 rc
= kvm_riscv_aia_init();
99 if (rc
&& rc
!= -ENODEV
) {
100 kvm_riscv_nacl_exit();
104 kvm_info("hypervisor extension available\n");
106 if (kvm_riscv_nacl_available()) {
109 if (kvm_riscv_nacl_sync_csr_available()) {
112 strcat(slist
, "sync_csr");
115 if (kvm_riscv_nacl_sync_hfence_available()) {
118 strcat(slist
, "sync_hfence");
121 if (kvm_riscv_nacl_sync_sret_available()) {
124 strcat(slist
, "sync_sret");
127 if (kvm_riscv_nacl_autoswap_csr_available()) {
130 strcat(slist
, "autoswap_csr");
133 kvm_info("using SBI nested acceleration with %s\n",
134 (rc
) ? slist
: "no features");
137 switch (kvm_riscv_gstage_mode()) {
138 case HGATP_MODE_SV32X4
:
141 case HGATP_MODE_SV39X4
:
144 case HGATP_MODE_SV48X4
:
147 case HGATP_MODE_SV57X4
:
153 kvm_info("using %s G-stage page table format\n", str
);
155 kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());
157 if (kvm_riscv_aia_available())
158 kvm_info("AIA available with %d guest external interrupts\n",
159 kvm_riscv_aia_nr_hgei
);
161 kvm_register_perf_callbacks(NULL
);
163 rc
= kvm_init(sizeof(struct kvm_vcpu
), 0, THIS_MODULE
);
165 kvm_riscv_teardown();
module_init(riscv_kvm_init);
173 static void __exit
riscv_kvm_exit(void)
175 kvm_riscv_teardown();
module_exit(riscv_kvm_exit);