// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */
9 #include <linux/errno.h>
10 #include <linux/err.h>
11 #include <linux/kvm_host.h>
13 #include <asm/kvm_vcpu_timer.h>
14 #include <asm/kvm_vcpu_pmu.h>
15 #include <asm/kvm_vcpu_sbi.h>
17 static int kvm_sbi_ext_time_handler(struct kvm_vcpu
*vcpu
, struct kvm_run
*run
,
18 struct kvm_vcpu_sbi_return
*retdata
)
20 struct kvm_cpu_context
*cp
= &vcpu
->arch
.guest_context
;
23 if (cp
->a6
!= SBI_EXT_TIME_SET_TIMER
) {
24 retdata
->err_val
= SBI_ERR_INVALID_PARAM
;
28 kvm_riscv_vcpu_pmu_incr_fw(vcpu
, SBI_PMU_FW_SET_TIMER
);
29 #if __riscv_xlen == 32
30 next_cycle
= ((u64
)cp
->a1
<< 32) | (u64
)cp
->a0
;
32 next_cycle
= (u64
)cp
->a0
;
34 kvm_riscv_vcpu_timer_next_event(vcpu
, next_cycle
);
39 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time
= {
40 .extid_start
= SBI_EXT_TIME
,
41 .extid_end
= SBI_EXT_TIME
,
42 .handler
= kvm_sbi_ext_time_handler
,
45 static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu
*vcpu
, struct kvm_run
*run
,
46 struct kvm_vcpu_sbi_return
*retdata
)
51 struct kvm_cpu_context
*cp
= &vcpu
->arch
.guest_context
;
52 unsigned long hmask
= cp
->a0
;
53 unsigned long hbase
= cp
->a1
;
55 if (cp
->a6
!= SBI_EXT_IPI_SEND_IPI
) {
56 retdata
->err_val
= SBI_ERR_INVALID_PARAM
;
60 kvm_riscv_vcpu_pmu_incr_fw(vcpu
, SBI_PMU_FW_IPI_SENT
);
61 kvm_for_each_vcpu(i
, tmp
, vcpu
->kvm
) {
63 if (tmp
->vcpu_id
< hbase
)
65 if (!(hmask
& (1UL << (tmp
->vcpu_id
- hbase
))))
68 ret
= kvm_riscv_vcpu_set_interrupt(tmp
, IRQ_VS_SOFT
);
71 kvm_riscv_vcpu_pmu_incr_fw(tmp
, SBI_PMU_FW_IPI_RCVD
);
77 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi
= {
78 .extid_start
= SBI_EXT_IPI
,
79 .extid_end
= SBI_EXT_IPI
,
80 .handler
= kvm_sbi_ext_ipi_handler
,
83 static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu
*vcpu
, struct kvm_run
*run
,
84 struct kvm_vcpu_sbi_return
*retdata
)
86 struct kvm_cpu_context
*cp
= &vcpu
->arch
.guest_context
;
87 unsigned long hmask
= cp
->a0
;
88 unsigned long hbase
= cp
->a1
;
89 unsigned long funcid
= cp
->a6
;
92 case SBI_EXT_RFENCE_REMOTE_FENCE_I
:
93 kvm_riscv_fence_i(vcpu
->kvm
, hbase
, hmask
);
94 kvm_riscv_vcpu_pmu_incr_fw(vcpu
, SBI_PMU_FW_FENCE_I_SENT
);
96 case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA
:
97 if (cp
->a2
== 0 && cp
->a3
== 0)
98 kvm_riscv_hfence_vvma_all(vcpu
->kvm
, hbase
, hmask
);
100 kvm_riscv_hfence_vvma_gva(vcpu
->kvm
, hbase
, hmask
,
101 cp
->a2
, cp
->a3
, PAGE_SHIFT
);
102 kvm_riscv_vcpu_pmu_incr_fw(vcpu
, SBI_PMU_FW_HFENCE_VVMA_SENT
);
104 case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID
:
105 if (cp
->a2
== 0 && cp
->a3
== 0)
106 kvm_riscv_hfence_vvma_asid_all(vcpu
->kvm
,
107 hbase
, hmask
, cp
->a4
);
109 kvm_riscv_hfence_vvma_asid_gva(vcpu
->kvm
,
113 kvm_riscv_vcpu_pmu_incr_fw(vcpu
, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT
);
115 case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA
:
116 case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID
:
117 case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA
:
118 case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID
:
120 * Until nested virtualization is implemented, the
121 * SBI HFENCE calls should be treated as NOPs
125 retdata
->err_val
= SBI_ERR_NOT_SUPPORTED
;
131 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence
= {
132 .extid_start
= SBI_EXT_RFENCE
,
133 .extid_end
= SBI_EXT_RFENCE
,
134 .handler
= kvm_sbi_ext_rfence_handler
,
137 static int kvm_sbi_ext_srst_handler(struct kvm_vcpu
*vcpu
,
139 struct kvm_vcpu_sbi_return
*retdata
)
141 struct kvm_cpu_context
*cp
= &vcpu
->arch
.guest_context
;
142 unsigned long funcid
= cp
->a6
;
147 case SBI_EXT_SRST_RESET
:
149 case SBI_SRST_RESET_TYPE_SHUTDOWN
:
150 kvm_riscv_vcpu_sbi_system_reset(vcpu
, run
,
151 KVM_SYSTEM_EVENT_SHUTDOWN
,
153 retdata
->uexit
= true;
155 case SBI_SRST_RESET_TYPE_COLD_REBOOT
:
156 case SBI_SRST_RESET_TYPE_WARM_REBOOT
:
157 kvm_riscv_vcpu_sbi_system_reset(vcpu
, run
,
158 KVM_SYSTEM_EVENT_RESET
,
160 retdata
->uexit
= true;
163 retdata
->err_val
= SBI_ERR_NOT_SUPPORTED
;
167 retdata
->err_val
= SBI_ERR_NOT_SUPPORTED
;
173 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst
= {
174 .extid_start
= SBI_EXT_SRST
,
175 .extid_end
= SBI_EXT_SRST
,
176 .handler
= kvm_sbi_ext_srst_handler
,
179 static int kvm_sbi_ext_dbcn_handler(struct kvm_vcpu
*vcpu
,
181 struct kvm_vcpu_sbi_return
*retdata
)
183 struct kvm_cpu_context
*cp
= &vcpu
->arch
.guest_context
;
184 unsigned long funcid
= cp
->a6
;
187 case SBI_EXT_DBCN_CONSOLE_WRITE
:
188 case SBI_EXT_DBCN_CONSOLE_READ
:
189 case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE
:
191 * The SBI debug console functions are unconditionally
192 * forwarded to the userspace.
194 kvm_riscv_vcpu_sbi_forward(vcpu
, run
);
195 retdata
->uexit
= true;
198 retdata
->err_val
= SBI_ERR_NOT_SUPPORTED
;
204 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn
= {
205 .extid_start
= SBI_EXT_DBCN
,
206 .extid_end
= SBI_EXT_DBCN
,
207 .default_disabled
= true,
208 .handler
= kvm_sbi_ext_dbcn_handler
,