1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */
9 #include <linux/errno.h>
10 #include <linux/err.h>
11 #include <linux/kvm_host.h>
13 #include <asm/kvm_vcpu_sbi.h>
15 static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu
*vcpu
)
17 struct kvm_cpu_context
*reset_cntx
;
18 struct kvm_cpu_context
*cp
= &vcpu
->arch
.guest_context
;
19 struct kvm_vcpu
*target_vcpu
;
20 unsigned long target_vcpuid
= cp
->a0
;
23 target_vcpu
= kvm_get_vcpu_by_id(vcpu
->kvm
, target_vcpuid
);
25 return SBI_ERR_INVALID_PARAM
;
27 spin_lock(&target_vcpu
->arch
.mp_state_lock
);
29 if (!kvm_riscv_vcpu_stopped(target_vcpu
)) {
30 ret
= SBI_ERR_ALREADY_AVAILABLE
;
34 spin_lock(&target_vcpu
->arch
.reset_cntx_lock
);
35 reset_cntx
= &target_vcpu
->arch
.guest_reset_context
;
37 reset_cntx
->sepc
= cp
->a1
;
38 /* target vcpu id to start */
39 reset_cntx
->a0
= target_vcpuid
;
40 /* private data passed from kernel */
41 reset_cntx
->a1
= cp
->a2
;
42 spin_unlock(&target_vcpu
->arch
.reset_cntx_lock
);
44 kvm_make_request(KVM_REQ_VCPU_RESET
, target_vcpu
);
46 __kvm_riscv_vcpu_power_on(target_vcpu
);
49 spin_unlock(&target_vcpu
->arch
.mp_state_lock
);
54 static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu
*vcpu
)
58 spin_lock(&vcpu
->arch
.mp_state_lock
);
60 if (kvm_riscv_vcpu_stopped(vcpu
)) {
61 ret
= SBI_ERR_FAILURE
;
65 __kvm_riscv_vcpu_power_off(vcpu
);
68 spin_unlock(&vcpu
->arch
.mp_state_lock
);
73 static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu
*vcpu
)
75 struct kvm_cpu_context
*cp
= &vcpu
->arch
.guest_context
;
76 unsigned long target_vcpuid
= cp
->a0
;
77 struct kvm_vcpu
*target_vcpu
;
79 target_vcpu
= kvm_get_vcpu_by_id(vcpu
->kvm
, target_vcpuid
);
81 return SBI_ERR_INVALID_PARAM
;
82 if (!kvm_riscv_vcpu_stopped(target_vcpu
))
83 return SBI_HSM_STATE_STARTED
;
84 else if (vcpu
->stat
.generic
.blocking
)
85 return SBI_HSM_STATE_SUSPENDED
;
87 return SBI_HSM_STATE_STOPPED
;
90 static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu
*vcpu
, struct kvm_run
*run
,
91 struct kvm_vcpu_sbi_return
*retdata
)
94 struct kvm_cpu_context
*cp
= &vcpu
->arch
.guest_context
;
95 unsigned long funcid
= cp
->a6
;
98 case SBI_EXT_HSM_HART_START
:
99 ret
= kvm_sbi_hsm_vcpu_start(vcpu
);
101 case SBI_EXT_HSM_HART_STOP
:
102 ret
= kvm_sbi_hsm_vcpu_stop(vcpu
);
104 case SBI_EXT_HSM_HART_STATUS
:
105 ret
= kvm_sbi_hsm_vcpu_get_status(vcpu
);
107 retdata
->out_val
= ret
;
108 retdata
->err_val
= 0;
111 case SBI_EXT_HSM_HART_SUSPEND
:
113 case SBI_HSM_SUSPEND_RET_DEFAULT
:
114 kvm_riscv_vcpu_wfi(vcpu
);
116 case SBI_HSM_SUSPEND_NON_RET_DEFAULT
:
117 ret
= SBI_ERR_NOT_SUPPORTED
;
120 ret
= SBI_ERR_INVALID_PARAM
;
124 ret
= SBI_ERR_NOT_SUPPORTED
;
127 retdata
->err_val
= ret
;
132 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm
= {
133 .extid_start
= SBI_EXT_HSM
,
134 .extid_end
= SBI_EXT_HSM
,
135 .handler
= kvm_sbi_ext_hsm_handler
,