// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Ventana Micro Systems Inc.
 */

#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/sizes.h>

#include <asm/bug.h>
#include <asm/current.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/page.h>
#include <asm/sbi.h>
#include <asm/uaccess.h>
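
/*
 * KVM support for the SBI Steal-Time Accounting (STA) extension: the guest
 * registers a 64-byte sbi_sta_struct in shared memory for each VCPU and the
 * host publishes that VCPU's accumulated steal time into it.
 */
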
void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sta.shmem = INVALID_GPA;
	vcpu->arch.sta.last_steal = 0;
}
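
/*
 * Publish the VCPU's accumulated steal time to its shared-memory region.
 * The sequence field acts like a seqcount: it is made odd before the steal
 * field is updated and even again afterwards, so the guest can detect and
 * retry torn reads.
 */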
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
{
	gpa_t shmem = vcpu->arch.sta.shmem;
	u64 last_steal = vcpu->arch.sta.last_steal;
	__le32 __user *sequence_ptr;
	__le64 __user *steal_ptr;
	__le32 sequence_le;
	__le64 steal_le;
	u32 sequence;
	u64 steal;
	unsigned long hva;
	gfn_t gfn;

	if (shmem == INVALID_GPA)
		return;

	/*
	 * shmem is 64-byte aligned (see the enforcement in
	 * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
	 * is 64 bytes, so we know all its offsets are in the same page.
	 */
	gfn = shmem >> PAGE_SHIFT;
	hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);

	if (WARN_ON(kvm_is_error_hva(hva))) {
		vcpu->arch.sta.shmem = INVALID_GPA;
		return;
	}

	sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
			offsetof(struct sbi_sta_struct, sequence));
	steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
			offsetof(struct sbi_sta_struct, steal));
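
	/*
	 * Bump the sequence to an odd value to tell the guest an update
	 * is in progress; a reader that sees an odd value must retry.
	 */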
	if (WARN_ON(get_user(sequence_le, sequence_ptr)))
		return;

	sequence = le32_to_cpu(sequence_le);
	sequence += 1;

	if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
		return;
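
	/*
	 * Fold the run delay accrued since the last update into the
	 * guest-visible steal counter.
	 */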
	if (!WARN_ON(get_user(steal_le, steal_ptr))) {
		steal = le64_to_cpu(steal_le);
		vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
		steal += vcpu->arch.sta.last_steal - last_steal;
		WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
	}
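
	/* Make the sequence even again to publish the completed update. */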
	sequence += 1;
	WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));

	kvm_vcpu_mark_page_dirty(vcpu, gfn);
}
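
/*
 * SBI_EXT_STA_STEAL_TIME_SET_SHMEM: a0/a1 carry the low/high halves of the
 * shared-memory physical address and a2 carries flags. Passing
 * SBI_SHMEM_DISABLE in both address halves disables steal-time reporting
 * for the calling VCPU.
 */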
static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long shmem_phys_lo = cp->a0;
	unsigned long shmem_phys_hi = cp->a1;
	u32 flags = cp->a2;
	struct sbi_sta_struct zero_sta = {0};
	unsigned long hva;
	bool writable;
	gpa_t shmem;
	int ret;

	if (flags != 0)
		return SBI_ERR_INVALID_PARAM;

	if (shmem_phys_lo == SBI_SHMEM_DISABLE &&
	    shmem_phys_hi == SBI_SHMEM_DISABLE) {
		vcpu->arch.sta.shmem = INVALID_GPA;
		return 0;
	}
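
	/* Enforce the 64-byte alignment that record_steal_time() relies on. */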
	if (shmem_phys_lo & (SZ_64 - 1))
		return SBI_ERR_INVALID_PARAM;

	shmem = shmem_phys_lo;

	if (shmem_phys_hi != 0) {
		if (IS_ENABLED(CONFIG_32BIT))
			shmem |= ((gpa_t)shmem_phys_hi << 32);
		else
			return SBI_ERR_INVALID_ADDRESS;
	}

	hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
	if (kvm_is_error_hva(hva) || !writable)
		return SBI_ERR_INVALID_ADDRESS;

	ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
	if (ret)
		return SBI_ERR_FAILURE;

	vcpu->arch.sta.shmem = shmem;
	vcpu->arch.sta.last_steal = current->sched_info.run_delay;

	return 0;
}
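
/* Dispatch SBI STA function IDs invoked by the guest. */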
static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;
	int ret;

	switch (funcid) {
	case SBI_EXT_STA_STEAL_TIME_SET_SHMEM:
		ret = kvm_sbi_sta_steal_time_set_shmem(vcpu);
		break;
	default:
		ret = SBI_ERR_NOT_SUPPORTED;
		break;
	}

	retdata->err_val = ret;

	return 0;
}
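
/*
 * Steal time is derived from sched_info.run_delay, so only advertise the
 * extension when scheduler statistics are available.
 */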
static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
{
	return !!sched_info_on();
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
	.extid_start = SBI_EXT_STA,
	.extid_end = SBI_EXT_STA,
	.handler = kvm_sbi_ext_sta_handler,
	.probe = kvm_sbi_ext_sta_probe,
};
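
/*
 * ONE_REG accessors: allow userspace to save and restore the per-VCPU
 * shmem address (e.g. across migration) as two 32-bit halves.
 */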
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
				   unsigned long reg_num,
				   unsigned long *reg_val)
{
	switch (reg_num) {
	case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
		*reg_val = (unsigned long)vcpu->arch.sta.shmem;
		break;
	case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
		if (IS_ENABLED(CONFIG_32BIT))
			*reg_val = upper_32_bits(vcpu->arch.sta.shmem);
		else
			*reg_val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu,
				   unsigned long reg_num,
				   unsigned long reg_val)
{
	switch (reg_num) {
	case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
		if (IS_ENABLED(CONFIG_32BIT)) {
			gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);

			vcpu->arch.sta.shmem = reg_val;
			vcpu->arch.sta.shmem |= hi << 32;
		} else {
			vcpu->arch.sta.shmem = reg_val;
		}
		break;
	case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
		if (IS_ENABLED(CONFIG_32BIT)) {
			gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);

			vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32);
			vcpu->arch.sta.shmem |= lo;
		} else if (reg_val != 0) {
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}