// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
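
/*
 * Placeholder extensions used when SBI v0.1 or SBI PMU support is compiled
 * out: the impossible extension ID range and NULL handler keep the
 * sbi_ext[] table below well-formed, and any call routed to them fails as
 * unsupported because the handler is NULL.
 */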
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

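/*
 * Each entry ties a KVM_RISCV_SBI_EXT_* ID (the index userspace uses
 * through the ONE_REG interface) to the in-kernel implementation of that
 * SBI extension.
 */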
struct kvm_riscv_sbi_extension_entry {
        enum KVM_RISCV_SBI_EXT_ID ext_idx;
        const struct kvm_vcpu_sbi_extension *ext_ptr;
};

static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
        {
                .ext_idx = KVM_RISCV_SBI_EXT_V01,
                .ext_ptr = &vcpu_sbi_ext_v01,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
                .ext_ptr = &vcpu_sbi_ext_base,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_TIME,
                .ext_ptr = &vcpu_sbi_ext_time,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_IPI,
                .ext_ptr = &vcpu_sbi_ext_ipi,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
                .ext_ptr = &vcpu_sbi_ext_rfence,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_SRST,
                .ext_ptr = &vcpu_sbi_ext_srst,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_HSM,
                .ext_ptr = &vcpu_sbi_ext_hsm,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_PMU,
                .ext_ptr = &vcpu_sbi_ext_pmu,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_DBCN,
                .ext_ptr = &vcpu_sbi_ext_dbcn,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_STA,
                .ext_ptr = &vcpu_sbi_ext_sta,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
                .ext_ptr = &vcpu_sbi_ext_experimental,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
                .ext_ptr = &vcpu_sbi_ext_vendor,
        },
};

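/* Look up the table entry for a KVM_RISCV_SBI_EXT_* index, or NULL. */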
static const struct kvm_riscv_sbi_extension_entry *
riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
{
        const struct kvm_riscv_sbi_extension_entry *sext = NULL;

        if (idx >= KVM_RISCV_SBI_EXT_MAX)
                return NULL;

        for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                if (sbi_ext[i].ext_idx == idx) {
                        sext = &sbi_ext[i];
                        break;
                }
        }

        return sext;
}

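/* An extension is supported if it exists and probing did not mark it unavailable. */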
bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        sext = riscv_vcpu_get_sbi_ext(vcpu, idx);

        return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}

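/*
 * Forward an SBI call to userspace: fill kvm_run with the extension ID,
 * function ID and arguments, and preset the return values so that an
 * unhandled call reads back as SBI_ERR_NOT_SUPPORTED.
 */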
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        vcpu->arch.sbi_context.return_handled = 0;
        vcpu->stat.ecall_exit_stat++;
        run->exit_reason = KVM_EXIT_RISCV_SBI;
        run->riscv_sbi.extension_id = cp->a7;
        run->riscv_sbi.function_id = cp->a6;
        run->riscv_sbi.args[0] = cp->a0;
        run->riscv_sbi.args[1] = cp->a1;
        run->riscv_sbi.args[2] = cp->a2;
        run->riscv_sbi.args[3] = cp->a3;
        run->riscv_sbi.args[4] = cp->a4;
        run->riscv_sbi.args[5] = cp->a5;
        run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
        run->riscv_sbi.ret[1] = 0;
}

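/*
 * Handle a guest-requested system-level event (e.g. SRST shutdown/reset):
 * stop every vCPU, then exit to userspace with KVM_EXIT_SYSTEM_EVENT
 * carrying the event type and reason.
 */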
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run,
                                     u32 type, u64 reason)
{
        unsigned long i;
        struct kvm_vcpu *tmp;

        kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
                spin_lock(&tmp->arch.mp_state_lock);
                WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
                spin_unlock(&tmp->arch.mp_state_lock);
        }
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

        memset(&run->system_event, 0, sizeof(run->system_event));
        run->system_event.type = type;
        run->system_event.ndata = 1;
        run->system_event.data[0] = reason;
        run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

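/*
 * Complete an SBI call that was earlier forwarded to userspace: copy the
 * return values from kvm_run back into a0/a1 and step sepc past the ecall.
 */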
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        /* Handle SBI return only once */
        if (vcpu->arch.sbi_context.return_handled)
                return 0;
        vcpu->arch.sbi_context.return_handled = 1;

        /* Update return values */
        cp->a0 = run->riscv_sbi.ret[0];
        cp->a1 = run->riscv_sbi.ret[1];

        /* Move to next instruction */
        vcpu->arch.guest_context.sepc += 4;

        return 0;
}

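/*
 * Helpers behind the ONE_REG SBI-extension registers: "single" registers
 * enable or disable one extension by index, while "multi" registers treat
 * the register value as a bitmask covering BITS_PER_LONG extension IDs each.
 */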
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long reg_val)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        if (reg_val != 1 && reg_val != 0)
                return -EINVAL;

        sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
        if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;

        scontext->ext_status[sext->ext_idx] = (reg_val) ?
                        KVM_RISCV_SBI_EXT_STATUS_ENABLED :
                        KVM_RISCV_SBI_EXT_STATUS_DISABLED;

        return 0;
}

static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long *reg_val)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
        if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;

        *reg_val = scontext->ext_status[sext->ext_idx] ==
                                KVM_RISCV_SBI_EXT_STATUS_ENABLED;

        return 0;
}

static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long reg_val, bool enable)
{
        unsigned long i, ext_id;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -ENOENT;

        for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
        }

        return 0;
}

static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long *reg_val)
{
        unsigned long i, ext_id, ext_val;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -ENOENT;

        for (i = 0; i < BITS_PER_LONG; i++) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                ext_val = 0;
                riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
                if (ext_val)
                        *reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
        }

        return 0;
}

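/*
 * KVM_SET_ONE_REG handler for the SBI-extension control registers.
 * Changes are only permitted before the vCPU has run for the first time.
 */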
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (vcpu->arch.ran_atleast_once)
                return -EBUSY;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
        case KVM_REG_RISCV_SBI_MULTI_EN:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
        default:
                return -ENOENT;
        }

        return 0;
}

int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        reg_val = 0;
        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
                break;
        case KVM_REG_RISCV_SBI_MULTI_EN:
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
                if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
                        reg_val = ~reg_val;
                break;
        default:
                rc = -ENOENT;
        }
        if (rc)
                return rc;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

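/*
 * Example (userspace sketch, not part of this file): with the uapi
 * definitions from <linux/kvm.h> and <asm/kvm.h>, an RV64 VMM could
 * disable the PMU extension for a not-yet-run vCPU roughly like this:
 *
 *     __u64 val = 0;   // 0 = disabled, 1 = enabled
 *     struct kvm_one_reg reg = {
 *             .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *                   KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
 *                   KVM_RISCV_SBI_EXT_PMU,
 *             .addr = (unsigned long)&val,
 *     };
 *     ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */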
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
                               const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_STATE);
        unsigned long reg_subtype, reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_STA:
                return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
        default:
                return -EINVAL;
        }

        return 0;
}

int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
                               const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_STATE);
        unsigned long reg_subtype, reg_val;
        int ret;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_STA:
                ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
                break;
        default:
                return -EINVAL;
        }

        if (ret)
                return ret;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

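/*
 * Map a raw SBI extension ID from the guest (a7) to its handler. Entries
 * registered with ext_idx >= KVM_RISCV_SBI_EXT_MAX (the base extension)
 * cannot be disabled; any other extension must be enabled for this vCPU.
 */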
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                                struct kvm_vcpu *vcpu, unsigned long extid)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;

                if (ext->extid_start <= extid && ext->extid_end >= extid) {
                        if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
                            scontext->ext_status[entry->ext_idx] ==
                                                KVM_RISCV_SBI_EXT_STATUS_ENABLED)
                                return ext;

                        return NULL;
                }
        }

        return NULL;
}

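/*
 * Top-level handler for a guest SBI ecall. The return value follows the
 * usual KVM convention: < 0 propagates an error to userspace, 0 exits to
 * userspace (e.g. for forwarded calls), and 1 resumes the guest.
 */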
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret = 1;
        bool next_sepc = true;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        const struct kvm_vcpu_sbi_extension *sbi_ext;
        struct kvm_cpu_trap utrap = {0};
        struct kvm_vcpu_sbi_return sbi_ret = {
                .out_val = 0,
                .err_val = 0,
                .utrap = &utrap,
        };
        bool ext_is_v01 = false;

        sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
        if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
                if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
                    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
                        ext_is_v01 = true;
#endif
                ret = sbi_ext->handler(vcpu, run, &sbi_ret);
        } else {
                /* Return error for unsupported SBI calls */
                cp->a0 = SBI_ERR_NOT_SUPPORTED;
                goto ecall_done;
        }

        /*
         * When the SBI extension returns a Linux error code, exit the ioctl
         * loop and forward the error to userspace.
         */
        if (ret < 0) {
                next_sepc = false;
                goto ecall_done;
        }

        /* Handle special error cases, i.e. trap, exit, or userspace forward */
        if (sbi_ret.utrap->scause) {
                /* No need to increment sepc or exit ioctl loop */
                ret = 1;
                sbi_ret.utrap->sepc = cp->sepc;
                kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
                next_sepc = false;
                goto ecall_done;
        }

        /* Exit the ioctl loop or propagate the error code to the guest */
        if (sbi_ret.uexit) {
                next_sepc = false;
                ret = 0;
        } else {
                cp->a0 = sbi_ret.err_val;
                ret = 1;
        }

ecall_done:
        if (next_sepc)
                cp->sepc += 4;
        /* a1 should only be updated when we continue the ioctl loop */
        if (!ext_is_v01 && ret == 1)
                cp->a1 = sbi_ret.out_val;

        return ret;
}

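/*
 * Probe each registered extension for this vCPU and record whether it is
 * unavailable, disabled by default, or enabled.
 */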
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int idx, i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;
                idx = entry->ext_idx;

                if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
                        continue;

                if (ext->probe && !ext->probe(vcpu)) {
                        scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
                        continue;
                }

                scontext->ext_status[idx] = ext->default_disabled ?
                                        KVM_RISCV_SBI_EXT_STATUS_DISABLED :
                                        KVM_RISCV_SBI_EXT_STATUS_ENABLED;
        }
}