// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bits.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>

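/*
 * Helpers to take and release the vcpu->mutex of every VCPU.
 * aia_create() uses them so that no VCPU can be in (or enter)
 * KVM_RUN while the in-kernel irqchip is being created.
 */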
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
	struct kvm_vcpu *tmp_vcpu;

	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&tmp_vcpu->mutex);
	}
}

static void unlock_all_vcpus(struct kvm *kvm)
{
	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

static bool lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *tmp_vcpu;
	unsigned long c;

	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex)) {
			unlock_vcpus(kvm, c - 1);
			return false;
		}
	}

	return true;
}

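/*
 * KVM_CREATE_DEVICE handler for the in-kernel AIA irqchip. Creation
 * fails with -EEXIST if an irqchip is already present and with -EBUSY
 * if any VCPU has already run; both conditions are checked with every
 * vcpu mutex held so the answer cannot change underneath us.
 */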
static int aia_create(struct kvm_device *dev, u32 type)
{
	int ret;
	unsigned long i;
	struct kvm *kvm = dev->kvm;
	struct kvm_vcpu *vcpu;

	if (irqchip_in_kernel(kvm))
		return -EEXIST;

	ret = -EBUSY;
	if (!lock_all_vcpus(kvm))
		return ret;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.ran_atleast_once)
			goto out_unlock;
	}
	ret = 0;

	kvm->arch.aia.in_kernel = true;

out_unlock:
	unlock_all_vcpus(kvm);
	return ret;
}

static void aia_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

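/*
 * Read or write one KVM_DEV_RISCV_AIA_GRP_CONFIG attribute. For
 * write=true the value in *nr is validated and stored; for reads
 * the current value is returned through *nr.
 */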
static int aia_config(struct kvm *kvm, unsigned long type,
		      u32 *nr, bool write)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	/* Writes can only be done before irqchip is initialized */
	if (write && kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	switch (type) {
	case KVM_DEV_RISCV_AIA_CONFIG_MODE:
		if (write) {
			switch (*nr) {
			case KVM_DEV_RISCV_AIA_MODE_EMUL:
				break;
			case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
			case KVM_DEV_RISCV_AIA_MODE_AUTO:
				/*
				 * HW Acceleration and Auto modes are only
				 * supported on hosts with non-zero guest
				 * external interrupts (i.e. non-zero
				 * VS-level IMSIC pages).
				 */
				if (!kvm_riscv_aia_nr_hgei)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
			aia->mode = *nr;
		} else
			*nr = aia->mode;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_IDS:
		if (write) {
			if ((*nr < KVM_DEV_RISCV_AIA_IDS_MIN) ||
			    (*nr >= KVM_DEV_RISCV_AIA_IDS_MAX) ||
			    ((*nr & KVM_DEV_RISCV_AIA_IDS_MIN) !=
			     KVM_DEV_RISCV_AIA_IDS_MIN) ||
			    (kvm_riscv_aia_max_ids <= *nr))
				return -EINVAL;
			aia->nr_ids = *nr;
		} else
			*nr = aia->nr_ids;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
		if (write) {
			if ((*nr >= KVM_DEV_RISCV_AIA_SRCS_MAX) ||
			    (*nr >= kvm_riscv_aia_max_ids))
				return -EINVAL;
			aia->nr_sources = *nr;
		} else
			*nr = aia->nr_sources;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_GROUP_BITS_MAX)
				return -EINVAL;
			aia->nr_group_bits = *nr;
		} else
			*nr = aia->nr_group_bits;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
		if (write) {
			if ((*nr < KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN) ||
			    (*nr >= KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX))
				return -EINVAL;
			aia->nr_group_shift = *nr;
		} else
			*nr = aia->nr_group_shift;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_HART_BITS_MAX)
				return -EINVAL;
			aia->nr_hart_bits = *nr;
		} else
			*nr = aia->nr_hart_bits;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_GUEST_BITS_MAX)
				return -EINVAL;
			aia->nr_guest_bits = *nr;
		} else
			*nr = aia->nr_guest_bits;
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

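/* Read or write the guest physical base address of the APLIC. */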
static int aia_aplic_addr(struct kvm *kvm, u64 *addr, bool write)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	if (write) {
		/* Writes can only be done before irqchip is initialized */
		if (kvm_riscv_aia_initialized(kvm))
			return -EBUSY;

		if (*addr & (KVM_DEV_RISCV_APLIC_ALIGN - 1))
			return -EINVAL;

		aia->aplic_addr = *addr;
	} else
		*addr = aia->aplic_addr;

	return 0;
}

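/* Read or write the IMSIC base address of the VCPU with the given index. */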
static int aia_imsic_addr(struct kvm *kvm, u64 *addr,
			  unsigned long vcpu_idx, bool write)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_aia *vcpu_aia;

	vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	if (!vcpu)
		return -EINVAL;
	vcpu_aia = &vcpu->arch.aia_context;

	if (write) {
		/* Writes can only be done before irqchip is initialized */
		if (kvm_riscv_aia_initialized(kvm))
			return -EBUSY;

		if (*addr & (KVM_DEV_RISCV_IMSIC_ALIGN - 1))
			return -EINVAL;
	}

	mutex_lock(&vcpu->mutex);
	if (write)
		vcpu_aia->imsic_addr = *addr;
	else
		*addr = vcpu_aia->imsic_addr;
	mutex_unlock(&vcpu->mutex);

	return 0;
}

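/*
 * Compute the common base PPN of an IMSIC address by masking out the
 * variable fields. Following the RISC-V AIA layout, an IMSIC page
 * address is composed of (low to high) the 4KB page offset, guest
 * index bits, hart index bits and, optionally, group index bits
 * starting at nr_group_shift. The remaining high bits form the base
 * PPN, which must match across all IMSICs of the VM.
 */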
static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
{
	u32 h, l;
	gpa_t mask = 0;

	h = aia->nr_hart_bits + aia->nr_guest_bits +
	    IMSIC_MMIO_PAGE_SHIFT - 1;
	mask = GENMASK_ULL(h, 0);

	if (aia->nr_group_bits) {
		h = aia->nr_group_bits + aia->nr_group_shift - 1;
		l = aia->nr_group_shift;
		mask |= GENMASK_ULL(h, l);
	}

	return (addr & ~mask) >> IMSIC_MMIO_PAGE_SHIFT;
}

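/*
 * Recover the effective hart index from an IMSIC address: the group
 * and hart index fields are extracted and concatenated as
 * (group << nr_hart_bits) | hart.
 */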
static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
{
	u32 hart = 0, group = 0;

	if (aia->nr_hart_bits)
		hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
		       GENMASK_ULL(aia->nr_hart_bits - 1, 0);
	if (aia->nr_group_bits)
		group = (addr >> aia->nr_group_shift) &
			GENMASK_ULL(aia->nr_group_bits - 1, 0);

	return (group << aia->nr_hart_bits) | hart;
}

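/*
 * Finalize the irqchip (KVM_DEV_RISCV_AIA_CTRL_INIT): validate the
 * configured topology, initialize the APLIC and each VCPU's IMSIC,
 * and set the initialized flag. Once this succeeds, configuration
 * and address writes are rejected with -EBUSY.
 */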
static int aia_init(struct kvm *kvm)
{
	int ret, i;
	unsigned long idx;
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_aia *vaia;
	struct kvm_aia *aia = &kvm->arch.aia;
	gpa_t base_ppn = KVM_RISCV_AIA_UNDEF_ADDR;

	/* Irqchip can be initialized only once */
	if (kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* We might be in the middle of creating a VCPU */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* Number of sources must not exceed the number of IDs */
	if (aia->nr_ids < aia->nr_sources)
		return -EINVAL;

	/* APLIC base is required for non-zero number of sources */
	if (aia->nr_sources && aia->aplic_addr == KVM_RISCV_AIA_UNDEF_ADDR)
		return -EINVAL;

	/* Initialize APLIC */
	ret = kvm_riscv_aia_aplic_init(kvm);
	if (ret)
		return ret;

	/* Iterate over each VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		vaia = &vcpu->arch.aia_context;

		/* IMSIC base is required */
		if (vaia->imsic_addr == KVM_RISCV_AIA_UNDEF_ADDR) {
			ret = -EINVAL;
			goto fail_cleanup_imsics;
		}

		/* All IMSICs should have matching base PPN */
		if (base_ppn == KVM_RISCV_AIA_UNDEF_ADDR)
			base_ppn = aia_imsic_ppn(aia, vaia->imsic_addr);
		if (base_ppn != aia_imsic_ppn(aia, vaia->imsic_addr)) {
			ret = -EINVAL;
			goto fail_cleanup_imsics;
		}

		/* Update HART index of the IMSIC based on IMSIC base */
		vaia->hart_index = aia_imsic_hart_index(aia,
							vaia->imsic_addr);

		/* Initialize IMSIC for this VCPU */
		ret = kvm_riscv_vcpu_aia_imsic_init(vcpu);
		if (ret)
			goto fail_cleanup_imsics;
	}

	/* Set the initialized flag */
	kvm->arch.aia.initialized = true;

	return 0;

fail_cleanup_imsics:
	for (i = idx - 1; i >= 0; i--) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
	}
	kvm_riscv_aia_aplic_cleanup(kvm);
	return ret;
}

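/*
 * KVM_SET_DEVICE_ATTR handler; aia_get_attr() below mirrors it for
 * KVM_GET_DEVICE_ATTR. Attribute payloads are copied from user space
 * first, and the actual state change runs under kvm->lock.
 */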
static int aia_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	u32 nr;
	u64 addr;
	int nr_vcpus, r = -ENXIO;
	unsigned long v, type = (unsigned long)attr->attr;
	void __user *uaddr = (void __user *)(long)attr->addr;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = aia_config(dev->kvm, type, &nr, true);
		mutex_unlock(&dev->kvm->lock);

		break;

	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		mutex_lock(&dev->kvm->lock);
		if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			r = aia_aplic_addr(dev->kvm, &addr, true);
		else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			r = aia_imsic_addr(dev->kvm, &addr,
			    type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), true);
		mutex_unlock(&dev->kvm->lock);

		break;

	case KVM_DEV_RISCV_AIA_GRP_CTRL:
		switch (type) {
		case KVM_DEV_RISCV_AIA_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = aia_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			break;
		}

		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_aplic_set_attr(dev->kvm, type, nr);
		mutex_unlock(&dev->kvm->lock);

		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		if (copy_from_user(&v, uaddr, sizeof(v)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, true, &v);
		mutex_unlock(&dev->kvm->lock);

		break;
	}

	return r;
}

static int aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	u32 nr;
	u64 addr;
	int nr_vcpus, r = -ENXIO;
	void __user *uaddr = (void __user *)(long)attr->addr;
	unsigned long v, type = (unsigned long)attr->attr;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = aia_config(dev->kvm, type, &nr, false);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &nr, sizeof(nr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		mutex_lock(&dev->kvm->lock);
		if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			r = aia_aplic_addr(dev->kvm, &addr, false);
		else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			r = aia_imsic_addr(dev->kvm, &addr,
			    type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), false);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_aplic_get_attr(dev->kvm, type, &nr);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &nr, sizeof(nr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		if (copy_from_user(&v, uaddr, sizeof(v)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, false, &v);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &v, sizeof(v)))
			return -EFAULT;

		break;
	}

	return r;
}

static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int nr_vcpus;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		switch (attr->attr) {
		case KVM_DEV_RISCV_AIA_CONFIG_MODE:
		case KVM_DEV_RISCV_AIA_CONFIG_IDS:
		case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
		case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
		case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
		case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
		case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
			return 0;
		}
		break;
	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		if (attr->attr == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			return 0;
		else if (attr->attr < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			return 0;
		break;
	case KVM_DEV_RISCV_AIA_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_RISCV_AIA_CTRL_INIT:
			return 0;
		}
		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
	}

	return -ENXIO;
}

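/*
 * Illustrative user-space flow for this device (a sketch only; the
 * ioctls and structures are the generic KVM device API from
 * <linux/kvm.h>, and the APLIC address below is an arbitrary example):
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_RISCV_AIA };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);		// -> aia_create()
 *
 *	__u64 aplic_addr = 0xc000000;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_RISCV_AIA_GRP_ADDR,
 *		.attr  = KVM_DEV_RISCV_AIA_ADDR_APLIC,
 *		.addr  = (__u64)&aplic_addr,
 *	};
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);	// -> aia_set_attr()
 *
 *	attr.group = KVM_DEV_RISCV_AIA_GRP_CTRL;
 *	attr.attr  = KVM_DEV_RISCV_AIA_CTRL_INIT;
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);	// -> aia_init()
 */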
struct kvm_device_ops kvm_riscv_aia_device_ops = {
	.name = "kvm-riscv-aia",
	.create = aia_create,
	.destroy = aia_destroy,
	.set_attr = aia_set_attr,
	.get_attr = aia_get_attr,
	.has_attr = aia_has_attr,
};

int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return 1;

	/* Update the IMSIC HW state before entering guest mode */
	return kvm_riscv_vcpu_aia_imsic_update(vcpu);
}

void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	struct kvm_vcpu_aia_csr *reset_csr =
			&vcpu->arch.aia_context.guest_reset_csr;

	if (!kvm_riscv_aia_available())
		return;
	memcpy(csr, reset_csr, sizeof(*csr));

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return;

	/* Reset the IMSIC context */
	kvm_riscv_vcpu_aia_imsic_reset(vcpu);
}

int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;

	if (!kvm_riscv_aia_available())
		return 0;

	/*
	 * We don't do any memory allocations over here because these
	 * will be done after the AIA device is initialized by user space.
	 *
	 * Refer to the aia_init() implementation for more details.
	 */

	/* Initialize default values in AIA vcpu context */
	vaia->imsic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
	vaia->hart_index = vcpu->vcpu_idx;

	return 0;
}

void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return;

	/* Cleanup IMSIC context */
	kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
}

int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
				   u32 guest_index, u32 iid)
{
	unsigned long idx;
	struct kvm_vcpu *vcpu;

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Inject MSI to matching VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		if (vcpu->arch.aia_context.hart_index == hart_index)
			return kvm_riscv_vcpu_aia_imsic_inject(vcpu,
							       guest_index,
							       0, iid);
	}

	return 0;
}

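/*
 * Inject an MSI by guest physical target address: convert the target
 * to a page PPN, split off the guest ID bits, find the VCPU whose
 * IMSIC page matches the remainder, and forward the page offset and
 * interrupt ID to that VCPU's IMSIC.
 */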
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	gpa_t tppn, ippn;
	unsigned long idx;
	struct kvm_vcpu *vcpu;
	u32 g, toff, iid = msi->data;
	struct kvm_aia *aia = &kvm->arch.aia;
	gpa_t target = (((gpa_t)msi->address_hi) << 32) | msi->address_lo;

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Convert target address to target PPN */
	tppn = target >> IMSIC_MMIO_PAGE_SHIFT;

	/* Extract and clear Guest ID from target PPN */
	g = tppn & (BIT(aia->nr_guest_bits) - 1);
	tppn &= ~((gpa_t)(BIT(aia->nr_guest_bits) - 1));

	/* Inject MSI to matching VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		ippn = vcpu->arch.aia_context.imsic_addr >>
					IMSIC_MMIO_PAGE_SHIFT;
		if (ippn == tppn) {
			toff = target & (IMSIC_MMIO_PAGE_SZ - 1);
			return kvm_riscv_vcpu_aia_imsic_inject(vcpu, g,
							       toff, iid);
		}
	}

	return 0;
}

int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Inject interrupt level change in APLIC */
	return kvm_riscv_aia_aplic_inject(kvm, irq, level);
}

void kvm_riscv_aia_init_vm(struct kvm *kvm)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	if (!kvm_riscv_aia_available())
		return;

	/*
	 * We don't do any memory allocations over here because these
	 * will be done after the AIA device is initialized by user space.
	 *
	 * Refer to the aia_init() implementation for more details.
	 */

	/* Initialize default values in AIA global context */
	aia->mode = (kvm_riscv_aia_nr_hgei) ?
		KVM_DEV_RISCV_AIA_MODE_AUTO : KVM_DEV_RISCV_AIA_MODE_EMUL;
	aia->nr_ids = kvm_riscv_aia_max_ids - 1;
	aia->nr_sources = 0;
	aia->nr_group_bits = 0;
	aia->nr_group_shift = KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN;
	aia->nr_hart_bits = 0;
	aia->nr_guest_bits = 0;
	aia->aplic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
}

void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return;

	/* Cleanup APLIC context */
	kvm_riscv_aia_aplic_cleanup(kvm);
}