// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/irqchip/riscv-aplic.h>
#include <linux/kvm_host.h>
#include <linux/math.h>
#include <linux/spinlock.h>
#include <linux/swab.h>
#include <kvm/iodev.h>
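
/*
 * This file emulates a single APLIC domain operating in MSI delivery
 * mode: guest MMIO accesses to the APLIC register space are trapped and
 * handled below, and any interrupt source that becomes both pending and
 * enabled is forwarded as an MSI via kvm_riscv_aia_inject_msi_by_id().
 */
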
struct aplic_irq {
	raw_spinlock_t lock;
	u32 sourcecfg;
	u32 state;
#define APLIC_IRQ_STATE_PENDING	BIT(0)
#define APLIC_IRQ_STATE_ENABLED	BIT(1)
#define APLIC_IRQ_STATE_ENPEND	(APLIC_IRQ_STATE_PENDING | \
				 APLIC_IRQ_STATE_ENABLED)
#define APLIC_IRQ_STATE_INPUT	BIT(8)
	u32 target;
};

struct aplic {
	struct kvm_io_device iodev;

	u32 domaincfg;
	u32 genmsi;

	u32 nr_irqs;
	u32 nr_words;
	struct aplic_irq *irqs;
};

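/*
 * Interrupt source 0 does not exist, so nr_irqs is nr_sources + 1 and
 * every per-IRQ helper below rejects irq == 0 as well as irq >= nr_irqs.
 */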
static u32 aplic_read_sourcecfg(struct aplic *aplic, u32 irq)
{
	u32 ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return 0;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = irqd->sourcecfg;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

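/*
 * Setting the D (delegate) bit clears sourcecfg entirely because this
 * emulated domain has no child domain to delegate to; otherwise only
 * the source-mode (SM) field is writable.
 */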
static void aplic_write_sourcecfg(struct aplic *aplic, u32 irq, u32 val)
{
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	if (val & APLIC_SOURCECFG_D)
		val = 0;
	else
		val &= APLIC_SOURCECFG_SM_MASK;

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->sourcecfg = val;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

static u32 aplic_read_target(struct aplic *aplic, u32 irq)
{
	u32 ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return 0;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = irqd->target;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

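/*
 * Writes to a target register keep only the fields meaningful in MSI
 * delivery mode: hart index, guest index, and the external interrupt
 * identity (EIID).
 */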
static void aplic_write_target(struct aplic *aplic, u32 irq, u32 val)
{
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	val &= APLIC_TARGET_EIID_MASK |
	       (APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
	       (APLIC_TARGET_GUEST_IDX_MASK << APLIC_TARGET_GUEST_IDX_SHIFT);

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->target = val;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

static bool aplic_read_pending(struct aplic *aplic, u32 irq)
{
	bool ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

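/*
 * Setting the pending bit of a level-triggered source is only honoured
 * while its input is asserted; this mirrors the setip/setipnum behaviour
 * the RISC-V AIA specification describes for level-sensitive sources in
 * MSI delivery mode. Clearing the pending bit is always allowed.
 */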
static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
{
	unsigned long flags, sm;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);

	sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip_write_pending;

	if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
	    sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
		if (!pending)
			goto noskip_write_pending;
		if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
			goto skip_write_pending;
		if (!(irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)
			goto skip_write_pending;
	}

noskip_write_pending:
	if (pending)
		irqd->state |= APLIC_IRQ_STATE_PENDING;
	else
		irqd->state &= ~APLIC_IRQ_STATE_PENDING;

skip_write_pending:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

static bool aplic_read_enabled(struct aplic *aplic, u32 irq)
{
	bool ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
{
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	if (enabled)
		irqd->state |= APLIC_IRQ_STATE_ENABLED;
	else
		irqd->state &= ~APLIC_IRQ_STATE_ENABLED;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

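/*
 * Compute the rectified input value: the raw input XORed with the
 * inversion implied by the source mode (active-low and falling-edge
 * sources read back inverted). This is what the guest observes when it
 * reads the in/clrip register array.
 */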
static bool aplic_read_input(struct aplic *aplic, u32 irq)
{
	u32 sourcecfg, sm, raw_input, irq_inverted;
	struct aplic_irq *irqd;
	unsigned long flags;
	bool ret = false;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);

	sourcecfg = irqd->sourcecfg;
	if (sourcecfg & APLIC_SOURCECFG_D)
		goto skip;

	sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip;

	raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0;
	irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
			sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
	ret = !!(raw_input ^ irq_inverted);

skip:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

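/*
 * An APLIC target word in MSI mode encodes the destination as
 * (hart index, guest index, EIID); unpack it and hand the resulting
 * MSI to the IMSIC emulation.
 */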
static void aplic_inject_msi(struct kvm *kvm, u32 irq, u32 target)
{
	u32 hart_idx, guest_idx, eiid;

	hart_idx = target >> APLIC_TARGET_HART_IDX_SHIFT;
	hart_idx &= APLIC_TARGET_HART_IDX_MASK;
	guest_idx = target >> APLIC_TARGET_GUEST_IDX_SHIFT;
	guest_idx &= APLIC_TARGET_GUEST_IDX_MASK;
	eiid = target & APLIC_TARGET_EIID_MASK;
	kvm_riscv_aia_inject_msi_by_id(kvm, hart_idx, guest_idx, eiid);
}

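/*
 * Re-scan a range of sources after register state has changed and
 * deliver an MSI for every source that is now both enabled and pending,
 * clearing its pending bit in the process. Nothing is delivered while
 * domain interrupts are globally disabled (domaincfg.IE clear).
 */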
static void aplic_update_irq_range(struct kvm *kvm, u32 first, u32 last)
{
	bool inject;
	u32 irq, target;
	unsigned long flags;
	struct aplic_irq *irqd;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!(aplic->domaincfg & APLIC_DOMAINCFG_IE))
		return;

	for (irq = first; irq <= last; irq++) {
		if (!irq || aplic->nr_irqs <= irq)
			continue;
		irqd = &aplic->irqs[irq];

		raw_spin_lock_irqsave(&irqd->lock, flags);

		inject = false;
		target = irqd->target;
		if ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
		    APLIC_IRQ_STATE_ENPEND) {
			irqd->state &= ~APLIC_IRQ_STATE_PENDING;
			inject = true;
		}

		raw_spin_unlock_irqrestore(&irqd->lock, flags);

		if (inject)
			aplic_inject_msi(kvm, irq, target);
	}
}

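/*
 * Entry point used when the host injects a wired interrupt into the
 * guest: latch the new input level, update the pending bit according to
 * the configured source mode, and fire an MSI immediately if the source
 * is enabled and domain interrupts are enabled.
 */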
int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level)
{
	u32 target;
	bool inject = false, ie;
	unsigned long flags;
	struct aplic_irq *irqd;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!aplic || !source || (aplic->nr_irqs <= source))
		return -ENODEV;
	irqd = &aplic->irqs[source];
	ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false;

	raw_spin_lock_irqsave(&irqd->lock, flags);

	if (irqd->sourcecfg & APLIC_SOURCECFG_D)
		goto skip_unlock;

	switch (irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK) {
	case APLIC_SOURCECFG_SM_EDGE_RISE:
		if (level && !(irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_EDGE_FALL:
		if (!level && (irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_LEVEL_HIGH:
		if (level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_LEVEL_LOW:
		if (!level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	}

	if (level)
		irqd->state |= APLIC_IRQ_STATE_INPUT;
	else
		irqd->state &= ~APLIC_IRQ_STATE_INPUT;

	target = irqd->target;
	if (ie && ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
		   APLIC_IRQ_STATE_ENPEND)) {
		irqd->state &= ~APLIC_IRQ_STATE_PENDING;
		inject = true;
	}

skip_unlock:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	if (inject)
		aplic_inject_msi(kvm, source, target);

	return 0;
}

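/*
 * The setip/in/setie register files are arrays of 32-bit words with one
 * bit per interrupt source; these helpers fan a word access out to the
 * per-IRQ accessors above.
 */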
static u32 aplic_read_input_word(struct aplic *aplic, u32 word)
{
	u32 i, ret = 0;

	for (i = 0; i < 32; i++)
		ret |= aplic_read_input(aplic, word * 32 + i) ? BIT(i) : 0;

	return ret;
}

static u32 aplic_read_pending_word(struct aplic *aplic, u32 word)
{
	u32 i, ret = 0;

	for (i = 0; i < 32; i++)
		ret |= aplic_read_pending(aplic, word * 32 + i) ? BIT(i) : 0;

	return ret;
}

static void aplic_write_pending_word(struct aplic *aplic, u32 word,
				     u32 val, bool pending)
{
	u32 i;

	for (i = 0; i < 32; i++) {
		if (val & BIT(i))
			aplic_write_pending(aplic, word * 32 + i, pending);
	}
}

static u32 aplic_read_enabled_word(struct aplic *aplic, u32 word)
{
	u32 i, ret = 0;

	for (i = 0; i < 32; i++)
		ret |= aplic_read_enabled(aplic, word * 32 + i) ? BIT(i) : 0;

	return ret;
}

static void aplic_write_enabled_word(struct aplic *aplic, u32 word,
				     u32 val, bool enabled)
{
	u32 i;

	for (i = 0; i < 32; i++) {
		if (val & BIT(i))
			aplic_write_enabled(aplic, word * 32 + i, enabled);
	}
}

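/*
 * Register-map dispatch for 32-bit reads. The sourcecfg and target
 * arrays start at source 1, hence the "+ 1" when converting an offset
 * into a source number, while the pending/enabled word arrays are
 * indexed from word 0.
 */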
static int aplic_mmio_read_offset(struct kvm *kvm, gpa_t off, u32 *val32)
{
	u32 i;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if ((off & 0x3) != 0)
		return -EOPNOTSUPP;

	if (off == APLIC_DOMAINCFG) {
		*val32 = APLIC_DOMAINCFG_RDONLY |
			 aplic->domaincfg | APLIC_DOMAINCFG_DM;
	} else if ((off >= APLIC_SOURCECFG_BASE) &&
		 (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
		*val32 = aplic_read_sourcecfg(aplic, i);
	} else if ((off >= APLIC_SETIP_BASE) &&
		   (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIP_BASE) >> 2;
		*val32 = aplic_read_pending_word(aplic, i);
	} else if (off == APLIC_SETIPNUM) {
		*val32 = 0;
	} else if ((off >= APLIC_CLRIP_BASE) &&
		   (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIP_BASE) >> 2;
		*val32 = aplic_read_input_word(aplic, i);
	} else if (off == APLIC_CLRIPNUM) {
		*val32 = 0;
	} else if ((off >= APLIC_SETIE_BASE) &&
		   (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIE_BASE) >> 2;
		*val32 = aplic_read_enabled_word(aplic, i);
	} else if (off == APLIC_SETIENUM) {
		*val32 = 0;
	} else if ((off >= APLIC_CLRIE_BASE) &&
		   (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
		*val32 = 0;
	} else if (off == APLIC_CLRIENUM) {
		*val32 = 0;
	} else if (off == APLIC_SETIPNUM_LE) {
		*val32 = 0;
	} else if (off == APLIC_SETIPNUM_BE) {
		*val32 = 0;
	} else if (off == APLIC_GENMSI) {
		*val32 = aplic->genmsi;
	} else if ((off >= APLIC_TARGET_BASE) &&
		   (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
		*val32 = aplic_read_target(aplic, i);
	} else
		return -ENODEV;

	return 0;
}

static int aplic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			   gpa_t addr, int len, void *val)
{
	if (len != 4)
		return -EOPNOTSUPP;

	return aplic_mmio_read_offset(vcpu->kvm,
				      addr - vcpu->kvm->arch.aia.aplic_addr,
				      val);
}

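/*
 * Register-map dispatch for 32-bit writes. Any successful write may
 * change which sources are deliverable, so the whole source range is
 * re-scanned afterwards via aplic_update_irq_range().
 */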
static int aplic_mmio_write_offset(struct kvm *kvm, gpa_t off, u32 val32)
{
	u32 i;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if ((off & 0x3) != 0)
		return -EOPNOTSUPP;

	if (off == APLIC_DOMAINCFG) {
		/* Only IE bit writeable */
		aplic->domaincfg = val32 & APLIC_DOMAINCFG_IE;
	} else if ((off >= APLIC_SOURCECFG_BASE) &&
		 (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
		aplic_write_sourcecfg(aplic, i, val32);
	} else if ((off >= APLIC_SETIP_BASE) &&
		   (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIP_BASE) >> 2;
		aplic_write_pending_word(aplic, i, val32, true);
	} else if (off == APLIC_SETIPNUM) {
		aplic_write_pending(aplic, val32, true);
	} else if ((off >= APLIC_CLRIP_BASE) &&
		   (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIP_BASE) >> 2;
		aplic_write_pending_word(aplic, i, val32, false);
	} else if (off == APLIC_CLRIPNUM) {
		aplic_write_pending(aplic, val32, false);
	} else if ((off >= APLIC_SETIE_BASE) &&
		   (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIE_BASE) >> 2;
		aplic_write_enabled_word(aplic, i, val32, true);
	} else if (off == APLIC_SETIENUM) {
		aplic_write_enabled(aplic, val32, true);
	} else if ((off >= APLIC_CLRIE_BASE) &&
		   (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIE_BASE) >> 2;
		aplic_write_enabled_word(aplic, i, val32, false);
	} else if (off == APLIC_CLRIENUM) {
		aplic_write_enabled(aplic, val32, false);
	} else if (off == APLIC_SETIPNUM_LE) {
		aplic_write_pending(aplic, val32, true);
	} else if (off == APLIC_SETIPNUM_BE) {
		aplic_write_pending(aplic, __swab32(val32), true);
	} else if (off == APLIC_GENMSI) {
		aplic->genmsi = val32 & ~(APLIC_TARGET_GUEST_IDX_MASK <<
					  APLIC_TARGET_GUEST_IDX_SHIFT);
		kvm_riscv_aia_inject_msi_by_id(kvm,
				val32 >> APLIC_TARGET_HART_IDX_SHIFT, 0,
				val32 & APLIC_TARGET_EIID_MASK);
	} else if ((off >= APLIC_TARGET_BASE) &&
		   (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
		aplic_write_target(aplic, i, val32);
	} else
		return -ENODEV;

	aplic_update_irq_range(kvm, 1, aplic->nr_irqs - 1);

	return 0;
}

static int aplic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			    gpa_t addr, int len, const void *val)
{
	if (len != 4)
		return -EOPNOTSUPP;

	return aplic_mmio_write_offset(vcpu->kvm,
				       addr - vcpu->kvm->arch.aia.aplic_addr,
				       *((const u32 *)val));
}

static struct kvm_io_device_ops aplic_iodoev_ops = {
	.read = aplic_mmio_read,
	.write = aplic_mmio_write,
};

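/*
 * The KVM device attribute interface reuses the MMIO accessors: the
 * attribute type is interpreted directly as a register offset, which
 * lets userspace save and restore APLIC register state.
 */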
int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v)
{
	int rc;

	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	rc = aplic_mmio_write_offset(kvm, type, v);
	if (rc)
		return rc;

	return 0;
}

int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v)
{
	int rc;

	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	rc = aplic_mmio_read_offset(kvm, type, v);
	if (rc)
		return rc;

	return 0;
}

int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type)
{
	int rc;
	u32 val;

	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	rc = aplic_mmio_read_offset(kvm, type, &val);
	if (rc)
		return rc;

	return 0;
}

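/*
 * One-time APLIC setup at AIA initialization: allocate the global and
 * per-IRQ state, register the MMIO device covering the APLIC address
 * range, and install default IRQ routing entries.
 */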
int kvm_riscv_aia_aplic_init(struct kvm *kvm)
{
	int i, ret = 0;
	struct aplic *aplic;

	/* Do nothing if we have zero sources */
	if (!kvm->arch.aia.nr_sources)
		return 0;

	/* Allocate APLIC global state */
	aplic = kzalloc(sizeof(*aplic), GFP_KERNEL);
	if (!aplic)
		return -ENOMEM;
	kvm->arch.aia.aplic_state = aplic;

	/* Setup APLIC IRQs */
	aplic->nr_irqs = kvm->arch.aia.nr_sources + 1;
	aplic->nr_words = DIV_ROUND_UP(aplic->nr_irqs, 32);
	aplic->irqs = kcalloc(aplic->nr_irqs,
			      sizeof(*aplic->irqs), GFP_KERNEL);
	if (!aplic->irqs) {
		ret = -ENOMEM;
		goto fail_free_aplic;
	}
	for (i = 0; i < aplic->nr_irqs; i++)
		raw_spin_lock_init(&aplic->irqs[i].lock);

	/* Setup IO device */
	kvm_iodevice_init(&aplic->iodev, &aplic_iodoev_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
				      kvm->arch.aia.aplic_addr,
				      KVM_DEV_RISCV_APLIC_SIZE,
				      &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);
	if (ret)
		goto fail_free_aplic_irqs;

	/* Setup default IRQ routing */
	ret = kvm_riscv_setup_default_irq_routing(kvm, aplic->nr_irqs);
	if (ret)
		goto fail_unreg_iodev;

	return 0;

fail_unreg_iodev:
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);
fail_free_aplic_irqs:
	kfree(aplic->irqs);
fail_free_aplic:
	kvm->arch.aia.aplic_state = NULL;
	kfree(aplic);
	return ret;
}

void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm)
{
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!aplic)
		return;

	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);

	kfree(aplic->irqs);

	kvm->arch.aia.aplic_state = NULL;
	kfree(aplic);
}