// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/cpufeature.h>
#include <asm/kvm_nacl.h>
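/*
 * Per-CPU bookkeeping for hypervisor guest external interrupt (HGEI)
 * lines: free_bitmap tracks which lines of the local IMSIC are still
 * unused and owners[] records the VCPU currently bound to each line.
 * Both fields are protected by the raw spinlock in the structure.
 */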
struct aia_hgei_control {
	raw_spinlock_t lock;
	unsigned long free_bitmap;
	struct kvm_vcpu *owners[BITS_PER_LONG];
};
static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
static int hgei_parent_irq;
unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
static int aia_find_hgei(struct kvm_vcpu *owner)
{
	int i, hgei;
	unsigned long flags;
	struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	hgei = -1;
	for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
		if (hgctrl->owners[i] == owner) {
			hgei = i;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);
	return hgei;
}
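/*
 * Compose an HVICTL value: IID is fixed to the supervisor external
 * interrupt number and the low bit of IPRIO mirrors whether a VS-level
 * external interrupt is currently pending.
 */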
static inline unsigned long aia_hvictl_value(bool ext_irq_pending)
{
	unsigned long hvictl;

	/*
	 * HVICTL.IID == 9 and HVICTL.IPRIO == 0 represents
	 * no interrupt in HVICTL.
	 */
	hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
	hvictl |= ext_irq_pending;
	return hvictl;
}
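/*
 * On RV32, the upper halves of the AIA state (hviph, vsieh) are shadowed
 * in the VCPU's AIA context; the two helpers below keep that shadow in
 * sync with the pending/enabled interrupt bits.
 */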
#ifdef CONFIG_32BIT
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	unsigned long mask, val;

	if (!kvm_riscv_aia_available())
		return;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[1])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[1], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[1]) & mask;

		csr->hviph &= ~mask;
		csr->hviph |= val;
	}
}
void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (kvm_riscv_aia_available())
		csr->vsieh = ncsr_read(CSR_VSIEH);
}
#endif
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	int hgei;
	unsigned long seip;

	if (!kvm_riscv_aia_available())
		return false;

#ifdef CONFIG_32BIT
	if (READ_ONCE(vcpu->arch.irqs_pending[1]) &
	    (vcpu->arch.aia_context.guest_csr.vsieh & upper_32_bits(mask)))
		return true;
#endif

	seip = vcpu->arch.guest_csr.vsie;
	seip &= (unsigned long)mask;
	seip &= BIT(IRQ_S_EXT);

	if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
		return false;

	hgei = aia_find_hgei(vcpu);
	if (hgei > 0)
		return !!(ncsr_read(CSR_HGEIP) & BIT(hgei));

	return false;
}
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

#ifdef CONFIG_32BIT
	ncsr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
#endif
	ncsr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
}
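/*
 * The load/put pair below context-switches the guest AIA CSR state.
 * When the nested acceleration (NACL) shared memory interface is
 * available, CSR accesses go through the nacl_csr_*() helpers instead
 * of direct csr_read()/csr_write().
 */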
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	void *nsh;

	if (!kvm_riscv_aia_available())
		return;

	if (kvm_riscv_nacl_sync_csr_available()) {
		nsh = nacl_shmem();
		nacl_csr_write(nsh, CSR_VSISELECT, csr->vsiselect);
		nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1);
		nacl_csr_write(nsh, CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
		nacl_csr_write(nsh, CSR_VSIEH, csr->vsieh);
		nacl_csr_write(nsh, CSR_HVIPH, csr->hviph);
		nacl_csr_write(nsh, CSR_HVIPRIO1H, csr->hviprio1h);
		nacl_csr_write(nsh, CSR_HVIPRIO2H, csr->hviprio2h);
#endif
	} else {
		csr_write(CSR_VSISELECT, csr->vsiselect);
		csr_write(CSR_HVIPRIO1, csr->hviprio1);
		csr_write(CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
		csr_write(CSR_VSIEH, csr->vsieh);
		csr_write(CSR_HVIPH, csr->hviph);
		csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
		csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
	}
}
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	void *nsh;

	if (!kvm_riscv_aia_available())
		return;

	if (kvm_riscv_nacl_available()) {
		nsh = nacl_shmem();
		csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
		csr->hviprio1 = nacl_csr_read(nsh, CSR_HVIPRIO1);
		csr->hviprio2 = nacl_csr_read(nsh, CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
		csr->vsieh = nacl_csr_read(nsh, CSR_VSIEH);
		csr->hviph = nacl_csr_read(nsh, CSR_HVIPH);
		csr->hviprio1h = nacl_csr_read(nsh, CSR_HVIPRIO1H);
		csr->hviprio2h = nacl_csr_read(nsh, CSR_HVIPRIO2H);
#endif
	} else {
		csr->vsiselect = csr_read(CSR_VSISELECT);
		csr->hviprio1 = csr_read(CSR_HVIPRIO1);
		csr->hviprio2 = csr_read(CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
		csr->vsieh = csr_read(CSR_VSIEH);
		csr->hviph = csr_read(CSR_HVIPH);
		csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
		csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
#endif
	}
}
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long *out_val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
		return -ENOENT;

	*out_val = 0;
	if (kvm_riscv_aia_available())
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}
int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (kvm_riscv_aia_available()) {
		((unsigned long *)csr)[reg_num] = val;

#ifdef CONFIG_32BIT
		if (reg_num == KVM_REG_RISCV_CSR_AIA_REG(siph))
			WRITE_ONCE(vcpu->arch.irqs_pending_mask[1], 0);
#endif
	}

	return 0;
}
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
				 unsigned int csr_num,
				 unsigned long *val,
				 unsigned long new_val,
				 unsigned long wr_mask)
{
	/* If AIA not available then redirect trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* If AIA not initialized then forward to user space */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return KVM_INSN_EXIT_TO_USER_SPACE;

	return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, KVM_RISCV_AIA_IMSIC_TOPEI,
					    val, new_val, wr_mask);
}
/*
 * External IRQ priority always read-only zero. This means default
 * priority order is always preferred for external IRQs unless
 * HVICTL.IID == 9 and HVICTL.IPRIO != 0
 */
static int aia_irq2bitpos[] = {
0,   8,  -1,  -1, 16, 24,  -1,  -1, /* 0 - 7 */
32, -1,  -1,  -1, -1, 40,  48,  56, /* 8 - 15 */
64, 72,  80,  88, 96, 104, 112, 120, /* 16 - 23 */
-1, -1,  -1,  -1, -1, -1,  -1,  -1, /* 24 - 31 */
-1, -1,  -1,  -1, -1, -1,  -1,  -1, /* 32 - 39 */
-1, -1,  -1,  -1, -1, -1,  -1,  -1, /* 40 - 47 */
-1, -1,  -1,  -1, -1, -1,  -1,  -1, /* 48 - 55 */
-1, -1,  -1,  -1, -1, -1,  -1,  -1, /* 56 - 63 */
};
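/*
 * The helpers below read and update the 8-bit priority field of an IRQ
 * using the aia_irq2bitpos[] mapping above, which gives the bit position
 * of that field across the HVIPRIO1/HVIPRIO2 (and, on RV32, HVIPRIO1H/
 * HVIPRIO2H) CSRs; a bit position of -1 means the IRQ has no programmable
 * priority.
 */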
static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return 0;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = ncsr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = ncsr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = ncsr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = ncsr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = ncsr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return 0;
	}

	return (hviprio >> (bitpos % BITS_PER_LONG)) & TOPI_IPRIO_MASK;
}
static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = ncsr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = ncsr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = ncsr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = ncsr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = ncsr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return;
	}

	hviprio &= ~(TOPI_IPRIO_MASK << (bitpos % BITS_PER_LONG));
	hviprio |= (unsigned long)prio << (bitpos % BITS_PER_LONG);

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		ncsr_write(CSR_HVIPRIO1, hviprio);
		break;
	case 1:
#ifndef CONFIG_32BIT
		ncsr_write(CSR_HVIPRIO2, hviprio);
		break;
#else
		ncsr_write(CSR_HVIPRIO1H, hviprio);
		break;
	case 2:
		ncsr_write(CSR_HVIPRIO2, hviprio);
		break;
	case 3:
		ncsr_write(CSR_HVIPRIO2H, hviprio);
		break;
#endif
	default:
		return;
	}
}
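/*
 * Emulate a read-modify-write of one ISELECT_IPRIO* register: gather the
 * per-IRQ 8-bit priorities into old_val, apply wr_mask/new_val, and scatter
 * the result back, 4 IRQs per 32 bits of register width.
 */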
static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
			 unsigned long *val, unsigned long new_val,
			 unsigned long wr_mask)
{
	int i, first_irq, nirqs;
	unsigned long old_val;
	u8 prio;

#ifndef CONFIG_32BIT
	if (isel & 0x1)
		return KVM_INSN_ILLEGAL_TRAP;
#endif

	nirqs = 4 * (BITS_PER_LONG / 32);
	first_irq = (isel - ISELECT_IPRIO0) * 4;

	old_val = 0;
	for (i = 0; i < nirqs; i++) {
		prio = aia_get_iprio8(vcpu, first_irq + i);
		old_val |= (unsigned long)prio << (TOPI_IPRIO_BITS * i);
	}

	if (val)
		*val = old_val;

	if (wr_mask) {
		new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
		for (i = 0; i < nirqs; i++) {
			prio = (new_val >> (TOPI_IPRIO_BITS * i)) &
				TOPI_IPRIO_MASK;
			aia_set_iprio8(vcpu, first_irq + i, prio);
		}
	}

	return KVM_INSN_CONTINUE_NEXT_SEPC;
}
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask)
{
	unsigned int isel;

	/* If AIA not available then redirect trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* First try to emulate in kernel space */
	isel = ncsr_read(CSR_VSISELECT) & ISELECT_MASK;
	if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
		return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
	else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
		 kvm_riscv_aia_initialized(vcpu->kvm))
		return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, isel, val, new_val,
						    wr_mask);

	/* We can't handle it here so redirect to user space */
	return KVM_INSN_EXIT_TO_USER_SPACE;
}
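/*
 * Allocate a guest external interrupt line on the given host CPU for the
 * owner VCPU. On success the HGEI number is returned and, when an IMSIC
 * is present, the virtual/physical address of the corresponding guest
 * interrupt file MMIO page is reported via hgei_va/hgei_pa.
 */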
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
			     void __iomem **hgei_va, phys_addr_t *hgei_pa)
{
	int ret = -ENOENT;
	unsigned long flags;
	const struct imsic_global_config *gc;
	const struct imsic_local_config *lc;
	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

	if (!kvm_riscv_aia_available() || !hgctrl)
		return -ENODEV;

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	if (hgctrl->free_bitmap) {
		ret = __ffs(hgctrl->free_bitmap);
		hgctrl->free_bitmap &= ~BIT(ret);
		hgctrl->owners[ret] = owner;
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	gc = imsic_get_global_config();
	lc = (gc) ? per_cpu_ptr(gc->local, cpu) : NULL;
	if (lc && ret > 0) {
		if (hgei_va)
			*hgei_va = lc->msi_va + (ret * IMSIC_MMIO_PAGE_SZ);
		if (hgei_pa)
			*hgei_pa = lc->msi_pa + (ret * IMSIC_MMIO_PAGE_SZ);
	}

	return ret;
}
void kvm_riscv_aia_free_hgei(int cpu, int hgei)
{
	unsigned long flags;
	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

	if (!kvm_riscv_aia_available() || !hgctrl)
		return;

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	if (hgei > 0 && hgei <= kvm_riscv_aia_nr_hgei) {
		if (!(hgctrl->free_bitmap & BIT(hgei))) {
			hgctrl->free_bitmap |= BIT(hgei);
			hgctrl->owners[hgei] = NULL;
		}
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
{
	int hgei;

	if (!kvm_riscv_aia_available())
		return;

	hgei = aia_find_hgei(owner);
	if (hgei > 0) {
		if (enable)
			csr_set(CSR_HGEIE, BIT(hgei));
		else
			csr_clear(CSR_HGEIE, BIT(hgei));
	}
}
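/*
 * Per-CPU SGEI handler: mask the guest external interrupt lines that
 * fired (HGEIP & HGEIE) and kick the VCPUs that own them.
 */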
static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{
	int i;
	unsigned long hgei_mask, flags;
	struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);

	hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
	csr_clear(CSR_HGEIE, hgei_mask);

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
		if (hgctrl->owners[i])
			kvm_vcpu_kick(hgctrl->owners[i]);
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);

	return IRQ_HANDLED;
}
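/*
 * Set up HGEI line management: bits 1..kvm_riscv_aia_nr_hgei of each
 * per-CPU free_bitmap are usable (HGEI 0 is not a valid guest interrupt
 * file), then the per-CPU SGEI interrupt is mapped from the INTC domain
 * and requested.
 */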
static int aia_hgei_init(void)
{
	int cpu, rc;
	struct irq_domain *domain;
	struct aia_hgei_control *hgctrl;

	/* Initialize per-CPU guest external interrupt line management */
	for_each_possible_cpu(cpu) {
		hgctrl = per_cpu_ptr(&aia_hgei, cpu);
		raw_spin_lock_init(&hgctrl->lock);
		if (kvm_riscv_aia_nr_hgei) {
			hgctrl->free_bitmap =
				BIT(kvm_riscv_aia_nr_hgei + 1) - 1;
			hgctrl->free_bitmap &= ~BIT(0);
		} else
			hgctrl->free_bitmap = 0;
	}

	/* Skip SGEI interrupt setup for zero guest external interrupts */
	if (!kvm_riscv_aia_nr_hgei)
		goto skip_sgei_interrupt;

	/* Find INTC irq domain */
	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!domain) {
		kvm_err("unable to find INTC domain\n");
		return -ENOENT;
	}

	/* Map per-CPU SGEI interrupt from INTC domain */
	hgei_parent_irq = irq_create_mapping(domain, IRQ_S_GEXT);
	if (!hgei_parent_irq) {
		kvm_err("unable to map SGEI IRQ\n");
		return -ENOMEM;
	}

	/* Request per-CPU SGEI interrupt */
	rc = request_percpu_irq(hgei_parent_irq, hgei_interrupt,
				"riscv-kvm", &aia_hgei);
	if (rc) {
		kvm_err("failed to request SGEI IRQ\n");
		return rc;
	}

skip_sgei_interrupt:
	return 0;
}
static void aia_hgei_exit(void)
{
	/* Do nothing for zero guest external interrupts */
	if (!kvm_riscv_aia_nr_hgei)
		return;

	/* Free per-CPU SGEI interrupt */
	free_percpu_irq(hgei_parent_irq, &aia_hgei);
}
void kvm_riscv_aia_enable(void)
{
	if (!kvm_riscv_aia_available())
		return;

	csr_write(CSR_HVICTL, aia_hvictl_value(false));
	csr_write(CSR_HVIPRIO1, 0x0);
	csr_write(CSR_HVIPRIO2, 0x0);
#ifdef CONFIG_32BIT
	csr_write(CSR_HVIPH, 0x0);
	csr_write(CSR_HIDELEGH, 0x0);
	csr_write(CSR_HVIPRIO1H, 0x0);
	csr_write(CSR_HVIPRIO2H, 0x0);
#endif

	/* Enable per-CPU SGEI interrupt */
	enable_percpu_irq(hgei_parent_irq,
			  irq_get_trigger_type(hgei_parent_irq));
	csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
	/* Enable IRQ filtering for overflow interrupt only if sscofpmf is present */
	if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
		csr_write(CSR_HVIEN, BIT(IRQ_PMU_OVF));
}
void kvm_riscv_aia_disable(void)
{
	int i;
	unsigned long flags;
	struct kvm_vcpu *vcpu;
	struct aia_hgei_control *hgctrl;

	if (!kvm_riscv_aia_available())
		return;
	hgctrl = get_cpu_ptr(&aia_hgei);

	if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
		csr_clear(CSR_HVIEN, BIT(IRQ_PMU_OVF));
	/* Disable per-CPU SGEI interrupt */
	csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
	disable_percpu_irq(hgei_parent_irq);

	csr_write(CSR_HVICTL, aia_hvictl_value(false));

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
		vcpu = hgctrl->owners[i];
		if (!vcpu)
			continue;

		/*
		 * We release hgctrl->lock before notifying IMSIC
		 * so that we don't have lock ordering issues.
		 */
		raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

		/* Notify IMSIC */
		kvm_riscv_vcpu_aia_imsic_release(vcpu);

		/*
		 * Wakeup VCPU if it was blocked so that it can
		 * run on other HARTs
		 */
		if (csr_read(CSR_HGEIE) & BIT(i)) {
			csr_clear(CSR_HGEIE, BIT(i));
			kvm_vcpu_kick(vcpu);
		}

		raw_spin_lock_irqsave(&hgctrl->lock, flags);
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);
}
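/*
 * Probe the number of usable HGEI lines by writing all-ones to HGEIE and
 * counting the bits that stick, then clamp it by the number of per-HART
 * IMSIC guest interrupt files before registering the AIA device type.
 */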
int kvm_riscv_aia_init(void)
{
	int rc;
	const struct imsic_global_config *gc;

	if (!riscv_isa_extension_available(NULL, SxAIA))
		return -ENODEV;
	gc = imsic_get_global_config();

	/* Figure-out number of bits in HGEIE */
	csr_write(CSR_HGEIE, -1UL);
	kvm_riscv_aia_nr_hgei = fls_long(csr_read(CSR_HGEIE));
	csr_write(CSR_HGEIE, 0);
	if (kvm_riscv_aia_nr_hgei)
		kvm_riscv_aia_nr_hgei--;

	/*
	 * Number of usable HGEI lines should be minimum of per-HART
	 * IMSIC guest files and number of bits in HGEIE
	 */
	if (gc)
		kvm_riscv_aia_nr_hgei = min((ulong)kvm_riscv_aia_nr_hgei,
					    BIT(gc->guest_index_bits) - 1);
	else
		kvm_riscv_aia_nr_hgei = 0;

	/* Find number of guest MSI IDs */
	kvm_riscv_aia_max_ids = IMSIC_MAX_ID;
	if (gc && kvm_riscv_aia_nr_hgei)
		kvm_riscv_aia_max_ids = gc->nr_guest_ids + 1;

	/* Initialize guest external interrupt line management */
	rc = aia_hgei_init();
	if (rc)
		return rc;

	/* Register device operations */
	rc = kvm_register_device_ops(&kvm_riscv_aia_device_ops,
				     KVM_DEV_TYPE_RISCV_AIA);
	if (rc) {
		aia_hgei_exit();
		return rc;
	}

	/* Enable KVM AIA support */
	static_branch_enable(&kvm_riscv_aia_available);

	return 0;
}
void kvm_riscv_aia_exit(void)
{
	if (!kvm_riscv_aia_available())
		return;

	/* Unregister device operations */
	kvm_unregister_device_ops(KVM_DEV_TYPE_RISCV_AIA);

	/* Cleanup the HGEI state */
	aia_hgei_exit();
}