// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "riscv-imsic: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <asm/hwcap.h>

#include "irq-riscv-imsic-state.h"

#define IMSIC_DISABLE_EIDELIVERY	0
#define IMSIC_ENABLE_EIDELIVERY		1
#define IMSIC_DISABLE_EITHRESHOLD	1
#define IMSIC_ENABLE_EITHRESHOLD	0
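
/*
 * Encoding note (per the RISC-V AIA specification): writing 1 to
 * EIDELIVERY enables MSI delivery to the hart and 0 disables it.
 * EITHRESHOLD masks all interrupt identities greater than or equal to
 * the programmed value, so a threshold of 1 masks every valid identity
 * (ID 0 is never implemented) and a threshold of 0 disables
 * thresholding altogether.
 */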

static inline void imsic_csr_write(unsigned long reg, unsigned long val)
{
	csr_write(CSR_ISELECT, reg);
	csr_write(CSR_IREG, val);
}

static inline unsigned long imsic_csr_read(unsigned long reg)
{
	csr_write(CSR_ISELECT, reg);
	return csr_read(CSR_IREG);
}

static inline unsigned long imsic_csr_read_clear(unsigned long reg, unsigned long val)
{
	csr_write(CSR_ISELECT, reg);
	return csr_read_clear(CSR_IREG, val);
}

static inline void imsic_csr_set(unsigned long reg, unsigned long val)
{
	csr_write(CSR_ISELECT, reg);
	csr_set(CSR_IREG, val);
}

static inline void imsic_csr_clear(unsigned long reg, unsigned long val)
{
	csr_write(CSR_ISELECT, reg);
	csr_clear(CSR_IREG, val);
}
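
/*
 * The helpers above implement the indirect CSR access scheme: the
 * target register number is written to the ISELECT CSR and the data
 * is then transferred through the IREG CSR. The two accesses must not
 * be separated by preemption, so callers run with local IRQs disabled.
 */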

struct imsic_priv *imsic;

const struct imsic_global_config *imsic_get_global_config(void)
{
	return imsic ? &imsic->global : NULL;
}
EXPORT_SYMBOL_GPL(imsic_get_global_config);
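
/*
 * The eip (pending) and eie (enable) files are arrays of 32-bit
 * indirectly accessed registers, of which only the even-numbered
 * entries are accessible on RV64. The math below therefore maps an
 * ID to the XLEN-wide word holding it; e.g. on RV64, ID 100 gives
 * isel = (100 / 64) * (64 / 32) = 2 (i.e. eip2/eie2) and bit
 * 100 & 63 = 36 within that word.
 */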

static bool __imsic_eix_read_clear(unsigned long id, bool pend)
{
	unsigned long isel, imask;

	isel = id / BITS_PER_LONG;
	isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
	isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;
	imask = BIT(id & (__riscv_xlen - 1));

	return !!(imsic_csr_read_clear(isel, imask) & imask);
}

static inline bool __imsic_id_read_clear_enabled(unsigned long id)
{
	return __imsic_eix_read_clear(id, false);
}

static inline bool __imsic_id_read_clear_pending(unsigned long id)
{
	return __imsic_eix_read_clear(id, true);
}

void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val)
{
	unsigned long id = base_id, last_id = base_id + num_id;
	unsigned long i, isel, ireg;

	while (id < last_id) {
		isel = id / BITS_PER_LONG;
		isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
		isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;

		/*
		 * Prepare the ID mask to be programmed in the
		 * IMSIC EIEx and EIPx registers. These registers
		 * are XLEN-wide and we must not touch IDs which
		 * are < base_id and >= (base_id + num_id).
		 */
		ireg = 0;
		for (i = id & (__riscv_xlen - 1); id < last_id && i < __riscv_xlen; i++) {
			ireg |= BIT(i);
			id++;
		}

		/*
		 * The IMSIC EIEx and EIPx registers are indirectly
		 * accessed via using ISELECT and IREG CSRs so we
		 * need to access these CSRs without getting preempted.
		 *
		 * All existing users of this function call this
		 * function with local IRQs disabled so we don't
		 * need to do anything special here.
		 */
		if (val)
			imsic_csr_set(isel, ireg);
		else
			imsic_csr_clear(isel, ireg);
	}
}
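
/*
 * Any vector whose enable or move state changed is marked in the
 * owning CPU's dirty_bitmap; the function below, always called with
 * lpriv->lock held, reconciles that soft state with the hardware
 * EIEx registers and completes any pending cross-CPU moves.
 */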

static void __imsic_local_sync(struct imsic_local_priv *lpriv)
{
	struct imsic_local_config *mlocal;
	struct imsic_vector *vec, *mvec;
	int i;

	lockdep_assert_held(&lpriv->lock);

	for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1) {
		if (!i || i == IMSIC_IPI_ID)
			goto skip;
		vec = &lpriv->vectors[i];

		if (READ_ONCE(vec->enable))
			__imsic_id_set_enable(i);
		else
			__imsic_id_clear_enable(i);

		/*
		 * If the ID was being moved to a new ID on some other CPU
		 * then we can get a MSI during the movement so check the
		 * ID pending bit and re-trigger the new ID on other CPU
		 * using MMIO write.
		 */
		mvec = READ_ONCE(vec->move);
		WRITE_ONCE(vec->move, NULL);
		if (mvec && mvec != vec) {
			if (__imsic_id_read_clear_pending(i)) {
				mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
				writel_relaxed(mvec->local_id, mlocal->msi_va);
			}

			imsic_vector_free(&lpriv->vectors[i]);
		}

skip:
		bitmap_clear(lpriv->dirty_bitmap, i, 1);
	}
}

void imsic_local_sync_all(void)
{
	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
	unsigned long flags;

	raw_spin_lock_irqsave(&lpriv->lock, flags);
	bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
	__imsic_local_sync(lpriv);
	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}

void imsic_local_delivery(bool enable)
{
	if (enable) {
		imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_ENABLE_EITHRESHOLD);
		imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_ENABLE_EIDELIVERY);
		return;
	}

	imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_DISABLE_EIDELIVERY);
	imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_DISABLE_EITHRESHOLD);
}
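
/*
 * A CPU cannot program another CPU's IMSIC CSRs directly, so on SMP
 * cross-CPU updates are applied lazily: the change is recorded in the
 * target CPU's dirty_bitmap and a timer pinned to that CPU is armed
 * to run the sync there. On !SMP builds the sync is always local.
 */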

#ifdef CONFIG_SMP
static void imsic_local_timer_callback(struct timer_list *timer)
{
	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
	unsigned long flags;

	raw_spin_lock_irqsave(&lpriv->lock, flags);
	__imsic_local_sync(lpriv);
	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}

static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
{
	lockdep_assert_held(&lpriv->lock);

	/*
	 * The spinlock acquire/release semantics ensure that changes
	 * to vector enable, vector move and dirty bitmap are visible
	 * to the target CPU.
	 */

	/*
	 * We schedule a timer on the target CPU if the target CPU is not
	 * same as the current CPU. An offline CPU will unconditionally
	 * synchronize IDs through imsic_starting_cpu() when the
	 * CPU is brought up.
	 */
	if (cpu_online(cpu)) {
		if (cpu == smp_processor_id()) {
			__imsic_local_sync(lpriv);
			return;
		}

		if (!timer_pending(&lpriv->timer)) {
			lpriv->timer.expires = jiffies + 1;
			add_timer_on(&lpriv->timer, cpu);
		}
	}
}
#else
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
{
	lockdep_assert_held(&lpriv->lock);
	__imsic_local_sync(lpriv);
}
#endif
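
/*
 * Mask/unmask only update the vector's soft state and mark it dirty;
 * the actual EIEx CSR update happens in __imsic_remote_sync(), either
 * immediately on the local CPU or via the lazy timer on a remote one.
 */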

void imsic_vector_mask(struct imsic_vector *vec)
{
	struct imsic_local_priv *lpriv;

	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
		return;

	/*
	 * This function is called through Linux irq subsystem with
	 * irqs disabled so no need to save/restore irq flags.
	 */
	raw_spin_lock(&lpriv->lock);

	WRITE_ONCE(vec->enable, false);
	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
	__imsic_remote_sync(lpriv, vec->cpu);

	raw_spin_unlock(&lpriv->lock);
}

void imsic_vector_unmask(struct imsic_vector *vec)
{
	struct imsic_local_priv *lpriv;

	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
		return;

	/*
	 * This function is called through Linux irq subsystem with
	 * irqs disabled so no need to save/restore irq flags.
	 */
	raw_spin_lock(&lpriv->lock);

	WRITE_ONCE(vec->enable, true);
	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
	__imsic_remote_sync(lpriv, vec->cpu);

	raw_spin_unlock(&lpriv->lock);
}

static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
				     bool new_enable, struct imsic_vector *new_move)
{
	unsigned long flags;
	bool enabled;

	raw_spin_lock_irqsave(&lpriv->lock, flags);

	/* Update enable and move details */
	enabled = READ_ONCE(vec->enable);
	WRITE_ONCE(vec->enable, new_enable);
	WRITE_ONCE(vec->move, new_move);

	/* Mark the vector as dirty and synchronize */
	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
	__imsic_remote_sync(lpriv, vec->cpu);

	raw_spin_unlock_irqrestore(&lpriv->lock, flags);

	return enabled;
}
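
/*
 * imsic_vector_move_update() returns the vector's previous enable
 * state, which lets imsic_vector_move() disable the old vector and
 * carry that state over to the new one in a single handoff.
 */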

void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec)
{
	struct imsic_local_priv *old_lpriv, *new_lpriv;
	bool enabled;

	if (WARN_ON_ONCE(old_vec->cpu == new_vec->cpu))
		return;

	old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu);
	if (WARN_ON_ONCE(&old_lpriv->vectors[old_vec->local_id] != old_vec))
		return;

	new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu);
	if (WARN_ON_ONCE(&new_lpriv->vectors[new_vec->local_id] != new_vec))
		return;

	/*
	 * Move and re-trigger the new vector based on the pending
	 * state of the old vector because we might get a device
	 * interrupt on the old vector while device was being moved
	 * to the new vector.
	 */
	enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
	imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind)
{
	struct imsic_local_priv *lpriv;
	struct imsic_vector *mvec;
	bool is_enabled;

	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
		return;

	is_enabled = imsic_vector_isenabled(vec);
	mvec = imsic_vector_get_move(vec);

	seq_printf(m, "%*starget_cpu      : %5u\n", ind, "", vec->cpu);
	seq_printf(m, "%*starget_local_id : %5u\n", ind, "", vec->local_id);
	seq_printf(m, "%*sis_reserved     : %5u\n", ind, "",
		   (vec->local_id <= IMSIC_IPI_ID) ? 1 : 0);
	seq_printf(m, "%*sis_enabled      : %5u\n", ind, "", is_enabled ? 1 : 0);
	seq_printf(m, "%*sis_move_pending : %5u\n", ind, "", mvec ? 1 : 0);
	if (mvec) {
		seq_printf(m, "%*smove_cpu        : %5u\n", ind, "", mvec->cpu);
		seq_printf(m, "%*smove_local_id   : %5u\n", ind, "", mvec->local_id);
	}
}

void imsic_vector_debug_show_summary(struct seq_file *m, int ind)
{
	irq_matrix_debug_show(m, imsic->matrix, ind);
}
#endif

struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id)
{
	struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, cpu);

	if (!lpriv || imsic->global.nr_ids < local_id)
		return NULL;

	return &lpriv->vectors[local_id];
}
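
/*
 * Vector allocation below delegates to the kernel's generic irq_matrix
 * allocator, which picks a (cpu, local_id) pair from the requested CPU
 * mask while balancing the number of allocated IDs across CPUs.
 */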

struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask)
{
	struct imsic_vector *vec = NULL;
	struct imsic_local_priv *lpriv;
	unsigned long flags;
	unsigned int cpu;
	int local_id;

	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
	local_id = irq_matrix_alloc(imsic->matrix, mask, false, &cpu);
	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
	if (local_id < 0)
		return NULL;

	lpriv = per_cpu_ptr(imsic->lpriv, cpu);
	vec = &lpriv->vectors[local_id];
	vec->hwirq = hwirq;
	vec->enable = false;
	vec->move = NULL;

	return vec;
}

void imsic_vector_free(struct imsic_vector *vec)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
	vec->hwirq = UINT_MAX;
	irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false);
	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
}

static void __init imsic_local_cleanup(void)
{
	struct imsic_local_priv *lpriv;
	int cpu;

	for_each_possible_cpu(cpu) {
		lpriv = per_cpu_ptr(imsic->lpriv, cpu);

		bitmap_free(lpriv->dirty_bitmap);
		kfree(lpriv->vectors);
	}

	free_percpu(imsic->lpriv);
}

static int __init imsic_local_init(void)
{
	struct imsic_global_config *global = &imsic->global;
	struct imsic_local_priv *lpriv;
	struct imsic_vector *vec;
	int cpu, i;

	/* Allocate per-CPU private state */
	imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv));
	if (!imsic->lpriv)
		return -ENOMEM;

	/* Setup per-CPU private state */
	for_each_possible_cpu(cpu) {
		lpriv = per_cpu_ptr(imsic->lpriv, cpu);

		raw_spin_lock_init(&lpriv->lock);

		/* Allocate dirty bitmap */
		lpriv->dirty_bitmap = bitmap_zalloc(global->nr_ids + 1, GFP_KERNEL);
		if (!lpriv->dirty_bitmap)
			goto fail_local_cleanup;

#ifdef CONFIG_SMP
		/* Setup lazy timer for synchronization */
		timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED);
#endif

		/* Allocate vector array */
		lpriv->vectors = kcalloc(global->nr_ids + 1, sizeof(*lpriv->vectors),
					 GFP_KERNEL);
		if (!lpriv->vectors)
			goto fail_local_cleanup;

		/* Setup vector array */
		for (i = 0; i <= global->nr_ids; i++) {
			vec = &lpriv->vectors[i];
			vec->cpu = cpu;
			vec->local_id = i;
			vec->hwirq = UINT_MAX;
		}
	}

	return 0;

fail_local_cleanup:
	imsic_local_cleanup();
	return -ENOMEM;
}

void imsic_state_online(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
	irq_matrix_online(imsic->matrix);
	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
}

void imsic_state_offline(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
	irq_matrix_offline(imsic->matrix);
	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);

#ifdef CONFIG_SMP
	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);

	raw_spin_lock_irqsave(&lpriv->lock, flags);
	WARN_ON_ONCE(try_to_del_timer_sync(&lpriv->timer) < 0);
	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
#endif
}

static int __init imsic_matrix_init(void)
{
	struct imsic_global_config *global = &imsic->global;

	raw_spin_lock_init(&imsic->matrix_lock);
	imsic->matrix = irq_alloc_matrix(global->nr_ids + 1,
					 0, global->nr_ids + 1);
	if (!imsic->matrix)
		return -ENOMEM;

	/* Reserve ID#0 because it is special and never implemented */
	irq_matrix_assign_system(imsic->matrix, 0, false);

	/* Reserve IPI ID because it is special and used internally */
	irq_matrix_assign_system(imsic->matrix, IMSIC_IPI_ID, false);

	return 0;
}
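
/*
 * The DT parser below reads the IMSIC geometry from the firmware node.
 * A hypothetical fragment, for illustration only (the unit address and
 * values are made up):
 *
 *	imsic: interrupt-controller@28000000 {
 *		riscv,guest-index-bits = <2>;
 *		riscv,hart-index-bits = <3>;
 *		riscv,num-ids = <255>;
 *	};
 *
 * Every property except riscv,num-ids falls back to a default when
 * absent.
 */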

static int __init imsic_populate_global_dt(struct fwnode_handle *fwnode,
					   struct imsic_global_config *global,
					   u32 *nr_parent_irqs)
{
	int rc;

	/* Find number of guest index bits in MSI address */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits",
				  &global->guest_index_bits);
	if (rc)
		global->guest_index_bits = 0;

	/* Find number of HART index bits */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits",
				  &global->hart_index_bits);
	if (rc) {
		/* Assume default value */
		global->hart_index_bits = __fls(*nr_parent_irqs);
		if (BIT(global->hart_index_bits) < *nr_parent_irqs)
			global->hart_index_bits++;
	}

	/* Find number of group index bits */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits",
				  &global->group_index_bits);
	if (rc)
		global->group_index_bits = 0;

	/*
	 * Find first bit position of group index.
	 * If not specified, assume the default APLIC-IMSIC configuration.
	 */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift",
				  &global->group_index_shift);
	if (rc)
		global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2;

	/* Find number of interrupt identities */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids",
				  &global->nr_ids);
	if (rc) {
		pr_err("%pfwP: number of interrupt identities not found\n", fwnode);
		return rc;
	}

	/* Find number of guest interrupt identities */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids",
				  &global->nr_guest_ids);
	if (rc)
		global->nr_guest_ids = global->nr_ids;

	return 0;
}

static int __init imsic_populate_global_acpi(struct fwnode_handle *fwnode,
					     struct imsic_global_config *global,
					     u32 *nr_parent_irqs, void *opaque)
{
	struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)opaque;

	global->guest_index_bits = imsic->guest_index_bits;
	global->hart_index_bits = imsic->hart_index_bits;
	global->group_index_bits = imsic->group_index_bits;
	global->group_index_shift = imsic->group_index_shift;
	global->nr_ids = imsic->num_ids;
	global->nr_guest_ids = imsic->num_guest_ids;
	return 0;
}

static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode,
					  u32 index, unsigned long *hartid)
{
	struct of_phandle_args parent;
	int rc;

	if (!is_of_node(fwnode)) {
		*hartid = acpi_rintc_index_to_hartid(index);

		if (!hartid || (*hartid == INVALID_HARTID))
			return -EINVAL;

		return 0;
	}

	rc = of_irq_parse_one(to_of_node(fwnode), index, &parent);
	if (rc)
		return rc;

	/*
	 * Skip interrupts other than external interrupts for
	 * current privilege level.
	 */
	if (parent.args[0] != RV_IRQ_EXT)
		return -EINVAL;

	return riscv_of_parent_hartid(parent.np, hartid);
}

static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode,
					  u32 index, struct resource *res)
{
	if (!is_of_node(fwnode))
		return acpi_rintc_get_imsic_mmio_info(index, res);

	return of_address_to_resource(to_of_node(fwnode), index, res);
}

static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode,
				     struct imsic_global_config *global,
				     u32 *nr_parent_irqs,
				     u32 *nr_mmios,
				     void *opaque)
{
	unsigned long hartid;
	struct resource res;
	int rc;
	u32 i;

	*nr_parent_irqs = 0;
	*nr_mmios = 0;

	/* Find number of parent interrupts */
	while (!imsic_get_parent_hartid(fwnode, *nr_parent_irqs, &hartid))
		(*nr_parent_irqs)++;
	if (!*nr_parent_irqs) {
		pr_err("%pfwP: no parent irqs available\n", fwnode);
		return -EINVAL;
	}

	if (is_of_node(fwnode))
		rc = imsic_populate_global_dt(fwnode, global, nr_parent_irqs);
	else
		rc = imsic_populate_global_acpi(fwnode, global, nr_parent_irqs, opaque);
	if (rc)
		return rc;

	/* Sanity check guest index bits */
	i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT;
	if (i < global->guest_index_bits) {
		pr_err("%pfwP: guest index bits too big\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check HART index bits */
	i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - global->guest_index_bits;
	if (i < global->hart_index_bits) {
		pr_err("%pfwP: HART index bits too big\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check group index bits */
	i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT -
	    global->guest_index_bits - global->hart_index_bits;
	if (i < global->group_index_bits) {
		pr_err("%pfwP: group index bits too big\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check group index shift */
	i = global->group_index_bits + global->group_index_shift - 1;
	if (i >= BITS_PER_LONG) {
		pr_err("%pfwP: group index shift too big\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check number of interrupt identities */
	if (global->nr_ids < IMSIC_MIN_ID ||
	    global->nr_ids >= IMSIC_MAX_ID ||
	    (global->nr_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
		pr_err("%pfwP: invalid number of interrupt identities\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check number of guest interrupt identities */
	if (global->nr_guest_ids < IMSIC_MIN_ID ||
	    global->nr_guest_ids >= IMSIC_MAX_ID ||
	    (global->nr_guest_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
		pr_err("%pfwP: invalid number of guest interrupt identities\n", fwnode);
		return -EINVAL;
	}

	/* Compute base address */
	rc = imsic_get_mmio_resource(fwnode, 0, &res);
	if (rc) {
		pr_err("%pfwP: first MMIO resource not found\n", fwnode);
		return -EINVAL;
	}
	global->base_addr = res.start;
	global->base_addr &= ~(BIT(global->guest_index_bits +
				   global->hart_index_bits +
				   IMSIC_MMIO_PAGE_SHIFT) - 1);
	global->base_addr &= ~((BIT(global->group_index_bits) - 1) <<
			       global->group_index_shift);

	/* Find number of MMIO register sets */
	while (!imsic_get_mmio_resource(fwnode, *nr_mmios, &res))
		(*nr_mmios)++;

	return 0;
}
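
/*
 * Resulting MSI page address layout, given the low bits cleared from
 * base_addr above (per the AIA MSI addressing scheme):
 *
 *	msi_pa = base_addr
 *		 | (group << group_index_shift)
 *		 | (hart  << (guest_index_bits + IMSIC_MMIO_PAGE_SHIFT))
 *		 | (guest << IMSIC_MMIO_PAGE_SHIFT)
 */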

int __init imsic_setup_state(struct fwnode_handle *fwnode, void *opaque)
{
	u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0;
	struct imsic_global_config *global;
	struct imsic_local_config *local;
	void __iomem **mmios_va = NULL;
	struct resource *mmios = NULL;
	unsigned long reloff, hartid;
	phys_addr_t base_addr;
	int rc, cpu;

	/*
	 * Only one IMSIC instance allowed in a platform for clean
	 * implementation of SMP IRQ affinity and per-CPU IPIs.
	 *
	 * This means on a multi-socket (or multi-die) platform we
	 * will have multiple MMIO regions for one IMSIC instance.
	 */
	if (imsic) {
		pr_err("%pfwP: already initialized hence ignoring\n", fwnode);
		return -EALREADY;
	}

	if (!riscv_isa_extension_available(NULL, SxAIA)) {
		pr_err("%pfwP: AIA support not available\n", fwnode);
		return -ENODEV;
	}

	imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
	if (!imsic)
		return -ENOMEM;
	imsic->fwnode = fwnode;
	global = &imsic->global;

	global->local = alloc_percpu(typeof(*global->local));
	if (!global->local) {
		rc = -ENOMEM;
		goto out_free_priv;
	}

	/* Parse IMSIC fwnode */
	rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios, opaque);
	if (rc)
		goto out_free_local;

	/* Allocate MMIO resource array */
	mmios = kcalloc(nr_mmios, sizeof(*mmios), GFP_KERNEL);
	if (!mmios) {
		rc = -ENOMEM;
		goto out_free_local;
	}

	/* Allocate MMIO virtual address array */
	mmios_va = kcalloc(nr_mmios, sizeof(*mmios_va), GFP_KERNEL);
	if (!mmios_va) {
		rc = -ENOMEM;
		goto out_iounmap;
	}

	/* Parse and map MMIO register sets */
	for (i = 0; i < nr_mmios; i++) {
		rc = imsic_get_mmio_resource(fwnode, i, &mmios[i]);
		if (rc) {
			pr_err("%pfwP: unable to parse MMIO regset %d\n", fwnode, i);
			goto out_iounmap;
		}

		base_addr = mmios[i].start;
		base_addr &= ~(BIT(global->guest_index_bits +
				   global->hart_index_bits +
				   IMSIC_MMIO_PAGE_SHIFT) - 1);
		base_addr &= ~((BIT(global->group_index_bits) - 1) <<
			       global->group_index_shift);
		if (base_addr != global->base_addr) {
			rc = -EINVAL;
			pr_err("%pfwP: address mismatch for regset %d\n", fwnode, i);
			goto out_iounmap;
		}

		mmios_va[i] = ioremap(mmios[i].start, resource_size(&mmios[i]));
		if (!mmios_va[i]) {
			rc = -EIO;
			pr_err("%pfwP: unable to map MMIO regset %d\n", fwnode, i);
			goto out_iounmap;
		}
	}

	/* Initialize local (or per-CPU) state */
	rc = imsic_local_init();
	if (rc) {
		pr_err("%pfwP: failed to initialize local state\n",
		       fwnode);
		goto out_iounmap;
	}

	/* Configure handlers for target CPUs */
	for (i = 0; i < nr_parent_irqs; i++) {
		rc = imsic_get_parent_hartid(fwnode, i, &hartid);
		if (rc) {
			pr_warn("%pfwP: hart ID for parent irq%d not found\n", fwnode, i);
			continue;
		}

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			pr_warn("%pfwP: invalid cpuid for parent irq%d\n", fwnode, i);
			continue;
		}

		/* Find MMIO location of MSI page */
		index = nr_mmios;
		reloff = i * BIT(global->guest_index_bits) *
			 IMSIC_MMIO_PAGE_SZ;
		for (j = 0; j < nr_mmios; j++) {
			if (reloff < resource_size(&mmios[j])) {
				index = j;
				break;
			}

			/*
			 * MMIO region size may not be aligned to
			 * BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ
			 * if holes are present.
			 */
			reloff -= ALIGN(resource_size(&mmios[j]),
					BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ);
		}
		if (index >= nr_mmios) {
			pr_warn("%pfwP: MMIO not found for parent irq%d\n", fwnode, i);
			continue;
		}

		local = per_cpu_ptr(global->local, cpu);
		local->msi_pa = mmios[index].start + reloff;
		local->msi_va = mmios_va[index] + reloff;

		nr_handlers++;
	}

	/* If no CPU handlers found then can't take interrupts */
	if (!nr_handlers) {
		pr_err("%pfwP: No CPU handlers found\n", fwnode);
		rc = -ENODEV;
		goto out_local_cleanup;
	}

	/* Initialize matrix allocator */
	rc = imsic_matrix_init();
	if (rc) {
		pr_err("%pfwP: failed to create matrix allocator\n", fwnode);
		goto out_local_cleanup;
	}

	/* We don't need the MMIO arrays anymore so let's free them up */
	kfree(mmios_va);
	kfree(mmios);

	return 0;

out_local_cleanup:
	imsic_local_cleanup();
out_iounmap:
	for (i = 0; i < nr_mmios; i++) {
		if (mmios_va && mmios_va[i])
			iounmap(mmios_va[i]);
	}
	kfree(mmios_va);
	kfree(mmios);
out_free_local:
	free_percpu(imsic->global.local);
out_free_priv:
	kfree(imsic);
	imsic = NULL;
	return rc;
}