// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2024 Loongson Technologies, Inc.
 */

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

#include <asm/loongarch.h>
#include <asm/setup.h>

#include "irq-msi-lib.h"
#include "irq-loongson.h"

#define VECTORS_PER_REG		64
#define IRR_VECTOR_MASK		0xffUL
#define IRR_INVALID_MASK	0x80000000UL
#define AVEC_MSG_OFFSET		0x100000
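
/*
 * Each CPU has 256 vectors: pending bits live in four 64-bit ISR CSRs
 * (VECTORS_PER_REG bits each), and reading LOONGARCH_CSR_IRR returns the
 * next pending vector number in its low byte, with IRR_INVALID_MASK set
 * when nothing is pending (see avecintc_irq_dispatch()).
 */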

#ifdef CONFIG_SMP
struct pending_list {
	struct list_head	head;
};

static struct cpumask intersect_mask;
static DEFINE_PER_CPU(struct pending_list, pending_list);
#endif

static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map);

struct avecintc_chip {
	raw_spinlock_t		lock;
	struct fwnode_handle	*fwnode;
	struct irq_domain	*domain;
	struct irq_matrix	*vector_matrix;
	phys_addr_t		msi_base_addr;
};

static struct avecintc_chip loongarch_avec;
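
/*
 * Per-interrupt state: (cpu, vec) is the currently programmed slot in the
 * vector matrix; (prev_cpu, prev_vec) remember the old slot during an
 * affinity change, and "moving" is set until that old slot is released.
 */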
struct avecintc_data {
	struct list_head	entry;
	unsigned int		cpu;
	unsigned int		vec;
	unsigned int		prev_cpu;
	unsigned int		prev_vec;
	unsigned int		moving;
};
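
/*
 * Nothing needs to be done per vector on ack/mask/unmask; these stubs
 * only satisfy the irq_chip interface.
 */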
static inline void avecintc_ack_irq(struct irq_data *d)
{
}

static inline void avecintc_mask_irq(struct irq_data *d)
{
}

static inline void avecintc_unmask_irq(struct irq_data *d)
{
}

#ifdef CONFIG_SMP
static inline void pending_list_init(int cpu)
{
	struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);

	INIT_LIST_HEAD(&plist->head);
}
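
/*
 * Queue the interrupt on the previous CPU's pending list and kick that
 * CPU with ACTION_CLEAR_VECTOR, so it releases the old vector once the
 * vector is no longer pending there.
 */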
static void avecintc_sync(struct avecintc_data *adata)
{
	struct pending_list *plist;

	if (cpu_online(adata->prev_cpu)) {
		plist = per_cpu_ptr(&pending_list, adata->prev_cpu);
		list_add_tail(&adata->entry, &plist->head);
		adata->moving = 1;
		mp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR);
	}
}
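
/* Allocate a vector on one of the requested online CPUs and migrate to it. */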
static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
{
	int cpu, ret, vector;
	struct avecintc_data *adata;

	scoped_guard(raw_spinlock, &loongarch_avec.lock) {
		adata = irq_data_get_irq_chip_data(data);

		if (adata->moving)
			return -EBUSY;

		if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest))
			return 0;

		cpumask_and(&intersect_mask, dest, cpu_online_mask);

		ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu);
		if (ret < 0)
			return ret;

		vector = ret;
		adata->cpu = cpu;
		adata->vec = vector;
		per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data);
		avecintc_sync(adata);
	}

	irq_data_update_effective_affinity(data, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static int avecintc_cpu_online(unsigned int cpu)
{
	if (!loongarch_avec.vector_matrix)
		return 0;

	guard(raw_spinlock)(&loongarch_avec.lock);

	irq_matrix_online(loongarch_avec.vector_matrix);

	pending_list_init(cpu);

	return 0;
}

static int avecintc_cpu_offline(unsigned int cpu)
{
	struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);

	if (!loongarch_avec.vector_matrix)
		return 0;

	guard(raw_spinlock)(&loongarch_avec.lock);

	if (!list_empty(&plist->head))
		pr_warn("CPU#%d vector is busy\n", cpu);

	irq_matrix_offline(loongarch_avec.vector_matrix);

	return 0;
}
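
/*
 * Runs on the old target CPU in response to ACTION_CLEAR_VECTOR: while the
 * old vector is still set in the ISR, re-send the IPI to try again later;
 * once it has drained, free the old matrix slot and irq_map entry.
 */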
void complete_irq_moving(void)
{
	struct pending_list *plist = this_cpu_ptr(&pending_list);
	struct avecintc_data *adata, *tdata;
	int cpu, vector, bias;
	uint64_t isr;

	guard(raw_spinlock)(&loongarch_avec.lock);

	list_for_each_entry_safe(adata, tdata, &plist->head, entry) {
		cpu = adata->prev_cpu;
		vector = adata->prev_vec;
		bias = vector / VECTORS_PER_REG;
		switch (bias) {
		case 0:
			isr = csr_read64(LOONGARCH_CSR_ISR0);
			break;
		case 1:
			isr = csr_read64(LOONGARCH_CSR_ISR1);
			break;
		case 2:
			isr = csr_read64(LOONGARCH_CSR_ISR2);
			break;
		case 3:
			isr = csr_read64(LOONGARCH_CSR_ISR3);
			break;
		}

		if (isr & (1UL << (vector % VECTORS_PER_REG))) {
			mp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR);
			continue;
		}

		list_del(&adata->entry);
		irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false);
		this_cpu_write(irq_map[vector], NULL);
		adata->moving = 0;
		adata->prev_cpu = adata->cpu;
		adata->prev_vec = adata->vec;
	}
}
#endif
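
/*
 * Message layout: address bits [11:4] carry the vector number and bits
 * [27:12] the physical CPU number, on top of msi_base_addr.
 */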
static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct avecintc_data *adata = irq_data_get_irq_chip_data(d);

	msg->address_hi = 0x0;
	msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4)
			  | ((cpu_logical_map(adata->cpu & 0xffff)) << 12);
	msg->data = 0x0;
}

static struct irq_chip avec_irq_controller = {
	.name			= "AVECINTC",
	.irq_ack		= avecintc_ack_irq,
	.irq_mask		= avecintc_mask_irq,
	.irq_unmask		= avecintc_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= avecintc_set_affinity,
#endif
	.irq_compose_msi_msg	= avecintc_compose_msi_msg,
};
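
/* Drain the IRR: handle each pending vector until it reads back invalid. */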
static void avecintc_irq_dispatch(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_desc *d;

	chained_irq_enter(chip, desc);

	while (true) {
		unsigned long vector = csr_read64(LOONGARCH_CSR_IRR);
		if (vector & IRR_INVALID_MASK)
			break;

		vector &= IRR_VECTOR_MASK;

		d = this_cpu_read(irq_map[vector]);
		if (d) {
			generic_handle_irq_desc(d);
		} else {
			spurious_interrupt();
			pr_warn("Unexpected IRQ on CPU#%d [vector %ld]\n", smp_processor_id(), vector);
		}
	}

	chained_irq_exit(chip, desc);
}
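
/*
 * Reserve a matrix slot on any online CPU and point that CPU's irq_map
 * entry at the descriptor.
 */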
static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata)
{
	int cpu, ret;

	guard(raw_spinlock_irqsave)(&loongarch_avec.lock);

	ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu);
	if (ret < 0)
		return ret;

	adata->prev_cpu = adata->cpu = cpu;
	adata->prev_vec = adata->vec = ret;
	per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd);

	return 0;
}
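
/* Allocate per-irq data and a vector for each interrupt in the range. */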
static int avecintc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i);
		struct avecintc_data *adata = kzalloc(sizeof(*adata), GFP_KERNEL);
		int ret;

		if (!adata)
			return -ENOMEM;

		ret = avecintc_alloc_vector(irqd, adata);
		if (ret < 0) {
			kfree(adata);
			return ret;
		}

		irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller,
				    adata, handle_edge_irq, NULL, NULL);
		irqd_set_single_target(irqd);
		irqd_set_affinity_on_activate(irqd);
	}

	return 0;
}

static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata)
{
	guard(raw_spinlock_irqsave)(&loongarch_avec.lock);

	per_cpu(irq_map, adata->cpu)[adata->vec] = NULL;
	irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false);

#ifdef CONFIG_SMP
	if (!adata->moving)
		return;

	per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL;
	irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false);
	list_del_init(&adata->entry);
#endif
}

static void avecintc_domain_free(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		if (d) {
			struct avecintc_data *adata = irq_data_get_irq_chip_data(d);

			avecintc_free_vector(d, adata);
			irq_domain_reset_irq_data(d);
			kfree(adata);
		}
	}
}

static const struct irq_domain_ops avecintc_domain_ops = {
	.alloc		= avecintc_domain_alloc,
	.free		= avecintc_domain_free,
	.select		= msi_lib_irq_domain_select,
};
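
/*
 * Set up the global vector matrix; the first NR_LEGACY_VECTORS entries are
 * marked as system vectors so they are excluded from dynamic allocation.
 */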
static int __init irq_matrix_init(void)
{
	loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS);
	if (!loongarch_avec.vector_matrix)
		return -ENOMEM;

	for (int i = 0; i < NR_LEGACY_VECTORS; i++)
		irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false);

	irq_matrix_online(loongarch_avec.vector_matrix);

	return 0;
}
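
/*
 * Bring-up order: fwnode and domain, the chained mapping of INT_AVEC on
 * the parent, the vector matrix, and finally the AVEC enable bit in
 * LOONGARCH_IOCSR_MISC_FUNC.
 */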
static int __init avecintc_init(struct irq_domain *parent)
{
	int ret, parent_irq;
	unsigned long value;

	raw_spin_lock_init(&loongarch_avec.lock);

	loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC");
	if (!loongarch_avec.fwnode) {
		pr_err("Unable to allocate domain handle\n");
		ret = -ENOMEM;
		goto out;
	}

	loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode,
						       &avecintc_domain_ops, NULL);
	if (!loongarch_avec.domain) {
		pr_err("Unable to create IRQ domain\n");
		ret = -ENOMEM;
		goto out_free_handle;
	}

	parent_irq = irq_create_mapping(parent, INT_AVEC);
	if (!parent_irq) {
		pr_err("Failed to map hwirq\n");
		ret = -EINVAL;
		goto out_remove_domain;
	}

	ret = irq_matrix_init();
	if (ret < 0) {
		pr_err("Failed to init irq matrix\n");
		goto out_remove_domain;
	}

	irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL);

#ifdef CONFIG_SMP
	pending_list_init(0);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING,
				  "irqchip/loongarch/avecintc:starting",
				  avecintc_cpu_online, avecintc_cpu_offline);
#endif

	value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	value |= IOCSR_MISC_FUNC_AVEC_EN;
	iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC);

	return ret;

out_remove_domain:
	irq_domain_remove(loongarch_avec.domain);
out_free_handle:
	irq_domain_free_fwnode(loongarch_avec.fwnode);
out:
	return ret;
}
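
/*
 * The MADT MSI PIC entry advertises the PCH MSI doorbell; the AVEC message
 * window sits AVEC_MSG_OFFSET below that address.
 */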
static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;

	loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET;

	return pch_msi_acpi_init_avec(loongarch_avec.domain);
}

static inline int __init acpi_cascade_irqdomain_init(void)
{
	return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
}

int __init avecintc_acpi_init(struct irq_domain *parent)
{
	int ret = avecintc_init(parent);

	if (ret < 0) {
		pr_err("Failed to init IRQ domain\n");
		return ret;
	}

	ret = acpi_cascade_irqdomain_init();
	if (ret < 0) {
		pr_err("Failed to init cascade IRQ domain\n");
		return ret;
	}

	return ret;
}