// drivers/irqchip/irq-loongarch-avec.c
// (extracted from a gitweb blob view; web-scrape header removed)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020-2024 Loongson Technologies, Inc.
4 */
6 #include <linux/cpuhotplug.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/irq.h>
10 #include <linux/irqchip.h>
11 #include <linux/irqchip/chained_irq.h>
12 #include <linux/irqdomain.h>
13 #include <linux/kernel.h>
14 #include <linux/msi.h>
15 #include <linux/radix-tree.h>
16 #include <linux/spinlock.h>
18 #include <asm/loongarch.h>
19 #include <asm/setup.h>
21 #include "irq-msi-lib.h"
22 #include "irq-loongson.h"
24 #define VECTORS_PER_REG 64
25 #define IRR_VECTOR_MASK 0xffUL
26 #define IRR_INVALID_MASK 0x80000000UL
27 #define AVEC_MSG_OFFSET 0x100000
#ifdef CONFIG_SMP
/* Per-CPU list of interrupts whose previous vector still awaits cleanup. */
struct pending_list {
	struct list_head head;
};

/* Scratch mask for affinity calculation; serialized by loongarch_avec.lock. */
static struct cpumask intersect_mask;
static DEFINE_PER_CPU(struct pending_list, pending_list);
#endif
/* Per-CPU dispatch table: hardware vector number -> irq_desc (NULL if free). */
static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map);
/* Global state of the LoongArch advanced vector interrupt controller. */
struct avecintc_chip {
	raw_spinlock_t lock;			/* protects vector_matrix, irq_map and pending lists */
	struct fwnode_handle *fwnode;
	struct irq_domain *domain;
	struct irq_matrix *vector_matrix;	/* (cpu, vector) allocator */
	phys_addr_t msi_base_addr;		/* doorbell base for AVEC MSI messages */
};

static struct avecintc_chip loongarch_avec;
/* Per-interrupt chip data: current and previous (cpu, vector) placement. */
struct avecintc_data {
	struct list_head entry;		/* link on prev_cpu's pending_list while moving */
	unsigned int cpu;
	unsigned int vec;
	unsigned int prev_cpu;
	unsigned int prev_vec;
	unsigned int moving;		/* non-zero while a migration is still in flight */
};
/* No explicit ack needed — presumably the hardware acks on IRR read; TODO confirm. */
static inline void avecintc_ack_irq(struct irq_data *d)
{
}
/* Intentional no-op — presumably no per-vector mask register exists; TODO confirm. */
static inline void avecintc_mask_irq(struct irq_data *d)
{
}
/* Intentional no-op — counterpart of avecintc_mask_irq(); TODO confirm. */
static inline void avecintc_unmask_irq(struct irq_data *d)
{
}
71 #ifdef CONFIG_SMP
72 static inline void pending_list_init(int cpu)
74 struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
76 INIT_LIST_HEAD(&plist->head);
/*
 * After retargeting an interrupt, queue its old (prev_cpu, prev_vec) slot for
 * cleanup on the previous CPU and kick that CPU with ACTION_CLEAR_VECTOR; the
 * slot is released in complete_irq_moving() once the old vector has drained.
 * If the previous CPU is already offline there is nothing to synchronize.
 */
static void avecintc_sync(struct avecintc_data *adata)
{
	struct pending_list *plist;

	if (cpu_online(adata->prev_cpu)) {
		plist = per_cpu_ptr(&pending_list, adata->prev_cpu);
		list_add_tail(&adata->entry, &plist->head);
		adata->moving = 1;
		mp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR);
	}
}
/*
 * Move an interrupt to a new (cpu, vector) pair chosen from @dest.
 *
 * Returns IRQ_SET_MASK_OK on success, 0 if the current placement already
 * satisfies @dest, -EBUSY while a previous move is still in flight, or a
 * negative error from the matrix allocator.
 */
static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
{
	int cpu, ret, vector;
	struct avecintc_data *adata;

	scoped_guard(raw_spinlock, &loongarch_avec.lock) {
		adata = irq_data_get_irq_chip_data(data);

		/* Refuse to stack a second migration on an unfinished one. */
		if (adata->moving)
			return -EBUSY;

		if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest))
			return 0;

		/* Only consider online CPUs from the requested mask. */
		cpumask_and(&intersect_mask, dest, cpu_online_mask);

		ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu);
		if (ret < 0)
			return ret;

		vector = ret;
		adata->cpu = cpu;
		adata->vec = vector;
		per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data);
		/* Hand the old slot to the previous CPU for deferred cleanup. */
		avecintc_sync(adata);
	}

	irq_data_update_effective_affinity(data, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}
123 static int avecintc_cpu_online(unsigned int cpu)
125 if (!loongarch_avec.vector_matrix)
126 return 0;
128 guard(raw_spinlock)(&loongarch_avec.lock);
130 irq_matrix_online(loongarch_avec.vector_matrix);
132 pending_list_init(cpu);
134 return 0;
137 static int avecintc_cpu_offline(unsigned int cpu)
139 struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
141 if (!loongarch_avec.vector_matrix)
142 return 0;
144 guard(raw_spinlock)(&loongarch_avec.lock);
146 if (!list_empty(&plist->head))
147 pr_warn("CPU#%d vector is busy\n", cpu);
149 irq_matrix_offline(loongarch_avec.vector_matrix);
151 return 0;
154 void complete_irq_moving(void)
156 struct pending_list *plist = this_cpu_ptr(&pending_list);
157 struct avecintc_data *adata, *tdata;
158 int cpu, vector, bias;
159 uint64_t isr;
161 guard(raw_spinlock)(&loongarch_avec.lock);
163 list_for_each_entry_safe(adata, tdata, &plist->head, entry) {
164 cpu = adata->prev_cpu;
165 vector = adata->prev_vec;
166 bias = vector / VECTORS_PER_REG;
167 switch (bias) {
168 case 0:
169 isr = csr_read64(LOONGARCH_CSR_ISR0);
170 break;
171 case 1:
172 isr = csr_read64(LOONGARCH_CSR_ISR1);
173 break;
174 case 2:
175 isr = csr_read64(LOONGARCH_CSR_ISR2);
176 break;
177 case 3:
178 isr = csr_read64(LOONGARCH_CSR_ISR3);
179 break;
182 if (isr & (1UL << (vector % VECTORS_PER_REG))) {
183 mp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR);
184 continue;
186 list_del(&adata->entry);
187 irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false);
188 this_cpu_write(irq_map[vector], NULL);
189 adata->moving = 0;
190 adata->prev_cpu = adata->cpu;
191 adata->prev_vec = adata->vec;
194 #endif
/*
 * Build the MSI message for this interrupt: the doorbell address encodes the
 * vector in bits 4..11 and the logical-CPU id from bit 12 up; data is unused.
 */
static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct avecintc_data *adata = irq_data_get_irq_chip_data(d);

	msg->address_hi = 0x0;
	/*
	 * NOTE(review): the 0xffff mask is applied to adata->cpu before
	 * cpu_logical_map(), not to its result — verify the intended mask
	 * placement against the hardware spec.
	 */
	msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4)
			  | ((cpu_logical_map(adata->cpu & 0xffff)) << 12);
	msg->data = 0x0;
}
/* irq_chip callbacks for AVEC-backed (MSI-delivered) interrupts. */
static struct irq_chip avec_irq_controller = {
	.name			= "AVECINTC",
	.irq_ack		= avecintc_ack_irq,
	.irq_mask		= avecintc_mask_irq,
	.irq_unmask		= avecintc_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= avecintc_set_affinity,
#endif
	.irq_compose_msi_msg	= avecintc_compose_msi_msg,
};
217 static void avecintc_irq_dispatch(struct irq_desc *desc)
219 struct irq_chip *chip = irq_desc_get_chip(desc);
220 struct irq_desc *d;
222 chained_irq_enter(chip, desc);
224 while (true) {
225 unsigned long vector = csr_read64(LOONGARCH_CSR_IRR);
226 if (vector & IRR_INVALID_MASK)
227 break;
229 vector &= IRR_VECTOR_MASK;
231 d = this_cpu_read(irq_map[vector]);
232 if (d) {
233 generic_handle_irq_desc(d);
234 } else {
235 spurious_interrupt();
236 pr_warn("Unexpected IRQ occurs on CPU#%d [vector %ld]\n", smp_processor_id(), vector);
240 chained_irq_exit(chip, desc);
/*
 * Reserve a free (cpu, vector) pair from the matrix for a newly allocated
 * interrupt, record it in the per-CPU dispatch table and seed prev_* with
 * the same placement. Returns 0 on success or a negative allocator error.
 */
static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata)
{
	int cpu, ret;

	guard(raw_spinlock_irqsave)(&loongarch_avec.lock);

	ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu);
	if (ret < 0)
		return ret;

	adata->prev_cpu = adata->cpu = cpu;
	adata->prev_vec = adata->vec = ret;
	per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd);

	return 0;
}
/*
 * irq_domain .alloc: give each of the @nr_irqs interrupts its own chip data
 * and a (cpu, vector) slot, handled as an edge interrupt with affinity
 * applied at activation time.
 */
static int avecintc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i);
		struct avecintc_data *adata = kzalloc(sizeof(*adata), GFP_KERNEL);
		int ret;

		if (!adata)
			return -ENOMEM;

		ret = avecintc_alloc_vector(irqd, adata);
		if (ret < 0) {
			kfree(adata);
			/* NOTE(review): earlier iterations are presumably torn down by the caller via .free — confirm. */
			return ret;
		}

		irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller,
				    adata, handle_edge_irq, NULL, NULL);
		irqd_set_single_target(irqd);
		irqd_set_affinity_on_activate(irqd);
	}

	return 0;
}
/*
 * Return an interrupt's (cpu, vector) slot to the matrix. If a migration is
 * still in flight, also release the previous slot and unlink the entry from
 * the pending list so complete_irq_moving() never sees freed data.
 */
static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata)
{
	guard(raw_spinlock_irqsave)(&loongarch_avec.lock);

	per_cpu(irq_map, adata->cpu)[adata->vec] = NULL;
	irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false);

#ifdef CONFIG_SMP
	if (!adata->moving)
		return;

	per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL;
	irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false);
	list_del_init(&adata->entry);
#endif
}
/* irq_domain .free: release each interrupt's vector slot and chip data. */
static void avecintc_domain_free(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
		struct avecintc_data *adata;

		if (!d)
			continue;

		adata = irq_data_get_irq_chip_data(d);
		avecintc_free_vector(d, adata);
		irq_domain_reset_irq_data(d);
		kfree(adata);
	}
}
/* Domain operations; MSI parent selection goes through the generic MSI lib. */
static const struct irq_domain_ops avecintc_domain_ops = {
	.alloc	= avecintc_domain_alloc,
	.free	= avecintc_domain_free,
	.select	= msi_lib_irq_domain_select,
};
325 static int __init irq_matrix_init(void)
327 loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS);
328 if (!loongarch_avec.vector_matrix)
329 return -ENOMEM;
331 for (int i = 0; i < NR_LEGACY_VECTORS; i++)
332 irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false);
334 irq_matrix_online(loongarch_avec.vector_matrix);
336 return 0;
/*
 * Set up the AVEC irqchip: fwnode and IRQ domain, the chained parent
 * interrupt (INT_AVEC), the vector matrix, CPU hotplug callbacks, and
 * finally enable AVEC mode in the IOCSR misc-function register.
 *
 * Returns 0 on success; on failure, unwinds via the goto ladder and
 * returns a negative errno.
 */
static int __init avecintc_init(struct irq_domain *parent)
{
	int ret, parent_irq;
	unsigned long value;

	raw_spin_lock_init(&loongarch_avec.lock);

	loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC");
	if (!loongarch_avec.fwnode) {
		pr_err("Unable to allocate domain handle\n");
		ret = -ENOMEM;
		goto out;
	}

	loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode,
						       &avecintc_domain_ops, NULL);
	if (!loongarch_avec.domain) {
		pr_err("Unable to create IRQ domain\n");
		ret = -ENOMEM;
		goto out_free_handle;
	}

	parent_irq = irq_create_mapping(parent, INT_AVEC);
	if (!parent_irq) {
		pr_err("Failed to mapping hwirq\n");
		ret = -EINVAL;
		goto out_remove_domain;
	}

	ret = irq_matrix_init();
	if (ret < 0) {
		pr_err("Failed to init irq matrix\n");
		goto out_remove_domain;
	}
	irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL);

#ifdef CONFIG_SMP
	/* The boot CPU never goes through the hotplug online callback. */
	pending_list_init(0);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING,
				  "irqchip/loongarch/avecintc:starting",
				  avecintc_cpu_online, avecintc_cpu_offline);
#endif
	/* Turn on advanced vector mode in hardware last. */
	value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	value |= IOCSR_MISC_FUNC_AVEC_EN;
	iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC);

	return ret;

out_remove_domain:
	irq_domain_remove(loongarch_avec.domain);
out_free_handle:
	irq_domain_free_fwnode(loongarch_avec.fwnode);
out:
	return ret;
}
395 static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
396 const unsigned long end)
398 struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
400 loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET;
402 return pch_msi_acpi_init_avec(loongarch_avec.domain);
405 static inline int __init acpi_cascade_irqdomain_init(void)
407 return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
410 int __init avecintc_acpi_init(struct irq_domain *parent)
412 int ret = avecintc_init(parent);
413 if (ret < 0) {
414 pr_err("Failed to init IRQ domain\n");
415 return ret;
418 ret = acpi_cascade_irqdomain_init();
419 if (ret < 0) {
420 pr_err("Failed to init cascade IRQ domain\n");
421 return ret;
424 return ret;