// SPDX-License-Identifier: GPL-2.0
/*
 * Loongson Extend I/O Interrupt Controller support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "eiointc: " fmt

#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/syscore_ops.h>
#include <asm/numa.h>

#include "irq-loongson.h"

#define EIOINTC_REG_NODEMAP		0x14a0
#define EIOINTC_REG_IPMAP		0x14c0
#define EIOINTC_REG_ENABLE		0x1600
#define EIOINTC_REG_BOUNCE		0x1680
#define EIOINTC_REG_ISR			0x1800
#define EIOINTC_REG_ROUTE		0x1c00

#define EXTIOI_VIRT_FEATURES		0x40000000
#define EXTIOI_HAS_VIRT_EXTENSION	BIT(0)
#define EXTIOI_HAS_ENABLE_OPTION	BIT(1)
#define EXTIOI_HAS_INT_ENCODE		BIT(2)
#define EXTIOI_HAS_CPU_ENCODE		BIT(3)
#define EXTIOI_VIRT_CONFIG		0x40000004
#define EXTIOI_ENABLE			BIT(1)
#define EXTIOI_ENABLE_INT_ENCODE	BIT(2)
#define EXTIOI_ENABLE_CPU_ENCODE	BIT(3)

#define VEC_REG_COUNT			4
#define VEC_COUNT_PER_REG		64
#define VEC_COUNT			(VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id)		((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)		((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE		0xffffffff
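/*
 * Enable registers are 32-bit with one bit per vector: "vector >> 5"
 * selects the register and "vector & 0x1f" the bit within it. The
 * VEC_MASK variant yields an all-enabled word with only that bit clear.
 */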
#define EIOINTC_ALL_ENABLE_VEC_MASK(vector)	(EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1f))
#define EIOINTC_REG_ENABLE_VEC(vector)		(EIOINTC_REG_ENABLE + ((vector >> 5) << 2))
#define EIOINTC_USE_CPU_ENCODE		BIT(0)

#define MAX_EIO_NODES			(NR_CPUS / CORES_PER_EIO_NODE)

/*
 * Routing registers are 32-bit, and there is an 8-bit route setting for
 * every interrupt vector, so one route register holds the routing
 * information for four vectors.
 */
#define EIOINTC_REG_ROUTE_VEC(vector)		(EIOINTC_REG_ROUTE + (vector & ~0x03))
#define EIOINTC_REG_ROUTE_VEC_SHIFT(vector)	((vector & 0x03) << 3)
#define EIOINTC_REG_ROUTE_VEC_MASK(vector)	(0xff << EIOINTC_REG_ROUTE_VEC_SHIFT(vector))

static int nr_pics;

struct eiointc_priv {
	u32			node;
	u32			vec_count;
	nodemask_t		node_map;
	cpumask_t		cpuspan_map;
	struct fwnode_handle	*domain_handle;
	struct irq_domain	*eiointc_domain;
	int			flags;
};

static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];

static void eiointc_enable(void)
{
	uint64_t misc;

	misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
	iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
}

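/*
 * Map a CPU to its EIO node. A virtual EIO node (KVM with the EXTIOI
 * para-virt feature) spans CORES_PER_VEIO_NODE cores, a physical one
 * CORES_PER_EIO_NODE cores.
 */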
static int cpu_to_eio_node(int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI))
		return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	else
		return cpu_logical_map(cpu) / CORES_PER_VEIO_NODE;
}

#ifdef CONFIG_SMP
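/*
 * Program the 8-bit route setting of vector @pos on every online node in
 * @node_map. csr_any_send() performs the IOCSR write on a core of the
 * target node; @data_mask covers the other byte lanes so that only the
 * route byte of @pos is updated.
 */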
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
	int i, node, cpu_node, route_node;
	unsigned char coremap;
	uint32_t pos_off, data, data_byte, data_mask;

	pos_off = pos & ~3;
	data_byte = pos & 3;
	data_mask = ~BIT_MASK(data_byte) & 0xf;

	/* Calculate node and coremap of target irq */
	cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

	for_each_online_cpu(i) {
		node = cpu_to_eio_node(i);
		if (!node_isset(node, *node_map))
			continue;

		/* EIO node 0 is in charge of inter-node interrupt dispatch */
		route_node = (node == mnode) ? cpu_node : node;
		data = ((coremap | (route_node << 4)) << (data_byte * 8));
		csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
	}
}

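/*
 * Virtualized EIOINTC with CPU encoding: the 8-bit route field carries
 * the target CPU number directly instead of a node/coremap pair.
 */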
static void veiointc_set_irq_route(unsigned int vector, unsigned int cpu)
{
	unsigned long reg = EIOINTC_REG_ROUTE_VEC(vector);
	unsigned int data;

	data = iocsr_read32(reg);
	data &= ~EIOINTC_REG_ROUTE_VEC_MASK(vector);
	data |= cpu_logical_map(cpu) << EIOINTC_REG_ROUTE_VEC_SHIFT(vector);
	iocsr_write32(data, reg);
}

static DEFINE_RAW_SPINLOCK(affinity_lock);

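/*
 * Affinity changes pick the first online CPU in both the requested mask
 * and this controller's CPU span, then mask the vector, rewrite its
 * route and unmask it again. affinity_lock serializes that sequence.
 */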
static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
	unsigned int cpu;
	unsigned long flags;
	uint32_t vector, regaddr;
	struct eiointc_priv *priv = d->domain->host_data;

	raw_spin_lock_irqsave(&affinity_lock, flags);

	cpu = cpumask_first_and_and(&priv->cpuspan_map, affinity, cpu_online_mask);
	if (cpu >= nr_cpu_ids) {
		raw_spin_unlock_irqrestore(&affinity_lock, flags);
		return -EINVAL;
	}

	vector = d->hwirq;
	regaddr = EIOINTC_REG_ENABLE_VEC(vector);

	if (priv->flags & EIOINTC_USE_CPU_ENCODE) {
		iocsr_write32(EIOINTC_ALL_ENABLE_VEC_MASK(vector), regaddr);
		veiointc_set_irq_route(vector, cpu);
		iocsr_write32(EIOINTC_ALL_ENABLE, regaddr);
	} else {
		/* Mask target vector */
		csr_any_send(regaddr, EIOINTC_ALL_ENABLE_VEC_MASK(vector),
			     0x0, priv->node * CORES_PER_EIO_NODE);

		/* Set route for target vector */
		eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);

		/* Unmask target vector */
		csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
			     0x0, priv->node * CORES_PER_EIO_NODE);
	}

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	raw_spin_unlock_irqrestore(&affinity_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static int eiointc_index(int node)
{
	int i;

	for (i = 0; i < nr_pics; i++) {
		if (node_isset(node, eiointc_priv[i]->node_map))
			return i;
	}

	return -1;
}

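/*
 * Per-CPU router setup, also used as the CPU hotplug startup callback
 * and on resume. Only the first core of each (v)EIO node programs the
 * controller: node map, IP line mapping, a default route to Node-0
 * Core-0, and the enable/bounce registers.
 */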
static int eiointc_router_init(unsigned int cpu)
{
	int i, bit, cores, index, node;
	unsigned int data;

	node = cpu_to_eio_node(cpu);
	index = eiointc_index(node);

	if (index < 0) {
		pr_err("Error: invalid nodemap!\n");
		return -EINVAL;
	}

	if (!(eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE))
		cores = CORES_PER_EIO_NODE;
	else
		cores = CORES_PER_VEIO_NODE;

	if ((cpu_logical_map(cpu) % cores) == 0) {
		eiointc_enable();

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
			bit = BIT(1 + index); /* Route to IP[1 + index] */
			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) {
			/* Route to Node-0 Core-0 */
			if (eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE)
				bit = cpu_logical_map(0);
			else if (index == 0)
				bit = BIT(cpu_logical_map(0));
			else
				bit = (eiointc_priv[index]->node << 4) | 1;

			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = 0xffffffff;
			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
		}
	}

	return 0;
}

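/*
 * Chained handler for the parent interrupt: scan the ISR registers 64
 * vectors at a time, clear what is pending in a single write, then
 * demultiplex each set bit into the eiointc IRQ domain.
 */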
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
	int i;
	u64 pending;
	bool handled = false;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct eiointc_priv *priv = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);

	for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));

		/* Skip handling if pending bitmap is zero */
		if (!pending)
			continue;

		/* Clear the IRQs */
		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
		while (pending) {
			int bit = __ffs(pending);
			int irq = bit + VEC_COUNT_PER_REG * i;

			generic_handle_domain_irq(priv->eiointc_domain, irq);
			pending &= ~BIT(bit);
			handled = true;
		}
	}

	if (!handled)
		spurious_interrupt();

	chained_irq_exit(chip, desc);
}

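/*
 * Pending state is cleared in bulk by eiointc_irq_dispatch() above, so
 * the per-vector ack/mask/unmask callbacks are intentionally empty.
 */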
static void eiointc_ack_irq(struct irq_data *d)
{
}

static void eiointc_mask_irq(struct irq_data *d)
{
}

static void eiointc_unmask_irq(struct irq_data *d)
{
}

static struct irq_chip eiointc_irq_chip = {
	.name			= "EIOINTC",
	.irq_ack		= eiointc_ack_irq,
	.irq_mask		= eiointc_mask_irq,
	.irq_unmask		= eiointc_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= eiointc_set_irq_affinity,
#endif
};

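/*
 * The domain is linear with the hwirq equal to the hardware vector
 * number; vectors are translated with the generic onecell helper and
 * handled as edge interrupts.
 */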
static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int ret;
	unsigned int i, type;
	unsigned long hwirq = 0;
	struct eiointc_priv *priv = domain->host_data;

	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
				    priv, handle_edge_irq, NULL, NULL);
	}

	return 0;
}

static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops eiointc_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= eiointc_domain_alloc,
	.free		= eiointc_domain_free,
};

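/*
 * pch_group/msi_group (provided by the LoongArch core code) record, per
 * node, which eiointc domain the downstream PCH PIC/MSI controllers
 * should use as their parent.
 */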
static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node) {
			vec_group[i].parent = parent;
			return;
		}
	}
}

static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node)
			return vec_group[i].parent;
	}

	return NULL;
}

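/* Nothing to save on suspend; resume reprograms the router from scratch. */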
static int eiointc_suspend(void)
{
	return 0;
}

static void eiointc_resume(void)
{
	eiointc_router_init(0);
}

static struct syscore_ops eiointc_syscore_ops = {
	.suspend	= eiointc_suspend,
	.resume		= eiointc_resume,
};

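/*
 * MADT parsing for the cascaded PCH controllers. The node of a PCH PIC
 * is encoded in bits 47:44 of its MMIO base address.
 */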
static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
	unsigned int node = (pchpic_entry->address >> 44) & 0xf;
	struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);

	if (parent)
		return pch_pic_acpi_init(parent, pchpic_entry);

	return 0;
}

static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct irq_domain *parent;
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
	int node;

	if (cpu_has_flatmode)
		node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
	else
		node = eiointc_priv[nr_pics - 1]->node;

	parent = acpi_get_vec_parent(node, msi_group);

	if (parent)
		return pch_msi_acpi_init(parent, pchmsi_entry);

	return 0;
}

static int __init acpi_cascade_irqdomain_init(void)
{
	int r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
	if (r < 0)
		return r;

	if (cpu_has_avecint)
		return 0;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
	if (r < 0)
		return r;

	return 0;
}

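/*
 * Common init for the ACPI and DT paths: build the node/CPU span maps
 * (a zero @node_map means "all nodes"), create the linear domain, turn
 * on CPU encoding when the hypervisor advertises it, and install the
 * chained dispatch handler. The first instance also registers syscore
 * ops and the CPU hotplug state.
 */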
static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
			       u64 node_map)
{
	int i, val;

	node_map = node_map ? node_map : -1ULL;
	for_each_possible_cpu(i) {
		if (node_map & (1ULL << (cpu_to_eio_node(i)))) {
			node_set(cpu_to_eio_node(i), priv->node_map);
			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map,
				   cpumask_of(i));
		}
	}

	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle,
							priv->vec_count,
							&eiointc_domain_ops,
							priv);
	if (!priv->eiointc_domain) {
		pr_err("loongson-extioi: cannot add IRQ domain\n");
		return -ENOMEM;
	}

	if (kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI)) {
		val = iocsr_read32(EXTIOI_VIRT_FEATURES);
		/*
		 * With EXTIOI_ENABLE_CPU_ENCODE set, interrupts can be
		 * routed to up to 256 vCPUs.
		 */
		if (val & EXTIOI_HAS_CPU_ENCODE) {
			val = iocsr_read32(EXTIOI_VIRT_CONFIG);
			val |= EXTIOI_ENABLE_CPU_ENCODE;
			iocsr_write32(val, EXTIOI_VIRT_CONFIG);
			priv->flags = EIOINTC_USE_CPU_ENCODE;
		}
	}

	eiointc_priv[nr_pics++] = priv;
	eiointc_router_init(0);
	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);

	if (nr_pics == 1) {
		register_syscore_ops(&eiointc_syscore_ops);
		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING,
					  "irqchip/loongarch/eiointc:starting",
					  eiointc_router_init, NULL);
	}

	return 0;
}

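/* ACPI probe path: the fwnode is named "EIOPIC" and all 256 vectors are used. */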
int __init eiointc_acpi_init(struct irq_domain *parent,
			     struct acpi_madt_eio_pic *acpi_eiointc)
{
	int parent_irq, ret;
	struct eiointc_priv *priv;
	int node;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
							       acpi_eiointc->node);
	if (!priv->domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto out_free_priv;
	}

	priv->vec_count = VEC_COUNT;
	priv->node = acpi_eiointc->node;

	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);

	ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
	if (ret < 0)
		goto out_free_handle;

	if (cpu_has_flatmode)
		node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
	else
		node = acpi_eiointc->node;
	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
	acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);

	ret = acpi_cascade_irqdomain_init();
	if (ret < 0)
		goto out_free_handle;

	return ret;

out_free_handle:
	irq_domain_free_fwnode(priv->domain_handle);
	priv->domain_handle = NULL;
out_free_priv:
	kfree(priv);

	return -ENOMEM;
}

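/*
 * DT probe path for the LS2K0500/LS2K2000; these are single-node parts,
 * so priv->node is fixed to 0.
 */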
static int __init eiointc_of_init(struct device_node *of_node,
				  struct device_node *parent)
{
	int parent_irq, ret;
	struct eiointc_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	parent_irq = irq_of_parse_and_map(of_node, 0);
	if (parent_irq <= 0) {
		ret = -ENODEV;
		goto out_free_priv;
	}

	ret = irq_set_handler_data(parent_irq, priv);
	if (ret < 0)
		goto out_free_priv;

	/*
	 * The LS2K0500 extended I/O interrupt controller supports only
	 * 128 interrupt vectors.
	 */
	if (of_device_is_compatible(of_node, "loongson,ls2k0500-eiointc"))
		priv->vec_count = 128;
	else
		priv->vec_count = VEC_COUNT;

	priv->node = 0;
	priv->domain_handle = of_node_to_fwnode(of_node);

	ret = eiointc_init(priv, parent_irq, 0);
	if (ret < 0)
		goto out_free_priv;

	return 0;

out_free_priv:
	kfree(priv);
	return ret;
}

IRQCHIP_DECLARE(loongson_ls2k0500_eiointc, "loongson,ls2k0500-eiointc", eiointc_of_init);
IRQCHIP_DECLARE(loongson_ls2k2000_eiointc, "loongson,ls2k2000-eiointc", eiointc_of_init);