// SPDX-License-Identifier: GPL-2.0
/*
 * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#include <asm/irq_cpu.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/irq_alloc.h>

#include "ip27-common.h"
/* Per-interrupt chip data: owning CPU and its hub interrupt mask registers */
struct hub_irq_data {
	u64	*irq_mask[2];
	cpuid_t	cpu;
};

static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);

static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask);
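
/*
 * Allocate a free hardware interrupt level from hub_irq_map.
 * test_and_set_bit() makes the claim atomic, so a level found free by
 * find_first_zero_bit() but grabbed by another CPU in the meantime is
 * simply retried.
 */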
static inline int alloc_level(void)
{
	int level;

again:
	level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT);
	if (level >= IP27_HUB_IRQ_COUNT)
		return -ENOSPC;

	if (test_and_set_bit(level, hub_irq_map))
		goto again;

	return level;
}
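
/*
 * Each CPU keeps a shadow copy (irq_enable_mask) of its hub's
 * PI_INT_MASK0/1 registers; enabling or disabling an interrupt updates
 * the shadow first and then writes both 64-bit halves back to the hub.
 */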
static void enable_hub_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

	set_bit(d->hwirq, mask);
	__raw_writeq(mask[0], hd->irq_mask[0]);
	__raw_writeq(mask[1], hd->irq_mask[1]);
}
static void disable_hub_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	__raw_writeq(mask[0], hd->irq_mask[0]);
	__raw_writeq(mask[1], hd->irq_mask[1]);
}
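
/*
 * Pick an online CPU from the requested affinity mask (falling back to
 * any online CPU) and point hd->irq_mask at the A- or B-side interrupt
 * mask registers of that CPU's hub, depending on the CPU's slice.
 */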
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
	nasid_t nasid;
	int cpu;

	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any(cpu_online_mask);

	nasid = cpu_to_node(cpu);
	hd->cpu = cpu;
	if (!cputoslice(cpu)) {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
	} else {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
	}
}
static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
				bool force)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);

	if (!hd)
		return -EINVAL;

	if (irqd_is_started(d))
		disable_hub_irq(d);

	setup_hub_mask(hd, mask);

	if (irqd_is_started(d))
		enable_hub_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

	return 0;
}
static struct irq_chip hub_irq_type = {
	.name		  = "HUB",
	.irq_mask	  = disable_hub_irq,
	.irq_unmask	  = enable_hub_irq,
	.irq_set_affinity = set_affinity_hub_irq,
};
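
/*
 * Allocate a single hub interrupt: claim a free hardware level, hook it
 * up to the generic level-IRQ flow and target it at a CPU attached to
 * the requesting node's hub.
 */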
static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct hub_irq_data *hd;
	struct hub_data *hub;
	struct irq_desc *desc;
	int swlevel;

	if (nr_irqs > 1 || !info)
		return -EINVAL;

	hd = kzalloc(sizeof(*hd), GFP_KERNEL);
	if (!hd)
		return -ENOMEM;

	swlevel = alloc_level();
	if (unlikely(swlevel < 0)) {
		kfree(hd);
		return -EAGAIN;
	}
	irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
			    handle_level_irq, NULL, NULL);

	/* use CPU connected to nearest hub */
	hub = hub_data(info->nasid);
	setup_hub_mask(hd, &hub->h_cpus);
	info->nasid = cpu_to_node(hd->cpu);

	/* Make sure it's not already pending when we connect it. */
	REMOTE_HUB_CLR_INTR(info->nasid, swlevel);

	desc = irq_to_desc(virq);
	desc->irq_common_data.node = info->nasid;
	cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);

	return 0;
}
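
/* Free the chip data allocated by hub_domain_alloc() for this virq. */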
static void hub_domain_free(struct irq_domain *domain,
			    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irqd;

	if (nr_irqs > 1)
		return;

	irqd = irq_domain_get_irq_data(domain, virq);
	if (irqd && irqd->chip_data)
		kfree(irqd->chip_data);
}
static const struct irq_domain_ops hub_domain_ops = {
	.alloc = hub_domain_alloc,
	.free  = hub_domain_free,
};
/*
 * This code is unnecessarily complex, because we do the intr enabling
 * ourselves. Basically, once we grab the set of intrs we need to
 * service, we must mask _all_ these interrupts; firstly, to make sure
 * the same intr does not intr again, causing recursion that can lead
 * to stack overflow. Secondly, we cannot just mask the one intr we are
 * do_IRQing, because the non-masked intrs in the first set might intr
 * again, causing multiple servicings of the same intr. This effect is
 * mostly seen for intercpu intrs.
 */
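
/* Chained handler for the hub's PI_INT_PEND0 register (levels 0-63). */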
static void ip27_do_irq_mask0(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend0;
	int ret;

	/* copied from Irix intpend0() */
	pend0 = LOCAL_HUB_L(PI_INT_PEND0);

	pend0 &= mask[0];	/* Pick intrs we should look at */
	if (!pend0)
		return;

#ifdef CONFIG_SMP
	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		generic_smp_call_function_interrupt();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		generic_smp_call_function_interrupt();
	} else
#endif
	{
		domain = irq_desc_get_handler_data(desc);
		ret = generic_handle_domain_irq(domain, __ffs(pend0));
		if (ret)
			spurious_interrupt();
	}

	LOCAL_HUB_L(PI_INT_PEND0);
}
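
/*
 * Chained handler for PI_INT_PEND1, which covers hub interrupt levels
 * 64-127; hence the +64 bias on the __ffs() result below.
 */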
static void ip27_do_irq_mask1(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend1;
	int ret;

	/* copied from Irix intpend0() */
	pend1 = LOCAL_HUB_L(PI_INT_PEND1);

	pend1 &= mask[1];	/* Pick intrs we should look at */
	if (!pend1)
		return;

	domain = irq_desc_get_handler_data(desc);
	ret = generic_handle_domain_irq(domain, __ffs(pend1) + 64);
	if (ret)
		spurious_interrupt();

	LOCAL_HUB_L(PI_INT_PEND1);
}
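
/*
 * Called on each CPU during bringup: enable this slice's resched and
 * call-function IPI levels in the shadow mask and push the result to
 * the A- or B-side hub mask registers.
 */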
void install_ipi(void)
{
	int cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	int slice = LOCAL_HUB_L(PI_CPU_NUM);
	int resched, call;

	resched = CPU_RESCHED_A_IRQ + slice;
	set_bit(resched, mask);
	LOCAL_HUB_CLR_INTR(resched);

	call = CPU_CALL_A_IRQ + slice;
	set_bit(call, mask);
	LOCAL_HUB_CLR_INTR(call);

	if (slice == 0) {
		LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]);
	} else {
		LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]);
	}
}
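
/*
 * Reserve the levels used for IPIs and hardware error interrupts,
 * create the linear HUB domain and chain both pending registers to it.
 */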
void __init arch_init_irq(void)
{
	struct irq_domain *domain;
	struct fwnode_handle *fn;

	mips_cpu_irq_init();

	/*
	 * Some interrupts are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be used accidentally
	 * later.
	 */
	bitmap_set(hub_irq_map, 0, CPU_CALL_B_IRQ + 1);
	bitmap_set(hub_irq_map, NI_BRDCAST_ERR_A, MSC_PANIC_INTR - NI_BRDCAST_ERR_A + 1);

	fn = irq_domain_alloc_named_fwnode("HUB");
	if (WARN_ON(fn == NULL))
		return;

	domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
					  &hub_domain_ops, NULL);
	if (WARN_ON(domain == NULL))
		return;

	irq_set_default_host(domain);

	irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
	irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
					 domain);
	irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
	irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
					 domain);
}