// SPDX-License-Identifier: GPL-2.0
/*
 * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#include <asm/irq_cpu.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/irq_alloc.h>

struct hub_irq_data {
	u64	*irq_mask[2];
	cpuid_t	cpu;
};

static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);

static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask);
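
/*
 * Hand out the lowest free software level. The test_and_set_bit()
 * retry guards against a concurrent allocator grabbing the same bit
 * between the scan and the set, so no extra locking is needed.
 */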
static inline int alloc_level(void)
{
	int level;

again:
	level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT);
	if (level >= IP27_HUB_IRQ_COUNT)
		return -ENOSPC;

	if (test_and_set_bit(level, hub_irq_map))
		goto again;

	return level;
}
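
/*
 * Each hub exposes two 64-bit interrupt mask registers per CPU slice.
 * The per-cpu irq_enable_mask is the software copy; every change is
 * written back in full so the hardware always mirrors it.
 */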
static void enable_hub_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

	set_bit(d->hwirq, mask);
	__raw_writeq(mask[0], hd->irq_mask[0]);
	__raw_writeq(mask[1], hd->irq_mask[1]);
}

static void disable_hub_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	__raw_writeq(mask[0], hd->irq_mask[0]);
	__raw_writeq(mask[1], hd->irq_mask[1]);
}
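
/*
 * Pick a target CPU from the requested mask (falling back to any
 * online CPU) and point irq_mask at the A or B register set of that
 * CPU's hub, depending on which slice the CPU occupies.
 */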
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
	nasid_t nasid;
	int cpu;

	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any(cpu_online_mask);

	nasid = cpu_to_node(cpu);
	hd->cpu = cpu;
	if (!cputoslice(cpu)) {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
	} else {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
	}
}
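
/*
 * Retarget a started interrupt with it masked, so it cannot fire
 * between tearing down the old hub mask and setting up the new one.
 */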
static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
				bool force)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);

	if (!hd)
		return -EINVAL;

	if (irqd_is_started(d))
		disable_hub_irq(d);

	setup_hub_mask(hd, mask);

	if (irqd_is_started(d))
		enable_hub_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

	return 0;
}

static struct irq_chip hub_irq_type = {
	.name		  = "HUB",
	.irq_mask	  = disable_hub_irq,
	.irq_unmask	  = enable_hub_irq,
	.irq_set_affinity = set_affinity_hub_irq,
};
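
/*
 * Allocate exactly one hwirq per virq: grab a free software level,
 * wire up the chip, and aim the interrupt at a CPU attached to the
 * hub of the node that requested it.
 */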
static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct hub_irq_data *hd;
	struct hub_data *hub;
	struct irq_desc *desc;
	int swlevel;

	if (nr_irqs > 1 || !info)
		return -EINVAL;

	hd = kzalloc(sizeof(*hd), GFP_KERNEL);
	if (!hd)
		return -ENOMEM;

	swlevel = alloc_level();
	if (unlikely(swlevel < 0)) {
		kfree(hd);
		return -EAGAIN;
	}
	irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
			    handle_level_irq, NULL, NULL);

	/* use CPU connected to nearest hub */
	hub = hub_data(info->nasid);
	setup_hub_mask(hd, &hub->h_cpus);
	info->nasid = cpu_to_node(hd->cpu);

	/* Make sure it's not already pending when we connect it. */
	REMOTE_HUB_CLR_INTR(info->nasid, swlevel);

	desc = irq_to_desc(virq);
	desc->irq_common_data.node = info->nasid;
	cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);

	return 0;
}

static void hub_domain_free(struct irq_domain *domain,
			    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irqd;

	if (nr_irqs > 1)
		return;

	irqd = irq_domain_get_irq_data(domain, virq);
	if (irqd && irqd->chip_data)
		kfree(irqd->chip_data);
}

static const struct irq_domain_ops hub_domain_ops = {
	.alloc = hub_domain_alloc,
	.free  = hub_domain_free,
};

/*
 * This code is unnecessarily complex, because we do
 * intr enabling. Basically, once we grab the set of intrs we need
 * to service, we must mask _all_ these interrupts; firstly, to make
 * sure the same intr does not intr again, causing recursion that
 * can lead to stack overflow. Secondly, we can not just mask the
 * one intr we are do_IRQing, because the non-masked intrs in the
 * first set might intr again, causing multiple servicings of the
 * same intr. This effect is mostly seen for intercpu intrs.
 */
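
/*
 * PI_INT_PEND0 covers hwirqs 0-63, including the IPI levels handled
 * inline below; PI_INT_PEND1 covers hwirqs 64-127, hence the +64 when
 * mapping pend1 bits back to Linux irqs.
 */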
static void ip27_do_irq_mask0(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend0;
	int irq;

	/* copied from Irix intpend0() */
	pend0 = LOCAL_HUB_L(PI_INT_PEND0);

	pend0 &= mask[0];		/* Pick intrs we should look at */
	if (!pend0)
		return;

#ifdef CONFIG_SMP
	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		generic_smp_call_function_interrupt();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		generic_smp_call_function_interrupt();
	} else
#endif
	{
		domain = irq_desc_get_handler_data(desc);
		irq = irq_linear_revmap(domain, __ffs(pend0));
		if (irq)
			generic_handle_irq(irq);
		else
			spurious_interrupt();
	}

	LOCAL_HUB_L(PI_INT_PEND0);
}

static void ip27_do_irq_mask1(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend1;
	int irq;

	/* copied from Irix intpend0() */
	pend1 = LOCAL_HUB_L(PI_INT_PEND1);

	pend1 &= mask[1];		/* Pick intrs we should look at */
	if (!pend1)
		return;

	domain = irq_desc_get_handler_data(desc);
	irq = irq_linear_revmap(domain, __ffs(pend1) + 64);
	if (irq)
		generic_handle_irq(irq);
	else
		spurious_interrupt();

	LOCAL_HUB_L(PI_INT_PEND1);
}
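
/*
 * Runs on the local CPU during bringup: enable its resched/call IPI
 * levels and load the mask registers of whichever slice it runs on.
 */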
void install_ipi(void)
{
	int cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	int slice = LOCAL_HUB_L(PI_CPU_NUM);
	int resched, call;

	resched = CPU_RESCHED_A_IRQ + slice;
	set_bit(resched, mask);
	LOCAL_HUB_CLR_INTR(resched);

	call = CPU_CALL_A_IRQ + slice;
	set_bit(call, mask);
	LOCAL_HUB_CLR_INTR(call);

	if (slice == 0) {
		LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]);
	} else {
		LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]);
	}
}
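
/*
 * Reserve the levels claimed by hardware and by software convention,
 * create the linear HUB domain, and chain the two pending registers
 * into the CPU interrupt lines.
 */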
void __init arch_init_irq(void)
{
	struct irq_domain *domain;
	struct fwnode_handle *fn;
	int i;

	mips_cpu_irq_init();

	/*
	 * Some interrupts are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be used accidentally
	 * later.
	 */
	for (i = 0; i <= CPU_CALL_B_IRQ; i++)
		set_bit(i, hub_irq_map);

	for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
		set_bit(i, hub_irq_map);

	fn = irq_domain_alloc_named_fwnode("HUB");
	WARN_ON(fn == NULL);
	if (!fn)
		return;
	domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
					  &hub_domain_ops, NULL);
	WARN_ON(domain == NULL);
	if (!domain)
		return;

	irq_set_default_host(domain);

	irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
	irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
					 domain);
	irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
	irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
					 domain);
}