treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / arch / mips / pmcs-msp71xx / msp_irq_cic.c
blob0706010cc99f676947c90288279c524e96b0caf0
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
5 * This file define the irq handler for MSP CIC subsystem interrupts.
6 */
8 #include <linux/init.h>
9 #include <linux/interrupt.h>
10 #include <linux/kernel.h>
11 #include <linux/bitops.h>
12 #include <linux/irq.h>
14 #include <asm/mipsregs.h>
16 #include <msp_cic_int.h>
17 #include <msp_regs.h>
/*
 * External API
 *
 * Implemented in msp_irq_per.c; the CIC dispatcher below chains the
 * PER (peripheral) sub-block cascade into these.
 */
extern void msp_per_irq_init(void);
extern void msp_per_irq_dispatch(void);
/*
 * Convenience Macro. Should be somewhere generic.
 *
 * Reads CP0 TCBind and extracts the number of the VPE the calling
 * thread context is currently bound to.
 */
#define get_current_vpe()   \
	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
#ifdef CONFIG_SMP
/*
 * LOCK_VPE/UNLOCK_VPE: disable local interrupts and multithreading
 * (dmt) so a read-modify-write of this VPE's mask register cannot be
 * interleaved with another TC on the same VPE.
 */
#define LOCK_VPE(flags, mtflags) \
do {				\
	local_irq_save(flags);	\
	mtflags = dmt();	\
} while (0)

#define UNLOCK_VPE(flags, mtflags)	\
do {				\
	emt(mtflags);		\
	local_irq_restore(flags);\
} while (0)

/*
 * LOCK_CORE/UNLOCK_CORE: stronger variant — quiesces every VPE on the
 * core (dvpe) while both VPEs' mask registers are rewritten (used by
 * the affinity handler).
 */
#define LOCK_CORE(flags, mtflags) \
do {				\
	local_irq_save(flags);	\
	mtflags = dvpe();	\
} while (0)

#define UNLOCK_CORE(flags, mtflags)	\
do {				\
	evpe(mtflags);		\
	local_irq_restore(flags);\
} while (0)

#else

/*
 * UP build: no other TC/VPE can race us, so the VPE lock is a no-op.
 * NOTE(review): LOCK_CORE/UNLOCK_CORE are intentionally not defined
 * here; their only user is guarded by CONFIG_MIPS_MT_SMP, which
 * implies CONFIG_SMP.
 */
#define LOCK_VPE(flags, mtflags)
#define UNLOCK_VPE(flags, mtflags)
#endif
/* ensure writes to cic are completed */
static inline void cic_wmb(void)
{
	const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
	volatile u32 dummy_read;

	/*
	 * Barrier, then read back from the CIC block so the posted
	 * write is known to have reached the device before we return.
	 */
	wmb();
	dummy_read = __raw_readl(cic_mem);
	dummy_read++;	/* consume the value; silences set-but-unused */
}
75 static void unmask_cic_irq(struct irq_data *d)
77 volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
78 int vpe;
79 #ifdef CONFIG_SMP
80 unsigned int mtflags;
81 unsigned long flags;
84 * Make sure we have IRQ affinity. It may have changed while
85 * we were processing the IRQ.
87 if (!cpumask_test_cpu(smp_processor_id(),
88 irq_data_get_affinity_mask(d)))
89 return;
90 #endif
92 vpe = get_current_vpe();
93 LOCK_VPE(flags, mtflags);
94 cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
95 UNLOCK_VPE(flags, mtflags);
96 cic_wmb();
99 static void mask_cic_irq(struct irq_data *d)
101 volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
102 int vpe = get_current_vpe();
103 #ifdef CONFIG_SMP
104 unsigned long flags, mtflags;
105 #endif
106 LOCK_VPE(flags, mtflags);
107 cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE));
108 UNLOCK_VPE(flags, mtflags);
109 cic_wmb();
111 static void msp_cic_irq_ack(struct irq_data *d)
113 mask_cic_irq(d);
115 * Only really necessary for 18, 16-14 and sometimes 3:0
116 * (since these can be edge sensitive) but it doesn't
117 * hurt for the others
119 *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
/* Note: Limiting to VSMP. */
#ifdef CONFIG_MIPS_MT_SMP
/*
 * Route a CIC interrupt to the VPEs in @cpumask by setting or
 * clearing its bit in each online VPE's mask register.  The whole
 * core is quiesced (LOCK_CORE) while both registers are rewritten.
 *
 * Always returns 0. @force is ignored.
 */
static int msp_cic_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *cpumask, bool force)
{
	int cpu;
	unsigned long flags;
	unsigned int  mtflags;
	/* 1UL: bit 31 is a valid CIC source; a signed 1 << 31 is UB */
	unsigned long imask = (1UL << (d->irq - MSP_CIC_INTBASE));
	volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;

	/* timer balancing should be disabled in kernel code */
	BUG_ON(d->irq == MSP_INT_VPE0_TIMER || d->irq == MSP_INT_VPE1_TIMER);

	LOCK_CORE(flags, mtflags);
	/* enable if any of each VPE's TCs require this IRQ */
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, cpumask))
			cic_mask[cpu] |= imask;
		else
			cic_mask[cpu] &= ~imask;
	}

	UNLOCK_CORE(flags, mtflags);
	return 0;
}
#endif
/*
 * irq_chip callbacks for the 32 CIC interrupt lines
 * (MSP_CIC_INTBASE .. MSP_CIC_INTBASE + 31).
 */
static struct irq_chip msp_cic_irq_controller = {
	.name = "MSP_CIC",
	.irq_mask = mask_cic_irq,
	.irq_mask_ack = msp_cic_irq_ack,
	.irq_unmask = unmask_cic_irq,
	.irq_ack = msp_cic_irq_ack,
#ifdef CONFIG_MIPS_MT_SMP
	.irq_set_affinity = msp_cic_irq_set_affinity,
#endif
};
/*
 * One-time CIC bring-up: mask and clear every source, set the PCI
 * external-interrupt polarity, register the irq_chip and level
 * handler for all 32 CIC lines, then initialize the chained PER
 * interrupt sub-block.
 */
void __init msp_cic_irq_init(void)
{
	int  i;

	/* Mask/clear interrupts. */
	*CIC_VPE0_MSK_REG = 0x00000000;
	*CIC_VPE1_MSK_REG = 0x00000000;
	*CIC_STS_REG	  = 0xFFFFFFFF;
	/*
	 * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
	 * These inputs map to EXT_INT_POL[6:4] inside the CIC.
	 * They are to be active low, level sensitive.
	 */
	*CIC_EXT_CFG_REG &= 0xFFFF8F8F;

	/* initialize all the IRQ descriptors */
	for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
		irq_set_chip_and_handler(i, &msp_cic_irq_controller,
					 handle_level_irq);
	}

	/* Initialize the PER interrupt sub-system */
	msp_per_irq_init();
}
188 /* CIC masked by CIC vector processing before dispatch called */
189 void msp_cic_irq_dispatch(void)
191 volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG;
192 u32 cic_mask;
193 u32 pending;
194 int cic_status = *CIC_STS_REG;
195 cic_mask = cic_msk_reg[get_current_vpe()];
196 pending = cic_status & cic_mask;
197 if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) {
198 do_IRQ(MSP_INT_VPE0_TIMER);
199 } else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) {
200 do_IRQ(MSP_INT_VPE1_TIMER);
201 } else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
202 msp_per_irq_dispatch();
203 } else if (pending) {
204 do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
205 } else{
206 spurious_interrupt();