/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */
#include <linux/interrupt.h>
#include <linux/ioport.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>

#include <lantiq_soc.h>
#include <irq.h>
/* register definitions */
#define LTQ_ICU_IM0_ISR		0x0000
#define LTQ_ICU_IM0_IER		0x0008
#define LTQ_ICU_IM0_IOSR	0x0010
#define LTQ_ICU_IM0_IRSR	0x0018
#define LTQ_ICU_IM0_IMR		0x0020
#define LTQ_ICU_IM1_ISR		0x0028
#define LTQ_ICU_OFFSET		(LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)

#define LTQ_EIU_EXIN_C		0x0000
#define LTQ_EIU_EXIN_INIC	0x0004
#define LTQ_EIU_EXIN_INEN	0x000C
/* irq numbers used by the external interrupt unit (EIU) */
#define LTQ_EIU_IR0		(INT_NUM_IM4_IRL0 + 30)
#define LTQ_EIU_IR1		(INT_NUM_IM3_IRL0 + 31)
#define LTQ_EIU_IR2		(INT_NUM_IM1_IRL0 + 26)
#define LTQ_EIU_IR3		INT_NUM_IM1_IRL0
#define LTQ_EIU_IR4		(INT_NUM_IM1_IRL0 + 1)
#define LTQ_EIU_IR5		(INT_NUM_IM1_IRL0 + 2)
#define LTQ_EIU_IR6		(INT_NUM_IM2_IRL0 + 30)

#define MAX_EIU			6
/* irqs generated by devices attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ		22
#define ltq_icu_w32(x, y)	ltq_w32((x), ltq_icu_membase + (y))
#define ltq_icu_r32(x)		ltq_r32(ltq_icu_membase + (x))

#define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))
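
/*
 * For illustration: each interrupt module IMx has an identical register
 * block placed LTQ_ICU_OFFSET (0x28) bytes after the previous one, so a
 * per-module register is reached by adding a multiple of LTQ_ICU_OFFSET
 * to the IM0 offset.  With the offsets defined above, IM2's enable
 * register (0x8 + 2 * 0x28 = 0x58) would be read with:
 *
 *	ltq_icu_r32(LTQ_ICU_IM0_IER + 2 * LTQ_ICU_OFFSET);
 */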
static unsigned short ltq_eiu_irq[MAX_EIU] = {
	LTQ_EIU_IR0,
	LTQ_EIU_IR1,
	LTQ_EIU_IR2,
	LTQ_EIU_IR3,
	LTQ_EIU_IR4,
	LTQ_EIU_IR5,
};
static struct resource ltq_icu_resource = {
	.name	= "icu",
	.start	= LTQ_ICU_BASE_ADDR,
	.end	= LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1,
	.flags	= IORESOURCE_MEM,
};
static struct resource ltq_eiu_resource = {
	.name	= "eiu",
	.start	= LTQ_EIU_BASE_ADDR,
	.end	= LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1,
	.flags	= IORESOURCE_MEM,
};
static void __iomem *ltq_icu_membase;
static void __iomem *ltq_eiu_membase;
void ltq_disable_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	irq_nr %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
}
void ltq_mask_and_ack_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	u32 isr = LTQ_ICU_IM0_ISR;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	irq_nr %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
	ltq_icu_w32((1 << irq_nr), isr);
}
static void ltq_ack_irq(struct irq_data *d)
{
	u32 isr = LTQ_ICU_IM0_ISR;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	irq_nr %= INT_NUM_IM_OFFSET;
	ltq_icu_w32((1 << irq_nr), isr);
}
void ltq_enable_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	irq_nr %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier);
}
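
/*
 * For illustration: the enable/disable/ack helpers above split a Linux irq
 * number into a module index and a bit within that module.  Assuming
 * INT_NUM_IM_OFFSET is 32, irq_nr == 69 selects module 69 / 32 = 2 (so the
 * register address is advanced by 2 * LTQ_ICU_OFFSET) and bit
 * 69 % 32 = 5 within that module's register.
 */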
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
	int i;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	ltq_enable_irq(d);
	for (i = 0; i < MAX_EIU; i++) {
		if (irq_nr == ltq_eiu_irq[i]) {
			/* low level - we should really handle set_type */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
				(0x6 << (i * 4)), LTQ_EIU_EXIN_C);
			/* clear all pending */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i),
				LTQ_EIU_EXIN_INIC);
			/* enable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}

	return 0;
}
static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
	int i;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	ltq_disable_irq(d);
	for (i = 0; i < MAX_EIU; i++) {
		if (irq_nr == ltq_eiu_irq[i]) {
			/* disable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}
}
static struct irq_chip ltq_irq_type = {
	"icu",
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
};
static struct irq_chip ltq_eiu_type = {
	"eiu",
	.irq_startup = ltq_startup_eiu_irq,
	.irq_shutdown = ltq_shutdown_eiu_irq,
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
};
static void ltq_hw_irqdispatch(int module)
{
	u32 irq;

	irq = ltq_icu_r32(LTQ_ICU_IM0_IOSR + (module * LTQ_ICU_OFFSET));
	if (irq == 0)
		return;

	/* silicon bug causes only the msb set to 1 to be valid. all
	 * other bits might be bogus
	 */
	irq = __fls(irq);
	do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module));

	/* if this is an EBU irq, we need to ack it or get a deadlock */
	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0))
		ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
			LTQ_EBU_PCC_ISTAT);
}
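
/*
 * For illustration: __fls() returns the index of the most significant set
 * bit, so an IOSR value of 0x00000014 dispatches only bit 4 here; a lower
 * bit that is genuinely pending remains visible in IOSR and should be
 * picked up by a subsequent invocation of this dispatcher.
 */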
#define DEFINE_HWx_IRQDISPATCH(x)					\
	static void ltq_hw ## x ## _irqdispatch(void)			\
	{								\
		ltq_hw_irqdispatch(x);					\
	}
DEFINE_HWx_IRQDISPATCH(0)
DEFINE_HWx_IRQDISPATCH(1)
DEFINE_HWx_IRQDISPATCH(2)
DEFINE_HWx_IRQDISPATCH(3)
DEFINE_HWx_IRQDISPATCH(4)
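
/*
 * For illustration: DEFINE_HWx_IRQDISPATCH(0) above expands to
 *
 *	static void ltq_hw0_irqdispatch(void)
 *	{
 *		ltq_hw_irqdispatch(0);
 *	}
 *
 * providing one parameterless handler per interrupt module, suitable for
 * registration with set_vi_handler() in arch_init_irq() below.
 */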
static void ltq_hw5_irqdispatch(void)
{
	do_IRQ(MIPS_CPU_TIMER_IRQ);
}
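
/*
 * For illustration: the six MIPS hardware interrupt lines are reported in
 * the Cause register as IP2..IP7.  As wired up in this driver, ICU modules
 * IM0-IM4 arrive on IP2..IP6 (hence CAUSEF_IP2 << i below), while IP7
 * carries the CPU timer interrupt.
 */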
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
	unsigned int i;

	if (pending & CAUSEF_IP7) {
		do_IRQ(MIPS_CPU_TIMER_IRQ);
		goto out;
	} else {
		for (i = 0; i < 5; i++) {
			if (pending & (CAUSEF_IP2 << i)) {
				ltq_hw_irqdispatch(i);
				goto out;
			}
		}
	}
	pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_cause());

out:
	return;
}
static struct irqaction cascade = {
	.handler = no_action,
	.flags = IRQF_DISABLED,
	.name = "cascade",
};
void __init arch_init_irq(void)
{
	int i;

	if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0)
		panic("Failed to insert icu memory\n");

	if (!request_mem_region(ltq_icu_resource.start,
			resource_size(&ltq_icu_resource), "icu"))
		panic("Failed to request icu memory\n");

	ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
				resource_size(&ltq_icu_resource));
	if (!ltq_icu_membase)
		panic("Failed to remap icu memory\n");

	if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0)
		panic("Failed to insert eiu memory\n");

	if (!request_mem_region(ltq_eiu_resource.start,
			resource_size(&ltq_eiu_resource), "eiu"))
		panic("Failed to request eiu memory\n");

	ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start,
				resource_size(&ltq_eiu_resource));
	if (!ltq_eiu_membase)
		panic("Failed to remap eiu memory\n");

	/* make sure all irqs are turned off by default */
	for (i = 0; i < 5; i++) {
		ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
		/* clear all possibly pending interrupts */
		ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
	}

	mips_cpu_irq_init();

	for (i = 2; i <= 6; i++)
		setup_irq(i, &cascade);

	if (cpu_has_vint) {
		pr_info("Setting up vectored interrupts\n");
		set_vi_handler(2, ltq_hw0_irqdispatch);
		set_vi_handler(3, ltq_hw1_irqdispatch);
		set_vi_handler(4, ltq_hw2_irqdispatch);
		set_vi_handler(5, ltq_hw3_irqdispatch);
		set_vi_handler(6, ltq_hw4_irqdispatch);
		set_vi_handler(7, ltq_hw5_irqdispatch);
	}

	for (i = INT_NUM_IRQ0;
		i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
		if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) ||
			(i == LTQ_EIU_IR2))
			irq_set_chip_and_handler(i, &ltq_eiu_type,
				handle_level_irq);
		/* EIU3-5 only exist on ar9 and vr9 */
		else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) ||
			(i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9()))
			irq_set_chip_and_handler(i, &ltq_eiu_type,
				handle_level_irq);
		else
			irq_set_chip_and_handler(i, &ltq_irq_type,
				handle_level_irq);

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#else
	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif
}
unsigned int __cpuinit get_c0_compare_int(void)
{
	return CP0_LEGACY_COMPARE_IRQ;
}