/*
 * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include "../../drivers/irqchip/irqchip.h"
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/mach_desc.h>

/*
 * Early Hardware specific Interrupt setup
 * -Platform independent, needed for each CPU (not foldable into init_IRQ)
 * -Called very early (start_kernel -> setup_arch -> setup_processor)
 * -Optionally, sets up the High priority Interrupts as Level 2 IRQs
 */
void arc_init_IRQ(void)
{
	int level_mask = 0;

	/* setup any high priority Interrupts (Level2 in ARCompact jargon) */
	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;

	/*
	 * Write to the register even if no LV2 IRQs are configured, to reset
	 * it in case the bootloader had mucked with it
	 */
	write_aux_reg(AUX_IRQ_LEV, level_mask);

	if (level_mask)
		pr_info("Level-2 interrupts bitset %x\n", level_mask);
}
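
/*
 * Worked example of the mask arithmetic above, assuming a build with only
 * CONFIG_ARC_IRQ6_LV2=y: IS_ENABLED() then evaluates to 1 for that option
 * and 0 for the others, so
 *	level_mask = (0 << 3) | (0 << 5) | (1 << 6) = 0x40
 * and the boot log would show "Level-2 interrupts bitset 40".
 */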

/*
 * ARC700 core includes a simple on-chip intc supporting
 * -per IRQ enable/disable
 * -2 levels of interrupts (high/low)
 * -all interrupts being level triggered
 *
 * To reduce platform code, we assume all IRQs are directly hooked-up into
 * the intc. Platforms with an external intc, hence cascaded IRQs, are free
 * to override this on a per-IRQ basis.
 */

static void arc_irq_mask(struct irq_data *data)
{
	unsigned int ienb;

	ienb = read_aux_reg(AUX_IENABLE);
	ienb &= ~(1 << data->irq);
	write_aux_reg(AUX_IENABLE, ienb);
}

static void arc_irq_unmask(struct irq_data *data)
{
	unsigned int ienb;

	ienb = read_aux_reg(AUX_IENABLE);
	ienb |= (1 << data->irq);
	write_aux_reg(AUX_IENABLE, ienb);
}

static struct irq_chip onchip_intc = {
	.name		= "ARC In-core Intc",
	.irq_mask	= arc_irq_mask,
	.irq_unmask	= arc_irq_unmask,
};

static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
			       irq_hw_number_t hw)
{
	if (irq == TIMER0_IRQ)
		irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
	else
		irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);

	return 0;
}

static const struct irq_domain_ops arc_intc_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = arc_intc_domain_map,
};

static struct irq_domain *root_domain;

static int __init
init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
{
	if (parent)
		panic("DeviceTree incore intc not a root irq controller\n");

	root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
					    &arc_intc_domain_ops, NULL);
	if (!root_domain)
		panic("root irq domain not avail\n");

	/* with this we don't need to export root_domain */
	irq_set_default_host(root_domain);

	return 0;
}

IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
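
/*
 * Minimal sketch of a matching device tree node (illustrative only; the
 * "intc" label and node name are made up, just the compatible string comes
 * from the IRQCHIP_DECLARE above):
 *
 *	intc: interrupt-controller {
 *		compatible = "snps,arc700-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 *
 * The single interrupt cell is the hw irq number, matching the
 * irq_domain_xlate_onecell translator installed in arc_intc_domain_ops.
 */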

/*
 * Late Interrupt system init called from start_kernel for Boot CPU only
 *
 * Since slab must already be initialized, platforms can start doing any
 * needed request_irq( )s
 */
void __init init_IRQ(void)
{
	/* Any external intc can be setup here */
	if (machine_desc->init_irq)
		machine_desc->init_irq();

	/* process the entire interrupt tree in one go */
	irqchip_init();

#ifdef CONFIG_SMP
	/* Master CPU can initialize its side of IPI */
	if (machine_desc->init_smp)
		machine_desc->init_smp(smp_processor_id());
#endif
}
143 * "C" Entry point for any ARC ISR, called from low level vector handler
144 * @irq is the vector number read from ICAUSE reg of on-chip intc
146 void arch_do_IRQ(unsigned int irq
, struct pt_regs
*regs
)
148 struct pt_regs
*old_regs
= set_irq_regs(regs
);
151 generic_handle_irq(irq
);
153 set_irq_regs(old_regs
);
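
/*
 * Note: the irq_enter()/irq_exit() bracketing in arch_do_IRQ() is what lets
 * the in_irq()/in_softirq() checks in arch_local_irq_enable() further down
 * tell hard-ISR context apart from soft-ISR context.
 */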

void arc_request_percpu_irq(int irq, int cpu,
			    irqreturn_t (*isr)(int irq, void *dev),
			    const char *irq_nm, void *percpu_dev)
{
	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		/*
		 * These 2 calls are essential to making percpu IRQ APIs work
		 * Ideally these details could be hidden in irq chip map function
		 * but the issue is IPIs IRQs being static (non-DT) and platform
		 * specific, so we can't identify them there.
		 */
		irq_set_percpu_devid(irq);
		irq_modify_status(irq, IRQ_NOAUTOEN, 0);	/* @irq, @clr, @set */

		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
		if (rc)
			panic("Percpu IRQ request failed for %d\n", irq);
	}

	enable_percpu_irq(irq, 0);
}
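
/*
 * Usage sketch, assuming a hypothetical platform IPI setup (ipi_irq,
 * do_ipi_isr and ipi_dev are made-up names, ipi_dev being a DEFINE_PER_CPU
 * variable): every CPU, boot CPU included, calls this from its SMP bring-up
 * path, so the boot CPU performs the one-time request and each CPU then
 * enables its own copy of the per-cpu IRQ:
 *
 *	arc_request_percpu_irq(ipi_irq, smp_processor_id(),
 *			       do_ipi_isr, "IPI", &ipi_dev);
 */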

/*
 * arch_local_irq_enable - Enable interrupts.
 *
 * 1. Explicitly called to re-enable interrupts
 * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
 *    which may be in hard ISR itself
 *
 * Semantics of this function change depending on where it is called from:
 *
 * -If called from hard-ISR, it must not invert interrupt priorities
 *  e.g. suppose TIMER is a high priority (Level 2) IRQ
 *  In the timer hard-ISR, timer_interrupt( ) calls spin_unlock_irq several
 *  times. Here local_irq_enable( ) should not re-enable lower priority
 *  interrupts
 * -If called from soft-ISR, it must re-enable all interrupts
 *  soft ISRs are low priority jobs which can be very slow, thus all IRQs
 *  must be enabled while they run.
 *  Hardware context wise we may still be in the L2 ISR (not done rtie yet),
 *  still we must re-enable both L1 and L2 IRQs
 *  Another twist is the previous scenario with the flow being
 *  L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
 *  here we must not re-enable L1 as the previous L1 Interrupt's h/w context
 *  will get over-written (this is a deficiency in the ARC700 Interrupt
 *  mechanism)
 */

#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS	/* Complex version for 2 IRQ levels */

void arch_local_irq_enable(void)
{
	unsigned long flags;

	flags = arch_local_save_flags();

	/* Allow both L1 and L2 at the onset */
	flags |= (STATUS_E1_MASK | STATUS_E2_MASK);

	/* Called from hard ISR (between irq_enter and irq_exit) */
	if (in_irq()) {

		/* If in L2 ISR, don't re-enable any further IRQs as this can
		 * cause IRQ priorities to get upside down. e.g. it could allow
		 * L1 to be taken while in L2 hard ISR which is wrong not only
		 * in theory, it can also cause the dreaded L1-L2-L1 scenario
		 */
		if (flags & STATUS_A2_MASK)
			flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);

		/* Even if in L1 ISR, allow higher prio L2 IRQs */
		else if (flags & STATUS_A1_MASK)
			flags &= ~(STATUS_E1_MASK);
	}

	/* called from soft IRQ, ideally we want to re-enable all levels */

	else if (in_softirq()) {

		/* However if this is the case of L1 interrupted by L2,
		 * re-enabling both may cause the whacko L1-L2-L1 scenario
		 * because ARC700 allows Level 1 to interrupt an active L2 ISR
		 * Thus we disable both
		 * However some code, executing in soft ISR, wants some IRQs
		 * to be enabled, so we re-enable L2 only
		 *
		 * How do we determine L1 intr by L2?
		 *  -A2 is set (means in L2 ISR)
		 *  -E1 is set in this ISR's pt_regs->status32 which is
		 *   the saved copy of status32_l2 when the L2 ISR happened
		 */
		struct pt_regs *pt = get_irq_regs();

		if ((flags & STATUS_A2_MASK) && pt &&
		    (pt->status32 & STATUS_A1_MASK)) {
			/*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
			flags &= ~(STATUS_E1_MASK);
		}
	}

	arch_local_irq_restore(flags);
}

#else	/* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */

/*
 * Simpler version for only 1 level of interrupt
 * Here we only worry about the Level 1 bits
 */
void arch_local_irq_enable(void)
{
	unsigned long flags;

	/*
	 * ARC IDE Drivers try to re-enable interrupts from hard-isr
	 * context which is simply wrong
	 */
	if (in_irq()) {
		WARN_ONCE(1, "IRQ enabled from hard-isr");
		return;
	}

	flags = arch_local_save_flags();
	flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
	arch_local_irq_restore(flags);
}

#endif	/* CONFIG_ARC_COMPACT_IRQ_LEVELS */

EXPORT_SYMBOL(arch_local_irq_enable);