/*
 * Copyright 2005-2009 Analog Devices Inc.
 * D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
 * Kenneth Albanowski <kjahds@kjahds.com>
 *
 * Licensed under the GPL-2 or later.
 */

#include <asm/blackfin.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>
#include <asm/thread_info.h>

#include <asm/context.S>

.extern _ret_from_exception

#ifdef CONFIG_I_ENTRY_L1
.section .l1.text
#else
.text
#endif

.align 4	/* just in case */

/* Common interrupt entry code.  First we do CLI, then push
 * RETI, to keep interrupts disabled, but to allow this state to be changed.
 * R0 contains the interrupt number, while R1 may contain the value of IPEND,
 * or garbage if IPEND won't be needed by the ISR.  */
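	/* The register stores below lay down a struct pt_regs frame on the
	 * kernel stack; later code reaches into it through the PT_* offsets
	 * provided by asm-offsets.h. */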
	[--sp] = r0;	/* Skip reserved */
	[--sp] = r1;	/* IPEND - R1 may or may not be set up before jumping here. */

	/* Switch to other method of keeping interrupts disabled.  */
#ifdef CONFIG_DEBUG_HWERR
	[--sp] = RETI;	/* orig_pc */
	/* Clear all L registers.  */
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

	ANOMALY_283_315_WORKAROUND(p5, r7)

#ifdef CONFIG_IPIPE
	call ___ipipe_grab_irq
	if cc jump .Lcommon_restore_context;
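	/* A zero return from __ipipe_grab_irq indicates no work is pending
	 * for the root (Linux) domain, so the normal _return_from_int path
	 * is skipped and the context is restored directly. */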
#else	/* CONFIG_IPIPE */
#endif	/* CONFIG_IPIPE */
	call _return_from_int;
.Lcommon_restore_context:
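	/* Both the interrupt path and the system call path land here to
	 * restore the register context saved on entry and return to the
	 * interrupted context. */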
/* interrupt routine for ivhw - 5 */
	/* A single action can kick off multiple memory transactions (like a
	 * cache line fetch), which can cause multiple hardware errors; let's
	 * catch them all.  First, make sure all the actions are complete and
	 * the core sees the hardware errors.
	 */
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

	ANOMALY_283_315_WORKAROUND(p5, r7)

	/* Handle all stacked hardware errors.
	 * To make sure we don't hang forever, only do it 10 times.
	 */
	CC = BITTST(R1, EVT_IVHW_P);
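	/* CC is set while the hardware error event bit is still set in R1,
	 * i.e. another stacked error remains to be acknowledged. */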
	/* OK a hardware error is pending - clear it */

	# We are going to dump something out, so make sure we print IPEND properly
	[sp + PT_IPEND] = r0;

	/* set the EXCAUSE to HWERR for trap_c */
	r0 = [sp + PT_SEQSTAT];
	R1.L = LO(VEC_HWERR);
	R1.H = HI(VEC_HWERR);
	R0 = R0 | R1;
	[sp + PT_SEQSTAT] = R0;
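	/* trap_c() decodes the cause from the EXCAUSE field of the saved
	 * SEQSTAT, so with VEC_HWERR folded in, this frame is reported as a
	 * hardware error. */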
	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	call _trap_c;

	/* make sure EBIU_ERRMST is clear */
	p0.l = LO(EBIU_ERRMST);
	p0.h = HI(EBIU_ERRMST);
	r0.l = (CORE_ERROR | CORE_MERROR);
	w[p0] = r0.l;

	call _ret_from_exception;

.Lcommon_restore_all_sys:
/* Interrupt routine for evt2 (NMI).
 * We don't actually use this, so just return.
 * For inner circle type details, please see:
 * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
 */
/* interrupt routine for core timer - 6 */
	TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)

/* interrupt routine for evt7 - 7 */
	INTERRUPT_ENTRY(EVT_IVG7_P)
	INTERRUPT_ENTRY(EVT_IVG8_P)
	INTERRUPT_ENTRY(EVT_IVG9_P)
	INTERRUPT_ENTRY(EVT_IVG10_P)
	INTERRUPT_ENTRY(EVT_IVG11_P)
	INTERRUPT_ENTRY(EVT_IVG12_P)
	INTERRUPT_ENTRY(EVT_IVG13_P)
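/* The INTERRUPT_ENTRY/TIMER_INTERRUPT_ENTRY macros (from <asm/entry.h>)
 * save the interrupted registers, load R0 with the event's IVG number and
 * jump to the common interrupt entry code above. */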
/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif
	call _system_call;
	jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)
#ifdef CONFIG_IPIPE
/*
 * __ipipe_call_irqtail: lowers the current priority level to EVT15
 * before running a user-defined routine, then raises the priority
 * level to EVT14 to prepare the caller for a normal interrupt
 * return through RTI.
 *
 * We currently use this facility on two occasions:
 *
 * - to branch to __ipipe_irq_tail_hook as requested by a high
 *   priority domain, such as Xenomai, after the pipeline delivered
 *   an interrupt, in order to start its rescheduling procedure;
 *   since we may not switch tasks when IRQ levels are nested on the
 *   Blackfin, we have to fake an interrupt return so that we may
 *   reschedule immediately.
 *
 * - to branch to sync_root_irqs, in order to play any interrupt
 *   pending for the root domain (i.e. the Linux kernel). This lowers
 *   the core priority level enough so that Linux IRQ handlers may
 *   never delay interrupts handled by high priority domains; we defer
 *   those handlers until this point instead. This is a substitute
 *   for using a threaded interrupt model for the Linux kernel.
 *
 * r0: address of user-defined routine
 * context: caller must have preempted EVT15, hw interrupts must be off.
 */
ENTRY(___ipipe_call_irqtail)
	[--sp] = ( r7:4, p5:3 );
	( r7:4, p5:3 ) = [sp++];
#ifdef CONFIG_DEBUG_HWERR
	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IVHW | \
	      EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | \
	      EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
	sti r0;
	raise 14;	/* Branches to _evt_evt14 */
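	/* IVG14 is already unmasked by the sti above, so the event raised
	 * here is serviced immediately and control transfers to _evt_evt14;
	 * the loop below should never run. */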
2:
	jump 2b;	/* Likely paranoid. */
ENDPROC(___ipipe_call_irqtail)

#endif /* CONFIG_IPIPE */