/* -*- linux-c -*-
 * linux/arch/blackfin/kernel/ipipe.c
 *
 * Copyright (C) 2005-2007 Philippe Gerum.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 * USA; either version 2 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Architecture-dependent I-pipe support for the Blackfin.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <asm/unistd.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/io.h>

DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);

asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);

static void __ipipe_no_irqtail(void);

unsigned long __ipipe_irq_tail_hook = (unsigned long)&__ipipe_no_irqtail;
EXPORT_SYMBOL(__ipipe_irq_tail_hook);

unsigned long __ipipe_core_clock;
EXPORT_SYMBOL(__ipipe_core_clock);

unsigned long __ipipe_freq_scale;
EXPORT_SYMBOL(__ipipe_freq_scale);

atomic_t __ipipe_irq_lvdepth[IVG15 + 1];

unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
EXPORT_SYMBOL(__ipipe_irq_lvmask);

static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
{
        desc->ipipe_ack(irq, desc);
}

/*
 * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
 * interrupts are off, and secondary CPUs are still lost in space.
 */
void __ipipe_enable_pipeline(void)
{
        unsigned irq;

        __ipipe_core_clock = get_cclk(); /* Fetch this once. */
        __ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;

        for (irq = 0; irq < NR_IRQS; ++irq)
                ipipe_virtualize_irq(ipipe_root_domain,
                                     irq,
                                     (ipipe_irq_handler_t)&asm_do_IRQ,
                                     NULL,
                                     &__ipipe_ack_irq,
                                     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
}
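
/*
 * Illustrative sketch (not part of the original file): a real-time
 * domain would typically grab an IRQ from the pipeline much like the
 * root domain does above. The domain, handler signature and the
 * IPIPE_WIRED_MASK flag below are assumptions for illustration; only
 * ipipe_virtualize_irq() and its argument order come from the loop
 * above.
 */
#if 0
static void my_rt_handler(unsigned irq, void *cookie)
{
        /* Runs ahead of Linux, from the head domain. */
}

static void my_domain_grab_irq(struct ipipe_domain *my_domain, unsigned irq)
{
        ipipe_virtualize_irq(my_domain,
                             irq,
                             (ipipe_irq_handler_t)&my_rt_handler,
                             NULL,              /* no cookie */
                             &__ipipe_ack_irq,
                             IPIPE_HANDLE_MASK | IPIPE_WIRED_MASK);
}
#endif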

/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are masked on entry.
 */
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
        struct ipipe_domain *this_domain, *next_domain;
        struct list_head *head, *pos;
        int m_ack, s = -1;

        /*
         * Software-triggered IRQs do not need any ack. The contents
         * of the register frame should only be used when processing
         * the timer interrupt, but not for handling any other
         * interrupt.
         */
        m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
        this_domain = ipipe_current_domain;

        if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
                head = &this_domain->p_link;
        else {
                head = __ipipe_pipeline.next;
                next_domain = list_entry(head, struct ipipe_domain, p_link);
                if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
                        if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
                                next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
                        if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
                                s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
                        __ipipe_dispatch_wired(next_domain, irq);
                        goto out;
                }
        }

        /* Ack the interrupt. */

        pos = head;
        while (pos != &__ipipe_pipeline) {
                next_domain = list_entry(pos, struct ipipe_domain, p_link);
                if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
                        __ipipe_set_irq_pending(next_domain, irq);
                        if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
                                next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
                                m_ack = 1;
                        }
                }
                if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
                        break;
                pos = next_domain->p_link.next;
        }

        /*
         * Now walk the pipeline, yielding control to the highest
         * priority domain that has pending interrupt(s) or
         * immediately to the current domain if the interrupt has been
         * marked as 'sticky'. This search does not go beyond the
         * current domain in the pipeline. We also enforce the
         * additional root stage lock (blackfin-specific).
         */
        if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
                s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);

        /*
         * If the interrupt preempted the head domain, then do not
         * even try to walk the pipeline, unless an interrupt is
         * pending for it.
         */
        if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
            ipipe_head_cpudom_var(irqpend_himask) == 0)
                goto out;

        __ipipe_walk_pipeline(head);
out:
        if (!s)
                __clear_bit(IPIPE_STALL_FLAG, &p->status);
}
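
/*
 * A minimal sketch (not part of the original file) of the "optimistic
 * interrupt protection" idea used above, with made-up toy_* names:
 * IRQs are logged as per-domain pending bits instead of being
 * hard-disabled, and replayed once the domain is unstalled.
 */
#if 0
struct toy_domain {
        unsigned long pending;          /* one bit per IRQ */
        int stalled;                    /* software "IRQs off" flag */
};

static void toy_handle(unsigned irq);   /* hypothetical IRQ handler */

static void toy_sync(struct toy_domain *d)
{
        while (d->pending) {
                unsigned irq = __ffs(d->pending);

                __clear_bit(irq, &d->pending);
                toy_handle(irq);        /* replay the logged IRQ */
        }
}

static void toy_post_irq(struct toy_domain *d, unsigned irq)
{
        __set_bit(irq, &d->pending);    /* log it... */
        if (!d->stalled)
                toy_sync(d);            /* ...and replay immediately */
}
#endif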

int __ipipe_check_root(void)
{
        return ipipe_root_domain_p;
}

void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int prio = desc->ic_prio;

        desc->depth = 0;
        if (ipd != &ipipe_root &&
            atomic_inc_return(&__ipipe_irq_lvdepth[prio]) == 1)
                __set_bit(prio, &__ipipe_irq_lvmask);
}
EXPORT_SYMBOL(__ipipe_enable_irqdesc);

void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int prio = desc->ic_prio;

        if (ipd != &ipipe_root &&
            atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
                __clear_bit(prio, &__ipipe_irq_lvmask);
}
EXPORT_SYMBOL(__ipipe_disable_irqdesc);
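
/*
 * The two helpers above implement a refcount-gated bitmask: the
 * first non-root user of a priority level sets its bit in
 * __ipipe_irq_lvmask, the last one clears it. A generic sketch of
 * that pattern (illustrative only, made-up names):
 */
#if 0
#define TOY_NLEVELS 16

static atomic_t toy_users[TOY_NLEVELS];
static unsigned long toy_mask;

static void toy_get_level(int lvl)
{
        if (atomic_inc_return(&toy_users[lvl]) == 1)
                __set_bit(lvl, &toy_mask);      /* first user: expose level */
}

static void toy_put_level(int lvl)
{
        if (atomic_dec_and_test(&toy_users[lvl]))
                __clear_bit(lvl, &toy_mask);    /* last user: hide level */
}
#endif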

void __ipipe_stall_root_raw(void)
{
        /*
         * This code is called by the ins{bwl} routines (see
         * arch/blackfin/lib/ins.S), which are heavily used by the
         * network stack. It masks all interrupts but those handled by
         * non-root domains, so that we keep decent network transfer
         * rates for Linux without inducing pathological jitter for
         * the real-time domain.
         */
        __asm__ __volatile__ ("sti %0;" : : "d"(__ipipe_irq_lvmask));

        __set_bit(IPIPE_STALL_FLAG,
                  &ipipe_root_cpudom_var(status));
}

void __ipipe_unstall_root_raw(void)
{
        __clear_bit(IPIPE_STALL_FLAG,
                    &ipipe_root_cpudom_var(status));

        __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));
}
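
/*
 * Illustrative pairing (not from this file): the ins{bwl} helpers
 * mentioned above bracket their inner copy loop with the raw
 * stall/unstall calls, so only IRQ levels owned by non-root domains
 * stay enabled while the loop runs. A hypothetical C equivalent:
 */
#if 0
static void toy_insw(unsigned long port, unsigned short *buf, int count)
{
        __ipipe_stall_root_raw();       /* keep only non-root IRQ levels */
        while (count-- > 0)
                *buf++ = inw(port);
        __ipipe_unstall_root_raw();     /* restore the full IRQ mask */
}
#endif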

int __ipipe_syscall_root(struct pt_regs *regs)
{
        unsigned long flags;

        /*
         * We need to run the IRQ tail hook whenever we don't
         * propagate a syscall to higher domains, because we know that
         * important operations might be pending there (e.g. Xenomai
         * deferred rescheduling).
         */

        if (regs->orig_p0 < NR_syscalls) {
                void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
                hook();
                if ((current->flags & PF_EVNOTIFY) == 0)
                        return 0;
        }

        /*
         * This routine either returns:
         * 0 -- if the syscall is to be passed to Linux;
         * 1 -- if the syscall should not be passed to Linux, and no
         * tail work should be performed;
         * -1 -- if the syscall should not be passed to Linux but the
         * tail work has to be performed (for handling signals etc).
         */

        if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
            __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
                if (ipipe_root_domain_p && !in_atomic()) {
                        /*
                         * Sync pending VIRQs before _TIF_NEED_RESCHED
                         * is tested.
                         */
                        local_irq_save_hw(flags);
                        if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
                                __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
                        local_irq_restore_hw(flags);
                        return -1;
                }
                return 1;
        }

        return 0;
}
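
/*
 * Sketch of a hypothetical caller honoring the 0/1/-1 convention
 * documented above. The real caller is the assembly syscall entry
 * path; the toy_* names are made up for illustration.
 */
#if 0
static void toy_run_linux_syscall(struct pt_regs *regs);
static void toy_do_signal_tail(struct pt_regs *regs);

static void toy_syscall_entry(struct pt_regs *regs)
{
        int ret = __ipipe_syscall_root(regs);

        if (ret == 0)
                toy_run_linux_syscall(regs);    /* propagate to Linux */
        else if (ret < 0)
                toy_do_signal_tail(regs);       /* skipped, but run tail work */
        /* ret > 0: fully handled by a higher domain, nothing left to do */
}
#endif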

unsigned long ipipe_critical_enter(void (*syncfn) (void))
{
        unsigned long flags;

        local_irq_save_hw(flags);

        return flags;
}

void ipipe_critical_exit(unsigned long flags)
{
        local_irq_restore_hw(flags);
}
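
/*
 * Typical usage of the pair above (illustrative): bracket a section
 * that must not be preempted by any domain. The sync callback is
 * unused on this UP port, hence NULL.
 */
#if 0
static void toy_update_shared_state(void)
{
        unsigned long flags;

        flags = ipipe_critical_enter(NULL);
        /* ... touch state shared with other domains ... */
        ipipe_critical_exit(flags);
}
#endif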

static void __ipipe_no_irqtail(void)
{
}

int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
{
        info->ncpus = num_online_cpus();
        info->cpufreq = ipipe_cpu_freq();
        info->archdep.tmirq = IPIPE_TIMER_IRQ;
        info->archdep.tmfreq = info->cpufreq;

        return 0;
}
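
/*
 * Example query (illustrative only), using just the fields filled in
 * above; the casts are defensive since the exact field types live in
 * the ipipe headers.
 */
#if 0
static void toy_print_sysinfo(void)
{
        struct ipipe_sysinfo info;

        if (ipipe_get_sysinfo(&info) == 0)
                printk(KERN_INFO "ipipe: %d cpu(s), %lu Hz, timer irq %u\n",
                       info.ncpus, (unsigned long)info.cpufreq,
                       (unsigned)info.archdep.tmirq);
}
#endif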

/*
 * ipipe_trigger_irq() -- Push the interrupt at the front of the
 * pipeline, just as if it had actually been received from a hw
 * source. Also works for virtual interrupts.
 */
int ipipe_trigger_irq(unsigned irq)
{
        unsigned long flags;

#ifdef CONFIG_IPIPE_DEBUG
        if (irq >= IPIPE_NR_IRQS ||
            (ipipe_virtual_irq_p(irq)
             && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
                return -EINVAL;
#endif

        local_irq_save_hw(flags);
        __ipipe_handle_irq(irq, NULL);
        local_irq_restore_hw(flags);

        return 1;
}
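
/*
 * Example (illustrative): software-raising a virtual IRQ through the
 * function above. ipipe_alloc_virq() is assumed to be the generic
 * I-pipe virq allocator; the handler name is made up, and passing a
 * NULL ack handler assumes virqs need no hardware acknowledge.
 */
#if 0
static void toy_virq_handler(unsigned irq, void *cookie);

static void toy_raise_virq(void)
{
        unsigned virq = ipipe_alloc_virq();     /* assumed I-pipe API */

        ipipe_virtualize_irq(ipipe_root_domain, virq,
                             (ipipe_irq_handler_t)&toy_virq_handler,
                             NULL, NULL, IPIPE_HANDLE_MASK);
        ipipe_trigger_irq(virq);        /* ends up in __ipipe_handle_irq(virq, NULL) */
}
#endif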

asmlinkage void __ipipe_sync_root(void)
{
        unsigned long flags;

        BUG_ON(irqs_disabled());

        local_irq_save_hw(flags);

        clear_thread_flag(TIF_IRQ_SYNC);

        if (ipipe_root_cpudom_var(irqpend_himask) != 0)
                __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);

        local_irq_restore_hw(flags);
}

void ___ipipe_sync_pipeline(unsigned long syncmask)
{
        struct ipipe_domain *ipd = ipipe_current_domain;

        if (ipd == ipipe_root_domain) {
                if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
                        return;
        }

        __ipipe_sync_stage(syncmask);
}

EXPORT_SYMBOL(show_stack);

#ifdef CONFIG_IPIPE_TRACE_MCOUNT
void notrace _mcount(void);
EXPORT_SYMBOL(_mcount);
#endif /* CONFIG_IPIPE_TRACE_MCOUNT */