/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQs should be done through these routines
 *  instead of just grabbing them. Thus setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  IRQs are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <asm/irq.h>
#include <asm/system.h>

#include <asm/mach/irq.h>
/*
 * Maximum IRQ count.  Currently, this is arbitrary.  However, it should
 * not be set too low to prevent false triggering.  Conversely, if it
 * is set too high, then you could miss a stuck IRQ.
 *
 * Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT	100000
static int noirqdebug;
static volatile unsigned long irq_err_count;
static DEFINE_SPINLOCK(irq_controller_lock);
static LIST_HEAD(irq_pending);

struct irqdesc irq_desc[NR_IRQS];
void (*init_arch_irq)(void) __initdata = NULL;
/*
 * No architecture-specific irq_finish function defined in arm/arch/irqs.h.
 */
#ifndef irq_finish
#define irq_finish(irq) do { } while (0)
#endif
/*
 * Dummy mask/unmask handler
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}
irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}
void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
static struct irqchip bad_chip = {
	.ack	= dummy_mask_unmask_irq,
	.mask	= dummy_mask_unmask_irq,
	.unmask	= dummy_mask_unmask_irq,
};
static struct irqdesc bad_irq_desc = {
	.chip	= &bad_chip,
	.handle	= do_bad_IRQ,
	.pend	= LIST_HEAD_INIT(bad_irq_desc.pend),
};
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	while (desc->running)
		barrier();
}
EXPORT_SYMBOL(synchronize_irq);

#define smp_set_running(desc)	do { desc->running = 1; } while (0)
#define smp_clear_running(desc)	do { desc->running = 0; } while (0)
#else
#define smp_set_running(desc)	do { } while (0)
#define smp_clear_running(desc)	do { } while (0)
#endif
/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables
 *	are nested.  We do this lazily.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->disable_depth++;
	list_del_init(&desc->pend);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables
 *	are nested.  This function waits for any pending IRQ
 *	handlers for this interrupt to complete before returning.
 *	If you use this function while holding a resource the IRQ
 *	handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line.
 *	Note that this may call the interrupt handler, so you may
 *	get unexpected results if you hold IRQs disabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (unlikely(!desc->disable_depth)) {
		printk("enable_irq(%u) unbalanced from %p\n", irq,
			__builtin_return_address(0));
	} else if (!--desc->disable_depth) {
		desc->probing = 0;
		desc->chip->unmask(irq);

		/*
		 * If the interrupt is waiting to be processed,
		 * try to re-run it.  We can't directly run it
		 * from here since the caller might be in an
		 * interrupt-protected region.
		 */
		if (desc->pending && list_empty(&desc->pend)) {
			desc->pending = 0;
			if (!desc->chip->retrigger ||
			    desc->chip->retrigger(irq))
				list_add(&desc->pend, &irq_pending);
		}
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq);
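/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): because enables and disables nest via disable_depth, each
 * disable_irq()/disable_irq_nosync() call must be balanced by exactly
 * one enable_irq() before the line is unmasked again.  The function
 * and IRQ names below are hypothetical.
 */
#if 0	/* example only */
static void example_nested_disable(unsigned int example_irq)
{
	disable_irq(example_irq);	/* depth 0 -> 1: line masked */
	disable_irq(example_irq);	/* depth 1 -> 2: still masked */
	enable_irq(example_irq);	/* depth 2 -> 1: still masked */
	enable_irq(example_irq);	/* depth 1 -> 0: line unmasked */
}
#endif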
/*
 * Enable wake on selected irq
 */
void enable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->wake)
		desc->chip->wake(irq, 1);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq_wake);
void disable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->wake)
		desc->chip->wake(irq, 0);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_wake);
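/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): a driver that wants its interrupt to wake the system marks
 * the line before suspending and clears the mark on resume.  Names
 * are hypothetical; whether the request is honoured depends on the
 * chip implementing a ->wake method.
 */
#if 0	/* example only */
static int example_suspend(unsigned int wakeup_irq)
{
	enable_irq_wake(wakeup_irq);	/* ask the chip to wake us */
	return 0;
}

static int example_resume(unsigned int wakeup_irq)
{
	disable_irq_wake(wakeup_irq);	/* back to normal operation */
	return 0;
}
#endif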
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, cpu;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		char cpuname[12];

		seq_printf(p, "    ");
		for_each_present_cpu(cpu) {
			sprintf(cpuname, "CPU%d", cpu);
			seq_printf(p, " %10s", cpuname);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;

		seq_printf(p, "%3d: ", i);
		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
		seq_printf(p, "  %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
		show_fiq_list(p, v);
#endif
		seq_printf(p, "Err: %10lu\n", irq_err_count);
	}
	return 0;
}
/*
 * IRQ lock detection.
 *
 * Hopefully, this should get us out of a few locked situations.
 * However, it may take a while for this to happen, since we need
 * a large number of IRQs to appear in the same jiffie with the
 * same instruction pointer (or within 2 instructions).
 */
static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
{
	unsigned long instr_ptr = instruction_pointer(regs);

	if (desc->lck_jif == jiffies &&
	    desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
		desc->lck_cnt += 1;

		if (desc->lck_cnt > MAX_IRQ_CNT) {
			printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
			return 1;
		}
	} else {
		desc->lck_cnt = 0;
		desc->lck_pc  = instruction_pointer(regs);
		desc->lck_jif = jiffies;
	}
	return 0;
}
static void
report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
{
	static int count = 100;
	struct irqaction *action;

	if (!count || noirqdebug)
		return;

	count--;

	if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
		printk("irq%u: bogus retval mask %x\n", irq, ret);
	} else {
		printk("irq%u: nobody cared\n", irq);
	}

	printk(KERN_ERR "handlers:");
	action = desc->action;
	do {
		printk("\n" KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)", (unsigned long)action->handler);
		action = action->next;
	} while (action);
	printk("\n");
}
static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int ret, retval = 0;

	spin_unlock(&irq_controller_lock);

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	status = 0;
	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	spin_lock_irq(&irq_controller_lock);

	return retval;
}
/*
 * This is for software-decoded IRQs.  The caller is expected to
 * handle the ack, clear, mask and unmask issues.
 */
void
do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	kstat_cpu(cpu).irqs[irq]++;

	smp_set_running(desc);

	action = desc->action;
	if (action) {
		int ret = __do_irq(irq, action, regs);
		if (ret != IRQ_HANDLED)
			report_bad_irq(irq, regs, desc, ret);
	}

	smp_clear_running(desc);
}
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this.  Hence the complexity.
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ.  Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || desc->disable_depth))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;

		action = desc->action;
		if (!action)
			break;

		if (desc->pending && !desc->disable_depth) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		__do_irq(irq, action, regs);
	} while (desc->pending && !desc->disable_depth);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

 running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running.  Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}
/*
 * Level-based IRQ handler.  Nice and simple.
 */
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * Acknowledge, clear _AND_ disable the interrupt.
	 */
	desc->chip->ack(irq);

	if (likely(!desc->disable_depth)) {
		kstat_cpu(cpu).irqs[irq]++;

		smp_set_running(desc);

		/*
		 * Return with this interrupt masked if no action
		 */
		action = desc->action;
		if (action) {
			int ret = __do_irq(irq, desc->action, regs);

			if (ret != IRQ_HANDLED)
				report_bad_irq(irq, regs, desc, ret);

			if (likely(!desc->disable_depth &&
				   !check_irq_lock(desc, irq, regs)))
				desc->chip->unmask(irq);
		}

		smp_clear_running(desc);
	}
}
static void do_pending_irqs(struct pt_regs *regs)
{
	struct list_head head, *l, *n;

	do {
		struct irqdesc *desc;

		/*
		 * First, take the pending interrupts off the list.
		 * The act of calling the handlers may add some IRQs
		 * back onto the list.
		 */
		head = irq_pending;
		INIT_LIST_HEAD(&irq_pending);
		head.next->prev = &head;
		head.prev->next = &head;

		/*
		 * Now run each entry.  We must delete it from our
		 * list before calling the handler.
		 */
		list_for_each_safe(l, n, &head) {
			desc = list_entry(l, struct irqdesc, pend);
			list_del_init(&desc->pend);
			desc->handle(desc - irq_desc, desc, regs);
		}

		/*
		 * The list must be empty.
		 */
		BUG_ON(!list_empty(&head));
	} while (!list_empty(&irq_pending));
}
/*
 * do_IRQ handles all hardware IRQs.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handler'.
 */
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct irqdesc *desc = irq_desc + irq;

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		desc = &bad_irq_desc;

	irq_enter();
	spin_lock(&irq_controller_lock);
	desc->handle(irq, desc, regs);

	/*
	 * Now re-run any pending interrupts.
	 */
	if (!list_empty(&irq_pending))
		do_pending_irqs(regs);

	irq_finish(irq);

	spin_unlock(&irq_controller_lock);
	irq_exit();
}
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	if (handle == NULL)
		handle = do_bad_IRQ;

	desc = irq_desc + irq;

	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->disable_depth = 1;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		desc->valid = 0;
		desc->probe_ok = 0;
		desc->disable_depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
void set_irq_chip(unsigned int irq, struct irqchip *chip)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
		return;
	}

	if (chip == NULL)
		chip = &bad_chip;

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->chip = chip;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
int set_irq_type(unsigned int irq, unsigned int type)
{
	struct irqdesc *desc;
	unsigned long flags;
	int ret = -ENXIO;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
		return -ENODEV;
	}

	desc = irq_desc + irq;
	if (desc->chip->type) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		ret = desc->chip->type(irq, type);
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(set_irq_type);
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->valid = (iflags & IRQF_VALID) != 0;
	desc->probe_ok = (iflags & IRQF_PROBE) != 0;
	desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
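/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): machine support code typically wires up each IRQ from its
 * init_irq hook by installing a chip, picking a flow handler
 * (do_level_IRQ or do_edge_IRQ) and marking the IRQ valid and
 * probe-able.  "example_chip" and the function name are hypothetical.
 */
#if 0	/* example only */
static struct irqchip example_chip;	/* would supply ack/mask/unmask */

static void __init example_init_irq(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		set_irq_chip(irq, &example_chip);
		set_irq_handler(irq, do_level_IRQ);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
}
#endif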
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a
		 * problem?  Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->disable_depth = 1;
		if (!desc->noautoenable) {
			desc->disable_depth = 0;
			desc->chip->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}
/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irq_flags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		 unsigned long irq_flags, const char * devname, void *dev_id)
{
	unsigned long retval;
	struct irqaction *action;

	if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
	    (irq_flags & SA_SHIRQ && !dev_id))
		return -EINVAL;

	action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irq_flags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);

	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);
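/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): a typical driver passes its device structure as dev_id so
 * the same cookie can be used to free a shared line later.  All
 * "example_*" names and the device-poking helpers are hypothetical.
 */
#if 0	/* example only */
static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct example_dev *dev = dev_id;

	if (!example_device_raised_irq(dev))
		return IRQ_NONE;	/* not ours: vital on shared lines */

	example_ack_device(dev);	/* clear the source on the board */
	return IRQ_HANDLED;
}

static int example_attach(struct example_dev *dev, unsigned int irq)
{
	int ret;

	/* shared line, so a non-NULL dev_id is mandatory */
	ret = request_irq(irq, example_interrupt, SA_SHIRQ, "example", dev);
	if (ret)
		return ret;
	/* from here on example_interrupt() may be invoked */
	return 0;
}

static void example_detach(struct example_dev *dev, unsigned int irq)
{
	free_irq(irq, dev);	/* same cookie as request_irq() */
}
#endif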
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n", irq);
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		break;
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);

	if (!action) {
		printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
	} else {
		synchronize_irq(irq);
		kfree(action);
	}
}

EXPORT_SYMBOL(free_irq);
static DECLARE_MUTEX(probe_sem);

/* Start the interrupt probing.  Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing are
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	down(&probe_sem);

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		if (irq_desc[i].chip->type)
			irq_desc[i].chip->type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}

EXPORT_SYMBOL(probe_irq_on);
unsigned int probe_irq_mask(unsigned long irqs)
{
	unsigned int mask = 0, i;

	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < 16 && i < NR_IRQS; i++)
		if (irq_desc[i].probing && irq_desc[i].triggered)
			mask |= 1 << i;
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return mask & irqs;
}
EXPORT_SYMBOL(probe_irq_mask);
/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one
	 * that we were probing has been triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	if (irq_found == NO_IRQ)
		irq_found = -1;
out:
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return irq_found;
}
EXPORT_SYMBOL(probe_irq_off);
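/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): the classic autoprobe sequence.  The board-poking helpers
 * are hypothetical; the point is the probe_irq_on()/probe_irq_off()
 * bracket around forcing the device to raise one interrupt.
 */
#if 0	/* example only */
static int example_autoprobe(struct example_dev *dev)
{
	unsigned long probe_cookie;
	int irq;

	probe_cookie = probe_irq_on();	/* unmask probe-able IRQs */
	example_force_interrupt(dev);	/* make the card raise its IRQ */
	mdelay(20);			/* give it time to arrive (needs <linux/delay.h>) */
	irq = probe_irq_off(probe_cookie);	/* -1 if none or many fired */

	return irq;
}
#endif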
#ifdef CONFIG_SMP
static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
{
	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);

	spin_lock_irq(&irq_controller_lock);
	desc->cpu = cpu;
	desc->chip->set_cpu(desc, irq, cpu);
	spin_unlock_irq(&irq_controller_lock);
}
#ifdef CONFIG_PROC_FS
static int
irq_affinity_read_proc(char *page, char **start, off_t off, int count,
		       int *eof, void *data)
{
	struct irqdesc *desc = irq_desc + ((int)data);
	int len = cpumask_scnprintf(page, count, desc->affinity);

	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static int
irq_affinity_write_proc(struct file *file, const char __user *buffer,
			unsigned long count, void *data)
{
	unsigned int irq = (unsigned int)data;
	struct irqdesc *desc = irq_desc + irq;
	cpumask_t affinity, tmp;
	int ret = -EIO;

	if (!desc->chip->set_cpu)
		goto out;

	ret = cpumask_parse(buffer, count, affinity);
	if (ret)
		goto out;

	cpus_and(tmp, affinity, cpu_online_map);
	if (cpus_empty(tmp)) {
		ret = -EINVAL;
		goto out;
	}

	desc->affinity = affinity;
	route_irq(desc, irq, first_cpu(tmp));
	ret = count;

out:
	return ret;
}
#endif
#endif
void __init init_irq_proc(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
	struct proc_dir_entry *dir;
	int irq;

	dir = proc_mkdir("irq", 0);
	if (!dir)
		return;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct proc_dir_entry *entry;
		struct irqdesc *desc;
		char name[16];

		desc = irq_desc + irq;
		memset(name, 0, sizeof(name));
		snprintf(name, sizeof(name) - 1, "%u", irq);

		desc->procdir = proc_mkdir(name, dir);
		if (!desc->procdir)
			continue;

		entry = create_proc_entry("smp_affinity", 0600, desc->procdir);
		if (entry) {
			entry->data = (void *)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}
void __init init_IRQ(void)
{
	struct irqdesc *desc;
	extern void init_dma(void);
	int irq;

#ifdef CONFIG_SMP
	bad_irq_desc.affinity = CPU_MASK_ALL;
	bad_irq_desc.cpu = smp_processor_id();
#endif

	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
		*desc = bad_irq_desc;
		INIT_LIST_HEAD(&desc->pend);
	}

	init_arch_irq();
	init_dma();
}
static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);