/* irq.c: FRV IRQ handling
 *
 * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irc-regs.h>
#include <asm/irq-routing.h>
#include <asm/gdb-stub.h>
extern void __init fpga_init(void);
extern void __init route_mb93493_irqs(void);

static void register_irq_proc(unsigned int irq);
/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

atomic_t irq_err_count;
/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	struct irqaction *action;
	struct irq_group *group;
	unsigned long flags;
	int level, grp, ix, i, j;

	i = *(loff_t *) v;

	switch (i) {
	case 0:
		/* header line: one column per online CPU */
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d ", j);

		seq_putc(p, '\n');
		break;

	case 1 ... NR_IRQ_GROUPS * NR_IRQ_ACTIONS_PER_GROUP:
		local_irq_save(flags);

		grp = (i - 1) / NR_IRQ_ACTIONS_PER_GROUP;
		group = irq_groups[grp];
		if (!group)
			goto skip;

		ix = (i - 1) % NR_IRQ_ACTIONS_PER_GROUP;
		action = group->actions[ix];
		if (!action)
			goto skip;

		seq_printf(p, "%3d: ", i - 1);

#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
#endif

		level = group->sources[ix]->level - frv_irq_levels;

		seq_printf(p, " %12s@%x", group->sources[ix]->muxname, level);
		seq_printf(p, " %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');

	skip:
		local_irq_restore(flags);
		break;

	case NR_IRQ_GROUPS * NR_IRQ_ACTIONS_PER_GROUP + 1:
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
		break;
	}

	return 0;
}
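
/*
 * Illustrative only: given the format strings above, a populated line of
 * /proc/interrupts on this arch comes out roughly as
 *
 *	 12:      31415      serial.0@3 ttyS0, ttyS1
 *
 * i.e. action index, interrupt count (one column per online CPU on SMP),
 * source muxname and IRQ level, then the names of all chained irqactions.
 * The count, muxname and device names shown here are made-up examples.
 */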
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	unsigned long flags;
	int idx = irq & (NR_IRQ_ACTIONS_PER_GROUP - 1);

	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[idx];
	if (!source)
		BUG();

	level = source->level;

	spin_lock_irqsave(&level->lock, flags);

	if (group->control) {
		/* per-source masking is available - use it */
		if (!group->disable_cnt[idx]++)
			group->control(group, idx, 0);
	} else if (!level->disable_count++) {
		/* otherwise mask the whole level on the first disable */
		__set_MASK(level - frv_irq_levels);
	}

	spin_unlock_irqrestore(&level->lock, flags);
}
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

	/* wait for any handler currently running on another CPU to finish */
	if (!local_irq_count(smp_processor_id())) {
		do {
			barrier();
		} while (irq_desc[irq].status & IRQ_INPROGRESS);
	}
}
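
/*
 * A minimal sketch of the ordering constraint documented above; dev and
 * dev->lock are hypothetical driver state, not part of this file:
 *
 *	// WRONG: the handler may be spinning on dev->lock on another CPU,
 *	// in which case disable_irq() above would wait forever.
 *	spin_lock(&dev->lock);
 *	disable_irq(dev->irq);
 *
 *	// RIGHT: quiesce the interrupt first, then take the lock.
 *	disable_irq(dev->irq);
 *	spin_lock(&dev->lock);
 *	...
 *	spin_unlock(&dev->lock);
 *	enable_irq(dev->irq);
 */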
/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	unsigned long flags;
	int idx = irq & (NR_IRQ_ACTIONS_PER_GROUP - 1);
	int count;

	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[idx];
	if (!source)
		BUG();

	level = source->level;

	spin_lock_irqsave(&level->lock, flags);

	if (group->control)
		count = group->disable_cnt[idx];
	else
		count = level->disable_count;

	switch (count) {
	case 1:
		/* the last disable is being undone - unmask the source */
		if (group->control) {
			if (group->actions[idx])
				group->control(group, idx, 1);
		}
		else {
			__clr_MASK(level - frv_irq_levels);
		}
		/* fall through */
	default:
		count--;
		break;

	case 0:
		printk("enable_irq(%u) unbalanced from %p\n",
		       irq, __builtin_return_address(0));
		break;
	}

	if (group->control)
		group->disable_cnt[idx] = count;
	else
		level->disable_count = count;

	spin_unlock_irqrestore(&level->lock, flags);
}
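
/*
 * Disable/enable calls nest, as the unbalanced-enable check above implies.
 * A sketch (dev->irq is a hypothetical field):
 *
 *	disable_irq(dev->irq);		// count 0 -> 1: line masked
 *	disable_irq_nosync(dev->irq);	// count 1 -> 2: still masked
 *	enable_irq(dev->irq);		// count 2 -> 1: still masked
 *	enable_irq(dev->irq);		// count 1 -> 0: line unmasked here
 *
 * A further enable_irq() at this point would trigger the "unbalanced"
 * printk above.
 */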
/*****************************************************************************/
/*
 * handles all normal device IRQs
 * - registers are referred to by the __frame variable (GR28)
 * - IRQ distribution is complicated in this arch because of the many PICs, the
 *   way they work and the way they cascade
 */
asmlinkage void do_IRQ(void)
{
	struct irq_source *source;
	int level, cpu;
	static u32 irqcount;

	level = (__frame->tbr >> 4) & 0xf;
	cpu = smp_processor_id();

	/* board-level debug output */
	*(volatile u32 *) 0xe1200004 = ~((irqcount++ << 8) | level);
	*(volatile u16 *) 0xffc00100 = (u16) ~0x9999;

	/* catch kernel stack overrun before it corrupts the task struct */
	if ((unsigned long) __frame - (unsigned long) (current + 1) < 512)
		BUG();

	kstat_this_cpu.irqs[level]++;

	/* dispatch to every source cascaded onto this CPU priority level */
	for (source = frv_irq_levels[level].sources; source; source = source->next)
		source->doirq(source);

	/* only process softirqs if we didn't interrupt another interrupt handler */
	if ((__frame->psr & PSR_PIL) == PSR_PIL_0)
		if (local_softirq_pending())
			do_softirq();
#ifdef CONFIG_PREEMPT
	local_irq_disable();
	while (--current->preempt_count == 0) {
		if (!(__frame->psr & PSR_S) ||
		    current->need_resched == 0 ||
		    in_interrupt())
			break;
		current->preempt_count++;
		local_irq_enable();
		preempt_schedule();
		local_irq_disable();
	}
#endif

	*(volatile u16 *) 0xffc00100 = (u16) ~0x6666;

} /* end do_IRQ() */
/*****************************************************************************/
/*
 * handles all NMIs when not co-opted by the debugger
 * - registers are referred to by the __frame variable (GR28)
 */
asmlinkage void do_NMI(void)
{
}

/*****************************************************************************/
/**
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non-NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 * SA_SHIRQ		Interrupt is shared
 * SA_INTERRUPT		Disable local interrupts while processing
 * SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char *devname,
		void *dev_id)
{
	int retval;
	struct irqaction *action;

	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n",
			       devname, (&irq)[-1]);
	}

	if ((irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP) >= NR_IRQ_GROUPS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *) kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = CPU_MASK_NONE;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);

	return retval;
}
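
/*
 * A minimal usage sketch for request_irq() as declared above. The device
 * structure, handler and helper functions (foo_*) are hypothetical; only the
 * calling convention is taken from this file:
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id,
 *					 struct pt_regs *regs)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	// not ours (shared line)
 *
 *		foo_ack_irq(foo);		// clear the source first
 *		return IRQ_HANDLED;
 *	}
 *
 *	// dev_id must be non-NULL because the line is shared (SA_SHIRQ)
 *	err = request_irq(foo->irq, foo_interrupt,
 *			  SA_SHIRQ | SA_SAMPLE_RANDOM, "foo", foo);
 *	if (err)
 *		goto out_unmap;
 */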
/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function may be called from interrupt context.
 *
 * Bugs: Attempting to free an irq in a handler for the same irq hangs
 *       the machine.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	struct irqaction **p, **pp;
	unsigned long flags;

	if ((irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP) >= NR_IRQ_GROUPS)
		return;

	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
	if (!source)
		BUG();

	level = source->level;
	p = &group->actions[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];

	spin_lock_irqsave(&level->lock, flags);

	for (pp = p; *pp; pp = &(*pp)->next) {
		struct irqaction *action = *pp;

		if (action->dev_id != dev_id)
			continue;

		/* found it - remove from the list of entries */
		*pp = action->next;
		level->usage--;

		if (p == pp && group->control)
			group->control(group, irq & (NR_IRQ_ACTIONS_PER_GROUP - 1), 0);

		if (level->usage == 0)
			__set_MASK(level - frv_irq_levels);

		spin_unlock_irqrestore(&level->lock, flags);

#ifdef CONFIG_SMP
		/* Wait to make sure it's not being used on another CPU */
		while (desc->status & IRQ_INPROGRESS)
			barrier();
#endif

		kfree(action);
		return;
	}
}
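
/*
 * Teardown counterpart to the request_irq() sketch above (the foo names
 * remain hypothetical). The same dev_id cookie selects which irqaction on
 * a shared line is removed:
 *
 *	foo_mask_board_irq(foo);	// per the note above: quiet the card
 *	free_irq(foo->irq, foo);	// waits for running handlers
 */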
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that comes in on to an
 * unassigned IRQ will cause GxICR_DETECT to be set
 */

static DECLARE_MUTEX(probe_sem);
/**
 * probe_irq_on - begin an interrupt autodetect
 *
 * Commence probing for an interrupt. The interrupts are scanned
 * and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
	down(&probe_sem);
	return 0;
}

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */
/**
 * probe_irq_mask - scan a bitmap of interrupt lines
 * @val: mask of interrupts to consider
 *
 * Scan the ISA bus interrupt lines and return a bitmap of
 * active interrupts. The interrupt probe logic state is then
 * returned to its previous value.
 *
 * Note: we need to scan all the irqs even though we will
 * only return ISA irq numbers - just so that we reset them
 * all to a known state.
 */
unsigned int probe_irq_mask(unsigned long xmask)
{
	up(&probe_sem);
	return 0;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */
/**
 * probe_irq_off - end an interrupt autodetect
 * @xmask: mask of potential interrupts (unused)
 *
 * Scans the unused interrupt lines and returns the line which
 * appears to have triggered the interrupt. If no interrupt was
 * found then zero is returned. If more than one interrupt is
 * found then minus the first candidate is returned to indicate
 * there is doubt.
 *
 * The interrupt probe logic state is returned to its previous
 * value.
 *
 * BUGS: When used in a module (which arguably shouldn't happen)
 * nothing prevents two IRQ probe callers from overlapping. The
 * results of this are non-optimal.
 */
int probe_irq_off(unsigned long xmask)
{
	up(&probe_sem);
	return 0;
}
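
/*
 * The classic autoprobe sequence the three helpers above are meant to
 * support, sketched with a hypothetical device (foo_trigger_irq() makes
 * the card raise its interrupt):
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	mask = probe_irq_on();		// start watching unassigned lines
 *	foo_trigger_irq(foo);		// provoke exactly one interrupt
 *	udelay(100);			// give it time to arrive
 *	irq = probe_irq_off(mask);	// 0: none seen, <0: several seen
 *	if (irq > 0)
 *		foo->irq = irq;
 */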
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	struct irqaction **p, **pp;
	unsigned long flags;

	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
	if (!source)
		BUG();

	level = source->level;

	p = &group->actions[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}
	/* must juggle the interrupt processing stuff with interrupts disabled */
	spin_lock_irqsave(&level->lock, flags);

	/* can't share interrupts unless all parties agree to */
	if (level->usage != 0 && !(level->flags & new->flags & SA_SHIRQ)) {
		spin_unlock_irqrestore(&level->lock, flags);
		return -EBUSY;
	}

	/* add new interrupt at end of irq queue */
	pp = p;
	while (*pp)
		pp = &(*pp)->next;

	*pp = new;

	level->usage++;
	level->flags = new->flags;

	/* turn the interrupts on */
	if (level->usage == 1)
		__clr_MASK(level - frv_irq_levels);

	if (p == pp && group->control)
		group->control(group, irq & (NR_IRQ_ACTIONS_PER_GROUP - 1), 1);

	spin_unlock_irqrestore(&level->lock, flags);

	register_irq_proc(irq);
	return 0;
}
static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry *irq_dir[NR_IRQS];
#define HEX_DIGITS 8

static unsigned int parse_hex_value(const char *buffer,
				    unsigned long count, unsigned long *ret)
{
	unsigned char hexnum[HEX_DIGITS];
	unsigned long value;
	int i;

	if (!count)
		return -EINVAL;
	if (count > HEX_DIGITS)
		count = HEX_DIGITS;
	if (copy_from_user(hexnum, buffer, count))
		return -EFAULT;

	/*
	 * Parse the first 8 characters as a hex string, any non-hex char
	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
	 */
	value = 0;

	for (i = 0; i < count; i++) {
		unsigned int c = hexnum[i];

		switch (c) {
		case '0' ... '9': c -= '0'; break;
		case 'a' ... 'f': c -= 'a' - 10; break;
		case 'A' ... 'F': c -= 'A' - 10; break;
		default:
			goto out;
		}
		value = (value << 4) | c;
	}
out:
	*ret = value;
	return 0;
}
static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
				   int count, int *eof, void *data)
{
	unsigned long *mask = (unsigned long *) data;

	if (count < HEX_DIGITS + 1)
		return -EINVAL;
	return sprintf(page, "%08lx\n", *mask);
}
static int prof_cpu_mask_write_proc(struct file *file, const char *buffer,
				    unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data, full_count = count, err;
	unsigned long new_value;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
#define MAX_NAMELEN 10

static void register_irq_proc(unsigned int irq)
{
	char name[MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
}
unsigned long prof_cpu_mask = -1;

void init_irq_proc(void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
	if (!entry)
		return;

	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}
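
/*
 * What the code above exports to userspace, for reference (paths follow
 * from the proc_mkdir()/create_proc_entry() calls; the mask value shown is
 * just an example):
 *
 *	/proc/irq/			one directory per IRQ, e.g. /proc/irq/3/
 *	/proc/irq/prof_cpu_mask		profiling CPU mask, read/written as
 *					up to 8 hex digits:
 *
 *	# cat /proc/irq/prof_cpu_mask
 *	ffffffff
 *	# echo 1 > /proc/irq/prof_cpu_mask	(profile CPU 0 only)
 */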
/*****************************************************************************/
/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
	fpga_init();
#ifdef CONFIG_FUJITSU_MB93493
	route_mb93493_irqs();
#endif
} /* end init_IRQ() */