/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include "linux/cpumask.h"
#include "linux/hardirq.h"
#include "linux/interrupt.h"
#include "linux/kernel_stat.h"
#include "linux/module.h"
#include "linux/seq_file.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, " %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_putc(p, '\n');

	return 0;
}

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler.  That will process a separate
 * list of irqs to free, with its own locking, coming back here to
 * remove list elements, taking the irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);

void sigio_handler(int sig, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);

int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int pid, events, err, n;

	pid = os_getpid();
	err = os_set_fd_async(fd, pid);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next           = NULL,
				     .id             = dev_id,
				     .fd             = fd,
				     .type           = type,
				     .irq            = irq,
				     .pid            = pid,
				     .events         = events,
				     .current_events = 0 } );

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means the new pollfd didn't fit into the current
		 * pollfds array, and tmp_pfd is NULL or too small to hold
		 * the new array.  n is the minimum size needed.
		 *
		 * We have to drop the lock here in order to call kmalloc,
		 * which might sleep.  If something else changes the pollfds
		 * array in the meantime so that the new pollfd still does
		 * not fit, we free tmp_pfd and try again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}

static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
							  .dev = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}

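/*
 * Any filter over active_fds can be expressed as a test callback in
 * the same way as the two predicates above.  As a purely hypothetical
 * sketch (not part of this file), freeing every descriptor registered
 * with a given dev_id would look like:
 *
 *	static int same_id(struct irq_fd *irq, void *id)
 *	{
 *		return (irq->id == id);
 *	}
 *
 *	free_irq_by_cb(same_id, dev_id);
 */
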
/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}

void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * already died.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}
	/* If there is a signal already queued, after unblocking ignore it */
	os_set_ioignore();

	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	__do_IRQ(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	err = request_irq(irq, handler, irqflags, devname, dev_id);
	if (err)
		return err;

	if (fd != -1)
		err = activate_fd(irq, fd, type, dev_id);
	return err;
}
EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);

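/*
 * Illustrative only - not part of this file.  A UML driver watching a
 * host file descriptor registers it roughly like this (MY_IRQ,
 * my_interrupt, and my_dev are hypothetical names):
 *
 *	err = um_request_irq(MY_IRQ, fd, IRQ_READ, my_interrupt,
 *			     IRQF_DISABLED, "my_driver", my_dev);
 *	if (err < 0)
 *		printk(KERN_ERR "registration failed, err = %d\n", err);
 *
 * Once the handler has drained the descriptor, it calls
 * reactivate_fd(fd, MY_IRQ) to re-arm polling, since the descriptor's
 * pollfd slot is disabled while its event is being handled.
 */
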
/*
 * hw_interrupt_type must define (startup || enable) &&
 * (shutdown || disable) && end
 */
static void dummy(unsigned int irq)
{
}

/* This is used for everything else than the timer. */
static struct hw_interrupt_type normal_irq_type = {
	.typename = "SIGIO",
	.release = free_irq_by_irq_and_dev,
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

static struct hw_interrupt_type SIGVTALRM_irq_type = {
	.typename = "SIGVTALRM",
	.release = free_irq_by_irq_and_dev,
	.shutdown = dummy, /* never called */
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

void __init init_IRQ(void)
{
	int i;

	irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
	irq_desc[TIMER_IRQ].action = NULL;
	irq_desc[TIMER_IRQ].depth = 1;
	irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
	enable_irq(TIMER_IRQ);
	for (i = 1; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].chip = &normal_irq_type;
		enable_irq(i);
	}
}

int init_aio_irq(int irq, char *name, irq_handler_t handler)
{
	int fds[2], err;

	err = os_pipe(fds, 1, 1);
	if (err) {
		printk(KERN_ERR "init_aio_irq - os_pipe failed, err = %d\n",
		       -err);
		goto out;
	}

	err = um_request_irq(irq, fds[0], IRQ_READ, handler,
			     IRQF_DISABLED | IRQF_SAMPLE_RANDOM, name,
			     (void *) (long) fds[0]);
	if (err) {
		printk(KERN_ERR "init_aio_irq - um_request_irq failed, "
		       "err = %d\n", err);
		goto out_close;
	}

	err = fds[1];
	goto out;

 out_close:
	os_close_file(fds[0]);
	os_close_file(fds[1]);
 out:
	return err;
}

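/*
 * On success init_aio_irq returns the write end of the pipe, so a
 * caller can raise the IRQ from a helper thread by writing to it.
 * A hypothetical sketch (AIO_IRQ, aio_interrupt, and event are not
 * names from this file):
 *
 *	fd = init_aio_irq(AIO_IRQ, "aio", aio_interrupt);
 *	if (fd < 0)
 *		return fd;
 *	...
 *	os_write_file(fd, &event, sizeof(event));
 */
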
/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with thread_info->pending.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}