/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
/* When epoll triggers we do not know why it did so
 * we can also have different IRQs for read and write.
 * This is why we keep a small irq_fd array for each fd -
 * one entry per IRQ type
 */
struct irq_entry {
	struct irq_entry *next;
	int fd;
	struct irq_fd *irq_array[MAX_IRQ_TYPE + 1];
};
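/* irq_array is indexed by IRQ type (e.g. IRQ_READ, IRQ_WRITE); each slot
 * holds the irq_fd registered for that direction of the fd, or NULL.
 */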
static struct irq_entry *active_fds;

static DEFINE_SPINLOCK(irq_lock);
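/* irq_lock protects the active_fds list and each entry's irq_array;
 * the epoll dispatch path in sigio_handler() runs without it, which is
 * safe because writers call os_del_epoll_fd() on the fd before they
 * touch the entry (see the comments below).
 */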
static void irq_io_loop(struct irq_fd *irq, struct uml_pt_regs *regs)
{
/*
 * irq->active guards against reentry
 * irq->pending accumulates pending requests
 * if pending is raised the irq_handler is re-run
 * until pending is cleared
 */
	if (irq->active) {
		irq->active = false;
		do {
			irq->pending = false;
			do_IRQ(irq->irq, regs);
		} while (irq->pending && (!irq->purge));
		if (!irq->purge)
			irq->active = true;
	} else {
		irq->pending = true;
	}
}
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	struct irq_entry *irq_entry;
	struct irq_fd *irq;
	int n, i, j;

	/* This is now lockless - epoll keeps back-references to the irqs
	 * which have triggered it so there is no need to walk the irq
	 * list and lock it every time. We avoid locking by turning off
	 * IO for a specific fd by executing os_del_epoll_fd(fd) before
	 * we do any changes to the actual data structures
	 */
	n = os_waiting_for_events_epoll();
	for (i = 0; i < n; i++) {
		/* Epoll back reference is the entry with 3 irq_fd
		 * leaves - one for each irq type.
		 */
		irq_entry = (struct irq_entry *)
			os_epoll_get_data_pointer(i);
		for (j = 0; j < MAX_IRQ_TYPE; j++) {
			irq = irq_entry->irq_array[j];
			if (irq == NULL)
				continue;
			if (os_epoll_triggered(i, irq->events) > 0)
				irq_io_loop(irq, regs);
			if (irq->purge) {
				irq_entry->irq_array[j] = NULL;
				kfree(irq);
			}
		}
	}
}
static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
{
	int i;
	int events = 0;
	struct irq_fd *irq;

	for (i = 0; i < MAX_IRQ_TYPE; i++) {
		irq = irq_entry->irq_array[i];
		if (irq != NULL)
			events = irq->events | events;
	}
	if (events > 0) {
		/* os_add_epoll will call os_mod_epoll if this already exists */
		return os_add_epoll_fd(events, irq_entry->fd, irq_entry);
	}
	/* No events - delete */
	return os_del_epoll_fd(irq_entry->fd);
}
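/* For example, an fd registered for both IRQ_READ and IRQ_WRITE ends up
 * with a single epoll registration whose mask is the union of the two
 * per-type event masks (typically EPOLLIN | EPOLLOUT, depending on what
 * os_event_mask() returns for each type).
 */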
static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct irq_fd *new_fd;
	struct irq_entry *irq_entry;
	int i, err, events;
	unsigned long flags;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	spin_lock_irqsave(&irq_lock, flags);

	/* Check if we have an entry for this fd */

	err = -EBUSY;
	for (irq_entry = active_fds;
		irq_entry != NULL; irq_entry = irq_entry->next) {
		if (irq_entry->fd == fd)
			break;
	}

	if (irq_entry == NULL) {
		/* This needs to be atomic as it may be called from an
		 * IRQ context.
		 */
		irq_entry = kmalloc(sizeof(struct irq_entry), GFP_ATOMIC);
		if (irq_entry == NULL) {
			printk(KERN_ERR
				"Failed to allocate new IRQ entry\n");
			goto out_unlock;
		}
		irq_entry->fd = fd;
		for (i = 0; i < MAX_IRQ_TYPE; i++)
			irq_entry->irq_array[i] = NULL;
		irq_entry->next = active_fds;
		active_fds = irq_entry;
	}

	/* Check if we are trying to re-register an interrupt for a
	 * particular fd
	 */

	if (irq_entry->irq_array[type] != NULL) {
		printk(KERN_ERR
			"Trying to reregister IRQ %d FD %d TYPE %d ID %p\n",
			irq, fd, type, dev_id
		);
		goto out_unlock;
	} else {
		/* New entry for this fd */

		err = -ENOMEM;
		new_fd = kmalloc(sizeof(struct irq_fd), GFP_ATOMIC);
		if (new_fd == NULL)
			goto out_unlock;

		events = os_event_mask(type);

		*new_fd = ((struct irq_fd) {
			.id		= dev_id,
			.irq		= irq,
			.type		= type,
			.events		= events,
			.active		= true,
			.pending	= false,
			.purge		= false
		});
		/* Turn off any IO on this fd - allows us to
		 * avoid locking the IRQ loop
		 */
		os_del_epoll_fd(irq_entry->fd);
		irq_entry->irq_array[type] = new_fd;
	}

	/* Turn back IO on with the correct (new) IO event mask */
	assign_epoll_events_to_irq(irq_entry);
	spin_unlock_irqrestore(&irq_lock, flags);
	maybe_sigio_broken(fd, (type != IRQ_NONE));

	return 0;
out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
out:
	return err;
}
/*
 * Walk the IRQ list and dispose of any unused entries.
 * Should be done under irq_lock.
 */

static void garbage_collect_irq_entries(void)
{
	int i;
	bool reap;
	struct irq_entry *walk;
	struct irq_entry *previous = NULL;
	struct irq_entry *to_free;

	if (active_fds == NULL)
		return;
	walk = active_fds;
	while (walk != NULL) {
		reap = true;
		for (i = 0; i < MAX_IRQ_TYPE; i++) {
			if (walk->irq_array[i] != NULL) {
				reap = false;
				break;
			}
		}
		if (reap) {
			if (previous == NULL)
				active_fds = walk->next;
			else
				previous->next = walk->next;
			to_free = walk;
		} else {
			to_free = NULL;
			previous = walk;
		}
		walk = walk->next;
		kfree(to_free);
	}
}
/*
 * Walk the IRQ list and get the descriptor for our FD
 */

static struct irq_entry *get_irq_entry_by_fd(int fd)
{
	struct irq_entry *walk = active_fds;

	while (walk != NULL) {
		if (walk->fd == fd)
			return walk;
		walk = walk->next;
	}
	return NULL;
}
/*
 * Walk the IRQ list and dispose of an entry for a specific
 * device, fd and number. Note - if sharing an IRQ for read
 * and write for the same FD it will be disposed in either case.
 * If this behaviour is undesirable use different IRQ ids.
 */

#define IGNORE_IRQ 1
#define IGNORE_DEV (1<<1)

static void do_free_by_irq_and_dev(
	struct irq_entry *irq_entry,
	unsigned int irq,
	void *dev,
	int flags
)
{
	int i;
	struct irq_fd *to_free;

	for (i = 0; i < MAX_IRQ_TYPE; i++) {
		if (irq_entry->irq_array[i] != NULL) {
			if (
			((flags & IGNORE_IRQ) ||
				(irq_entry->irq_array[i]->irq == irq)) &&
			((flags & IGNORE_DEV) ||
				(irq_entry->irq_array[i]->id == dev))
			) {
				/* Turn off any IO on this fd - allows us to
				 * avoid locking the IRQ loop
				 */
				os_del_epoll_fd(irq_entry->fd);
				to_free = irq_entry->irq_array[i];
				irq_entry->irq_array[i] = NULL;
				assign_epoll_events_to_irq(irq_entry);
				if (to_free->active)
					to_free->purge = true;
				else
					kfree(to_free);
			}
		}
	}
}
void free_irq_by_fd(int fd)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	if (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			-1,
			NULL,
			IGNORE_IRQ | IGNORE_DEV
		);
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(free_irq_by_fd);
static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = active_fds;
	while (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			irq,
			dev,
			0
		);
		to_free = to_free->next;
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
}
void reactivate_fd(int fd, int irqnum)
{
	/** NOP - we do auto-EOI now **/
}
void deactivate_fd(int fd, int irqnum)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	if (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			irqnum,
			NULL,
			IGNORE_DEV
		);
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(deactivate_fd);
/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting. No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	unsigned long flags;
	struct irq_entry *to_free;

	spin_lock_irqsave(&irq_lock, flags);
	/* Stop IO. The IRQ loop has no lock so this is our
	 * only way of making sure we are safe to dispose
	 * of all IRQ handlers
	 */
	os_set_ioignore();
	to_free = active_fds;
	while (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			-1,
			NULL,
			IGNORE_IRQ | IGNORE_DEV
		);
		to_free = to_free->next;
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
	return 0;
}
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}
void um_free_irq(unsigned int irq, void *dev)
{
	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
}
EXPORT_SYMBOL(um_free_irq);
int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	err = activate_fd(irq, fd, type, dev_id);
	if (err)
		return err;

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything else than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
	.name = "SIGVTALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

	for (i = 1; i < NR_IRQS; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	/* Initialize EPOLL Loop */
	os_setup_epoll();
}
/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation. We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack. The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * What happens when two signals race each other? UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with thread_info->pending.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled. If the value is
 * non-zero, then there is stack setup in progress. In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one. As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit). This is the
 * nesting indicator. If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in. So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}
	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}
unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}