/* Linux 4.18.10: arch/um/kernel/irq.c */
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
/* When epoll triggers we do not know why it did so;
 * we can also have different IRQs for read and write.
 * This is why we keep a small irq_fd array for each fd -
 * one entry per IRQ type.
 */

struct irq_entry {
	struct irq_entry *next;
	int fd;
	struct irq_fd *irq_array[MAX_IRQ_TYPE + 1];
};

static struct irq_entry *active_fds;

static DEFINE_SPINLOCK(irq_lock);
static void irq_io_loop(struct irq_fd *irq, struct uml_pt_regs *regs)
{
	/*
	 * irq->active guards against reentry
	 * irq->pending accumulates pending requests
	 * if pending is raised the irq_handler is re-run
	 * until pending is cleared
	 */
	if (irq->active) {
		irq->active = false;
		do {
			irq->pending = false;
			do_IRQ(irq->irq, regs);
		} while (irq->pending && (!irq->purge));
		if (!irq->purge)
			irq->active = true;
	} else {
		irq->pending = true;
	}
}
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	struct irq_entry *irq_entry;
	struct irq_fd *irq;

	int n, i, j;

	while (1) {
		/* This is now lockless - epoll keeps back-references to the irqs
		 * which have triggered it, so there is no need to walk the irq
		 * list and lock it every time. We avoid locking by turning off
		 * IO for a specific fd by executing os_del_epoll_fd(fd) before
		 * we do any changes to the actual data structures
		 */
		n = os_waiting_for_events_epoll();

		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (i = 0; i < n ; i++) {
			/* Epoll back reference is the entry with 3 irq_fd
			 * leaves - one for each irq type.
			 */
			irq_entry = (struct irq_entry *)
				os_epoll_get_data_pointer(i);
			for (j = 0; j < MAX_IRQ_TYPE ; j++) {
				irq = irq_entry->irq_array[j];
				if (irq == NULL)
					continue;
				if (os_epoll_triggered(i, irq->events) > 0)
					irq_io_loop(irq, regs);
				if (irq->purge) {
					irq_entry->irq_array[j] = NULL;
					kfree(irq);
				}
			}
		}
	}
}
static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
{
	int i;
	int events = 0;
	struct irq_fd *irq;

	for (i = 0; i < MAX_IRQ_TYPE ; i++) {
		irq = irq_entry->irq_array[i];
		if (irq != NULL)
			events = irq->events | events;
	}
	if (events > 0) {
		/* os_add_epoll will call os_mod_epoll if this already exists */
		return os_add_epoll_fd(events, irq_entry->fd, irq_entry);
	}
	/* No events - delete */
	return os_del_epoll_fd(irq_entry->fd);
}
static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct irq_fd *new_fd;
	struct irq_entry *irq_entry;
	int i, err, events;
	unsigned long flags;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	spin_lock_irqsave(&irq_lock, flags);

	/* Check if we have an entry for this fd */

	err = -EBUSY;
	for (irq_entry = active_fds;
		irq_entry != NULL; irq_entry = irq_entry->next) {
		if (irq_entry->fd == fd)
			break;
	}

	if (irq_entry == NULL) {
		/* This needs to be atomic as it may be called from an
		 * IRQ context.
		 */
		irq_entry = kmalloc(sizeof(struct irq_entry), GFP_ATOMIC);
		if (irq_entry == NULL) {
			printk(KERN_ERR
				"Failed to allocate new IRQ entry\n");
			goto out_unlock;
		}
		irq_entry->fd = fd;
		for (i = 0; i < MAX_IRQ_TYPE; i++)
			irq_entry->irq_array[i] = NULL;
		irq_entry->next = active_fds;
		active_fds = irq_entry;
	}

	/* Check if we are trying to re-register an interrupt for a
	 * particular fd
	 */

	if (irq_entry->irq_array[type] != NULL) {
		printk(KERN_ERR
			"Trying to reregister IRQ %d FD %d TYPE %d ID %p\n",
			irq, fd, type, dev_id
		);
		goto out_unlock;
	} else {
		/* New entry for this fd */

		err = -ENOMEM;
		new_fd = kmalloc(sizeof(struct irq_fd), GFP_ATOMIC);
		if (new_fd == NULL)
			goto out_unlock;

		events = os_event_mask(type);

		*new_fd = ((struct irq_fd) {
			.id = dev_id,
			.irq = irq,
			.type = type,
			.events = events,
			.active = true,
			.pending = false,
			.purge = false
		});
		/* Turn off any IO on this fd - allows us to
		 * avoid locking the IRQ loop
		 */
		os_del_epoll_fd(irq_entry->fd);
		irq_entry->irq_array[type] = new_fd;
	}

	/* Turn back IO on with the correct (new) IO event mask */
	assign_epoll_events_to_irq(irq_entry);
	spin_unlock_irqrestore(&irq_lock, flags);
	maybe_sigio_broken(fd, (type != IRQ_NONE));

	return 0;
out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
out:
	return err;
}
/*
 * Walk the IRQ list and dispose of any unused entries.
 * Should be done under irq_lock.
 */

static void garbage_collect_irq_entries(void)
{
	int i;
	bool reap;
	struct irq_entry *walk;
	struct irq_entry *previous = NULL;
	struct irq_entry *to_free;

	if (active_fds == NULL)
		return;
	walk = active_fds;
	while (walk != NULL) {
		reap = true;
		for (i = 0; i < MAX_IRQ_TYPE ; i++) {
			if (walk->irq_array[i] != NULL) {
				reap = false;
				break;
			}
		}
		if (reap) {
			if (previous == NULL)
				active_fds = walk->next;
			else
				previous->next = walk->next;
			to_free = walk;
		} else {
			/* Keep this entry and remember it, so that a later
			 * unlink does not detach the wrong node.
			 */
			previous = walk;
			to_free = NULL;
		}
		walk = walk->next;
		if (to_free != NULL)
			kfree(to_free);
	}
}
/*
 * Walk the IRQ list and get the descriptor for our FD
 */

static struct irq_entry *get_irq_entry_by_fd(int fd)
{
	struct irq_entry *walk = active_fds;

	while (walk != NULL) {
		if (walk->fd == fd)
			return walk;
		walk = walk->next;
	}
	return NULL;
}
/*
 * Walk the IRQ list and dispose of an entry for a specific
 * device, fd and number. Note - if sharing an IRQ for read
 * and write for the same FD it will be disposed in either case.
 * If this behaviour is undesirable use different IRQ ids.
 */

#define IGNORE_IRQ 1
#define IGNORE_DEV (1<<1)

static void do_free_by_irq_and_dev(
	struct irq_entry *irq_entry,
	unsigned int irq,
	void *dev,
	int flags
)
{
	int i;
	struct irq_fd *to_free;

	for (i = 0; i < MAX_IRQ_TYPE ; i++) {
		if (irq_entry->irq_array[i] != NULL) {
			if (
				((flags & IGNORE_IRQ) ||
				(irq_entry->irq_array[i]->irq == irq)) &&
				((flags & IGNORE_DEV) ||
				(irq_entry->irq_array[i]->id == dev))
			) {
				/* Turn off any IO on this fd - allows us to
				 * avoid locking the IRQ loop
				 */
				os_del_epoll_fd(irq_entry->fd);
				to_free = irq_entry->irq_array[i];
				irq_entry->irq_array[i] = NULL;
				assign_epoll_events_to_irq(irq_entry);
				if (to_free->active)
					to_free->purge = true;
				else
					kfree(to_free);
			}
		}
	}
}
void free_irq_by_fd(int fd)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	if (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			-1,
			NULL,
			IGNORE_IRQ | IGNORE_DEV
		);
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(free_irq_by_fd);
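/*
 * Illustration only (a minimal sketch, not taken from an in-tree caller):
 * a driver that is about to close a descriptor it registered IRQs on
 * would typically drop the IRQ bookkeeping first, so the fd is removed
 * from the epoll set before it becomes invalid, roughly:
 *
 *	free_irq_by_fd(fd);
 *	os_close_file(fd);
 *
 * os_close_file() is assumed here as the usual UML close helper; any
 * close path works, the ordering is the point.
 */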
static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = active_fds;
	while (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			irq,
			dev,
			0
		);
		to_free = to_free->next;
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
}
void reactivate_fd(int fd, int irqnum)
{
	/** NOP - we do auto-EOI now **/
}
void deactivate_fd(int fd, int irqnum)
{
	struct irq_entry *to_free;
	unsigned long flags;

	os_del_epoll_fd(fd);
	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	if (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			irqnum,
			NULL,
			IGNORE_DEV
		);
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);
/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting. No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	unsigned long flags;
	struct irq_entry *to_free;

	spin_lock_irqsave(&irq_lock, flags);
	/* Stop IO. The IRQ loop has no lock so this is our
	 * only way of making sure we are safe to dispose
	 * of all IRQ handlers
	 */
	os_set_ioignore();
	to_free = active_fds;
	while (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			-1,
			NULL,
			IGNORE_IRQ | IGNORE_DEV
		);
		to_free = to_free->next;
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
	os_close_epoll_fd();
	return 0;
}
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

void um_free_irq(unsigned int irq, void *dev)
{
	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
}
EXPORT_SYMBOL(um_free_irq);
int um_request_irq(unsigned int irq, int fd, int type,
		irq_handler_t handler,
		unsigned long irqflags, const char *devname,
		void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
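/*
 * Illustration only (a minimal usage sketch, not from the original file):
 * how a UML driver would typically register a host file descriptor with
 * this API. "mydev", MYDEV_IRQ and mydev_interrupt() are hypothetical;
 * IRQ_READ is one of the IRQ types from irq_user.h, and the handler is
 * an ordinary Linux interrupt handler.
 *
 *	static irqreturn_t mydev_interrupt(int irq, void *dev_id)
 *	{
 *		struct mydev *dev = dev_id;
 *
 *		// drain dev->fd and push the data into the driver
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = um_request_irq(MYDEV_IRQ, dev->fd, IRQ_READ,
 *			mydev_interrupt, 0, "mydev", dev);
 *	if (err)
 *		goto fail;
 *	...
 *	um_free_irq(MYDEV_IRQ, dev);	// drops the fd entry, then the IRQ
 */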
/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
	.name = "SIGVTALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};
void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

	for (i = 1; i < NR_IRQS; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	/* Initialize EPOLL Loop */
	os_setup_epoll();
}
/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}
unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}