[linux-rt-nao.git] / kernel / irq / manage.c
blob 82b89d38882a333228469dc73b998bbcd47e8f1c
/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */
#include <linux/irq.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include "internals.h"
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

cpumask_var_t irq_default_affinity;
/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int status;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		if (hardirq_preemption && !(desc->status & IRQ_NODELAY))
			wait_event(desc->wait_for_handler,
				   !(desc->status & IRQ_INPROGRESS));
		else
			while (desc->status & IRQ_INPROGRESS)
				cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
		spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);
}
EXPORT_SYMBOL(synchronize_irq);
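
/*
 * Illustrative usage sketch (editorial addition, not part of manage.c):
 * per the kernel-doc above, a driver should quiesce its hardware first
 * and must not hold any resource its handler needs while waiting.
 * All my_* names below are hypothetical.
 */
static void my_hw_mask_irqs(void)
{
	/* hypothetical: program the device to stop raising interrupts */
}

static void my_teardown(unsigned int irq, void *rx_ring)
{
	my_hw_mask_irqs();		/* stop the card raising interrupts */
	synchronize_irq(irq);		/* wait for handlers on other CPUs */
	kfree(rx_ring);			/* now nothing can still touch it */
}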
/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
	    !desc->chip->set_affinity)
		return 0;

	return 1;
}
/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @cpumask: cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc->chip->set_affinity)
		return -EINVAL;

	spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
		cpumask_copy(desc->affinity, cpumask);
		desc->chip->set_affinity(irq, cpumask);
	} else {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(desc->pending_mask, cpumask);
	}
#else
	cpumask_copy(desc->affinity, cpumask);
	desc->chip->set_affinity(irq, cpumask);
#endif
	desc->status |= IRQ_AFFINITY_SET;
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
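
/*
 * Illustrative sketch (editorial addition, not part of manage.c): binding
 * an interrupt to one CPU from driver or platform setup code.  The IRQ
 * number is whatever the caller owns; cpumask_of(0) names CPU 0.
 */
static void my_pin_irq_to_cpu0(unsigned int irq)
{
	if (irq_can_set_affinity(irq))
		irq_set_affinity(irq, cpumask_of(0));
}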
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpumask_any_and(desc->affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
	desc->chip->set_affinity(irq, desc->affinity);

	return 0;
}
#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
	return irq_select_affinity(irq);
}
#endif
/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc);
	spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}
#else

static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	return 0;
}

#endif
/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->chip->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
static void __enable_irq(struct irq_desc *desc, unsigned int irq)
{
	switch (desc->depth) {
	case 0:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}
/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq);
	spin_unlock_irqrestore(&desc->lock, flags);
#ifdef CONFIG_HARDIRQS_SW_RESEND
	/*
	 * Do a bh disable/enable pair to trigger any pending
	 * irq resend logic:
	 */
	local_bh_disable();
	local_bh_enable();
#endif
}
EXPORT_SYMBOL(enable_irq);
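
/*
 * Illustrative sketch (editorial addition, not part of manage.c):
 * disable_irq()/enable_irq() nest, so a critical section that must run
 * with the line quiet can always be bracketed, even if a caller higher
 * up already disabled the same line.  The register rewrite is hypothetical.
 */
static void my_reprogram_hw(unsigned int irq)
{
	disable_irq(irq);	/* also waits for running handlers */
	/* ... rewrite registers that the handler reads as well ... */
	enable_irq(irq);	/* line comes back only at the matching enable */
}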
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->chip->set_wake)
		ret = desc->chip->set_wake(irq, on);

	return ret;
}
/**
 * set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = 0;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->status |= IRQ_WAKEUP;
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->status &= ~IRQ_WAKEUP;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);
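
/*
 * Illustrative sketch (editorial addition, not part of manage.c): a driver
 * whose interrupt should wake the system enables wakeup on suspend and
 * balances the call on resume, mirroring the wake_depth counting above.
 * The my_* names are hypothetical.
 */
static unsigned int my_wake_irq;

static int my_suspend(void)
{
	return set_irq_wake(my_wake_irq, 1);	/* arm the line as a wake source */
}

static int my_resume(void)
{
	return set_irq_wake(my_wake_irq, 0);	/* must balance the enable */
}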
/*
 * If any action has IRQF_NODELAY then turn IRQ_NODELAY on:
 */
void recalculate_desc_flags(struct irq_desc *desc)
{
	struct irqaction *action;

	desc->status &= ~IRQ_NODELAY;
	for (action = desc->action; action; action = action->next)
		if (action->flags & IRQF_NODELAY)
			desc->status |= IRQ_NODELAY;
}
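
/*
 * Illustrative sketch (editorial addition, not part of manage.c): a handler
 * that must not be pushed into an IRQ thread passes IRQF_NODELAY when it is
 * registered; __setup_irq() then propagates it to IRQ_NODELAY via
 * recalculate_desc_flags() above.  The handler below is hypothetical.
 */
static irqreturn_t my_nodelay_handler(int irq, void *dev_id)
{
	/* runs in hard interrupt context even with hardirq threading on */
	return IRQ_HANDLED;
}

static int my_register_nodelay(unsigned int irq)
{
	return request_irq(irq, my_nodelay_handler, IRQF_NODELAY,
			   "my_nodelay", NULL);
}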
static int start_irq_thread(int irq, struct irq_desc *desc);
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;

	if (!desc)
		return 0;

	if (desc->status & IRQ_NOREQUEST)
		return 0;

	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;

	return !action;
}
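
/*
 * Illustrative sketch (editorial addition, not part of manage.c): probing
 * code can ask whether a line is currently grabbable before committing to
 * it; a non-zero result only means a request with these flags would not
 * conflict right now.
 */
static int my_probe_line(unsigned int irq)
{
	if (!can_request_irq(irq, IRQF_SHARED))
		return -EBUSY;
	return 0;
}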
void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}
int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags)
{
	int ret;
	struct irq_chip *chip = desc->chip;

	if (!chip || !chip->set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->set_type(irq, flags);

	if (ret)
		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
		       (int)flags, irq, chip->set_type);
	else {
		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
			flags |= IRQ_LEVEL;
		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
		desc->status |= flags;
	}

	return ret;
}
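
/*
 * Illustrative sketch (editorial addition, not part of manage.c): drivers
 * normally do not call __irq_set_trigger() themselves; they pass
 * IRQF_TRIGGER_* bits to request_irq() and __setup_irq() below forwards
 * them here.  The GPIO-button handler is hypothetical.
 */
static irqreturn_t my_button_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_request_falling_edge(unsigned int button_irq)
{
	return request_irq(button_irq, my_button_handler,
			   IRQF_TRIGGER_FALLING, "my_button", NULL);
}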
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags;
	int shared = 0;
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	if (!(new->flags & IRQF_NODELAY))
		if (start_irq_thread(irq, desc))
			return -ENOMEM;
	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

#if defined(CONFIG_IRQ_PER_CPU)
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
#endif

		/* add new interrupt at end of irq queue */
		do {
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}
	if (!shared) {
		irq_chip_set_defaults(desc->chip);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret) {
				spin_unlock_irqrestore(&desc->lock, flags);
				return ret;
			}
		} else
			compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
		if (new->flags & IRQF_PERCPU)
			desc->status |= IRQ_PER_CPU;
#endif

		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
			desc->chip->startup(irq);
		} else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING)
			desc->status |= IRQ_NO_BALANCING;

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc);

	} else if ((new->flags & IRQF_TRIGGER_MASK)
			&& (new->flags & IRQF_TRIGGER_MASK)
				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
		/* hope the handler works with the actual trigger mode... */
		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
			   irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
			   (int)(new->flags & IRQF_TRIGGER_MASK));
	}
	*old_ptr = new;

	/*
	 * Propagate any possible IRQF_NODELAY flag into IRQ_NODELAY:
	 */
	recalculate_desc_flags(desc);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	init_waitqueue_head(&desc->wait_for_handler);

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
		desc->status &= ~IRQ_SPURIOUS_DISABLED;
		__enable_irq(desc, irq);
	}

	spin_unlock_irqrestore(&desc->lock, flags);

	new->irq = irq;
	register_irq_proc(irq, desc);
	new->dir = new->threaded = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	spin_unlock_irqrestore(&desc->lock, flags);
	return -EBUSY;
}
/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __setup_irq(irq, desc, act);
}
EXPORT_SYMBOL_GPL(setup_irq);
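
/*
 * Illustrative sketch (editorial addition, not part of manage.c):
 * architecture code typically wires up early interrupts such as the timer
 * with a static irqaction, before the normal allocator paths are usable.
 * The handler, flags and IRQ number below are hypothetical.
 */
static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static struct irqaction my_timer_irqaction = {
	.handler = my_timer_interrupt,
	.flags	 = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_NODELAY,
	.name	 = "timer",
};

static void __init my_setup_timer_irq(void)
{
	setup_irq(0, &my_timer_irqaction);
}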
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->chip->release)
		desc->chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		desc->status |= IRQ_DISABLED;
		if (desc->chip->shutdown)
			desc->chip->shutdown(irq);
		else
			desc->chip->disable(irq);
	}
	recalculate_desc_flags(desc);
	spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save_nort(flags);
		action->handler(irq, dev_id);
		local_irq_restore_nort(flags);
	}
#endif
	return action;
}
/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	kfree(__free_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);
/**
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_DISABLED		Disable local interrupts while processing
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_irq(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
	 * the _first_ irqaction (sigh). That can cause oopsing, but
	 * the behavior is classified as "will not fix" so we need to
	 * start nudging drivers away from using that idiom.
	 */
	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
			(IRQF_SHARED|IRQF_DISABLED)) {
		pr_warning(
		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
			irq, devname);
	}

#ifdef CONFIG_LOCKDEP
	/*
	 * Lockdep wants atomic interrupt handlers:
	 */
	irqflags |= IRQF_DISABLED;
#endif
	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NOREQUEST)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;
	action->handler = handler;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ
	if (irqflags & IRQF_SHARED) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save_nort(flags);

		handler(irq, dev_id);

		local_irq_restore_nort(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_irq);
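
/*
 * Illustrative sketch (editorial addition, not part of manage.c): a typical
 * shared-IRQ user following the kernel-doc above.  A non-NULL dev_id (here
 * the device structure) is passed so the matching free_irq() can identify
 * this action on the shared line.  All my_nic_* names are hypothetical.
 */
struct my_nic {
	unsigned int irq;
};

static int my_nic_irq_pending(struct my_nic *nic)
{
	/* hypothetical: a real driver reads the chip's status register here */
	return 1;
}

static irqreturn_t my_nic_interrupt(int irq, void *dev_id)
{
	struct my_nic *nic = dev_id;

	if (!my_nic_irq_pending(nic))
		return IRQ_NONE;	/* not ours: the line is shared */
	/* ... acknowledge and handle the device event ... */
	return IRQ_HANDLED;
}

static int my_nic_open(struct my_nic *nic)
{
	return request_irq(nic->irq, my_nic_interrupt, IRQF_SHARED,
			   "my_nic", nic);
}

static void my_nic_close(struct my_nic *nic)
{
	free_irq(nic->irq, nic);	/* same dev_id as request_irq() */
}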
#ifdef CONFIG_PREEMPT_HARDIRQS

int hardirq_preemption = 1;

EXPORT_SYMBOL(hardirq_preemption);

/*
 * Real-Time Preemption depends on hardirq threading:
 */
#ifndef CONFIG_PREEMPT_RT

static int __init hardirq_preempt_setup(char *str)
{
	if (!strncmp(str, "off", 3))
		hardirq_preemption = 0;
	else
		get_option(&str, &hardirq_preemption);
	if (!hardirq_preemption)
		printk("turning off hardirq preemption!\n");

	return 1;
}

__setup("hardirq-preempt=", hardirq_preempt_setup);

#endif
/*
 * threaded simple handler
 */
static void thread_simple_irq(irq_desc_t *desc)
{
	struct irqaction *action = desc->action;
	unsigned int irq = desc->irq;
	irqreturn_t action_ret;

	do {
		if (!action || desc->depth)
			break;
		desc->status &= ~IRQ_PENDING;
		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, action);
		cond_resched_hardirq_context();
		spin_lock_irq(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
	} while (desc->status & IRQ_PENDING);
	desc->status &= ~IRQ_INPROGRESS;
}
/*
 * threaded level type irq handler
 */
static void thread_level_irq(irq_desc_t *desc)
{
	unsigned int irq = desc->irq;

	thread_simple_irq(desc);
	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
		desc->chip->unmask(irq);
}
/*
 * threaded fasteoi type irq handler
 */
static void thread_fasteoi_irq(irq_desc_t *desc)
{
	unsigned int irq = desc->irq;

	thread_simple_irq(desc);
	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
		desc->chip->unmask(irq);
}
/*
 * threaded edge type IRQ handler
 */
static void thread_edge_irq(irq_desc_t *desc)
{
	unsigned int irq = desc->irq;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		if (unlikely(!action)) {
			desc->status &= ~IRQ_INPROGRESS;
			desc->chip->mask(irq);
			return;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(((desc->status & (IRQ_PENDING | IRQ_MASKED)) ==
			      (IRQ_PENDING | IRQ_MASKED)) && !desc->depth))
			desc->chip->unmask(irq);

		desc->status &= ~IRQ_PENDING;
		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, action);
		cond_resched_hardirq_context();
		spin_lock_irq(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
	} while ((desc->status & IRQ_PENDING) && !desc->depth);

	desc->status &= ~IRQ_INPROGRESS;
}
/*
 * threaded handler for the remaining (__do_IRQ style) flow types
 */
static void thread_do_irq(irq_desc_t *desc)
{
	unsigned int irq = desc->irq;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		if (unlikely(!action)) {
			desc->status &= ~IRQ_INPROGRESS;
			desc->chip->disable(irq);
			return;
		}

		desc->status &= ~IRQ_PENDING;
		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, action);
		cond_resched_hardirq_context();
		spin_lock_irq(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
	} while ((desc->status & IRQ_PENDING) && !desc->depth);

	desc->status &= ~IRQ_INPROGRESS;
	desc->chip->end(irq);
}
static void do_hardirq(struct irq_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	if (!(desc->status & IRQ_INPROGRESS))
		goto out;

	if (desc->handle_irq == handle_simple_irq)
		thread_simple_irq(desc);
	else if (desc->handle_irq == handle_level_irq)
		thread_level_irq(desc);
	else if (desc->handle_irq == handle_fasteoi_irq)
		thread_fasteoi_irq(desc);
	else if (desc->handle_irq == handle_edge_irq)
		thread_edge_irq(desc);
	else
		thread_do_irq(desc);
out:
	spin_unlock_irqrestore(&desc->lock, flags);

	if (waitqueue_active(&desc->wait_for_handler))
		wake_up(&desc->wait_for_handler);
}
extern asmlinkage void __do_softirq(void);

static int do_irqd(void *__desc)
{
	struct sched_param param = { 0, };
	struct irq_desc *desc = __desc;

#ifdef CONFIG_SMP
	set_cpus_allowed_ptr(current, desc->affinity);
#endif
	current->flags |= PF_NOFREEZE | PF_HARDIRQ;

	/*
	 * Set irq thread priority to SCHED_FIFO/50:
	 */
	param.sched_priority = MAX_USER_RT_PRIO/2;

	sys_sched_setscheduler(current->pid, SCHED_FIFO, &param);

	while (!kthread_should_stop()) {
		local_irq_disable_nort();
		set_current_state(TASK_INTERRUPTIBLE);
#ifndef CONFIG_PREEMPT_RT
		irq_enter();
#endif
		do_hardirq(desc);
#ifndef CONFIG_PREEMPT_RT
		irq_exit();
#endif
		local_irq_enable_nort();
		cond_resched();
#ifdef CONFIG_SMP
		/*
		 * Did IRQ affinities change?
		 */
		if (!cpumask_equal(&current->cpus_allowed, desc->affinity))
			set_cpus_allowed_ptr(current, desc->affinity);
#endif
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
static int ok_to_create_irq_threads;

static int start_irq_thread(int irq, struct irq_desc *desc)
{
	if (desc->thread || !ok_to_create_irq_threads)
		return 0;

	init_waitqueue_head(&desc->wait_for_handler);

	desc->thread = kthread_create(do_irqd, desc, "IRQ-%d", irq);
	if (!desc->thread) {
		printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
		return -ENOMEM;
	}

	/*
	 * An interrupt may have come in before the thread pointer was
	 * stored in desc->thread; make sure the thread gets woken up in
	 * such a case:
	 */
	smp_mb();
	wake_up_process(desc->thread);

	return 0;
}
/*
 * Start hardirq threads for all IRQs that are registered already.
 *
 * New ones will be started at the time of IRQ setup from now on.
 */
void __init init_hardirqs(void)
{
	struct irq_desc *desc;
	int irq;

	ok_to_create_irq_threads = 1;

	for_each_irq_desc(irq, desc) {
		if (desc->action && !(desc->status & IRQ_NODELAY))
			start_irq_thread(irq, desc);
	}
}

#else

static int start_irq_thread(int irq, struct irq_desc *desc)
{
	return 0;
}

#endif
void __init early_init_hardirqs(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc)
		init_waitqueue_head(&desc->wait_for_handler);
}