/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 *
 *	Softirq-split implementation by
 *	Copyright (C) 2005 Thomas Gleixner, Ingo Molnar
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <trace/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
struct softirqdata {
	int			nr;
	unsigned long		cpu;
	struct task_struct	*tsk;
#ifdef CONFIG_PREEMPT_SOFTIRQS
	wait_queue_head_t	wait;
	int			running;
#endif
};

static DEFINE_PER_CPU(struct softirqdata [MAX_SOFTIRQ], ksoftirqd);

#ifdef CONFIG_PREEMPT_SOFTIRQS
/*
 * Preempting the softirq causes cases that would not be a
 * problem when the softirq is not preempted. That is, a
 * process may have code to spin while waiting for a softirq
 * to finish on another CPU. But if it happens that the
 * process has preempted the softirq, this could cause a
 * deadlock.
 */
void wait_for_softirq(int softirq)
{
	struct softirqdata *data = &__get_cpu_var(ksoftirqd)[softirq];

	if (data->running) {
		DECLARE_WAITQUEUE(wait, current);

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&data->wait, &wait);
		if (data->running)
			schedule();
		remove_wait_queue(&data->wait, &wait);
		__set_current_state(TASK_RUNNING);
	}
}
#endif
char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(int softirq)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd)[softirq].tsk;

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * Wake up the softirq threads which have work:
 */
static void trigger_softirqs(void)
{
	u32 pending = local_softirq_pending();
	int curr = 0;

	while (pending) {
		if (pending & 1)
			wakeup_softirqd(curr);
		pending >>= 1;
		curr++;
	}
}
#ifndef CONFIG_PREEMPT_HARDIRQS

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);
void local_bh_enable(void)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable_ip);
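/*
 * Usage sketch (illustrative, not part of this file): the usual pattern
 * for code that shares per-CPU data with a softirq. Everything here
 * except local_bh_disable()/local_bh_enable() is hypothetical.
 *
 *	static LIST_HEAD(my_list);		// also touched from a softirq
 *
 *	static void my_add(struct list_head *e)
 *	{
 *		local_bh_disable();		// no softirqs on this CPU now
 *		list_add(e, &my_list);
 *		local_bh_enable();		// may run pending softirqs here
 *	}
 *
 * On SMP a real user would additionally take a lock (e.g. spin_lock_bh());
 * local_bh_disable() only serializes against softirqs on the local CPU.
 */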
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 20
DEFINE_TRACE(softirq_entry);
DEFINE_TRACE(softirq_exit);

static DEFINE_PER_CPU(u32, softirq_running);
/*
 * Debug check for leaking preempt counts in h->action handlers:
 */

static inline void debug_check_preempt_count_start(__u32 *preempt_count)
{
#ifdef CONFIG_DEBUG_PREEMPT
	*preempt_count = preempt_count();
#endif
}

static inline void
debug_check_preempt_count_stop(__u32 *preempt_count, struct softirq_action *h)
{
#ifdef CONFIG_DEBUG_PREEMPT
	if (*preempt_count == preempt_count())
		return;

	print_symbol("BUG: %Ps exited with wrong preemption count!\n",
		     (unsigned long)h->action);
	printk("=> enter: %08x, exit: %08x.\n", *preempt_count, preempt_count());
	preempt_count() = *preempt_count;
#endif
}
/*
 * Execute softirq handlers:
 */
static void ___do_softirq(const int same_prio_only)
{
	__u32 pending, available_mask, same_prio_skipped, preempt_count;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	int cpu, softirq;

	pending = local_softirq_pending();
	account_system_vtime(current);

	cpu = smp_processor_id();
restart:
	available_mask = -1;
	softirq = 0;
	same_prio_skipped = 0;
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	h = softirq_vec;

	do {
		u32 softirq_mask = 1 << softirq;

		if (!(pending & 1))
			goto next;

		debug_check_preempt_count_start(&preempt_count);

#if defined(CONFIG_PREEMPT_SOFTIRQS) && defined(CONFIG_PREEMPT_HARDIRQS)
		/*
		 * If executed by a same-prio hardirq thread
		 * then skip pending softirqs that belong
		 * to softirq threads with different priority:
		 */
		if (same_prio_only) {
			struct task_struct *tsk;

			tsk = __get_cpu_var(ksoftirqd)[softirq].tsk;
			if (tsk && tsk->normal_prio != current->normal_prio) {
				same_prio_skipped |= softirq_mask;
				available_mask &= ~softirq_mask;
				goto next;
			}
		}
#endif
		/*
		 * Is this softirq already being processed?
		 */
		if (per_cpu(softirq_running, cpu) & softirq_mask) {
			available_mask &= ~softirq_mask;
			goto next;
		}
		per_cpu(softirq_running, cpu) |= softirq_mask;
		local_irq_enable();

		h->action(h);

		debug_check_preempt_count_stop(&preempt_count, h);

		rcu_bh_qsctr_inc(cpu);
		cond_resched_softirq_context();
		local_irq_disable();
		per_cpu(softirq_running, cpu) &= ~softirq_mask;
next:
		h++;
		softirq++;
		pending >>= 1;
	} while (pending);

	or_softirq_pending(same_prio_skipped);
	pending = local_softirq_pending();
	if (pending & available_mask) {
		if (--max_restart)
			goto restart;
	}

	if (pending)
		trigger_softirqs();
}
asmlinkage void __do_softirq(void)
{
#ifdef CONFIG_PREEMPT_SOFTIRQS
	/*
	 * 'preempt harder'. Push all softirq processing off to ksoftirqd.
	 */
	if (softirq_preemption) {
		if (local_softirq_pending())
			trigger_softirqs();
		return;
	}
#endif
	/*
	 * 'immediate' softirq execution:
	 */
	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	___do_softirq(0);

	lockdep_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();
	if (pending)
		__do_softirq();
	local_irq_restore(flags);
}

#endif
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	__preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__do_raise_softirq_irqoff(nr);

#ifdef CONFIG_PREEMPT_SOFTIRQS
	wakeup_softirqd(nr);
#endif
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
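/*
 * Usage sketch (illustrative, not part of this file): how a subsystem
 * wires up a softirq handler. MY_SOFTIRQ and my_softirq_action are
 * hypothetical names; real users add their own entry to the softirq
 * enum in <linux/interrupt.h>.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// runs in softirq context, per CPU
 *	}
 *
 *	// once, at init time:
 *	//	open_softirq(MY_SOFTIRQ, my_softirq_action);
 *	// from irq context, to request execution on this CPU:
 *	//	raise_softirq(MY_SOFTIRQ);
 */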
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
static inline void
__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
{
	if (tasklet_trylock(t)) {
again:
		/* We may have been preempted before tasklet_trylock
		 * and __tasklet_action may have already run.
		 * So double check the sched bit while the tasklet
		 * is locked before adding it to the list.
		 */
		if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
			t->next = NULL;
			*head->tail = t;
			head->tail = &(t->next);
			raise_softirq_irqoff(nr);
			tasklet_unlock(t);
		} else {
			/* This is subtle. If we hit the corner case above,
			 * it is possible that we get preempted right here,
			 * and another task has successfully called
			 * tasklet_schedule(), then this function, and
			 * failed on the trylock. Thus we must be sure
			 * before releasing the tasklet lock, that the
			 * SCHED_BIT is clear. Otherwise the tasklet
			 * may get its SCHED_BIT set, but not added to the
			 * list.
			 */
			if (!tasklet_tryunlock(t))
				goto again;
		}
	}
}
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	__tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	__tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	__tasklet_hi_schedule(t);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);
void tasklet_enable(struct tasklet_struct *t)
{
	if (!atomic_dec_and_test(&t->count))
		return;
	if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
		tasklet_schedule(t);
}

EXPORT_SYMBOL(tasklet_enable);

void tasklet_hi_enable(struct tasklet_struct *t)
{
	if (!atomic_dec_and_test(&t->count))
		return;
	if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
		tasklet_hi_schedule(t);
}

EXPORT_SYMBOL(tasklet_hi_enable);
static void
__tasklet_action(struct softirq_action *a, struct tasklet_struct *list)
{
	int loops = 1000000;

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		/*
		 * Should always succeed - after a tasklet got on the
		 * list (after getting the SCHED bit set from 0 to 1),
		 * nothing but the tasklet softirq it got queued to can
		 * lock it:
		 */
		if (!tasklet_trylock(t)) {
			WARN_ON(1);
			continue;
		}

		t->next = NULL;

		/*
		 * If we cannot handle the tasklet because it's disabled,
		 * mark it as pending. tasklet_enable() will later
		 * re-schedule the tasklet.
		 */
		if (unlikely(atomic_read(&t->count))) {
out_disabled:
			/* implicit unlock: */
			wmb();
			t->state = TASKLET_STATEF_PENDING;
			continue;
		}

		/*
		 * After this point on the tasklet might be rescheduled
		 * on another CPU, but it can only be added to another
		 * CPU's tasklet list if we unlock the tasklet (which we
		 * don't do yet).
		 */
		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
			WARN_ON(1);

again:
		t->func(t->data);

		/*
		 * Try to unlock the tasklet. We must use cmpxchg, because
		 * another CPU might have scheduled or disabled the tasklet.
		 * We only allow the STATE_RUN -> 0 transition here.
		 */
		while (!tasklet_tryunlock(t)) {
			/*
			 * If it got disabled meanwhile, bail out:
			 */
			if (atomic_read(&t->count))
				goto out_disabled;
			/*
			 * If it got scheduled meanwhile, re-execute
			 * the tasklet function:
			 */
			if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
				goto again;
			if (!--loops) {
				printk("hm, tasklet state: %08lx\n", t->state);
				WARN_ON(1);
				tasklet_unlock(t);
				break;
			}
		}
	}
}
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	__tasklet_action(a, list);
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	__tasklet_action(a, list);
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			msleep(1);
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
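/*
 * Usage sketch (illustrative, not part of this file): the typical tasklet
 * lifecycle in a driver. my_dev, my_tasklet and my_tasklet_func are
 * hypothetical names.
 *
 *	static void my_tasklet_func(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		// deferred work, runs from the tasklet softirq
 *	}
 *
 *	// probe:  tasklet_init(&dev->my_tasklet, my_tasklet_func,
 *	//                      (unsigned long)dev);
 *	// irq:    tasklet_schedule(&dev->my_tasklet);
 *	// remove: tasklet_kill(&dev->my_tasklet);  // may sleep, see above
 */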
DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif
/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
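/*
 * Usage sketch (illustrative, not part of this file): completing work on
 * the CPU that originally submitted it, in the style of the block layer.
 * struct my_req and its fields are hypothetical; the caller must keep the
 * call_single_data alive until the remote softirq has consumed it.
 *
 *	struct my_req {
 *		struct call_single_data	csd;
 *		int			submit_cpu;
 *	};
 *
 *	static void my_complete(struct my_req *rq)
 *	{
 *		// __local_trigger()/remote_softirq_receive() will link
 *		// rq->csd.list onto that CPU's softirq_work_list[]:
 *		send_remote_softirq(&rq->csd, rq->submit_cpu, BLOCK_SOFTIRQ);
 *	}
 */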
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		/*
		 * Hack for now to avoid this busy-loop:
		 */
#ifdef CONFIG_PREEMPT_RT
		msleep(1);
#else
		barrier();
#endif
	}
}
EXPORT_SYMBOL(tasklet_unlock_wait);

#endif
static int ksoftirqd(void * __data)
{
	/* Priority needs to be below hardirqs */
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2 - 1};
	struct softirqdata *data = __data;
	u32 softirq_mask = (1 << data->nr);
	struct softirq_action *h;
	int cpu = data->cpu;

#ifdef CONFIG_PREEMPT_SOFTIRQS
	init_waitqueue_head(&data->wait);
#endif

	sys_sched_setscheduler(current->pid, SCHED_FIFO, &param);
	current->flags |= PF_SOFTIRQ;
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!(local_softirq_pending() & softirq_mask)) {
sleep_more:
			__preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

#ifdef CONFIG_PREEMPT_SOFTIRQS
		data->running = 1;
#endif

		while (local_softirq_pending() & softirq_mask) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline(cpu))
				goto wait_to_die;

			local_irq_disable();
			/*
			 * Is the softirq already being executed by
			 * a hardirq context?
			 */
			if (per_cpu(softirq_running, cpu) & softirq_mask) {
				local_irq_enable();
				set_current_state(TASK_INTERRUPTIBLE);
				goto sleep_more;
			}
			per_cpu(softirq_running, cpu) |= softirq_mask;
			__preempt_enable_no_resched();
			set_softirq_pending(local_softirq_pending() & ~softirq_mask);
			local_bh_disable();
			local_irq_enable();

			h = &softirq_vec[data->nr];
			if (h)
				h->action(h);
			rcu_bh_qsctr_inc(data->cpu);

			local_irq_disable();
			per_cpu(softirq_running, cpu) &= ~softirq_mask;
			_local_bh_enable();
			local_irq_enable();

			if (need_resched())
				schedule();

			rcu_qsctr_inc(data->cpu);
			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
#ifdef CONFIG_PREEMPT_SOFTIRQS
		data->running = 0;
		wake_up(&data->wait);
#endif
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static const char *softirq_names [] =
{
	[HI_SOFTIRQ]		= "high",
	[SCHED_SOFTIRQ]		= "sched",
	[TIMER_SOFTIRQ]		= "timer",
	[NET_TX_SOFTIRQ]	= "net-tx",
	[NET_RX_SOFTIRQ]	= "net-rx",
	[BLOCK_SOFTIRQ]		= "block",
	[TASKLET_SOFTIRQ]	= "tasklet",
#ifdef CONFIG_HIGH_RES_TIMERS
	[HRTIMER_SOFTIRQ]	= "hrtimer",
#endif
	[RCU_SOFTIRQ]		= "rcu",
};
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu, i;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		for (i = 0; i < MAX_SOFTIRQ; i++) {
			per_cpu(ksoftirqd, hotcpu)[i].nr = i;
			per_cpu(ksoftirqd, hotcpu)[i].cpu = hotcpu;
			per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL;
		}
		for (i = 0; i < MAX_SOFTIRQ; i++) {
			p = kthread_create(ksoftirqd,
					   &per_cpu(ksoftirqd, hotcpu)[i],
					   "sirq-%s/%d", softirq_names[i],
					   hotcpu);
			if (IS_ERR(p)) {
				printk("ksoftirqd %d for %i failed\n", i,
				       hotcpu);
				return NOTIFY_BAD;
			}
			kthread_bind(p, hotcpu);
			per_cpu(ksoftirqd, hotcpu)[i].tsk = p;
		}
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		for (i = 0; i < MAX_SOFTIRQ; i++)
			wake_up_process(per_cpu(ksoftirqd, hotcpu)[i].tsk);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		/* Unbind so it can run. Fall thru. */
		for (i = 0; i < MAX_SOFTIRQ; i++) {
			if (!per_cpu(ksoftirqd, hotcpu)[i].tsk)
				continue;
			kthread_bind(per_cpu(ksoftirqd, hotcpu)[i].tsk,
				     any_online_cpu(cpu_online_map));
		}
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param;

		for (i = 0; i < MAX_SOFTIRQ; i++) {
			param.sched_priority = MAX_RT_PRIO-1;
			p = per_cpu(ksoftirqd, hotcpu)[i].tsk;
			sched_setscheduler(p, SCHED_FIFO, &param);
			per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL;
			kthread_stop(p);
		}
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);
#ifdef CONFIG_PREEMPT_SOFTIRQS

int softirq_preemption = 1;

EXPORT_SYMBOL(softirq_preemption);

/*
 * Real-Time Preemption depends on softirq threading:
 */
#ifndef CONFIG_PREEMPT_RT

static int __init softirq_preempt_setup(char *str)
{
	if (!strncmp(str, "off", 3))
		softirq_preemption = 0;
	else
		get_option(&str, &softirq_preemption);
	if (!softirq_preemption)
		printk("turning off softirq preemption!\n");

	return 1;
}

__setup("softirq-preempt=", softirq_preempt_setup);

#endif
#endif
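/*
 * Example: booting with "softirq-preempt=off" on the kernel command line
 * clears softirq_preemption, so __do_softirq() above runs softirqs
 * immediately instead of pushing them off to the per-softirq ksoftirqd
 * threads.
 */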
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
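/*
 * Usage sketch (illustrative, not part of this file): running a short
 * function on every CPU and waiting for completion. my_flush() is a
 * hypothetical callback; it must not sleep, since it runs with interrupts
 * disabled locally and from IPI context on the other CPUs.
 *
 *	static void my_flush(void *info)
 *	{
 *		// drain a per-CPU counter, flush a per-CPU cache, ...
 *	}
 *
 *	//	on_each_cpu(my_flush, NULL, 1);		// wait == 1
 */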
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	return 0;
}