/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();

	local_bh_disable();
	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	local_softirq_pending() = 0;

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			h->action(h);
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	__local_bh_enable();
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);

#endif
void local_bh_enable(void)
{
	__local_bh_enable();
	WARN_ON(irqs_disabled());
	if (unlikely(!in_interrupt() &&
		     local_softirq_pending()))
		do_softirq();
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);
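
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * uses local_bh_disable()/local_bh_enable() to protect data that is also
 * touched from softirq context on the same CPU. The my_stats structure
 * and my_update_stats() below are hypothetical names, not kernel APIs:
 *
 *	static struct my_stats stats;
 *
 *	static void my_update_stats(unsigned int bytes)
 *	{
 *		local_bh_disable();		// keep softirqs off this CPU
 *		stats.bytes += bytes;		// safe against our softirq handler
 *		local_bh_enable();		// pending softirqs may run now
 *	}
 */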
/*
 * This function must run with irqs disabled!
 */
inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

EXPORT_SYMBOL(raise_softirq_irqoff);
void fastcall raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(raise_softirq);
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}

EXPORT_SYMBOL(open_softirq);
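
/*
 * Illustrative sketch (not part of the original file): a subsystem would
 * register a handler with open_softirq() once at init time and raise the
 * softirq when there is work to do. MY_SOFTIRQ and my_softirq_action()
 * are hypothetical; in this kernel the softirq numbers are a fixed enum
 * in <linux/interrupt.h>, so a real user would have to add its own entry:
 *
 *	static void my_softirq_action(struct softirq_action *a)
 *	{
 *		// drain a per-CPU queue, etc.; runs with irqs enabled
 *	}
 *
 *	// at init time:
 *	open_softirq(MY_SOFTIRQ, my_softirq_action, NULL);
 *
 *	// later, e.g. from an interrupt handler:
 *	raise_softirq(MY_SOFTIRQ);
 */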
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = t;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);
void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = t;
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_vec).list;
		__get_cpu_var(tasklet_vec).list = t;
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_hi_vec).list;
		__get_cpu_var(tasklet_hi_vec).list = t;
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
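
/*
 * Illustrative sketch (not part of the original file): the usual tasklet
 * life cycle as a driver might use it. The my_dev structure and
 * my_tasklet_func() handler are hypothetical:
 *
 *	static void my_tasklet_func(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		// deferred work; runs in softirq context, serialized
 *		// against itself
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_func, (unsigned long)dev);
 *	tasklet_schedule(&dev->tasklet);	// typically from the irq handler
 *	tasklet_kill(&dev->tasklet);		// on teardown; may sleep
 */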
void __init softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}
static int ksoftirqd(void * __bind_cpu)
{
	set_user_nice(current, 19);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		if (!local_softirq_pending())
			schedule();

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			preempt_disable();
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable();
			cond_resched();
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	struct tasklet_struct **i;

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_vec, cpu).list;
	per_cpu(tasklet_vec, cpu).list = NULL;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_hi_vec, cpu).list;
	per_cpu(tasklet_hi_vec, cpu).list = NULL;
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
		BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu), smp_processor_id());
	case CPU_DEAD:
		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __devinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}