MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / kernel / softirq.c
/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get a sort of weak cpu binding. Though it is
     still not clear whether it will result in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: each tasklet is serialized with respect to itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        int max_restart = MAX_SOFTIRQ_RESTART;
        int cpu;

        pending = local_softirq_pending();

        local_bh_disable();
        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        local_softirq_pending() = 0;

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        h->action(h);
                        rcu_bh_qsctr_inc(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending && --max_restart)
                goto restart;

        if (pending)
                wakeup_softirqd();

        __local_bh_enable();
}
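/*
 * Editorial note, not part of the original file: the do/while loop above
 * scans the pending bitmask from the lowest bit upwards, so lower softirq
 * numbers run first on each pass.  For example, if pending == 0x05 on
 * entry, softirq_vec[0].action and softirq_vec[2].action are invoked, in
 * that order.  Softirqs raised while the handlers run are caught by the
 * re-read of local_softirq_pending() and trigger another pass, up to
 * MAX_SOFTIRQ_RESTART times, after which the remaining work is handed to
 * ksoftirqd via wakeup_softirqd().
 */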
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);

#endif
void local_bh_enable(void)
{
        __local_bh_enable();
        WARN_ON(irqs_disabled());
        if (unlikely(!in_interrupt() &&
                     local_softirq_pending()))
                invoke_softirq();
        preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);
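/*
 * Editorial note, not part of the original file: a minimal sketch of the
 * usual pairing.  Process-context code that shares data with a softirq or
 * tasklet typically brackets the critical section like this:
 *
 *      local_bh_disable();
 *      ... touch data also used by the softirq handler ...
 *      local_bh_enable();
 *
 * The local_bh_enable() above then runs any softirqs raised while bottom
 * halves were disabled, via invoke_softirq(), provided we are not already
 * in interrupt context.
 */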
/*
 * This function must run with irqs disabled!
 */
inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

EXPORT_SYMBOL(raise_softirq_irqoff);
void fastcall raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(raise_softirq);

void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
        softirq_vec[nr].data = data;
        softirq_vec[nr].action = action;
}

EXPORT_SYMBOL(open_softirq);
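/*
 * Editorial note, not part of the original file: a minimal usage sketch.
 * The softirq number MY_SOFTIRQ and the handler below are hypothetical;
 * real softirq numbers come from the enum in <linux/interrupt.h>.
 *
 *      static void my_action(struct softirq_action *a)
 *      {
 *              ... runs with interrupts enabled, bottom halves disabled ...
 *      }
 *
 *      open_softirq(MY_SOFTIRQ, my_action, NULL);
 *      ...
 *      raise_softirq(MY_SOFTIRQ);
 *
 * open_softirq() only fills in the softirq_vec[] slot; raise_softirq()
 * marks the vector pending on the local CPU so that __do_softirq() (or
 * ksoftirqd) runs it at the next opportunity.
 */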
/* Tasklets */
struct tasklet_head
{
        struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };

void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = __get_cpu_var(tasklet_vec).list;
        __get_cpu_var(tasklet_vec).list = t;
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = __get_cpu_var(tasklet_hi_vec).list;
        __get_cpu_var(tasklet_hi_vec).list = t;
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_vec).list;
        __get_cpu_var(tasklet_vec).list = NULL;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = __get_cpu_var(tasklet_vec).list;
                __get_cpu_var(tasklet_vec).list = t;
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}
static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_hi_vec).list;
        __get_cpu_var(tasklet_hi_vec).list = NULL;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = __get_cpu_var(tasklet_hi_vec).list;
                __get_cpu_var(tasklet_hi_vec).list = t;
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}
void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
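/*
 * Editorial note, not part of the original file: a typical driver usage
 * sketch, assuming the usual 2.6-era tasklet_schedule() wrapper from
 * <linux/interrupt.h>; the names below are hypothetical.
 *
 *      static void my_tasklet_fn(unsigned long data)
 *      {
 *              ... deferred work, runs in softirq context ...
 *      }
 *
 *      static struct tasklet_struct my_tasklet;
 *
 *      tasklet_init(&my_tasklet, my_tasklet_fn, 0);
 *      ...
 *      tasklet_schedule(&my_tasklet);
 *
 * tasklet_schedule() is usually called from the interrupt handler; it sets
 * TASKLET_STATE_SCHED and, if it was not already set, calls
 * __tasklet_schedule() above to queue the tasklet and raise TASKLET_SOFTIRQ
 * on the local CPU.
 */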
void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do
                        yield();
                while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
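/*
 * Editorial note, not part of the original file: tasklet_kill() yields and
 * waits until the tasklet is neither scheduled nor running, so it must be
 * called from process context (hence the printk warning above).  A driver
 * teardown path would normally quiesce the interrupt source first, e.g.
 * (names hypothetical):
 *
 *      free_irq(irq, dev);
 *      tasklet_kill(&my_tasklet);
 */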
void __init softirq_init(void)
{
        open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}
static int ksoftirqd(void * __bind_cpu)
{
        set_user_nice(current, 19);
        current->flags |= PF_NOFREEZE;

        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {
                if (!local_softirq_pending())
                        schedule();

                __set_current_state(TASK_RUNNING);

                while (local_softirq_pending()) {
                        /* Preempt disable stops cpu going offline.
                           If already offline, we'll be on wrong CPU:
                           don't process */
                        preempt_disable();
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
                        do_softirq();
                        preempt_enable();
                        cond_resched();
                }

                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;

wait_to_die:
        preempt_enable();
        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        return;
                }
        }
        BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
        struct tasklet_struct **i;

        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
        *i = per_cpu(tasklet_vec, cpu).list;
        per_cpu(tasklet_vec, cpu).list = NULL;
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
        *i = per_cpu(tasklet_hi_vec, cpu).list;
        per_cpu(tasklet_hi_vec, cpu).list = NULL;
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
                BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
                BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
                p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
                }
                kthread_bind(p, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
                break;
        case CPU_ONLINE:
                wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu), smp_processor_id());
        case CPU_DEAD:
                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
                kthread_stop(p);
                takeover_tasklets(hotcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}
static struct notifier_block __devinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
        return 0;
}
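/*
 * Editorial note, not part of the original file: spawn_ksoftirqd() is
 * invoked early during boot (from init/main.c in kernels of this vintage)
 * to create ksoftirqd/0 for the boot CPU and to register cpu_nfb, so that
 * CPUs brought up later each get their own ksoftirqd thread through the
 * CPU_UP_PREPARE/CPU_ONLINE notifications handled above.
 */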