/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

/*
 * The per-CPU workqueue (if single thread, we always use cpu 0's).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        long remove_sequence;   /* Least-recently added (next to run) */
        long insert_sequence;   /* Next to add */

        struct list_head worklist;
        wait_queue_head_t more_work;
        wait_queue_head_t work_done;

        struct workqueue_struct *wq;
        task_t *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct cpu_wq[NR_CPUS];
        const char *name;
        struct list_head list;  /* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        work->wq_data = cwq;
        list_add_tail(&work->entry, &cwq->worklist);
        cwq->insert_sequence++;
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/*
 * Queue work on a workqueue. Return non-zero if it was successfully
 * added.
 *
 * We queue the work to the CPU it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(0, &work->pending)) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = 0;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq->cpu_wq + cpu, work);
                ret = 1;
        }
        put_cpu();
        return ret;
}

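/*
 * Illustrative sketch (not part of this file; my_dev, my_wq and
 * my_work_handler are hypothetical names): with the work_struct API of this
 * era a caller supplies a void-pointer handler through the three-argument
 * INIT_WORK() from <linux/workqueue.h> and then queues the item:
 *
 *      static void my_work_handler(void *data)
 *      {
 *              struct my_dev *dev = data;
 *              ...                     (runs in a worker thread, process context)
 *      }
 *
 *      INIT_WORK(&dev->work, my_work_handler, dev);
 *      queue_work(my_wq, &dev->work);  (returns 1 if newly queued, 0 if already pending)
 */
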
static void delayed_work_timer_fn(unsigned long __data)
{
        struct work_struct *work = (struct work_struct *)__data;
        struct workqueue_struct *wq = work->wq_data;
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = 0;

        __queue_work(wq->cpu_wq + cpu, work);
}

int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct work_struct *work, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &work->timer;

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)work;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}

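/*
 * Illustrative sketch (hypothetical names, continuing the example above):
 * queue_delayed_work() arms work->timer so that delayed_work_timer_fn()
 * queues the work after "delay" jiffies, on whichever CPU the timer fires:
 *
 *      queue_delayed_work(my_wq, &dev->work, HZ);      (run roughly one second from now)
 */
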
static inline void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                void (*f) (void *) = work->func;
                void *data = work->data;

                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(work->wq_data != cwq);
                clear_bit(0, &work->pending);
                f(data);

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
                wake_up(&cwq->work_done);
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
        } else {
                DEFINE_WAIT(wait);
                long sequence_needed;

                spin_lock_irq(&cwq->lock);
                sequence_needed = cwq->insert_sequence;

                while (sequence_needed - cwq->remove_sequence > 0) {
                        prepare_to_wait(&cwq->work_done, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&cwq->lock);
                        schedule();
                        spin_lock_irq(&cwq->lock);
                }
                finish_wait(&cwq->work_done, &wait);
                spin_unlock_irq(&cwq->lock);
        }
}

/*
 * flush_workqueue - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        might_sleep();

        if (is_single_threaded(wq)) {
                /* Always use cpu 0's area. */
                flush_cpu_workqueue(wq->cpu_wq + 0);
        } else {
                int cpu;

                lock_cpu_hotplug();
                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(wq->cpu_wq + cpu);
                unlock_cpu_hotplug();
        }
}

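/*
 * Illustrative sketch (hypothetical names): a typical shutdown path queues
 * its final work item and then flushes, so every handler queued up to the
 * flush has completed before the caller tears down the data it uses:
 *
 *      queue_work(my_wq, &dev->work);
 *      ...
 *      flush_workqueue(my_wq);         (waits for dev->work and earlier items to finish)
 *      kfree(dev);                     (safe: the handler can no longer run on dev)
 */
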
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu)
{
        struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->name = name;
        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, 0);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        unlock_cpu_hotplug();

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}

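/*
 * Callers normally reach __create_workqueue() through the create_workqueue()
 * and create_singlethread_workqueue() wrappers declared in
 * <linux/workqueue.h>, which select the multi-threaded and single-threaded
 * variants respectively.  A minimal sketch with a hypothetical name:
 *
 *      struct workqueue_struct *my_wq;
 *
 *      my_wq = create_singlethread_workqueue("mydrv");
 *      if (!my_wq)
 *              return -ENOMEM;
 *      ...
 *      destroy_workqueue(my_wq);
 */
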
static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = wq->cpu_wq + cpu;
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, 0);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                spin_lock(&workqueue_lock);
                list_del(&wq->list);
                spin_unlock(&workqueue_lock);
        }
        unlock_cpu_hotplug();
        kfree(wq);
}

static struct workqueue_struct *keventd_wq;

int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}

int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
        return queue_delayed_work(keventd_wq, work, delay);
}

int schedule_delayed_work_on(int cpu,
                        struct work_struct *work, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &work->timer;

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
                /* This stores keventd_wq for the moment, for the timer_fn */
                work->wq_data = keventd_wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)work;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}

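/*
 * Illustrative sketch (hypothetical names): work handed to keventd with
 * schedule_work() runs on the shared "events" workqueue, and module exit
 * paths typically wait for it with flush_scheduled_work():
 *
 *      schedule_work(&dev->work);
 *      ...
 *      flush_scheduled_work();         (wait for everything queued so far on keventd)
 */
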
/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct work_struct *work)
{
        while (!cancel_delayed_work(work))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_work(struct work_struct *work)
{
        cancel_rearming_delayed_workqueue(keventd_wq, work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

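/*
 * Illustrative sketch (hypothetical names): a handler that rearms itself
 * with schedule_delayed_work() is stopped reliably by the helper above,
 * which keeps cancelling and flushing until the work stays cancelled:
 *
 *      static void my_poll(void *data)
 *      {
 *              ...                                     (poll the hardware)
 *              schedule_delayed_work(&my_poll_work, HZ);       (rearm)
 *      }
 *
 *      (on teardown:)
 *      cancel_rearming_delayed_work(&my_poll_work);
 */
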
int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = keventd_wq->cpu_wq + cpu;
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
        LIST_HEAD(list);
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_splice_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(wq->cpu_wq + smp_processor_id(), work);
        }
        spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action,
                                            void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (!create_workqueue_thread(wq, hotcpu)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list) {
                        kthread_bind(wq->cpu_wq[hotcpu].thread, hotcpu);
                        wake_up_process(wq->cpu_wq[hotcpu].thread);
                }
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        /* Unbind so it can run. */
                        kthread_bind(wq->cpu_wq[hotcpu].thread,
                                     smp_processor_id());
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                break;
        }

        return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}

EXPORT_SYMBOL_GPL(__create_workqueue);
EXPORT_SYMBOL_GPL(queue_work);
EXPORT_SYMBOL_GPL(queue_delayed_work);
EXPORT_SYMBOL_GPL(flush_workqueue);
EXPORT_SYMBOL_GPL(destroy_workqueue);

EXPORT_SYMBOL(schedule_work);
EXPORT_SYMBOL(schedule_delayed_work);
EXPORT_SYMBOL(schedule_delayed_work_on);
EXPORT_SYMBOL(flush_scheduled_work);