/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        long remove_sequence;   /* Least-recently added (next to run) */
        long insert_sequence;   /* Next to add */

        struct list_head worklist;
        wait_queue_head_t more_work;
        wait_queue_head_t work_done;

        struct workqueue_struct *wq;
        struct task_struct *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
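/*
 * Worked example of the sequence counters above (illustrative only):
 * after three works have been queued and one has completed,
 * insert_sequence == 3 and remove_sequence == 1.  A flusher that
 * samples insert_sequence at that point sleeps until
 * remove_sequence >= 3, i.e. until the two remaining pre-flush works
 * are done, no matter how many new works are queued behind them.
 */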
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        const char *name;
        struct list_head list;  /* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}
/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        work->wq_data = cwq;
        list_add_tail(&work->entry, &cwq->worklist);
        cwq->insert_sequence++;
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
}
/*
 * Queue work on a workqueue. Return non-zero if it was successfully
 * added.
 *
 * We queue the work to the CPU it was submitted on, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(0, &work->pending)) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
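/*
 * Usage sketch (illustrative, not part of this file; my_wq, my_work and
 * my_handler are hypothetical, using the old-style work_struct API that
 * this file implements, where a handler takes a void *data argument):
 *
 *      static void my_handler(void *data)
 *      {
 *              ... runs in a worker thread: process context, may sleep ...
 *      }
 *      static DECLARE_WORK(my_work, my_handler, NULL);
 *
 *      queue_work(my_wq, &my_work);    ... returns 0 if already pending ...
 */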
static void delayed_work_timer_fn(unsigned long __data)
{
        struct work_struct *work = (struct work_struct *)__data;
        struct workqueue_struct *wq = work->wq_data;
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct work_struct *work, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &work->timer;

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)work;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct work_struct *work, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &work->timer;

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)work;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
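/*
 * Sketch of the delayed variants (illustrative; my_wq and my_work are
 * hypothetical):
 *
 *      queue_delayed_work(my_wq, &my_work, HZ);        ... ~1s later ...
 *      queue_delayed_work_on(2, my_wq, &my_work, HZ);  ... on CPU 2 ...
 *
 * Both arm work->timer and return; delayed_work_timer_fn() does the
 * actual queueing when the timer expires, which is why work->wq_data
 * temporarily holds the workqueue rather than a cpu_workqueue_struct.
 */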
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                void (*f) (void *) = work->func;
                void *data = work->data;

                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(work->wq_data != cwq);
                clear_bit(0, &work->pending);
                f(data);

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
                wake_up(&cwq->work_done);
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}
static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}
static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
        } else {
                DEFINE_WAIT(wait);
                long sequence_needed;

                spin_lock_irq(&cwq->lock);
                sequence_needed = cwq->insert_sequence;

                while (sequence_needed - cwq->remove_sequence > 0) {
                        prepare_to_wait(&cwq->work_done, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&cwq->lock);
                        schedule();
                        spin_lock_irq(&cwq->lock);
                }
                finish_wait(&cwq->work_done, &wait);
                spin_unlock_irq(&cwq->lock);
        }
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        might_sleep();

        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        } else {
                int cpu;

                lock_cpu_hotplug();
                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
                unlock_cpu_hotplug();
        }
}
EXPORT_SYMBOL_GPL(flush_workqueue);
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}
struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, singlethread_cpu);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        unlock_cpu_hotplug();

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
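/*
 * Lifecycle sketch (illustrative; my_work is hypothetical).  Callers
 * normally go through the create_workqueue()/create_singlethread_workqueue()
 * wrappers in linux/workqueue.h, which invoke __create_workqueue() with
 * singlethread set to 0 and 1 respectively:
 *
 *      struct workqueue_struct *wq = create_workqueue("mydrv");
 *      if (!wq)
 *              return -ENOMEM;
 *      queue_work(wq, &my_work);
 *      ...
 *      flush_workqueue(wq);    ... wait for queued work to finish ...
 *      destroy_workqueue(wq);
 */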
static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, singlethread_cpu);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                spin_lock(&workqueue_lock);
                list_del(&wq->list);
                spin_unlock(&workqueue_lock);
        }
        unlock_cpu_hotplug();
        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static struct workqueue_struct *keventd_wq;

int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
        return queue_delayed_work(keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

int schedule_delayed_work_on(int cpu,
                        struct work_struct *work, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 * @info: a pointer to pass to func()
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(void (*func)(void *info), void *info)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        for_each_online_cpu(cpu) {
                INIT_WORK(per_cpu_ptr(works, cpu), func, info);
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
                                per_cpu_ptr(works, cpu));
        }
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}
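/*
 * Usage sketch for schedule_on_each_cpu() (illustrative; my_count and
 * my_reset are hypothetical):
 *
 *      static DEFINE_PER_CPU(long, my_count);
 *
 *      static void my_reset(void *info)
 *      {
 *              __get_cpu_var(my_count) = 0;    ... runs on each CPU ...
 *      }
 *
 *      int err = schedule_on_each_cpu(my_reset, NULL);
 *
 * The flush_workqueue() above guarantees that every per-cpu invocation
 * has completed before schedule_on_each_cpu() returns.
 */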
void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *                      work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct work_struct *work)
{
        while (!cancel_delayed_work(work))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
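/*
 * The rearming pattern this helper targets (illustrative sketch;
 * my_wq, my_work and my_poll are hypothetical):
 *
 *      static void my_poll(void *data)
 *      {
 *              ... do periodic work ...
 *              queue_delayed_work(my_wq, &my_work, HZ);        ... rearm ...
 *      }
 *
 * A bare cancel_delayed_work() can lose the race against a handler
 * that is just about to rearm itself; the cancel/flush loop above
 * keeps retrying until a cancel succeeds while no handler is in flight.
 */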
/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *                      work whose handler rearms the delayed work.
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_work(struct work_struct *work)
{
        cancel_rearming_delayed_workqueue(keventd_wq, work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:   the function to execute
 * @data: data to pass to the function
 * @ew:   guaranteed storage for the execute work structure (must
 *        be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(void (*fn)(void *data), void *data,
                               struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(data);
                return 0;
        }

        INIT_WORK(&ew->work, fn, data);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
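/*
 * Usage sketch for execute_in_process_context() (illustrative; my_dev,
 * my_release and dev are hypothetical).  Note that @ew must stay valid
 * until the work runs, so it typically lives in a long-lived structure:
 *
 *      struct my_dev {
 *              struct execute_work release_ew;
 *              ...
 *      };
 *
 *      execute_in_process_context(my_release, dev, &dev->release_ew);
 *
 * From process context my_release() runs synchronously (returns 0);
 * from interrupt context it is deferred to keventd (returns 1).
 */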
int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct list_head list;
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_replace_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
        }
        spin_unlock_irq(&cwq->lock);
}
/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action,
                                            void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (!create_workqueue_thread(wq, hotcpu)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq;

                        cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
                        kthread_bind(cwq->thread, hotcpu);
                        wake_up_process(cwq->thread);
                }
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
                                continue;
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                break;
        }

        return NOTIFY_OK;
}
#endif
void init_workqueues(void)
{
        singlethread_cpu = first_cpu(cpu_possible_map);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}