/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <trace/workqueue.h>

#include <asm/uaccess.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        struct list_head list;
        const char *name;
        int singlethread;
        int freezeable;         /* Freeze threads during suspend */
        int rt;
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
        return wq->singlethread;
}
static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
        return is_wq_single_threaded(wq)
                ? cpu_singlethread_map : cpu_populated_map;
}
static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
        if (unlikely(is_wq_single_threaded(wq)))
                cpu = singlethread_cpu;
        return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
                                struct cpu_workqueue_struct *cwq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}
static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
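/*
 * Illustrative sketch (not part of the original file): work->data packs the
 * owning cpu_workqueue_struct pointer and a few flag bits into one
 * atomic_long_t. Because cpu_workqueue_struct is ____cacheline_aligned, the
 * low bits of its address are always zero and can carry flags such as
 * WORK_STRUCT_PENDING:
 *
 *      data = (unsigned long)cwq | (1UL << WORK_STRUCT_PENDING);
 *      cwq  = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);      // pointer back
 *      pend = test_bit(WORK_STRUCT_PENDING, &data);           // flag back
 */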
DEFINE_TRACE(workqueue_insertion);
static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head)
{
        trace_workqueue_insertion(cwq->thread, work);

        set_wq_data(work, cwq);
        /*
         * Ensure that we get the right work->data if we see the
         * result of list_add() below, see try_to_grab_pending().
         */
        smp_wmb();
        list_add_tail(&work->entry, head);
        wake_up(&cwq->more_work);
}
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, &cwq->worklist);
        spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Especially no such guarantee on PREEMPT_RT.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = raw_smp_processor_id();

        ret = queue_work_on(cpu, wq, work);

        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
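/*
 * Illustrative caller sketch (not part of the original file): a hypothetical
 * driver that owns a private workqueue and defers its interrupt bottom half
 * to it. The names my_dev, my_dev_work(), my_dev_probe() and my_dev_irq()
 * exist only for this example.
 *
 *      struct my_dev {
 *              struct workqueue_struct *wq;
 *              struct work_struct work;
 *      };
 *
 *      static void my_dev_work(struct work_struct *work)
 *      {
 *              struct my_dev *dev = container_of(work, struct my_dev, work);
 *              // process-context handling of dev goes here
 *      }
 *
 *      static int my_dev_probe(struct my_dev *dev)
 *      {
 *              dev->wq = create_workqueue("my_dev");
 *              if (!dev->wq)
 *                      return -ENOMEM;
 *              INIT_WORK(&dev->work, my_dev_work);
 *              return 0;
 *      }
 *
 *      static irqreturn_t my_dev_irq(int irq, void *data)
 *      {
 *              struct my_dev *dev = data;
 *
 *              queue_work(dev->wq, &dev->work);  // safe from atomic context
 *              return IRQ_HANDLED;
 *      }
 */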
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq_per_cpu(wq, cpu), work);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
        struct workqueue_struct *wq = cwq->wq;

        __queue_work(wq_per_cpu(wq, raw_smp_processor_id()), &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        if (delay == 0)
                return queue_work(wq, &dwork->work);

        return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                timer_stats_timer_set_start_info(&dwork->timer);

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;

                if (unlikely(cpu >= 0))
                        add_timer_on(timer, cpu);
                else
                        add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
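/*
 * Illustrative caller sketch (not part of the original file): a hypothetical
 * poller that re-arms itself once a second on its own workqueue. The names
 * my_wq, my_poll and my_poll_fn are assumptions made only for this example.
 *
 *      static struct workqueue_struct *my_wq;
 *      static struct delayed_work my_poll;
 *
 *      static void my_poll_fn(struct work_struct *work)
 *      {
 *              // ... do the periodic work ...
 *              queue_delayed_work(my_wq, &my_poll, msecs_to_jiffies(1000));
 *      }
 *
 *      // setup:
 *      //      my_wq = create_singlethread_workqueue("my_poll");
 *      //      INIT_DELAYED_WORK(&my_poll, my_poll_fn);
 *      //      queue_delayed_work(my_wq, &my_poll, msecs_to_jiffies(1000));
 *      // teardown:
 *      //      cancel_delayed_work_sync(&my_poll);
 *      //      destroy_workqueue(my_wq);
 */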
DEFINE_TRACE(workqueue_execution);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __func__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
                /*
                 * It is permissible to free the struct work_struct
                 * from inside the function that is called from it,
                 * this we need to take into account for lockdep too.
                 * To avoid bogus "held lock freed" warnings as well
                 * as problems when looking into work->lockdep_map,
                 * make a copy and use that here.
                 */
                struct lockdep_map lockdep_map = work->lockdep_map;
#endif
                trace_workqueue_execution(cwq->thread, work);
                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);

                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
                lock_map_acquire(&cwq->wq->lockdep_map);
                lock_map_acquire(&lockdep_map);
                f(work);
                lock_map_release(&lockdep_map);
                lock_map_release(&cwq->wq->lockdep_map);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        task_pid_nr(current));
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
        spin_unlock_irq(&cwq->lock);
}
static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);

        if (cwq->wq->freezeable)
                set_freezable();

        set_user_nice(current, -5);

        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !kthread_should_stop() &&
                    list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                try_to_freeze();

                if (kthread_should_stop())
                        break;

                run_workqueue(cwq);
        }

        return 0;
}
struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};
static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                        struct wq_barrier *barr, struct list_head *head)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, head);
}
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        int active;

        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
                active = 1;
        } else {
                struct wq_barrier barr;

                active = 0;
                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, &cwq->worklist);
                        active = 1;
                }
                spin_unlock_irq(&cwq->lock);

                if (active)
                        wait_for_completion(&barr.done);
        }

        return active;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
        const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;

        might_sleep();
        lock_map_acquire(&wq->lockdep_map);
        lock_map_release(&wq->lockdep_map);
        for_each_cpu_mask_nr(cpu, *cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
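/*
 * Illustrative shutdown sketch (not part of the original file): a hypothetical
 * driver tearing down the private workqueue from the earlier example. Stopping
 * new submissions first is the caller's job; the names my_dev and
 * my_dev_remove() are assumptions made only for this example.
 *
 *      static void my_dev_remove(struct my_dev *dev)
 *      {
 *              // no new queue_work() calls can happen past this point
 *              cancel_work_sync(&dev->work);
 *              flush_workqueue(dev->wq);
 *              destroy_workqueue(dev->wq);
 *      }
 */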
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct list_head *prev;
        struct wq_barrier barr;

        might_sleep();
        cwq = get_wq_data(work);
        if (!cwq)
                return 0;

        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        prev = NULL;
        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * See the comment near try_to_grab_pending()->smp_rmb().
                 * If it was re-queued under us we are not going to wait.
                 */
                smp_rmb();
                if (unlikely(cwq != get_wq_data(work)))
                        goto out;
                prev = &work->entry;
        } else {
                if (cwq->current_work != work)
                        goto out;
                prev = &cwq->worklist;
        }
        insert_wq_barrier(cwq, &barr, prev->next);
out:
        spin_unlock_irq(&cwq->lock);
        if (!prev)
                return 0;

        wait_for_completion(&barr.done);
        return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        int ret = -1;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
                return 0;

        /*
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */

        cwq = get_wq_data(work);
        if (!cwq)
                return ret;

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * This work is queued, but perhaps we locked the wrong cwq.
                 * In that case we must see the new value after rmb(), see
                 * insert_work()->wmb().
                 */
                smp_rmb();
                if (cwq == get_wq_data(work)) {
                        list_del_init(&work->entry);
                        ret = 1;
                }
        }
        spin_unlock_irq(&cwq->lock);

        return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, cwq->worklist.next);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running))
                wait_for_completion(&barr.done);
}
static void wait_on_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        const struct cpumask *cpu_map;
        int cpu;

        might_sleep();

        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);

        cwq = get_wq_data(work);
        if (!cwq)
                return;

        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);

        for_each_cpu_mask_nr(cpu, *cpu_map)
                wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
static int __cancel_work_timer(struct work_struct *work,
                                struct timer_list* timer)
{
        int ret;

        do {
                ret = (timer && likely(del_timer(timer)));
                if (!ret)
                        ret = try_to_grab_pending(work);
                wait_on_work(work);
        } while (unlikely(ret < 0));

        work_clear_pending(work);
        return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
        return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
        return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
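/*
 * Illustrative teardown sketch (not part of the original file): cancelling
 * both a plain work item and a self-rearming delayed work before freeing the
 * object that embeds them. The struct my_dev fields are assumptions made only
 * for this example.
 *
 *      static void my_dev_shutdown(struct my_dev *dev)
 *      {
 *              cancel_work_sync(&dev->work);           // plain work item
 *              cancel_delayed_work_sync(&dev->poll);   // timer + work item
 *              // neither callback can still be running at this point
 *              kfree(dev);
 *      }
 */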
static struct workqueue_struct *keventd_wq __read_mostly;
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
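/*
 * Illustrative caller sketch (not part of the original file): deferring work
 * from an interrupt handler to the kernel-global "events" workqueue. The
 * names my_work, my_work_fn and my_irq are assumptions made only for this
 * example.
 *
 *      static void my_work_fn(struct work_struct *work)
 *      {
 *              // runs later in process context in an events/X thread
 *      }
 *      static DECLARE_WORK(my_work, my_work_fn);
 *
 *      static irqreturn_t my_irq(int irq, void *data)
 *      {
 *              schedule_work(&my_work);
 *              return IRQ_HANDLED;
 *      }
 */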
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
        return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                schedule_work_on(cpu, work);
        }
        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
        put_online_cpus();
        free_percpu(works);
        return 0;
}
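/*
 * Illustrative caller sketch (not part of the original file): running a
 * function once on every online CPU from process context, e.g. to drain
 * per-CPU state. The name my_drain_cpu is an assumption made only for this
 * example.
 *
 *      static void my_drain_cpu(struct work_struct *unused)
 *      {
 *              // called on each online CPU in its events/X thread
 *      }
 *
 *      // somewhere in process context:
 *      //      int err = schedule_on_each_cpu(my_drain_cpu);
 */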
void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
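/*
 * Illustrative caller sketch (not part of the original file): an object
 * release path that must run in process context but may be entered from
 * interrupt context. The struct my_obj and my_obj_release() names are
 * assumptions made only for this example.
 *
 *      struct my_obj {
 *              struct execute_work ew;         // storage outlives the call
 *              // ...
 *      };
 *
 *      static void my_obj_release(struct work_struct *work)
 *      {
 *              struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *              kfree(obj);
 *      }
 *
 *      // caller: runs now if possible, otherwise via the global workqueue
 *      //      execute_in_process_context(my_obj_release, &obj->ew);
 */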
int keventd_up(void)
{
        return keventd_wq != NULL;
}
int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}
DEFINE_TRACE(workqueue_creation);
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *	if (caller is __create_workqueue)
         *		nobody should see this wq
         *	else // caller is CPU_UP_PREPARE
         *		cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);
        if (cwq->wq->rt)
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
        cwq->thread = p;

        trace_workqueue_creation(cwq->thread, cpu);

        return 0;
}
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct task_struct *p = cwq->thread;

        if (p != NULL) {
                if (cpu >= 0)
                        kthread_bind(p, cpu);
                wake_up_process(p);
        }
}
struct workqueue_struct *__create_workqueue_key(const char *name,
                                                int singlethread,
                                                int freezeable,
                                                int rt,
                                                struct lock_class_key *key,
                                                const char *lock_name)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        wq->singlethread = singlethread;
        wq->freezeable = freezeable;
        wq->rt = rt;
        INIT_LIST_HEAD(&wq->list);

        if (singlethread) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
                cpu_maps_update_begin();
                /*
                 * We must place this wq on list even if the code below fails.
                 * cpu_down(cpu) can remove cpu from cpu_populated_map before
                 * destroy_workqueue() takes the lock, in that case we leak
                 * cwq[cpu]->thread.
                 */
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                /*
                 * We must initialize cwqs for each possible cpu even if we
                 * are going to call destroy_workqueue() finally. Otherwise
                 * cpu_up() can hit the uninitialized cwq once we drop the
                 * lock.
                 */
                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
                cpu_maps_update_done();
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
DEFINE_TRACE(workqueue_destruction);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
        /*
         * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
         * cpu_add_remove_lock protects cwq->thread.
         */
        if (cwq->thread == NULL)
                return;

        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        flush_cpu_workqueue(cwq);
        /*
         * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
         * a concurrent flush_workqueue() can insert a barrier after us.
         * However, in that case run_workqueue() won't return and check
         * kthread_should_stop() until it flushes all work_struct's.
         * When ->worklist becomes empty it is safe to exit because no
         * more work_structs can be queued on this cwq: flush_workqueue
         * checks list_empty(), and a "normal" queue_work() can't use
         * a dead CPU.
         */
        trace_workqueue_destruction(cwq->thread);
        kthread_stop(cwq->thread);
        cwq->thread = NULL;
}
void set_workqueue_thread_prio(struct workqueue_struct *wq, int cpu,
                               int policy, int rt_priority, int nice)
{
        struct sched_param param = { .sched_priority = rt_priority };
        struct cpu_workqueue_struct *cwq;
        mm_segment_t oldfs = get_fs();
        struct task_struct *p;
        unsigned long flags;
        int ret;

        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        spin_unlock_irqrestore(&cwq->lock, flags);

        set_user_nice(p, nice);

        set_fs(KERNEL_DS);
        ret = sys_sched_setscheduler(p->pid, policy, &param);
        set_fs(oldfs);

        WARN_ON(ret);
}
void set_workqueue_prio(struct workqueue_struct *wq, int policy,
                        int rt_priority, int nice)
{
        int cpu;

        /* We don't need the distraction of CPUs appearing and vanishing. */
        get_online_cpus();
        spin_lock(&workqueue_lock);
        if (is_wq_single_threaded(wq))
                set_workqueue_thread_prio(wq, 0, policy, rt_priority, nice);
        else
                for_each_online_cpu(cpu)
                        set_workqueue_thread_prio(wq, cpu, policy,
                                                  rt_priority, nice);
        spin_unlock(&workqueue_lock);
        put_online_cpus();
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;

        cpu_maps_update_begin();
        spin_lock(&workqueue_lock);
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);

        for_each_cpu_mask_nr(cpu, *cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        cpu_maps_update_done();

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                                unsigned long action,
                                                void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        int ret = NOTIFY_OK;

        action &= ~CPU_TASKS_FROZEN;

        switch (action) {
        case CPU_UP_PREPARE:
                cpumask_set_cpu(cpu, cpu_populated_map);
        }
undo:
        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
                        printk(KERN_ERR "workqueue [%s] for %i failed\n",
                                wq->name, cpu);
                        action = CPU_UP_CANCELED;
                        ret = NOTIFY_BAD;
                        goto undo;

                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
                        break;

                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_POST_DEAD:
                        cleanup_workqueue_thread(cwq);
                        break;
                }
        }

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_POST_DEAD:
                cpumask_clear_cpu(cpu, cpu_populated_map);
        }

        return ret;
}
#ifdef CONFIG_SMP
static struct workqueue_struct *work_on_cpu_wq __read_mostly;
struct work_for_cpu {
        struct work_struct work;
        long (*fn)(void *);
        void *arg;
        long ret;
};

static void do_work_for_cpu(struct work_struct *w)
{
        struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);

        wfc->ret = wfc->fn(wfc->arg);
}
/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
        struct work_for_cpu wfc;

        INIT_WORK(&wfc.work, do_work_for_cpu);
        wfc.fn = fn;
        wfc.arg = arg;
        queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
        flush_work(&wfc.work);

        return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */
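/*
 * Illustrative caller sketch (not part of the original file): running a
 * function on a specific CPU and collecting its return value. The name
 * my_read_local is an assumption made only for this example.
 *
 *      static long my_read_local(void *arg)
 *      {
 *              // runs in the "work_on_cpu" worker thread bound to the cpu
 *              return (long)smp_processor_id();
 *      }
 *
 *      // caller must keep the cpu online, e.g. under get_online_cpus():
 *      //      long val = work_on_cpu(cpu, my_read_local, NULL);
 */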
void __init init_workqueues(void)
{
        alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

        cpumask_copy(cpu_populated_map, cpu_online_mask);
        singlethread_cpu = cpumask_first(cpu_possible_mask);
        cpu_singlethread_map = cpumask_of(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
        set_workqueue_prio(keventd_wq, SCHED_FIFO, 1, -20);
#ifdef CONFIG_SMP
        work_on_cpu_wq = create_workqueue("work_on_cpu");
        BUG_ON(!work_on_cpu_wq);
#endif
}