Linux 3.4.74
[linux/fpc-iii.git] / kernel / workqueue.c
blob 575d092fa746ef04ab7d4107abd336c2441a3947
1 /*
2 * kernel/workqueue.c - generic async execution with shared worker pool
4 * Copyright (C) 2002 Ingo Molnar
6 * Derived from the taskqueue/keventd code by:
7 * David Woodhouse <dwmw2@infradead.org>
8 * Andrew Morton
9 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
10 * Theodore Ts'o <tytso@mit.edu>
12 * Made to use alloc_percpu by Christoph Lameter.
14 * Copyright (C) 2010 SUSE Linux Products GmbH
15 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
17 * This is the generic async execution mechanism. Work items are
18 * executed in process context. The worker pool is shared and
19 * automatically managed. There is one worker pool for each CPU and
20 * one extra for works which are better served by workers which are
21 * not bound to any specific CPU.
23 * Please read Documentation/workqueue.txt for details.
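/*
 * Example (editor's sketch, not part of the original file): a minimal client
 * of the API implemented below.  The names example_wq, example_work and
 * example_fn are hypothetical and error handling is reduced to the basics.
 */
#if 0
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_fn(struct work_struct *work)
{
	pr_info("running in process context on a shared kworker\n");
}

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example", 0, 0);
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_work, example_fn);
	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	flush_workqueue(example_wq);	/* wait for example_fn to finish */
	destroy_workqueue(example_wq);
}
module_init(example_init);
module_exit(example_exit);
#endif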
26 #include <linux/export.h>
27 #include <linux/kernel.h>
28 #include <linux/sched.h>
29 #include <linux/init.h>
30 #include <linux/signal.h>
31 #include <linux/completion.h>
32 #include <linux/workqueue.h>
33 #include <linux/slab.h>
34 #include <linux/cpu.h>
35 #include <linux/notifier.h>
36 #include <linux/kthread.h>
37 #include <linux/hardirq.h>
38 #include <linux/mempolicy.h>
39 #include <linux/freezer.h>
40 #include <linux/kallsyms.h>
41 #include <linux/debug_locks.h>
42 #include <linux/lockdep.h>
43 #include <linux/idr.h>
45 #include "workqueue_sched.h"
47 enum {
48 /* global_cwq flags */
49 GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
50 GCWQ_MANAGING_WORKERS = 1 << 1, /* managing workers */
51 GCWQ_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
52 GCWQ_FREEZING = 1 << 3, /* freeze in progress */
53 GCWQ_HIGHPRI_PENDING = 1 << 4, /* highpri works on queue */
55 /* worker flags */
56 WORKER_STARTED = 1 << 0, /* started */
57 WORKER_DIE = 1 << 1, /* die die die */
58 WORKER_IDLE = 1 << 2, /* is idle */
59 WORKER_PREP = 1 << 3, /* preparing to run works */
60 WORKER_ROGUE = 1 << 4, /* not bound to any cpu */
61 WORKER_REBIND = 1 << 5, /* mom is home, come back */
62 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
63 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
65 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
66 WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
68 /* gcwq->trustee_state */
69 TRUSTEE_START = 0, /* start */
70 TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */
71 TRUSTEE_BUTCHER = 2, /* butcher workers */
72 TRUSTEE_RELEASE = 3, /* release workers */
73 TRUSTEE_DONE = 4, /* trustee is done */
75 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
76 BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
77 BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
79 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
80 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
82 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
83 /* call for help after 10ms
84 (min two ticks) */
85 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
86 CREATE_COOLDOWN = HZ, /* time to breathe after fail */
87 TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
90 * Rescue workers are used only in emergencies and shared by
91 * all cpus. Give them a nice level of -20.
93 RESCUER_NICE_LEVEL = -20,
97 * Structure fields follow one of the following exclusion rules.
99 * I: Modifiable by initialization/destruction paths and read-only for
100 * everyone else.
102 * P: Preemption protected. Disabling preemption is enough and should
103 * only be modified and accessed from the local cpu.
105 * L: gcwq->lock protected. Access with gcwq->lock held.
107 * X: During normal operation, modification requires gcwq->lock and
108 * should be done only from local cpu. Either disabling preemption
109 * on local cpu or grabbing gcwq->lock is enough for read access.
110 * If GCWQ_DISASSOCIATED is set, it's identical to L.
112 * F: wq->flush_mutex protected.
114 * W: workqueue_lock protected.
117 struct global_cwq;
120 * The poor guys doing the actual heavy lifting. All on-duty workers
121 * are either serving the manager role, on the idle list or on the busy hash.
123 struct worker {
124 /* on idle list while idle, on busy hash table while busy */
125 union {
126 struct list_head entry; /* L: while idle */
127 struct hlist_node hentry; /* L: while busy */
130 struct work_struct *current_work; /* L: work being processed */
131 work_func_t current_func; /* L: current_work's fn */
132 struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
133 struct list_head scheduled; /* L: scheduled works */
134 struct task_struct *task; /* I: worker task */
135 struct global_cwq *gcwq; /* I: the associated gcwq */
136 /* 64 bytes boundary on 64bit, 32 on 32bit */
137 unsigned long last_active; /* L: last active timestamp */
138 unsigned int flags; /* X: flags */
139 int id; /* I: worker id */
140 struct work_struct rebind_work; /* L: rebind worker to cpu */
144 * Global per-cpu workqueue. There's one and only one for each cpu
145 * and all works are queued and processed here regardless of their
146 * target workqueues.
148 struct global_cwq {
149 spinlock_t lock; /* the gcwq lock */
150 struct list_head worklist; /* L: list of pending works */
151 unsigned int cpu; /* I: the associated cpu */
152 unsigned int flags; /* L: GCWQ_* flags */
154 int nr_workers; /* L: total number of workers */
155 int nr_idle; /* L: currently idle ones */
157 /* workers are chained either in the idle_list or busy_hash */
158 struct list_head idle_list; /* X: list of idle workers */
159 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
160 /* L: hash of busy workers */
162 struct timer_list idle_timer; /* L: worker idle timeout */
163 struct timer_list mayday_timer; /* L: SOS timer for workers */
165 struct ida worker_ida; /* L: for worker IDs */
167 struct task_struct *trustee; /* L: for gcwq shutdown */
168 unsigned int trustee_state; /* L: trustee state */
169 wait_queue_head_t trustee_wait; /* trustee wait */
170 struct worker *first_idle; /* L: first idle worker */
171 } ____cacheline_aligned_in_smp;
174 * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
175 * work_struct->data are used for flags, so cwqs need to be
176 * aligned on a (1 << WORK_STRUCT_FLAG_BITS) boundary.
178 struct cpu_workqueue_struct {
179 struct global_cwq *gcwq; /* I: the associated gcwq */
180 struct workqueue_struct *wq; /* I: the owning workqueue */
181 int work_color; /* L: current color */
182 int flush_color; /* L: flushing color */
183 int nr_in_flight[WORK_NR_COLORS];
184 /* L: nr of in_flight works */
185 int nr_active; /* L: nr of active works */
186 int max_active; /* L: max active works */
187 struct list_head delayed_works; /* L: delayed works */
191 * Structure used to wait for workqueue flush.
193 struct wq_flusher {
194 struct list_head list; /* F: list of flushers */
195 int flush_color; /* F: flush color waiting for */
196 struct completion done; /* flush completion */
200 * All cpumasks are assumed to be always set on UP and thus can't be
201 * used to determine whether there's something to be done.
203 #ifdef CONFIG_SMP
204 typedef cpumask_var_t mayday_mask_t;
205 #define mayday_test_and_set_cpu(cpu, mask) \
206 cpumask_test_and_set_cpu((cpu), (mask))
207 #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
208 #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
209 #define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
210 #define free_mayday_mask(mask) free_cpumask_var((mask))
211 #else
212 typedef unsigned long mayday_mask_t;
213 #define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask))
214 #define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask))
215 #define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask))
216 #define alloc_mayday_mask(maskp, gfp) true
217 #define free_mayday_mask(mask) do { } while (0)
218 #endif
221 * The externally visible workqueue abstraction is an array of
222 * per-CPU workqueues:
224 struct workqueue_struct {
225 unsigned int flags; /* W: WQ_* flags */
226 union {
227 struct cpu_workqueue_struct __percpu *pcpu;
228 struct cpu_workqueue_struct *single;
229 unsigned long v;
230 } cpu_wq; /* I: cwq's */
231 struct list_head list; /* W: list of all workqueues */
233 struct mutex flush_mutex; /* protects wq flushing */
234 int work_color; /* F: current work color */
235 int flush_color; /* F: current flush color */
236 atomic_t nr_cwqs_to_flush; /* flush in progress */
237 struct wq_flusher *first_flusher; /* F: first flusher */
238 struct list_head flusher_queue; /* F: flush waiters */
239 struct list_head flusher_overflow; /* F: flush overflow list */
241 mayday_mask_t mayday_mask; /* cpus requesting rescue */
242 struct worker *rescuer; /* I: rescue worker */
244 int nr_drainers; /* W: drain in progress */
245 int saved_max_active; /* W: saved cwq max_active */
246 #ifdef CONFIG_LOCKDEP
247 struct lockdep_map lockdep_map;
248 #endif
249 char name[]; /* I: workqueue name */
252 struct workqueue_struct *system_wq __read_mostly;
253 struct workqueue_struct *system_long_wq __read_mostly;
254 struct workqueue_struct *system_nrt_wq __read_mostly;
255 struct workqueue_struct *system_unbound_wq __read_mostly;
256 struct workqueue_struct *system_freezable_wq __read_mostly;
257 struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
258 EXPORT_SYMBOL_GPL(system_wq);
259 EXPORT_SYMBOL_GPL(system_long_wq);
260 EXPORT_SYMBOL_GPL(system_nrt_wq);
261 EXPORT_SYMBOL_GPL(system_unbound_wq);
262 EXPORT_SYMBOL_GPL(system_freezable_wq);
263 EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
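/*
 * Example (editor's sketch, not part of the original file): typical use of
 * the system workqueues exported above.  The work item and handler names are
 * hypothetical.
 */
#if 0
static void light_fn(struct work_struct *work)
{
	/* short-lived work: the default system_wq is fine */
}
static DECLARE_WORK(light_work, light_fn);

static void heavy_fn(struct work_struct *work)
{
	/* long-running work: system_unbound_wq avoids delaying per-cpu works */
}
static DECLARE_WORK(heavy_work, heavy_fn);

static void example_kick(void)
{
	queue_work(system_wq, &light_work);		/* same as schedule_work() */
	queue_work(system_unbound_wq, &heavy_work);
}
#endif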
265 #define CREATE_TRACE_POINTS
266 #include <trace/events/workqueue.h>
268 #define for_each_busy_worker(worker, i, pos, gcwq) \
269 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
270 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
272 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
273 unsigned int sw)
275 if (cpu < nr_cpu_ids) {
276 if (sw & 1) {
277 cpu = cpumask_next(cpu, mask);
278 if (cpu < nr_cpu_ids)
279 return cpu;
281 if (sw & 2)
282 return WORK_CPU_UNBOUND;
284 return WORK_CPU_NONE;
287 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
288 struct workqueue_struct *wq)
290 return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
294 * CPU iterators
296 * An extra gcwq is defined for an invalid cpu number
297 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
298 * specific CPU. The following iterators are similar to
299 * for_each_*_cpu() iterators but also consider the unbound gcwq.
301 * for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND
302 * for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND
303 * for_each_cwq_cpu() : possible CPUs for bound workqueues,
304 * WORK_CPU_UNBOUND for unbound workqueues
306 #define for_each_gcwq_cpu(cpu) \
307 for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \
308 (cpu) < WORK_CPU_NONE; \
309 (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
311 #define for_each_online_gcwq_cpu(cpu) \
312 for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \
313 (cpu) < WORK_CPU_NONE; \
314 (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
316 #define for_each_cwq_cpu(cpu, wq) \
317 for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \
318 (cpu) < WORK_CPU_NONE; \
319 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
321 #ifdef CONFIG_DEBUG_OBJECTS_WORK
323 static struct debug_obj_descr work_debug_descr;
325 static void *work_debug_hint(void *addr)
327 return ((struct work_struct *) addr)->func;
331 * fixup_init is called when:
332 * - an active object is initialized
334 static int work_fixup_init(void *addr, enum debug_obj_state state)
336 struct work_struct *work = addr;
338 switch (state) {
339 case ODEBUG_STATE_ACTIVE:
340 cancel_work_sync(work);
341 debug_object_init(work, &work_debug_descr);
342 return 1;
343 default:
344 return 0;
349 * fixup_activate is called when:
350 * - an active object is activated
351 * - an unknown object is activated (might be a statically initialized object)
353 static int work_fixup_activate(void *addr, enum debug_obj_state state)
355 struct work_struct *work = addr;
357 switch (state) {
359 case ODEBUG_STATE_NOTAVAILABLE:
361 * This is not really a fixup. The work struct was
362 * statically initialized. We just make sure that it
363 * is tracked in the object tracker.
365 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
366 debug_object_init(work, &work_debug_descr);
367 debug_object_activate(work, &work_debug_descr);
368 return 0;
370 WARN_ON_ONCE(1);
371 return 0;
373 case ODEBUG_STATE_ACTIVE:
374 WARN_ON(1);
376 default:
377 return 0;
382 * fixup_free is called when:
383 * - an active object is freed
385 static int work_fixup_free(void *addr, enum debug_obj_state state)
387 struct work_struct *work = addr;
389 switch (state) {
390 case ODEBUG_STATE_ACTIVE:
391 cancel_work_sync(work);
392 debug_object_free(work, &work_debug_descr);
393 return 1;
394 default:
395 return 0;
399 static struct debug_obj_descr work_debug_descr = {
400 .name = "work_struct",
401 .debug_hint = work_debug_hint,
402 .fixup_init = work_fixup_init,
403 .fixup_activate = work_fixup_activate,
404 .fixup_free = work_fixup_free,
407 static inline void debug_work_activate(struct work_struct *work)
409 debug_object_activate(work, &work_debug_descr);
412 static inline void debug_work_deactivate(struct work_struct *work)
414 debug_object_deactivate(work, &work_debug_descr);
417 void __init_work(struct work_struct *work, int onstack)
419 if (onstack)
420 debug_object_init_on_stack(work, &work_debug_descr);
421 else
422 debug_object_init(work, &work_debug_descr);
424 EXPORT_SYMBOL_GPL(__init_work);
426 void destroy_work_on_stack(struct work_struct *work)
428 debug_object_free(work, &work_debug_descr);
430 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
432 #else
433 static inline void debug_work_activate(struct work_struct *work) { }
434 static inline void debug_work_deactivate(struct work_struct *work) { }
435 #endif
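/*
 * Example (editor's sketch, not part of the original file): pairing
 * INIT_WORK_ONSTACK() with destroy_work_on_stack() so that, with
 * CONFIG_DEBUG_OBJECTS_WORK enabled, the on-stack object is removed from the
 * object tracker before its stack frame disappears.  Names are hypothetical.
 */
#if 0
static void stack_fn(struct work_struct *work)
{
	/* runs once; the caller below waits for it to finish */
}

static void run_on_stack_work(void)
{
	struct work_struct work;

	INIT_WORK_ONSTACK(&work, stack_fn);
	schedule_work(&work);
	flush_work(&work);		/* must finish before 'work' goes out of scope */
	destroy_work_on_stack(&work);
}
#endif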
437 /* Serializes the accesses to the list of workqueues. */
438 static DEFINE_SPINLOCK(workqueue_lock);
439 static LIST_HEAD(workqueues);
440 static bool workqueue_freezing; /* W: have wqs started freezing? */
443 * The almighty global cpu workqueues. nr_running is the only field
444 * which is expected to be used frequently by other cpus via
445 * try_to_wake_up(). Put it in a separate cacheline.
447 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
448 static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
451 * Global cpu workqueue and nr_running counter for unbound gcwq. The
452 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
453 * workers have WORKER_UNBOUND set.
455 static struct global_cwq unbound_global_cwq;
456 static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0); /* always 0 */
458 static int worker_thread(void *__worker);
460 static struct global_cwq *get_gcwq(unsigned int cpu)
462 if (cpu != WORK_CPU_UNBOUND)
463 return &per_cpu(global_cwq, cpu);
464 else
465 return &unbound_global_cwq;
468 static atomic_t *get_gcwq_nr_running(unsigned int cpu)
470 if (cpu != WORK_CPU_UNBOUND)
471 return &per_cpu(gcwq_nr_running, cpu);
472 else
473 return &unbound_gcwq_nr_running;
476 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
477 struct workqueue_struct *wq)
479 if (!(wq->flags & WQ_UNBOUND)) {
480 if (likely(cpu < nr_cpu_ids))
481 return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
482 } else if (likely(cpu == WORK_CPU_UNBOUND))
483 return wq->cpu_wq.single;
484 return NULL;
487 static unsigned int work_color_to_flags(int color)
489 return color << WORK_STRUCT_COLOR_SHIFT;
492 static int get_work_color(struct work_struct *work)
494 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
495 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
498 static int work_next_color(int color)
500 return (color + 1) % WORK_NR_COLORS;
504 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
505 * work is on queue. Once execution starts, WORK_STRUCT_CWQ is
506 * cleared and the work data contains the cpu number it was last on.
508 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
509 * cwq, cpu or clear work->data. These functions should only be
510 * called while the work is owned - ie. while the PENDING bit is set.
512 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
513 * corresponding to a work. gcwq is available once the work has been
514 * queued anywhere after initialization. cwq is available only from
515 * queueing until execution starts.
517 static inline void set_work_data(struct work_struct *work, unsigned long data,
518 unsigned long flags)
520 BUG_ON(!work_pending(work));
521 atomic_long_set(&work->data, data | flags | work_static(work));
524 static void set_work_cwq(struct work_struct *work,
525 struct cpu_workqueue_struct *cwq,
526 unsigned long extra_flags)
528 set_work_data(work, (unsigned long)cwq,
529 WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
532 static void set_work_cpu(struct work_struct *work, unsigned int cpu)
534 set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
537 static void clear_work_data(struct work_struct *work)
539 set_work_data(work, WORK_STRUCT_NO_CPU, 0);
542 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
544 unsigned long data = atomic_long_read(&work->data);
546 if (data & WORK_STRUCT_CWQ)
547 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
548 else
549 return NULL;
552 static struct global_cwq *get_work_gcwq(struct work_struct *work)
554 unsigned long data = atomic_long_read(&work->data);
555 unsigned int cpu;
557 if (data & WORK_STRUCT_CWQ)
558 return ((struct cpu_workqueue_struct *)
559 (data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
561 cpu = data >> WORK_STRUCT_FLAG_BITS;
562 if (cpu == WORK_CPU_NONE)
563 return NULL;
565 BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
566 return get_gcwq(cpu);
570 * Policy functions. These define the policies on how the global
571 * worker pool is managed. Unless noted otherwise, these functions
572 * assume that they're being called with gcwq->lock held.
575 static bool __need_more_worker(struct global_cwq *gcwq)
577 return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
578 gcwq->flags & GCWQ_HIGHPRI_PENDING;
582 * Need to wake up a worker? Called from anything but currently
583 * running workers.
585 static bool need_more_worker(struct global_cwq *gcwq)
587 return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
590 /* Can I start working? Called from busy but !running workers. */
591 static bool may_start_working(struct global_cwq *gcwq)
593 return gcwq->nr_idle;
596 /* Do I need to keep working? Called from currently running workers. */
597 static bool keep_working(struct global_cwq *gcwq)
599 atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
601 return !list_empty(&gcwq->worklist) &&
602 (atomic_read(nr_running) <= 1 ||
603 gcwq->flags & GCWQ_HIGHPRI_PENDING);
606 /* Do we need a new worker? Called from manager. */
607 static bool need_to_create_worker(struct global_cwq *gcwq)
609 return need_more_worker(gcwq) && !may_start_working(gcwq);
612 /* Do I need to be the manager? */
613 static bool need_to_manage_workers(struct global_cwq *gcwq)
615 return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
618 /* Do we have too many workers and should some go away? */
619 static bool too_many_workers(struct global_cwq *gcwq)
621 bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
622 int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
623 int nr_busy = gcwq->nr_workers - nr_idle;
625 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
629 * Wake up functions.
632 /* Return the first worker. Safe with preemption disabled */
633 static struct worker *first_worker(struct global_cwq *gcwq)
635 if (unlikely(list_empty(&gcwq->idle_list)))
636 return NULL;
638 return list_first_entry(&gcwq->idle_list, struct worker, entry);
642 * wake_up_worker - wake up an idle worker
643 * @gcwq: gcwq to wake worker for
645 * Wake up the first idle worker of @gcwq.
647 * CONTEXT:
648 * spin_lock_irq(gcwq->lock).
650 static void wake_up_worker(struct global_cwq *gcwq)
652 struct worker *worker = first_worker(gcwq);
654 if (likely(worker))
655 wake_up_process(worker->task);
659 * wq_worker_waking_up - a worker is waking up
660 * @task: task waking up
661 * @cpu: CPU @task is waking up to
663 * This function is called during try_to_wake_up() when a worker is
664 * being awoken.
666 * CONTEXT:
667 * spin_lock_irq(rq->lock)
669 void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
671 struct worker *worker = kthread_data(task);
673 if (!(worker->flags & WORKER_NOT_RUNNING))
674 atomic_inc(get_gcwq_nr_running(cpu));
678 * wq_worker_sleeping - a worker is going to sleep
679 * @task: task going to sleep
680 * @cpu: CPU in question, must be the current CPU number
682 * This function is called during schedule() when a busy worker is
683 * going to sleep. A worker on the same cpu can be woken up by
684 * returning a pointer to its task.
686 * CONTEXT:
687 * spin_lock_irq(rq->lock)
689 * RETURNS:
690 * Worker task on @cpu to wake up, %NULL if none.
692 struct task_struct *wq_worker_sleeping(struct task_struct *task,
693 unsigned int cpu)
695 struct worker *worker = kthread_data(task), *to_wakeup = NULL;
696 struct global_cwq *gcwq = get_gcwq(cpu);
697 atomic_t *nr_running = get_gcwq_nr_running(cpu);
699 if (worker->flags & WORKER_NOT_RUNNING)
700 return NULL;
702 /* this can only happen on the local cpu */
703 BUG_ON(cpu != raw_smp_processor_id());
706 * The counterpart of the following dec_and_test, implied mb,
707 * worklist not empty test sequence is in insert_work().
708 * Please read comment there.
710 * NOT_RUNNING is clear. This means that trustee is not in
711 * charge and we're running on the local cpu w/ rq lock held
712 * and preemption disabled, which in turn means that no one else
713 * could be manipulating idle_list, so dereferencing idle_list
714 * without gcwq lock is safe.
716 if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
717 to_wakeup = first_worker(gcwq);
718 return to_wakeup ? to_wakeup->task : NULL;
722 * worker_set_flags - set worker flags and adjust nr_running accordingly
723 * @worker: self
724 * @flags: flags to set
725 * @wakeup: wakeup an idle worker if necessary
727 * Set @flags in @worker->flags and adjust nr_running accordingly. If
728 * nr_running becomes zero and @wakeup is %true, an idle worker is
729 * woken up.
731 * CONTEXT:
732 * spin_lock_irq(gcwq->lock)
734 static inline void worker_set_flags(struct worker *worker, unsigned int flags,
735 bool wakeup)
737 struct global_cwq *gcwq = worker->gcwq;
739 WARN_ON_ONCE(worker->task != current);
742 * If transitioning into NOT_RUNNING, adjust nr_running and
743 * wake up an idle worker as necessary if requested by
744 * @wakeup.
746 if ((flags & WORKER_NOT_RUNNING) &&
747 !(worker->flags & WORKER_NOT_RUNNING)) {
748 atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
750 if (wakeup) {
751 if (atomic_dec_and_test(nr_running) &&
752 !list_empty(&gcwq->worklist))
753 wake_up_worker(gcwq);
754 } else
755 atomic_dec(nr_running);
758 worker->flags |= flags;
762 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
763 * @worker: self
764 * @flags: flags to clear
766 * Clear @flags in @worker->flags and adjust nr_running accordingly.
768 * CONTEXT:
769 * spin_lock_irq(gcwq->lock)
771 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
773 struct global_cwq *gcwq = worker->gcwq;
774 unsigned int oflags = worker->flags;
776 WARN_ON_ONCE(worker->task != current);
778 worker->flags &= ~flags;
781 * If transitioning out of NOT_RUNNING, increment nr_running. Note
782 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is a mask
783 * of multiple flags, not a single flag.
785 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
786 if (!(worker->flags & WORKER_NOT_RUNNING))
787 atomic_inc(get_gcwq_nr_running(gcwq->cpu));
791 * busy_worker_head - return the busy hash head for a work
792 * @gcwq: gcwq of interest
793 * @work: work to be hashed
795 * Return hash head of @gcwq for @work.
797 * CONTEXT:
798 * spin_lock_irq(gcwq->lock).
800 * RETURNS:
801 * Pointer to the hash head.
803 static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
804 struct work_struct *work)
806 const int base_shift = ilog2(sizeof(struct work_struct));
807 unsigned long v = (unsigned long)work;
809 /* simple shift and fold hash, do we need something better? */
810 v >>= base_shift;
811 v += v >> BUSY_WORKER_HASH_ORDER;
812 v &= BUSY_WORKER_HASH_MASK;
814 return &gcwq->busy_hash[v];
818 * __find_worker_executing_work - find worker which is executing a work
819 * @gcwq: gcwq of interest
820 * @bwh: hash head as returned by busy_worker_head()
821 * @work: work to find worker for
823 * Find a worker which is executing @work on @gcwq. @bwh should be
824 * the hash head obtained by calling busy_worker_head() with the same
825 * work.
827 * CONTEXT:
828 * spin_lock_irq(gcwq->lock).
830 * RETURNS:
831 * Pointer to worker which is executing @work if found, NULL
832 * otherwise.
834 static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
835 struct hlist_head *bwh,
836 struct work_struct *work)
838 struct worker *worker;
839 struct hlist_node *tmp;
841 hlist_for_each_entry(worker, tmp, bwh, hentry)
842 if (worker->current_work == work &&
843 worker->current_func == work->func)
844 return worker;
845 return NULL;
849 * find_worker_executing_work - find worker which is executing a work
850 * @gcwq: gcwq of interest
851 * @work: work to find worker for
853 * Find a worker which is executing @work on @gcwq by searching
854 * @gcwq->busy_hash which is keyed by the address of @work. For a worker
855 * to match, its current execution should match the address of @work and
856 * its work function. This is to avoid unwanted dependency between
857 * unrelated work executions through a work item being recycled while still
858 * being executed.
860 * This is a bit tricky. A work item may be freed once its execution
861 * starts and nothing prevents the freed area from being recycled for
862 * another work item. If the same work item address ends up being reused
863 * before the original execution finishes, workqueue will identify the
864 * recycled work item as currently executing and make it wait until the
865 * current execution finishes, introducing an unwanted dependency.
867 * This function checks the work item address, work function and workqueue
868 * to avoid false positives. Note that this isn't complete as one may
869 * construct a work function which can introduce dependency onto itself
870 * through a recycled work item. Well, if somebody wants to shoot oneself
871 * in the foot that badly, there's only so much we can do, and if such
872 * deadlock actually occurs, it should be easy to locate the culprit work
873 * function.
875 * CONTEXT:
876 * spin_lock_irq(gcwq->lock).
878 * RETURNS:
879 * Pointer to worker which is executing @work if found, NULL
880 * otherwise.
882 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
883 struct work_struct *work)
885 return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
886 work);
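/*
 * Example (editor's sketch, not part of the original file): one common way to
 * sidestep the recycled-address problem described above is to allocate a
 * fresh work item per submission and free it from the handler, so an address
 * is never reused while an execution may still be in flight.  The wrapper
 * struct and names are hypothetical.
 */
#if 0
struct example_req {
	struct work_struct work;
	int payload;
};

static void example_req_fn(struct work_struct *work)
{
	struct example_req *req = container_of(work, struct example_req, work);

	/* ... process req->payload ... */
	kfree(req);		/* a work item may be freed once it is running */
}

static int example_submit(int payload)
{
	struct example_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;
	req->payload = payload;
	INIT_WORK(&req->work, example_req_fn);
	schedule_work(&req->work);
	return 0;
}
#endif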
890 * gcwq_determine_ins_pos - find insertion position
891 * @gcwq: gcwq of interest
892 * @cwq: cwq a work is being queued for
894 * A work for @cwq is about to be queued on @gcwq, determine insertion
895 * position for the work. If @cwq is for HIGHPRI wq, the work is
896 * queued at the head of the queue but in FIFO order with respect to
897 * other HIGHPRI works; otherwise, at the end of the queue. This
898 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
899 * there are HIGHPRI works pending.
901 * CONTEXT:
902 * spin_lock_irq(gcwq->lock).
904 * RETURNS:
905 * Pointer to insertion position.
907 static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
908 struct cpu_workqueue_struct *cwq)
910 struct work_struct *twork;
912 if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
913 return &gcwq->worklist;
915 list_for_each_entry(twork, &gcwq->worklist, entry) {
916 struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
918 if (!(tcwq->wq->flags & WQ_HIGHPRI))
919 break;
922 gcwq->flags |= GCWQ_HIGHPRI_PENDING;
923 return &twork->entry;
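/*
 * Example (editor's sketch, not part of the original file): the head-of-queue
 * treatment implemented above is what a workqueue created with WQ_HIGHPRI
 * gets for its work items.  The workqueue name is hypothetical.
 */
#if 0
static struct workqueue_struct *example_highpri_wq;

static int example_highpri_setup(void)
{
	example_highpri_wq = alloc_workqueue("example_highpri", WQ_HIGHPRI, 0);
	return example_highpri_wq ? 0 : -ENOMEM;
}
#endif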
927 * insert_work - insert a work into gcwq
928 * @cwq: cwq @work belongs to
929 * @work: work to insert
930 * @head: insertion point
931 * @extra_flags: extra WORK_STRUCT_* flags to set
933 * Insert @work which belongs to @cwq into @gcwq after @head.
934 * @extra_flags is or'd to work_struct flags.
936 * CONTEXT:
937 * spin_lock_irq(gcwq->lock).
939 static void insert_work(struct cpu_workqueue_struct *cwq,
940 struct work_struct *work, struct list_head *head,
941 unsigned int extra_flags)
943 struct global_cwq *gcwq = cwq->gcwq;
945 /* we own @work, set data and link */
946 set_work_cwq(work, cwq, extra_flags);
949 * Ensure that we get the right work->data if we see the
950 * result of list_add() below, see try_to_grab_pending().
952 smp_wmb();
954 list_add_tail(&work->entry, head);
957 * Ensure either wq_worker_sleeping() sees the above
958 * list_add_tail() or we see zero nr_running to avoid workers
959 * lying around lazily while there are works to be processed.
961 smp_mb();
963 if (__need_more_worker(gcwq))
964 wake_up_worker(gcwq);
968 * Test whether @work is being queued from another work executing on the
969 * same workqueue. This is rather expensive and should only be used from
970 * cold paths.
972 static bool is_chained_work(struct workqueue_struct *wq)
974 unsigned long flags;
975 unsigned int cpu;
977 for_each_gcwq_cpu(cpu) {
978 struct global_cwq *gcwq = get_gcwq(cpu);
979 struct worker *worker;
980 struct hlist_node *pos;
981 int i;
983 spin_lock_irqsave(&gcwq->lock, flags);
984 for_each_busy_worker(worker, i, pos, gcwq) {
985 if (worker->task != current)
986 continue;
987 spin_unlock_irqrestore(&gcwq->lock, flags);
989 * I'm @worker, no locking necessary. See if @work
990 * is headed to the same workqueue.
992 return worker->current_cwq->wq == wq;
994 spin_unlock_irqrestore(&gcwq->lock, flags);
996 return false;
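/*
 * Example (editor's sketch, not part of the original file): "chained" work as
 * detected above, i.e. a work item requeued from a handler running on the
 * same workqueue.  Such queueing remains legal while drain_workqueue() has
 * WQ_DRAINING set.  Names and the chain length are hypothetical.
 */
#if 0
static struct workqueue_struct *example_chain_wq;
static int example_chain_remaining = 3;

static void example_chain_fn(struct work_struct *work)
{
	/* instances of the same item don't overlap, so a plain int is fine here */
	if (--example_chain_remaining > 0)
		queue_work(example_chain_wq, work);	/* chained requeue */
}
#endif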
999 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1000 struct work_struct *work)
1002 struct global_cwq *gcwq;
1003 struct cpu_workqueue_struct *cwq;
1004 struct list_head *worklist;
1005 unsigned int work_flags;
1006 unsigned long flags;
1008 debug_work_activate(work);
1010 /* if dying, only works from the same workqueue are allowed */
1011 if (unlikely(wq->flags & WQ_DRAINING) &&
1012 WARN_ON_ONCE(!is_chained_work(wq)))
1013 return;
1015 /* determine gcwq to use */
1016 if (!(wq->flags & WQ_UNBOUND)) {
1017 struct global_cwq *last_gcwq;
1019 if (unlikely(cpu == WORK_CPU_UNBOUND))
1020 cpu = raw_smp_processor_id();
1023 * It's a multi-cpu workqueue. If @wq is non-reentrant and @work
1024 * was previously on a different cpu, it might still
1025 * be running there, in which case the work needs to
1026 * be queued on that cpu to guarantee non-reentrance.
1028 gcwq = get_gcwq(cpu);
1029 if (wq->flags & WQ_NON_REENTRANT &&
1030 (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
1031 struct worker *worker;
1033 spin_lock_irqsave(&last_gcwq->lock, flags);
1035 worker = find_worker_executing_work(last_gcwq, work);
1037 if (worker && worker->current_cwq->wq == wq)
1038 gcwq = last_gcwq;
1039 else {
1040 /* meh... not running there, queue here */
1041 spin_unlock_irqrestore(&last_gcwq->lock, flags);
1042 spin_lock_irqsave(&gcwq->lock, flags);
1044 } else
1045 spin_lock_irqsave(&gcwq->lock, flags);
1046 } else {
1047 gcwq = get_gcwq(WORK_CPU_UNBOUND);
1048 spin_lock_irqsave(&gcwq->lock, flags);
1051 /* gcwq determined, get cwq and queue */
1052 cwq = get_cwq(gcwq->cpu, wq);
1053 trace_workqueue_queue_work(cpu, cwq, work);
1055 BUG_ON(!list_empty(&work->entry));
1057 cwq->nr_in_flight[cwq->work_color]++;
1058 work_flags = work_color_to_flags(cwq->work_color);
1060 if (likely(cwq->nr_active < cwq->max_active)) {
1061 trace_workqueue_activate_work(work);
1062 cwq->nr_active++;
1063 worklist = gcwq_determine_ins_pos(gcwq, cwq);
1064 } else {
1065 work_flags |= WORK_STRUCT_DELAYED;
1066 worklist = &cwq->delayed_works;
1069 insert_work(cwq, work, worklist, work_flags);
1071 spin_unlock_irqrestore(&gcwq->lock, flags);
1075 * queue_work - queue work on a workqueue
1076 * @wq: workqueue to use
1077 * @work: work to queue
1079 * Returns 0 if @work was already on a queue, non-zero otherwise.
1081 * We queue the work to the CPU on which it was submitted, but if the CPU dies
1082 * it can be processed by another CPU.
1084 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
1086 int ret;
1088 ret = queue_work_on(get_cpu(), wq, work);
1089 put_cpu();
1091 return ret;
1093 EXPORT_SYMBOL_GPL(queue_work);
1096 * queue_work_on - queue work on specific cpu
1097 * @cpu: CPU number to execute work on
1098 * @wq: workqueue to use
1099 * @work: work to queue
1101 * Returns 0 if @work was already on a queue, non-zero otherwise.
1103 * We queue the work to a specific CPU, the caller must ensure it
1104 * can't go away.
1107 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
1109 int ret = 0;
1111 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1112 __queue_work(cpu, wq, work);
1113 ret = 1;
1115 return ret;
1117 EXPORT_SYMBOL_GPL(queue_work_on);
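/*
 * Example (editor's sketch, not part of the original file): using
 * queue_work_on() to run a handler on one particular CPU, e.g. to touch
 * per-cpu data locally.  Names are hypothetical and the caller is assumed to
 * keep the CPU online (e.g. under get_online_cpus()).
 */
#if 0
static void example_percpu_fn(struct work_struct *work)
{
	/* runs on the CPU it was queued on (unless that CPU goes down) */
}
static DECLARE_WORK(example_percpu_work, example_percpu_fn);

static void example_poke_cpu(int cpu)
{
	queue_work_on(cpu, system_wq, &example_percpu_work);
}
#endif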
1119 static void delayed_work_timer_fn(unsigned long __data)
1121 struct delayed_work *dwork = (struct delayed_work *)__data;
1122 struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1124 __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
1128 * queue_delayed_work - queue work on a workqueue after delay
1129 * @wq: workqueue to use
1130 * @dwork: delayable work to queue
1131 * @delay: number of jiffies to wait before queueing
1133 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
1135 int queue_delayed_work(struct workqueue_struct *wq,
1136 struct delayed_work *dwork, unsigned long delay)
1138 if (delay == 0)
1139 return queue_work(wq, &dwork->work);
1141 return queue_delayed_work_on(-1, wq, dwork, delay);
1143 EXPORT_SYMBOL_GPL(queue_delayed_work);
1146 * queue_delayed_work_on - queue work on specific CPU after delay
1147 * @cpu: CPU number to execute work on
1148 * @wq: workqueue to use
1149 * @dwork: work to queue
1150 * @delay: number of jiffies to wait before queueing
1152 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
1154 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1155 struct delayed_work *dwork, unsigned long delay)
1157 int ret = 0;
1158 struct timer_list *timer = &dwork->timer;
1159 struct work_struct *work = &dwork->work;
1161 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1162 unsigned int lcpu;
1164 WARN_ON_ONCE(timer_pending(timer));
1165 WARN_ON_ONCE(!list_empty(&work->entry));
1167 timer_stats_timer_set_start_info(&dwork->timer);
1170 * This stores cwq for the moment, for the timer_fn.
1171 * Note that the work's gcwq is preserved to allow
1172 * reentrance detection for delayed works.
1174 if (!(wq->flags & WQ_UNBOUND)) {
1175 struct global_cwq *gcwq = get_work_gcwq(work);
1177 if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1178 lcpu = gcwq->cpu;
1179 else
1180 lcpu = raw_smp_processor_id();
1181 } else
1182 lcpu = WORK_CPU_UNBOUND;
1184 set_work_cwq(work, get_cwq(lcpu, wq), 0);
1186 timer->expires = jiffies + delay;
1187 timer->data = (unsigned long)dwork;
1188 timer->function = delayed_work_timer_fn;
1190 if (unlikely(cpu >= 0))
1191 add_timer_on(timer, cpu);
1192 else
1193 add_timer(timer);
1194 ret = 1;
1196 return ret;
1198 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
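/*
 * Example (editor's sketch, not part of the original file): a periodic poll
 * built on the delayed-work API above.  Names and the 500ms period are
 * hypothetical.
 */
#if 0
static struct delayed_work example_poll_work;

static void example_poll_fn(struct work_struct *work)
{
	/* ... poll the hardware ... then re-arm for the next round */
	queue_delayed_work(system_wq, &example_poll_work,
			   msecs_to_jiffies(500));
}

static void example_poll_start(void)
{
	INIT_DELAYED_WORK(&example_poll_work, example_poll_fn);
	queue_delayed_work(system_wq, &example_poll_work,
			   msecs_to_jiffies(500));
}

static void example_poll_stop(void)
{
	cancel_delayed_work_sync(&example_poll_work);
}
#endif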
1201 * worker_enter_idle - enter idle state
1202 * @worker: worker which is entering idle state
1204 * @worker is entering idle state. Update stats and idle timer if
1205 * necessary.
1207 * LOCKING:
1208 * spin_lock_irq(gcwq->lock).
1210 static void worker_enter_idle(struct worker *worker)
1212 struct global_cwq *gcwq = worker->gcwq;
1214 BUG_ON(worker->flags & WORKER_IDLE);
1215 BUG_ON(!list_empty(&worker->entry) &&
1216 (worker->hentry.next || worker->hentry.pprev));
1218 /* can't use worker_set_flags(), also called from start_worker() */
1219 worker->flags |= WORKER_IDLE;
1220 gcwq->nr_idle++;
1221 worker->last_active = jiffies;
1223 /* idle_list is LIFO */
1224 list_add(&worker->entry, &gcwq->idle_list);
1226 if (likely(!(worker->flags & WORKER_ROGUE))) {
1227 if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1228 mod_timer(&gcwq->idle_timer,
1229 jiffies + IDLE_WORKER_TIMEOUT);
1230 } else
1231 wake_up_all(&gcwq->trustee_wait);
1234 * Sanity check nr_running. Because trustee releases gcwq->lock
1235 * between setting %WORKER_ROGUE and zapping nr_running, the
1236 * warning may trigger spuriously. Check iff trustee is idle.
1238 WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
1239 gcwq->nr_workers == gcwq->nr_idle &&
1240 atomic_read(get_gcwq_nr_running(gcwq->cpu)));
1244 * worker_leave_idle - leave idle state
1245 * @worker: worker which is leaving idle state
1247 * @worker is leaving idle state. Update stats.
1249 * LOCKING:
1250 * spin_lock_irq(gcwq->lock).
1252 static void worker_leave_idle(struct worker *worker)
1254 struct global_cwq *gcwq = worker->gcwq;
1256 BUG_ON(!(worker->flags & WORKER_IDLE));
1257 worker_clr_flags(worker, WORKER_IDLE);
1258 gcwq->nr_idle--;
1259 list_del_init(&worker->entry);
1263 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1264 * @worker: self
1266 * Works which are scheduled while the cpu is online must at least be
1267 * scheduled to a worker which is bound to the cpu so that if they are
1268 * flushed from cpu callbacks while cpu is going down, they are
1269 * guaranteed to execute on the cpu.
1271 * This function is to be used by rogue workers and rescuers to bind
1272 * themselves to the target cpu and may race with cpu going down or
1273 * coming online. kthread_bind() can't be used because it may put the
1274 * worker on an already dead cpu and set_cpus_allowed_ptr() can't be used
1275 * verbatim as it's best effort and blocking and gcwq may be
1276 * [dis]associated in the meantime.
1278 * This function tries set_cpus_allowed_ptr(), locks gcwq and verifies
1279 * the binding against GCWQ_DISASSOCIATED which is set during
1280 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1281 * idle state or fetches works without dropping lock, it can guarantee
1282 * the scheduling requirement described in the first paragraph.
1284 * CONTEXT:
1285 * Might sleep. Called without any lock but returns with gcwq->lock
1286 * held.
1288 * RETURNS:
1289 * %true if the associated gcwq is online (@worker is successfully
1290 * bound), %false if offline.
1292 static bool worker_maybe_bind_and_lock(struct worker *worker)
1293 __acquires(&gcwq->lock)
1295 struct global_cwq *gcwq = worker->gcwq;
1296 struct task_struct *task = worker->task;
1298 while (true) {
1300 * The following call may fail, succeed or succeed
1301 * without actually migrating the task to the cpu if
1302 * it races with cpu hotunplug operation. Verify
1303 * against GCWQ_DISASSOCIATED.
1305 if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1306 set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1308 spin_lock_irq(&gcwq->lock);
1309 if (gcwq->flags & GCWQ_DISASSOCIATED)
1310 return false;
1311 if (task_cpu(task) == gcwq->cpu &&
1312 cpumask_equal(&current->cpus_allowed,
1313 get_cpu_mask(gcwq->cpu)))
1314 return true;
1315 spin_unlock_irq(&gcwq->lock);
1318 * We've raced with CPU hot[un]plug. Give it a breather
1319 * and retry migration. cond_resched() is required here;
1320 * otherwise, we might deadlock against cpu_stop trying to
1321 * bring down the CPU on non-preemptive kernel.
1323 cpu_relax();
1324 cond_resched();
1329 * Function for worker->rebind_work used to rebind rogue busy workers
1330 * to the associated cpu which is coming back online. This is
1331 * scheduled by cpu up but can race with other cpu hotplug operations
1332 * and may be executed twice without intervening cpu down.
1334 static void worker_rebind_fn(struct work_struct *work)
1336 struct worker *worker = container_of(work, struct worker, rebind_work);
1337 struct global_cwq *gcwq = worker->gcwq;
1339 if (worker_maybe_bind_and_lock(worker))
1340 worker_clr_flags(worker, WORKER_REBIND);
1342 spin_unlock_irq(&gcwq->lock);
1345 static struct worker *alloc_worker(void)
1347 struct worker *worker;
1349 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1350 if (worker) {
1351 INIT_LIST_HEAD(&worker->entry);
1352 INIT_LIST_HEAD(&worker->scheduled);
1353 INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1354 /* on creation a worker is in !idle && prep state */
1355 worker->flags = WORKER_PREP;
1357 return worker;
1361 * create_worker - create a new workqueue worker
1362 * @gcwq: gcwq the new worker will belong to
1363 * @bind: whether to set affinity to @cpu or not
1365 * Create a new worker which is bound to @gcwq. The returned worker
1366 * can be started by calling start_worker() or destroyed using
1367 * destroy_worker().
1369 * CONTEXT:
1370 * Might sleep. Does GFP_KERNEL allocations.
1372 * RETURNS:
1373 * Pointer to the newly created worker.
1375 static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1377 bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
1378 struct worker *worker = NULL;
1379 int id = -1;
1381 spin_lock_irq(&gcwq->lock);
1382 while (ida_get_new(&gcwq->worker_ida, &id)) {
1383 spin_unlock_irq(&gcwq->lock);
1384 if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1385 goto fail;
1386 spin_lock_irq(&gcwq->lock);
1388 spin_unlock_irq(&gcwq->lock);
1390 worker = alloc_worker();
1391 if (!worker)
1392 goto fail;
1394 worker->gcwq = gcwq;
1395 worker->id = id;
1397 if (!on_unbound_cpu)
1398 worker->task = kthread_create_on_node(worker_thread,
1399 worker,
1400 cpu_to_node(gcwq->cpu),
1401 "kworker/%u:%d", gcwq->cpu, id);
1402 else
1403 worker->task = kthread_create(worker_thread, worker,
1404 "kworker/u:%d", id);
1405 if (IS_ERR(worker->task))
1406 goto fail;
1409 * A rogue worker will become a regular one if CPU comes
1410 * online later on. Make sure every worker has
1411 * PF_THREAD_BOUND set.
1413 if (bind && !on_unbound_cpu)
1414 kthread_bind(worker->task, gcwq->cpu);
1415 else {
1416 worker->task->flags |= PF_THREAD_BOUND;
1417 if (on_unbound_cpu)
1418 worker->flags |= WORKER_UNBOUND;
1421 return worker;
1422 fail:
1423 if (id >= 0) {
1424 spin_lock_irq(&gcwq->lock);
1425 ida_remove(&gcwq->worker_ida, id);
1426 spin_unlock_irq(&gcwq->lock);
1428 kfree(worker);
1429 return NULL;
1433 * start_worker - start a newly created worker
1434 * @worker: worker to start
1436 * Make the gcwq aware of @worker and start it.
1438 * CONTEXT:
1439 * spin_lock_irq(gcwq->lock).
1441 static void start_worker(struct worker *worker)
1443 worker->flags |= WORKER_STARTED;
1444 worker->gcwq->nr_workers++;
1445 worker_enter_idle(worker);
1446 wake_up_process(worker->task);
1450 * destroy_worker - destroy a workqueue worker
1451 * @worker: worker to be destroyed
1453 * Destroy @worker and adjust @gcwq stats accordingly.
1455 * CONTEXT:
1456 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1458 static void destroy_worker(struct worker *worker)
1460 struct global_cwq *gcwq = worker->gcwq;
1461 int id = worker->id;
1463 /* sanity check frenzy */
1464 BUG_ON(worker->current_work);
1465 BUG_ON(!list_empty(&worker->scheduled));
1467 if (worker->flags & WORKER_STARTED)
1468 gcwq->nr_workers--;
1469 if (worker->flags & WORKER_IDLE)
1470 gcwq->nr_idle--;
1472 list_del_init(&worker->entry);
1473 worker->flags |= WORKER_DIE;
1475 spin_unlock_irq(&gcwq->lock);
1477 kthread_stop(worker->task);
1478 kfree(worker);
1480 spin_lock_irq(&gcwq->lock);
1481 ida_remove(&gcwq->worker_ida, id);
1484 static void idle_worker_timeout(unsigned long __gcwq)
1486 struct global_cwq *gcwq = (void *)__gcwq;
1488 spin_lock_irq(&gcwq->lock);
1490 if (too_many_workers(gcwq)) {
1491 struct worker *worker;
1492 unsigned long expires;
1494 /* idle_list is kept in LIFO order, check the last one */
1495 worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1496 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1498 if (time_before(jiffies, expires))
1499 mod_timer(&gcwq->idle_timer, expires);
1500 else {
1501 /* it's been idle for too long, wake up manager */
1502 gcwq->flags |= GCWQ_MANAGE_WORKERS;
1503 wake_up_worker(gcwq);
1507 spin_unlock_irq(&gcwq->lock);
1510 static bool send_mayday(struct work_struct *work)
1512 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1513 struct workqueue_struct *wq = cwq->wq;
1514 unsigned int cpu;
1516 if (!(wq->flags & WQ_RESCUER))
1517 return false;
1519 /* mayday mayday mayday */
1520 cpu = cwq->gcwq->cpu;
1521 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1522 if (cpu == WORK_CPU_UNBOUND)
1523 cpu = 0;
1524 if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1525 wake_up_process(wq->rescuer->task);
1526 return true;
1529 static void gcwq_mayday_timeout(unsigned long __gcwq)
1531 struct global_cwq *gcwq = (void *)__gcwq;
1532 struct work_struct *work;
1534 spin_lock_irq(&gcwq->lock);
1536 if (need_to_create_worker(gcwq)) {
1538 * We've been trying to create a new worker but
1539 * haven't been successful. We might be hitting an
1540 * allocation deadlock. Send distress signals to
1541 * rescuers.
1543 list_for_each_entry(work, &gcwq->worklist, entry)
1544 send_mayday(work);
1547 spin_unlock_irq(&gcwq->lock);
1549 mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
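/*
 * Example (editor's sketch, not part of the original file): the mayday path
 * above only helps workqueues that have a rescuer, i.e. those created with
 * WQ_MEM_RECLAIM.  A workqueue sitting on the memory-reclaim path would be
 * allocated roughly like this (name hypothetical).
 */
#if 0
static struct workqueue_struct *example_reclaim_wq;

static int example_reclaim_setup(void)
{
	/* max_active of 1 limits each CPU to one in-flight reclaim work */
	example_reclaim_wq = alloc_workqueue("example_reclaim",
					     WQ_MEM_RECLAIM, 1);
	return example_reclaim_wq ? 0 : -ENOMEM;
}
#endif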
1553 * maybe_create_worker - create a new worker if necessary
1554 * @gcwq: gcwq to create a new worker for
1556 * Create a new worker for @gcwq if necessary. @gcwq is guaranteed to
1557 * have at least one idle worker on return from this function. If
1558 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1559 * sent to all rescuers with works scheduled on @gcwq to resolve
1560 * possible allocation deadlock.
1562 * On return, need_to_create_worker() is guaranteed to be false and
1563 * may_start_working() true.
1565 * LOCKING:
1566 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1567 * multiple times. Does GFP_KERNEL allocations. Called only from
1568 * manager.
1570 * RETURNS:
1571 * false if no action was taken and gcwq->lock stayed locked, true
1572 * otherwise.
1574 static bool maybe_create_worker(struct global_cwq *gcwq)
1575 __releases(&gcwq->lock)
1576 __acquires(&gcwq->lock)
1578 if (!need_to_create_worker(gcwq))
1579 return false;
1580 restart:
1581 spin_unlock_irq(&gcwq->lock);
1583 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1584 mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1586 while (true) {
1587 struct worker *worker;
1589 worker = create_worker(gcwq, true);
1590 if (worker) {
1591 del_timer_sync(&gcwq->mayday_timer);
1592 spin_lock_irq(&gcwq->lock);
1593 start_worker(worker);
1594 BUG_ON(need_to_create_worker(gcwq));
1595 return true;
1598 if (!need_to_create_worker(gcwq))
1599 break;
1601 __set_current_state(TASK_INTERRUPTIBLE);
1602 schedule_timeout(CREATE_COOLDOWN);
1604 if (!need_to_create_worker(gcwq))
1605 break;
1608 del_timer_sync(&gcwq->mayday_timer);
1609 spin_lock_irq(&gcwq->lock);
1610 if (need_to_create_worker(gcwq))
1611 goto restart;
1612 return true;
1616 * maybe_destroy_worker - destroy workers which have been idle for a while
1617 * @gcwq: gcwq to destroy workers for
1619 * Destroy @gcwq workers which have been idle for longer than
1620 * IDLE_WORKER_TIMEOUT.
1622 * LOCKING:
1623 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1624 * multiple times. Called only from manager.
1626 * RETURNS:
1627 * false if no action was taken and gcwq->lock stayed locked, true
1628 * otherwise.
1630 static bool maybe_destroy_workers(struct global_cwq *gcwq)
1632 bool ret = false;
1634 while (too_many_workers(gcwq)) {
1635 struct worker *worker;
1636 unsigned long expires;
1638 worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1639 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1641 if (time_before(jiffies, expires)) {
1642 mod_timer(&gcwq->idle_timer, expires);
1643 break;
1646 destroy_worker(worker);
1647 ret = true;
1650 return ret;
1654 * manage_workers - manage worker pool
1655 * @worker: self
1657 * Assume the manager role and manage gcwq worker pool @worker belongs
1658 * to. At any given time, there can be only zero or one manager per
1659 * gcwq. The exclusion is handled automatically by this function.
1661 * The caller can safely start processing works on false return. On
1662 * true return, it's guaranteed that need_to_create_worker() is false
1663 * and may_start_working() is true.
1665 * CONTEXT:
1666 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1667 * multiple times. Does GFP_KERNEL allocations.
1669 * RETURNS:
1670 * false if no action was taken and gcwq->lock stayed locked, true if
1671 * some action was taken.
1673 static bool manage_workers(struct worker *worker)
1675 struct global_cwq *gcwq = worker->gcwq;
1676 bool ret = false;
1678 if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1679 return ret;
1681 gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1682 gcwq->flags |= GCWQ_MANAGING_WORKERS;
1685 * Destroy and then create so that may_start_working() is true
1686 * on return.
1688 ret |= maybe_destroy_workers(gcwq);
1689 ret |= maybe_create_worker(gcwq);
1691 gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1694 * The trustee might be waiting to take over the manager
1695 * position, tell it we're done.
1697 if (unlikely(gcwq->trustee))
1698 wake_up_all(&gcwq->trustee_wait);
1700 return ret;
1704 * move_linked_works - move linked works to a list
1705 * @work: start of series of works to be scheduled
1706 * @head: target list to append @work to
1707 * @nextp: out parameter for nested worklist walking
1709 * Schedule linked works starting from @work to @head. Work series to
1710 * be scheduled starts at @work and includes any consecutive work with
1711 * WORK_STRUCT_LINKED set in its predecessor.
1713 * If @nextp is not NULL, it's updated to point to the next work of
1714 * the last scheduled work. This allows move_linked_works() to be
1715 * nested inside outer list_for_each_entry_safe().
1717 * CONTEXT:
1718 * spin_lock_irq(gcwq->lock).
1720 static void move_linked_works(struct work_struct *work, struct list_head *head,
1721 struct work_struct **nextp)
1723 struct work_struct *n;
1726 * Linked worklist will always end before the end of the list,
1727 * use NULL for list head.
1729 list_for_each_entry_safe_from(work, n, NULL, entry) {
1730 list_move_tail(&work->entry, head);
1731 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1732 break;
1736 * If we're already inside safe list traversal and have moved
1737 * multiple works to the scheduled queue, the next position
1738 * needs to be updated.
1740 if (nextp)
1741 *nextp = n;
1744 static void cwq_activate_delayed_work(struct work_struct *work)
1746 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1747 struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1749 trace_workqueue_activate_work(work);
1750 move_linked_works(work, pos, NULL);
1751 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1752 cwq->nr_active++;
1755 static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1757 struct work_struct *work = list_first_entry(&cwq->delayed_works,
1758 struct work_struct, entry);
1760 cwq_activate_delayed_work(work);
1764 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1765 * @cwq: cwq of interest
1766 * @color: color of work which left the queue
1767 * @delayed: for a delayed work
1769 * A work either has completed or is removed from pending queue,
1770 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1772 * CONTEXT:
1773 * spin_lock_irq(gcwq->lock).
1775 static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1776 bool delayed)
1778 /* ignore uncolored works */
1779 if (color == WORK_NO_COLOR)
1780 return;
1782 cwq->nr_in_flight[color]--;
1784 if (!delayed) {
1785 cwq->nr_active--;
1786 if (!list_empty(&cwq->delayed_works)) {
1787 /* one down, submit a delayed one */
1788 if (cwq->nr_active < cwq->max_active)
1789 cwq_activate_first_delayed(cwq);
1793 /* is flush in progress and are we at the flushing tip? */
1794 if (likely(cwq->flush_color != color))
1795 return;
1797 /* are there still in-flight works? */
1798 if (cwq->nr_in_flight[color])
1799 return;
1801 /* this cwq is done, clear flush_color */
1802 cwq->flush_color = -1;
1805 * If this was the last cwq, wake up the first flusher. It
1806 * will handle the rest.
1808 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1809 complete(&cwq->wq->first_flusher->done);
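/*
 * Example (editor's sketch, not part of the original file): the color
 * bookkeeping above is what flush_workqueue() is built on.  On the caller
 * side the contract is simply "everything queued before the call has finished
 * by the time it returns".  Names are hypothetical.
 */
#if 0
static struct workqueue_struct *example_io_wq;

static void example_io_shutdown(void)
{
	/* wait for all previously queued I/O works to finish ... */
	flush_workqueue(example_io_wq);
	/* ... before tearing the backing device down */
	destroy_workqueue(example_io_wq);
}
#endif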
1813 * process_one_work - process single work
1814 * @worker: self
1815 * @work: work to process
1817 * Process @work. This function contains all the logic necessary to
1818 * process a single work including synchronization against and
1819 * interaction with other workers on the same cpu, queueing and
1820 * flushing. As long as context requirement is met, any worker can
1821 * call this function to process a work.
1823 * CONTEXT:
1824 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1826 static void process_one_work(struct worker *worker, struct work_struct *work)
1827 __releases(&gcwq->lock)
1828 __acquires(&gcwq->lock)
1830 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1831 struct global_cwq *gcwq = cwq->gcwq;
1832 struct hlist_head *bwh = busy_worker_head(gcwq, work);
1833 bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
1834 int work_color;
1835 struct worker *collision;
1836 #ifdef CONFIG_LOCKDEP
1838 * It is permissible to free the struct work_struct from
1839 * inside the function that is called from it, this we need to
1840 * take into account for lockdep too. To avoid bogus "held
1841 * lock freed" warnings as well as problems when looking into
1842 * work->lockdep_map, make a copy and use that here.
1844 struct lockdep_map lockdep_map = work->lockdep_map;
1845 #endif
1847 * A single work shouldn't be executed concurrently by
1848 * multiple workers on a single cpu. Check whether anyone is
1849 * already processing the work. If so, defer the work to the
1850 * currently executing one.
1852 collision = __find_worker_executing_work(gcwq, bwh, work);
1853 if (unlikely(collision)) {
1854 move_linked_works(work, &collision->scheduled, NULL);
1855 return;
1858 /* claim and process */
1859 debug_work_deactivate(work);
1860 hlist_add_head(&worker->hentry, bwh);
1861 worker->current_work = work;
1862 worker->current_func = work->func;
1863 worker->current_cwq = cwq;
1864 work_color = get_work_color(work);
1866 /* record the current cpu number in the work data and dequeue */
1867 set_work_cpu(work, gcwq->cpu);
1868 list_del_init(&work->entry);
1871 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1872 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1874 if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1875 struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1876 struct work_struct, entry);
1878 if (!list_empty(&gcwq->worklist) &&
1879 get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1880 wake_up_worker(gcwq);
1881 else
1882 gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1886 * CPU intensive works don't participate in concurrency
1887 * management. They're the scheduler's responsibility.
1889 if (unlikely(cpu_intensive))
1890 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1892 spin_unlock_irq(&gcwq->lock);
1894 smp_wmb(); /* paired with test_and_set_bit(PENDING) */
1895 work_clear_pending(work);
1897 lock_map_acquire_read(&cwq->wq->lockdep_map);
1898 lock_map_acquire(&lockdep_map);
1899 trace_workqueue_execute_start(work);
1900 worker->current_func(work);
1902 * While we must be careful to not use "work" after this, the trace
1903 * point will only record its address.
1905 trace_workqueue_execute_end(work);
1906 lock_map_release(&lockdep_map);
1907 lock_map_release(&cwq->wq->lockdep_map);
1909 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1910 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
1911 " last function: %pf\n",
1912 current->comm, preempt_count(), task_pid_nr(current),
1913 worker->current_func);
1914 debug_show_held_locks(current);
1915 dump_stack();
1918 spin_lock_irq(&gcwq->lock);
1920 /* clear cpu intensive status */
1921 if (unlikely(cpu_intensive))
1922 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1924 /* we're done with it, release */
1925 hlist_del_init(&worker->hentry);
1926 worker->current_work = NULL;
1927 worker->current_func = NULL;
1928 worker->current_cwq = NULL;
1929 cwq_dec_nr_in_flight(cwq, work_color, false);
1933 * process_scheduled_works - process scheduled works
1934 * @worker: self
1936 * Process all scheduled works. Please note that the scheduled list
1937 * may change while processing a work, so this function repeatedly
1938 * fetches a work from the top and executes it.
1940 * CONTEXT:
1941 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1942 * multiple times.
1944 static void process_scheduled_works(struct worker *worker)
1946 while (!list_empty(&worker->scheduled)) {
1947 struct work_struct *work = list_first_entry(&worker->scheduled,
1948 struct work_struct, entry);
1949 process_one_work(worker, work);
1954 * worker_thread - the worker thread function
1955 * @__worker: self
1957 * The gcwq worker thread function. There's a single dynamic pool of
1958 * these per cpu. These workers process all works regardless of
1959 * their specific target workqueue. The only exception is works which
1960 * belong to workqueues with a rescuer which will be explained in
1961 * rescuer_thread().
1963 static int worker_thread(void *__worker)
1965 struct worker *worker = __worker;
1966 struct global_cwq *gcwq = worker->gcwq;
1968 /* tell the scheduler that this is a workqueue worker */
1969 worker->task->flags |= PF_WQ_WORKER;
1970 woke_up:
1971 spin_lock_irq(&gcwq->lock);
1973 /* DIE can be set only while we're idle, checking here is enough */
1974 if (worker->flags & WORKER_DIE) {
1975 spin_unlock_irq(&gcwq->lock);
1976 worker->task->flags &= ~PF_WQ_WORKER;
1977 return 0;
1980 worker_leave_idle(worker);
1981 recheck:
1982 /* no more worker necessary? */
1983 if (!need_more_worker(gcwq))
1984 goto sleep;
1986 /* do we need to manage? */
1987 if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1988 goto recheck;
1991 * ->scheduled list can only be filled while a worker is
1992 * preparing to process a work or actually processing it.
1993 * Make sure nobody diddled with it while I was sleeping.
1995 BUG_ON(!list_empty(&worker->scheduled));
1998 * When control reaches this point, we're guaranteed to have
1999 * at least one idle worker or that someone else has already
2000 * assumed the manager role.
2002 worker_clr_flags(worker, WORKER_PREP);
2004 do {
2005 struct work_struct *work =
2006 list_first_entry(&gcwq->worklist,
2007 struct work_struct, entry);
2009 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2010 /* optimization path, not strictly necessary */
2011 process_one_work(worker, work);
2012 if (unlikely(!list_empty(&worker->scheduled)))
2013 process_scheduled_works(worker);
2014 } else {
2015 move_linked_works(work, &worker->scheduled, NULL);
2016 process_scheduled_works(worker);
2018 } while (keep_working(gcwq));
2020 worker_set_flags(worker, WORKER_PREP, false);
2021 sleep:
2022 if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
2023 goto recheck;
2026 * gcwq->lock is held and there's no work to process and no
2027 * need to manage, sleep. Workers are woken up only while
2028 * holding gcwq->lock or from local cpu, so setting the
2029 * current state before releasing gcwq->lock is enough to
2030 * prevent losing any event.
2032 worker_enter_idle(worker);
2033 __set_current_state(TASK_INTERRUPTIBLE);
2034 spin_unlock_irq(&gcwq->lock);
2035 schedule();
2036 goto woke_up;
2040 * rescuer_thread - the rescuer thread function
2041 * @__wq: the associated workqueue
2043 * Workqueue rescuer thread function. There's one rescuer for each
2044 * workqueue which has WQ_RESCUER set.
2046 * Regular work processing on a gcwq may block trying to create a new
2047 * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2048 * developing into deadlock if some works currently on the same queue
2049 * need to be processed to satisfy the GFP_KERNEL allocation. This is
2050 * the problem rescuer solves.
2052 * When such condition is possible, the gcwq summons rescuers of all
2053 * workqueues which have works queued on the gcwq and let them process
2054 * those works so that forward progress can be guaranteed.
2056 * This should happen rarely.
2058 static int rescuer_thread(void *__wq)
2060 struct workqueue_struct *wq = __wq;
2061 struct worker *rescuer = wq->rescuer;
2062 struct list_head *scheduled = &rescuer->scheduled;
2063 bool is_unbound = wq->flags & WQ_UNBOUND;
2064 unsigned int cpu;
2066 set_user_nice(current, RESCUER_NICE_LEVEL);
2067 repeat:
2068 set_current_state(TASK_INTERRUPTIBLE);
2070 if (kthread_should_stop()) {
2071 __set_current_state(TASK_RUNNING);
2072 return 0;
2076 * See whether any cpu is asking for help. Unbound
2077 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
2079 for_each_mayday_cpu(cpu, wq->mayday_mask) {
2080 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2081 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
2082 struct global_cwq *gcwq = cwq->gcwq;
2083 struct work_struct *work, *n;
2085 __set_current_state(TASK_RUNNING);
2086 mayday_clear_cpu(cpu, wq->mayday_mask);
2088 /* migrate to the target cpu if possible */
2089 rescuer->gcwq = gcwq;
2090 worker_maybe_bind_and_lock(rescuer);
2093 * Slurp in all works issued via this workqueue and
2094 * process'em.
2096 BUG_ON(!list_empty(&rescuer->scheduled));
2097 list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
2098 if (get_work_cwq(work) == cwq)
2099 move_linked_works(work, scheduled, &n);
2101 process_scheduled_works(rescuer);
2104 * Leave this gcwq. If keep_working() is %true, notify a
2105 * regular worker; otherwise, we end up with 0 concurrency
2106 * and stalling the execution.
2108 if (keep_working(gcwq))
2109 wake_up_worker(gcwq);
2111 spin_unlock_irq(&gcwq->lock);
2114 schedule();
2115 goto repeat;
2118 struct wq_barrier {
2119 struct work_struct work;
2120 struct completion done;
2123 static void wq_barrier_func(struct work_struct *work)
2125 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2126 complete(&barr->done);
2130 * insert_wq_barrier - insert a barrier work
2131 * @cwq: cwq to insert barrier into
2132 * @barr: wq_barrier to insert
2133 * @target: target work to attach @barr to
2134 * @worker: worker currently executing @target, NULL if @target is not executing
2136 * @barr is linked to @target such that @barr is completed only after
2137 * @target finishes execution. Please note that the ordering
2138 * guarantee is observed only with respect to @target and on the local
2139 * cpu.
2141 * Currently, a queued barrier can't be canceled. This is because
2142 * try_to_grab_pending() can't determine whether the work to be
2143 * grabbed is at the head of the queue and thus can't clear LINKED
2144 * flag of the previous work while there must be a valid next work
2145 * after a work with LINKED flag set.
2147 * Note that when @worker is non-NULL, @target may be modified
2148 * underneath us, so we can't reliably determine cwq from @target.
2150 * CONTEXT:
2151 * spin_lock_irq(gcwq->lock).
2153 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2154 struct wq_barrier *barr,
2155 struct work_struct *target, struct worker *worker)
2157 struct list_head *head;
2158 unsigned int linked = 0;
2161 * debugobject calls are safe here even with gcwq->lock locked
2162 * as we know for sure that this will not trigger any of the
2163 * checks and call back into the fixup functions where we
2164 * might deadlock.
2166 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2167 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2168 init_completion(&barr->done);
2171 * If @target is currently being executed, schedule the
2172 * barrier to the worker; otherwise, put it after @target.
2174 if (worker)
2175 head = worker->scheduled.next;
2176 else {
2177 unsigned long *bits = work_data_bits(target);
2179 head = target->entry.next;
2180 /* there can already be other linked works, inherit and set */
2181 linked = *bits & WORK_STRUCT_LINKED;
2182 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2185 debug_work_activate(&barr->work);
2186 insert_work(cwq, &barr->work, head,
2187 work_color_to_flags(WORK_NO_COLOR) | linked);
2191 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2192 * @wq: workqueue being flushed
2193 * @flush_color: new flush color, < 0 for no-op
2194 * @work_color: new work color, < 0 for no-op
2196 * Prepare cwqs for workqueue flushing.
2198 * If @flush_color is non-negative, flush_color on all cwqs should be
2199 * -1. If no cwq has in-flight commands at the specified color, all
2200 * cwq->flush_color's stay at -1 and %false is returned. If any cwq
2201 * has in flight commands, its cwq->flush_color is set to
2202 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2203 * wakeup logic is armed and %true is returned.
2205 * The caller should have initialized @wq->first_flusher prior to
2206 * calling this function with non-negative @flush_color. If
2207 * @flush_color is negative, no flush color update is done and %false
2208 * is returned.
2210 * If @work_color is non-negative, all cwqs should have the same
2211 * work_color which is previous to @work_color and all will be
2212 * advanced to @work_color.
2214 * CONTEXT:
2215 * mutex_lock(wq->flush_mutex).
2217 * RETURNS:
2218 * %true if @flush_color >= 0 and there's something to flush. %false
2219 * otherwise.
2221 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2222 int flush_color, int work_color)
2224 bool wait = false;
2225 unsigned int cpu;
2227 if (flush_color >= 0) {
2228 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2229 atomic_set(&wq->nr_cwqs_to_flush, 1);
2232 for_each_cwq_cpu(cpu, wq) {
2233 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2234 struct global_cwq *gcwq = cwq->gcwq;
2236 spin_lock_irq(&gcwq->lock);
2238 if (flush_color >= 0) {
2239 BUG_ON(cwq->flush_color != -1);
2241 if (cwq->nr_in_flight[flush_color]) {
2242 cwq->flush_color = flush_color;
2243 atomic_inc(&wq->nr_cwqs_to_flush);
2244 wait = true;
2248 if (work_color >= 0) {
2249 BUG_ON(work_color != work_next_color(cwq->work_color));
2250 cwq->work_color = work_color;
2253 spin_unlock_irq(&gcwq->lock);
2256 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2257 complete(&wq->first_flusher->done);
2259 return wait;
2263 * flush_workqueue - ensure that any scheduled work has run to completion.
2264 * @wq: workqueue to flush
2266 * Forces execution of the workqueue and blocks until its completion.
2267 * This is typically used in driver shutdown handlers.
2269 * We sleep until all works which were queued on entry have been handled,
2270 * but we are not livelocked by new incoming ones.
2272 void flush_workqueue(struct workqueue_struct *wq)
2274 struct wq_flusher this_flusher = {
2275 .list = LIST_HEAD_INIT(this_flusher.list),
2276 .flush_color = -1,
2277 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2279 int next_color;
2281 lock_map_acquire(&wq->lockdep_map);
2282 lock_map_release(&wq->lockdep_map);
2284 mutex_lock(&wq->flush_mutex);
2287 * Start-to-wait phase
2289 next_color = work_next_color(wq->work_color);
2291 if (next_color != wq->flush_color) {
2293 * Color space is not full. The current work_color
2294 * becomes our flush_color and work_color is advanced
2295 * by one.
2297 BUG_ON(!list_empty(&wq->flusher_overflow));
2298 this_flusher.flush_color = wq->work_color;
2299 wq->work_color = next_color;
2301 if (!wq->first_flusher) {
2302 /* no flush in progress, become the first flusher */
2303 BUG_ON(wq->flush_color != this_flusher.flush_color);
2305 wq->first_flusher = &this_flusher;
2307 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2308 wq->work_color)) {
2309 /* nothing to flush, done */
2310 wq->flush_color = next_color;
2311 wq->first_flusher = NULL;
2312 goto out_unlock;
2314 } else {
2315 /* wait in queue */
2316 BUG_ON(wq->flush_color == this_flusher.flush_color);
2317 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2318 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2320 } else {
2322 * Oops, color space is full, wait on overflow queue.
2323 * The next flush completion will assign us
2324 * flush_color and transfer to flusher_queue.
2326 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2329 mutex_unlock(&wq->flush_mutex);
2331 wait_for_completion(&this_flusher.done);
2334 * Wake-up-and-cascade phase
2336 * First flushers are responsible for cascading flushes and
2337 * handling overflow. Non-first flushers can simply return.
2339 if (wq->first_flusher != &this_flusher)
2340 return;
2342 mutex_lock(&wq->flush_mutex);
2344 /* we might have raced, check again with mutex held */
2345 if (wq->first_flusher != &this_flusher)
2346 goto out_unlock;
2348 wq->first_flusher = NULL;
2350 BUG_ON(!list_empty(&this_flusher.list));
2351 BUG_ON(wq->flush_color != this_flusher.flush_color);
2353 while (true) {
2354 struct wq_flusher *next, *tmp;
2356 /* complete all the flushers sharing the current flush color */
2357 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2358 if (next->flush_color != wq->flush_color)
2359 break;
2360 list_del_init(&next->list);
2361 complete(&next->done);
2364 BUG_ON(!list_empty(&wq->flusher_overflow) &&
2365 wq->flush_color != work_next_color(wq->work_color));
2367 /* this flush_color is finished, advance by one */
2368 wq->flush_color = work_next_color(wq->flush_color);
2370 /* one color has been freed, handle overflow queue */
2371 if (!list_empty(&wq->flusher_overflow)) {
2373 * Assign the same color to all overflowed
2374 * flushers, advance work_color and append to
2375 * flusher_queue. This is the start-to-wait
2376 * phase for these overflowed flushers.
2378 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2379 tmp->flush_color = wq->work_color;
2381 wq->work_color = work_next_color(wq->work_color);
2383 list_splice_tail_init(&wq->flusher_overflow,
2384 &wq->flusher_queue);
2385 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2388 if (list_empty(&wq->flusher_queue)) {
2389 BUG_ON(wq->flush_color != wq->work_color);
2390 break;
2394 * Need to flush more colors. Make the next flusher
2395 * the new first flusher and arm cwqs.
2397 BUG_ON(wq->flush_color == wq->work_color);
2398 BUG_ON(wq->flush_color != next->flush_color);
2400 list_del_init(&next->list);
2401 wq->first_flusher = next;
2403 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2404 break;
2407 * Meh... this color is already done, clear first
2408 * flusher and repeat cascading.
2410 wq->first_flusher = NULL;
2413 out_unlock:
2414 mutex_unlock(&wq->flush_mutex);
2416 EXPORT_SYMBOL_GPL(flush_workqueue);
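/*
 * Illustrative usage sketch, not part of the original workqueue.c: a
 * driver flushing its private workqueue from a shutdown path so that
 * every work item queued before this point has finished.  The names
 * example_wq and example_shutdown are hypothetical; a driver context
 * with <linux/workqueue.h> included is assumed.
 */
static struct workqueue_struct *example_wq;

static void example_shutdown(void)
{
        /* blocks until all works queued on example_wq so far have run */
        flush_workqueue(example_wq);
}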
2419 * drain_workqueue - drain a workqueue
2420 * @wq: workqueue to drain
2422 * Wait until the workqueue becomes empty. While draining is in progress,
2423 * only chain queueing is allowed. IOW, only currently pending or running
2424 * work items on @wq can queue further work items on it. @wq is flushed
2425 * repeatedly until it becomes empty. The number of flushes is determined
2426 * by the depth of chaining and should be relatively small. Whine if it
2427 * takes too long.
2429 void drain_workqueue(struct workqueue_struct *wq)
2431 unsigned int flush_cnt = 0;
2432 unsigned int cpu;
2435 * __queue_work() needs to test whether there are drainers; it is much
2436 * hotter than drain_workqueue() and already looks at @wq->flags.
2437 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
2439 spin_lock(&workqueue_lock);
2440 if (!wq->nr_drainers++)
2441 wq->flags |= WQ_DRAINING;
2442 spin_unlock(&workqueue_lock);
2443 reflush:
2444 flush_workqueue(wq);
2446 for_each_cwq_cpu(cpu, wq) {
2447 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2448 bool drained;
2450 spin_lock_irq(&cwq->gcwq->lock);
2451 drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
2452 spin_unlock_irq(&cwq->gcwq->lock);
2454 if (drained)
2455 continue;
2457 if (++flush_cnt == 10 ||
2458 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2459 pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
2460 wq->name, flush_cnt);
2461 goto reflush;
2464 spin_lock(&workqueue_lock);
2465 if (!--wq->nr_drainers)
2466 wq->flags &= ~WQ_DRAINING;
2467 spin_unlock(&workqueue_lock);
2469 EXPORT_SYMBOL_GPL(drain_workqueue);
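/*
 * Illustrative sketch (hypothetical names, not part of the original
 * source): a work item that re-queues itself a bounded number of
 * times.  Such chain queueing is still allowed while draining, and
 * drain_workqueue() keeps flushing until the chain runs out.  The
 * workqueue itself is assumed to have been allocated elsewhere.
 */
static struct workqueue_struct *example_chain_wq;
static atomic_t example_chain_left = ATOMIC_INIT(8);

static void example_chain_fn(struct work_struct *work)
{
        if (atomic_dec_return(&example_chain_left) > 0)
                queue_work(example_chain_wq, work);     /* chain queueing */
}
static DECLARE_WORK(example_chain_work, example_chain_fn);

static void example_chain_quiesce(void)
{
        queue_work(example_chain_wq, &example_chain_work);
        /* returns only once the whole self-requeueing chain has finished */
        drain_workqueue(example_chain_wq);
}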
2471 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2472 bool wait_executing)
2474 struct worker *worker = NULL;
2475 struct global_cwq *gcwq;
2476 struct cpu_workqueue_struct *cwq;
2478 might_sleep();
2479 gcwq = get_work_gcwq(work);
2480 if (!gcwq)
2481 return false;
2483 spin_lock_irq(&gcwq->lock);
2484 if (!list_empty(&work->entry)) {
2486 * See the comment near try_to_grab_pending()->smp_rmb().
2487 * If it was re-queued to a different gcwq under us, we
2488 * are not going to wait.
2490 smp_rmb();
2491 cwq = get_work_cwq(work);
2492 if (unlikely(!cwq || gcwq != cwq->gcwq))
2493 goto already_gone;
2494 } else if (wait_executing) {
2495 worker = find_worker_executing_work(gcwq, work);
2496 if (!worker)
2497 goto already_gone;
2498 cwq = worker->current_cwq;
2499 } else
2500 goto already_gone;
2502 insert_wq_barrier(cwq, barr, work, worker);
2503 spin_unlock_irq(&gcwq->lock);
2506 * If @max_active is 1 or rescuer is in use, flushing another work
2507 * item on the same workqueue may lead to deadlock. Make sure the
2508 * flusher is not running on the same workqueue by verifying write
2509 * access.
2511 if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
2512 lock_map_acquire(&cwq->wq->lockdep_map);
2513 else
2514 lock_map_acquire_read(&cwq->wq->lockdep_map);
2515 lock_map_release(&cwq->wq->lockdep_map);
2517 return true;
2518 already_gone:
2519 spin_unlock_irq(&gcwq->lock);
2520 return false;
2524 * flush_work - wait for a work to finish executing the last queueing instance
2525 * @work: the work to flush
2527 * Wait until @work has finished execution. This function considers
2528 * only the last queueing instance of @work. If @work has been
2529 * enqueued across different CPUs on a non-reentrant workqueue or on
2530 * multiple workqueues, @work might still be executing on return on
2531 * some of the CPUs from earlier queueing.
2533 * If @work was queued only on a non-reentrant, ordered or unbound
2534 * workqueue, @work is guaranteed to be idle on return if it hasn't
2535 * been requeued since flush started.
2537 * RETURNS:
2538 * %true if flush_work() waited for the work to finish execution,
2539 * %false if it was already idle.
2541 bool flush_work(struct work_struct *work)
2543 struct wq_barrier barr;
2545 if (start_flush_work(work, &barr, true)) {
2546 wait_for_completion(&barr.done);
2547 destroy_work_on_stack(&barr.work);
2548 return true;
2549 } else
2550 return false;
2552 EXPORT_SYMBOL_GPL(flush_work);
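/*
 * Illustrative sketch (hypothetical names, not part of the original
 * source): waiting for the last queueing instance of one work item
 * before tearing down the object that embeds it.
 */
struct example_obj {
        struct work_struct update_work;
        int value;
};

static void example_obj_quiesce(struct example_obj *obj)
{
        /* returns false immediately if update_work was already idle */
        if (flush_work(&obj->update_work))
                pr_debug("waited for an in-flight update_work\n");
}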
2554 static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2556 struct wq_barrier barr;
2557 struct worker *worker;
2559 spin_lock_irq(&gcwq->lock);
2561 worker = find_worker_executing_work(gcwq, work);
2562 if (unlikely(worker))
2563 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2565 spin_unlock_irq(&gcwq->lock);
2567 if (unlikely(worker)) {
2568 wait_for_completion(&barr.done);
2569 destroy_work_on_stack(&barr.work);
2570 return true;
2571 } else
2572 return false;
2575 static bool wait_on_work(struct work_struct *work)
2577 bool ret = false;
2578 int cpu;
2580 might_sleep();
2582 lock_map_acquire(&work->lockdep_map);
2583 lock_map_release(&work->lockdep_map);
2585 for_each_gcwq_cpu(cpu)
2586 ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2587 return ret;
2591 * flush_work_sync - wait until a work has finished execution
2592 * @work: the work to flush
2594 * Wait until @work has finished execution. On return, it's
2595 * guaranteed that all queueing instances of @work which happened
2596 * before this function is called are finished. In other words, if
2597 * @work hasn't been requeued since this function was called, @work is
2598 * guaranteed to be idle on return.
2600 * RETURNS:
2601 * %true if flush_work_sync() waited for the work to finish execution,
2602 * %false if it was already idle.
2604 bool flush_work_sync(struct work_struct *work)
2606 struct wq_barrier barr;
2607 bool pending, waited;
2609 /* we'll wait for executions separately, queue barr only if pending */
2610 pending = start_flush_work(work, &barr, false);
2612 /* wait for executions to finish */
2613 waited = wait_on_work(work);
2615 /* wait for the pending one */
2616 if (pending) {
2617 wait_for_completion(&barr.done);
2618 destroy_work_on_stack(&barr.work);
2621 return pending || waited;
2623 EXPORT_SYMBOL_GPL(flush_work_sync);
2626 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2627 * so this work can't be re-armed in any way.
2629 static int try_to_grab_pending(struct work_struct *work)
2631 struct global_cwq *gcwq;
2632 int ret = -1;
2634 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2635 return 0;
2638 * The queueing is in progress, or it is already queued. Try to
2639 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2641 gcwq = get_work_gcwq(work);
2642 if (!gcwq)
2643 return ret;
2645 spin_lock_irq(&gcwq->lock);
2646 if (!list_empty(&work->entry)) {
2648 * This work is queued, but perhaps we locked the wrong gcwq.
2649 * In that case we must see the new value after rmb(), see
2650 * insert_work()->wmb().
2652 smp_rmb();
2653 if (gcwq == get_work_gcwq(work)) {
2654 debug_work_deactivate(work);
2657 * A delayed work item cannot be grabbed directly
2658 * because it might have linked NO_COLOR work items
2659 * which, if left on the delayed_list, will confuse
2660 * cwq->nr_active management later on and cause
2661 * stall. Make sure the work item is activated
2662 * before grabbing.
2664 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
2665 cwq_activate_delayed_work(work);
2667 list_del_init(&work->entry);
2668 cwq_dec_nr_in_flight(get_work_cwq(work),
2669 get_work_color(work),
2670 *work_data_bits(work) & WORK_STRUCT_DELAYED);
2671 ret = 1;
2674 spin_unlock_irq(&gcwq->lock);
2676 return ret;
2679 static bool __cancel_work_timer(struct work_struct *work,
2680 struct timer_list* timer)
2682 int ret;
2684 do {
2685 ret = (timer && likely(del_timer(timer)));
2686 if (!ret)
2687 ret = try_to_grab_pending(work);
2688 wait_on_work(work);
2689 } while (unlikely(ret < 0));
2691 clear_work_data(work);
2692 return ret;
2696 * cancel_work_sync - cancel a work and wait for it to finish
2697 * @work: the work to cancel
2699 * Cancel @work and wait for its execution to finish. This function
2700 * can be used even if the work re-queues itself or migrates to
2701 * another workqueue. On return from this function, @work is
2702 * guaranteed to be not pending or executing on any CPU.
2704 * cancel_work_sync(&delayed_work->work) must not be used for
2705 * delayed_work's. Use cancel_delayed_work_sync() instead.
2707 * The caller must ensure that the workqueue on which @work was last
2708 * queued can't be destroyed before this function returns.
2710 * RETURNS:
2711 * %true if @work was pending, %false otherwise.
2713 bool cancel_work_sync(struct work_struct *work)
2715 return __cancel_work_timer(work, NULL);
2717 EXPORT_SYMBOL_GPL(cancel_work_sync);
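/*
 * Illustrative sketch (hypothetical, reusing struct example_obj from the
 * flush_work() sketch above): cancelling in a destroy path.  After
 * cancel_work_sync() returns, update_work is neither pending nor running
 * on any CPU, so the embedding object can be freed safely.
 */
static void example_obj_destroy(struct example_obj *obj)
{
        cancel_work_sync(&obj->update_work);
        kfree(obj);
}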
2720 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2721 * @dwork: the delayed work to flush
2723 * Delayed timer is cancelled and the pending work is queued for
2724 * immediate execution. Like flush_work(), this function only
2725 * considers the last queueing instance of @dwork.
2727 * RETURNS:
2728 * %true if flush_work() waited for the work to finish execution,
2729 * %false if it was already idle.
2731 bool flush_delayed_work(struct delayed_work *dwork)
2733 if (del_timer_sync(&dwork->timer))
2734 __queue_work(raw_smp_processor_id(),
2735 get_work_cwq(&dwork->work)->wq, &dwork->work);
2736 return flush_work(&dwork->work);
2738 EXPORT_SYMBOL(flush_delayed_work);
2741 * flush_delayed_work_sync - wait for a dwork to finish
2742 * @dwork: the delayed work to flush
2744 * Delayed timer is cancelled and the pending work is queued for
2745 * execution immediately. Other than timer handling, its behavior
2746 * is identical to flush_work_sync().
2748 * RETURNS:
2749 * %true if flush_work_sync() waited for the work to finish execution,
2750 * %false if it was already idle.
2752 bool flush_delayed_work_sync(struct delayed_work *dwork)
2754 if (del_timer_sync(&dwork->timer))
2755 __queue_work(raw_smp_processor_id(),
2756 get_work_cwq(&dwork->work)->wq, &dwork->work);
2757 return flush_work_sync(&dwork->work);
2759 EXPORT_SYMBOL(flush_delayed_work_sync);
2762 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2763 * @dwork: the delayed work cancel
2765 * This is cancel_work_sync() for delayed works.
2767 * RETURNS:
2768 * %true if @dwork was pending, %false otherwise.
2770 bool cancel_delayed_work_sync(struct delayed_work *dwork)
2772 return __cancel_work_timer(&dwork->work, &dwork->timer);
2774 EXPORT_SYMBOL(cancel_delayed_work_sync);
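/*
 * Illustrative sketch (hypothetical names, not part of the original
 * source): a self-rearming poll implemented as a delayed work and
 * stopped with cancel_delayed_work_sync(), which kills both the timer
 * and any instance that is already executing.
 */
static void example_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll, example_poll_fn);

static void example_poll_fn(struct work_struct *work)
{
        /* ... sample the hardware ... */
        schedule_delayed_work(&example_poll, HZ);       /* re-arm in 1s */
}

static void example_poll_stop(void)
{
        cancel_delayed_work_sync(&example_poll);
}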
2777 * schedule_work - put work task in global workqueue
2778 * @work: job to be done
2780 * Returns zero if @work was already on the kernel-global workqueue and
2781 * non-zero otherwise.
2783 * This puts a job in the kernel-global workqueue if it was not already
2784 * queued and leaves it in the same position on the kernel-global
2785 * workqueue otherwise.
2787 int schedule_work(struct work_struct *work)
2789 return queue_work(system_wq, work);
2791 EXPORT_SYMBOL(schedule_work);
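/*
 * Illustrative sketch (hypothetical names, not part of the original
 * source): deferring non-urgent processing from an interrupt handler to
 * the kernel-global workqueue.  Assumes <linux/interrupt.h> for the
 * irqreturn_t definitions.
 */
static void example_deferred_fn(struct work_struct *work)
{
        pr_info("deferred handling runs in process context\n");
}
static DECLARE_WORK(example_deferred, example_deferred_fn);

static irqreturn_t example_irq(int irq, void *dev_id)
{
        /* cheap and safe from atomic context; no-op if already pending */
        schedule_work(&example_deferred);
        return IRQ_HANDLED;
}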
2794 * schedule_work_on - put work task on a specific cpu
2795 * @cpu: cpu to put the work task on
2796 * @work: job to be done
2798 * This puts a job on a specific cpu.
2800 int schedule_work_on(int cpu, struct work_struct *work)
2802 return queue_work_on(cpu, system_wq, work);
2804 EXPORT_SYMBOL(schedule_work_on);
2807 * schedule_delayed_work - put work task in global workqueue after delay
2808 * @dwork: job to be done
2809 * @delay: number of jiffies to wait or 0 for immediate execution
2811 * After waiting for a given time this puts a job in the kernel-global
2812 * workqueue.
2814 int schedule_delayed_work(struct delayed_work *dwork,
2815 unsigned long delay)
2817 return queue_delayed_work(system_wq, dwork, delay);
2819 EXPORT_SYMBOL(schedule_delayed_work);
2822 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2823 * @cpu: cpu to use
2824 * @dwork: job to be done
2825 * @delay: number of jiffies to wait
2827 * After waiting for a given time this puts a job in the kernel-global
2828 * workqueue on the specified CPU.
2830 int schedule_delayed_work_on(int cpu,
2831 struct delayed_work *dwork, unsigned long delay)
2833 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
2835 EXPORT_SYMBOL(schedule_delayed_work_on);
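/*
 * Illustrative sketch (hypothetical names, not part of the original
 * source): kicking off two delayed jobs on the kernel-global workqueue,
 * one with no CPU preference and one pinned to CPU 0.  A delayed_work
 * carries a single timer, hence the two separate instances.
 */
static void example_late_fn(struct work_struct *work)
{
}
static DECLARE_DELAYED_WORK(example_late, example_late_fn);
static DECLARE_DELAYED_WORK(example_late_cpu0, example_late_fn);

static void example_kick_delayed(void)
{
        schedule_delayed_work(&example_late, msecs_to_jiffies(500));
        schedule_delayed_work_on(0, &example_late_cpu0, msecs_to_jiffies(500));
}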
2838 * schedule_on_each_cpu - execute a function synchronously on each online CPU
2839 * @func: the function to call
2841 * schedule_on_each_cpu() executes @func on each online CPU using the
2842 * system workqueue and blocks until all CPUs have completed.
2843 * schedule_on_each_cpu() is very slow.
2845 * RETURNS:
2846 * 0 on success, -errno on failure.
2848 int schedule_on_each_cpu(work_func_t func)
2850 int cpu;
2851 struct work_struct __percpu *works;
2853 works = alloc_percpu(struct work_struct);
2854 if (!works)
2855 return -ENOMEM;
2857 get_online_cpus();
2859 for_each_online_cpu(cpu) {
2860 struct work_struct *work = per_cpu_ptr(works, cpu);
2862 INIT_WORK(work, func);
2863 schedule_work_on(cpu, work);
2866 for_each_online_cpu(cpu)
2867 flush_work(per_cpu_ptr(works, cpu));
2869 put_online_cpus();
2870 free_percpu(works);
2871 return 0;
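/*
 * Illustrative sketch (hypothetical names, not part of the original
 * source): running a function once on every online CPU and waiting for
 * all of them, e.g. to drain per-cpu caches.  This can sleep for a long
 * time and must not be called from hot paths.
 */
static void example_drain_pcpu(struct work_struct *unused)
{
        /* executes on each online cpu via a system_wq worker */
}

static int example_drain_all_cpus(void)
{
        return schedule_on_each_cpu(example_drain_pcpu);
}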
2875 * flush_scheduled_work - ensure that any scheduled work has run to completion.
2877 * Forces execution of the kernel-global workqueue and blocks until its
2878 * completion.
2880 * Think twice before calling this function! It's very easy to get into
2881 * trouble if you don't take great care. Either of the following situations
2882 * will lead to deadlock:
2884 * One of the work items currently on the workqueue needs to acquire
2885 * a lock held by your code or its caller.
2887 * Your code is running in the context of a work routine.
2889 * They will be detected by lockdep when they occur, but the first might not
2890 * occur very often. It depends on what work items are on the workqueue and
2891 * what locks they need, which you have no control over.
2893 * In most situations flushing the entire workqueue is overkill; you merely
2894 * need to know that a particular work item isn't queued and isn't running.
2895 * In such cases you should use cancel_delayed_work_sync() or
2896 * cancel_work_sync() instead.
2898 void flush_scheduled_work(void)
2900 flush_workqueue(system_wq);
2902 EXPORT_SYMBOL(flush_scheduled_work);
2905 * execute_in_process_context - reliably execute the routine with user context
2906 * @fn: the function to execute
2907 * @ew: guaranteed storage for the execute work structure (must
2908 * be available when the work executes)
2910 * Executes the function immediately if process context is available,
2911 * otherwise schedules the function for delayed execution.
2913 * Returns: 0 - function was executed
2914 * 1 - function was scheduled for execution
2916 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
2918 if (!in_interrupt()) {
2919 fn(&ew->work);
2920 return 0;
2923 INIT_WORK(&ew->work, fn);
2924 schedule_work(&ew->work);
2926 return 1;
2928 EXPORT_SYMBOL_GPL(execute_in_process_context);
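/*
 * Illustrative sketch (hypothetical names, not part of the original
 * source): releasing a resource in process context regardless of the
 * caller's context.  The execute_work storage lives in the object so it
 * stays valid if the call gets deferred.
 */
struct example_res {
        struct execute_work ew;
        /* ... payload ... */
};

static void example_res_release(struct work_struct *work)
{
        struct example_res *res = container_of(work, struct example_res,
                                                ew.work);
        kfree(res);
}

static void example_res_put(struct example_res *res)
{
        /* runs now if possible, otherwise deferred via &res->ew.work */
        execute_in_process_context(example_res_release, &res->ew);
}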
2930 int keventd_up(void)
2932 return system_wq != NULL;
2935 static int alloc_cwqs(struct workqueue_struct *wq)
2938 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
2939 * Make sure that the alignment isn't lower than that of
2940 * unsigned long long.
2942 const size_t size = sizeof(struct cpu_workqueue_struct);
2943 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
2944 __alignof__(unsigned long long));
2946 if (!(wq->flags & WQ_UNBOUND))
2947 wq->cpu_wq.pcpu = __alloc_percpu(size, align);
2948 else {
2949 void *ptr;
2952 * Allocate enough room to align cwq and put an extra
2953 * pointer at the end pointing back to the originally
2954 * allocated pointer which will be used for free.
2956 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2957 if (ptr) {
2958 wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2959 *(void **)(wq->cpu_wq.single + 1) = ptr;
2963 /* just in case, make sure it's actually aligned */
2964 BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2965 return wq->cpu_wq.v ? 0 : -ENOMEM;
2968 static void free_cwqs(struct workqueue_struct *wq)
2970 if (!(wq->flags & WQ_UNBOUND))
2971 free_percpu(wq->cpu_wq.pcpu);
2972 else if (wq->cpu_wq.single) {
2973 /* the pointer to free is stored right after the cwq */
2974 kfree(*(void **)(wq->cpu_wq.single + 1));
2978 static int wq_clamp_max_active(int max_active, unsigned int flags,
2979 const char *name)
2981 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2983 if (max_active < 1 || max_active > lim)
2984 printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2985 "is out of range, clamping between %d and %d\n",
2986 max_active, name, 1, lim);
2988 return clamp_val(max_active, 1, lim);
2991 struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
2992 unsigned int flags,
2993 int max_active,
2994 struct lock_class_key *key,
2995 const char *lock_name, ...)
2997 va_list args, args1;
2998 struct workqueue_struct *wq;
2999 unsigned int cpu;
3000 size_t namelen;
3002 /* determine namelen, allocate wq and format name */
3003 va_start(args, lock_name);
3004 va_copy(args1, args);
3005 namelen = vsnprintf(NULL, 0, fmt, args) + 1;
3007 wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
3008 if (!wq)
3009 goto err;
3011 vsnprintf(wq->name, namelen, fmt, args1);
3012 va_end(args);
3013 va_end(args1);
3016 * Workqueues which may be used during memory reclaim should
3017 * have a rescuer to guarantee forward progress.
3019 if (flags & WQ_MEM_RECLAIM)
3020 flags |= WQ_RESCUER;
3023 * Unbound workqueues aren't concurrency managed and should be
3024 * dispatched to workers immediately.
3026 if (flags & WQ_UNBOUND)
3027 flags |= WQ_HIGHPRI;
3029 max_active = max_active ?: WQ_DFL_ACTIVE;
3030 max_active = wq_clamp_max_active(max_active, flags, wq->name);
3032 /* init wq */
3033 wq->flags = flags;
3034 wq->saved_max_active = max_active;
3035 mutex_init(&wq->flush_mutex);
3036 atomic_set(&wq->nr_cwqs_to_flush, 0);
3037 INIT_LIST_HEAD(&wq->flusher_queue);
3038 INIT_LIST_HEAD(&wq->flusher_overflow);
3040 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
3041 INIT_LIST_HEAD(&wq->list);
3043 if (alloc_cwqs(wq) < 0)
3044 goto err;
3046 for_each_cwq_cpu(cpu, wq) {
3047 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3048 struct global_cwq *gcwq = get_gcwq(cpu);
3050 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
3051 cwq->gcwq = gcwq;
3052 cwq->wq = wq;
3053 cwq->flush_color = -1;
3054 cwq->max_active = max_active;
3055 INIT_LIST_HEAD(&cwq->delayed_works);
3058 if (flags & WQ_RESCUER) {
3059 struct worker *rescuer;
3061 if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
3062 goto err;
3064 wq->rescuer = rescuer = alloc_worker();
3065 if (!rescuer)
3066 goto err;
3068 rescuer->task = kthread_create(rescuer_thread, wq, "%s",
3069 wq->name);
3070 if (IS_ERR(rescuer->task))
3071 goto err;
3073 rescuer->task->flags |= PF_THREAD_BOUND;
3074 wake_up_process(rescuer->task);
3078 * workqueue_lock protects global freeze state and workqueues
3079 * list. Grab it, set max_active accordingly and add the new
3080 * workqueue to workqueues list.
3082 spin_lock(&workqueue_lock);
3084 if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
3085 for_each_cwq_cpu(cpu, wq)
3086 get_cwq(cpu, wq)->max_active = 0;
3088 list_add(&wq->list, &workqueues);
3090 spin_unlock(&workqueue_lock);
3092 return wq;
3093 err:
3094 if (wq) {
3095 free_cwqs(wq);
3096 free_mayday_mask(wq->mayday_mask);
3097 kfree(wq->rescuer);
3098 kfree(wq);
3100 return NULL;
3102 EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
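/*
 * Illustrative sketch (hypothetical names, not part of the original
 * source): a workqueue used on the memory-reclaim path.  WQ_MEM_RECLAIM
 * guarantees a rescuer thread so the queue can make forward progress
 * even when creating a new worker would block; max_active of 1 limits
 * in-flight items to one per CPU.
 */
static struct workqueue_struct *example_reclaim_wq;

static int __init example_wq_init(void)
{
        example_reclaim_wq = alloc_workqueue("example_reclaim",
                                             WQ_MEM_RECLAIM, 1);
        return example_reclaim_wq ? 0 : -ENOMEM;
}

static void example_wq_exit(void)
{
        /* drains all pending and chained work items before freeing */
        destroy_workqueue(example_reclaim_wq);
}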
3105 * destroy_workqueue - safely terminate a workqueue
3106 * @wq: target workqueue
3108 * Safely destroy a workqueue. All work currently pending will be done first.
3110 void destroy_workqueue(struct workqueue_struct *wq)
3112 unsigned int cpu;
3114 /* drain it before proceeding with destruction */
3115 drain_workqueue(wq);
3118 * wq list is used to freeze wq, remove from list after
3119 * flushing is complete in case freeze races us.
3121 spin_lock(&workqueue_lock);
3122 list_del(&wq->list);
3123 spin_unlock(&workqueue_lock);
3125 /* sanity check */
3126 for_each_cwq_cpu(cpu, wq) {
3127 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3128 int i;
3130 for (i = 0; i < WORK_NR_COLORS; i++)
3131 BUG_ON(cwq->nr_in_flight[i]);
3132 BUG_ON(cwq->nr_active);
3133 BUG_ON(!list_empty(&cwq->delayed_works));
3136 if (wq->flags & WQ_RESCUER) {
3137 kthread_stop(wq->rescuer->task);
3138 free_mayday_mask(wq->mayday_mask);
3139 kfree(wq->rescuer);
3142 free_cwqs(wq);
3143 kfree(wq);
3145 EXPORT_SYMBOL_GPL(destroy_workqueue);
3148 * workqueue_set_max_active - adjust max_active of a workqueue
3149 * @wq: target workqueue
3150 * @max_active: new max_active value.
3152 * Set max_active of @wq to @max_active.
3154 * CONTEXT:
3155 * Don't call from IRQ context.
3157 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3159 unsigned int cpu;
3161 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3163 spin_lock(&workqueue_lock);
3165 wq->saved_max_active = max_active;
3167 for_each_cwq_cpu(cpu, wq) {
3168 struct global_cwq *gcwq = get_gcwq(cpu);
3170 spin_lock_irq(&gcwq->lock);
3172 if (!(wq->flags & WQ_FREEZABLE) ||
3173 !(gcwq->flags & GCWQ_FREEZING))
3174 get_cwq(gcwq->cpu, wq)->max_active = max_active;
3176 spin_unlock_irq(&gcwq->lock);
3179 spin_unlock(&workqueue_lock);
3181 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
3184 * workqueue_congested - test whether a workqueue is congested
3185 * @cpu: CPU in question
3186 * @wq: target workqueue
3188 * Test whether @wq's cpu workqueue for @cpu is congested. There is
3189 * no synchronization around this function and the test result is
3190 * unreliable and only useful as advisory hints or for debugging.
3192 * RETURNS:
3193 * %true if congested, %false otherwise.
3195 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3197 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3199 return !list_empty(&cwq->delayed_works);
3201 EXPORT_SYMBOL_GPL(workqueue_congested);
3204 * work_cpu - return the last known associated cpu for @work
3205 * @work: the work of interest
3207 * RETURNS:
3208 * CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
3210 unsigned int work_cpu(struct work_struct *work)
3212 struct global_cwq *gcwq = get_work_gcwq(work);
3214 return gcwq ? gcwq->cpu : WORK_CPU_NONE;
3216 EXPORT_SYMBOL_GPL(work_cpu);
3219 * work_busy - test whether a work is currently pending or running
3220 * @work: the work to be tested
3222 * Test whether @work is currently pending or running. There is no
3223 * synchronization around this function and the test result is
3224 * unreliable and only useful as advisory hints or for debugging.
3225 * Especially for reentrant wqs, the pending state might hide the
3226 * running state.
3228 * RETURNS:
3229 * OR'd bitmask of WORK_BUSY_* bits.
3231 unsigned int work_busy(struct work_struct *work)
3233 struct global_cwq *gcwq = get_work_gcwq(work);
3234 unsigned long flags;
3235 unsigned int ret = 0;
3237 if (!gcwq)
3238 return false;
3240 spin_lock_irqsave(&gcwq->lock, flags);
3242 if (work_pending(work))
3243 ret |= WORK_BUSY_PENDING;
3244 if (find_worker_executing_work(gcwq, work))
3245 ret |= WORK_BUSY_RUNNING;
3247 spin_unlock_irqrestore(&gcwq->lock, flags);
3249 return ret;
3251 EXPORT_SYMBOL_GPL(work_busy);
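/*
 * Illustrative sketch (hypothetical names, not part of the original
 * source): using work_busy() purely as a debugging hint.  The result
 * can already be stale by the time it is printed.
 */
static void example_report_work(struct work_struct *w)
{
        unsigned int busy = work_busy(w);

        pr_info("work %p:%s%s\n", w,
                busy & WORK_BUSY_PENDING ? " pending" : "",
                busy & WORK_BUSY_RUNNING ? " running" : "");
}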
3254 * CPU hotplug.
3256 * There are two challenges in supporting CPU hotplug. Firstly, there
3257 * are a lot of assumptions on strong associations among work, cwq and
3258 * gcwq which make migrating pending and scheduled works very
3259 * difficult to implement without impacting hot paths. Secondly,
3260 * gcwqs serve a mix of short, long and very long running works, making
3261 * blocked draining impractical.
3263 * This is solved by allowing a gcwq to be detached from CPU, running
3264 * it with unbound (rogue) workers and allowing it to be reattached
3265 * later if the cpu comes back online. A separate thread is created
3266 * to govern a gcwq in such state and is called the trustee of the
3267 * gcwq.
3269 * Trustee states and their descriptions.
3271 * START Command state used on startup. On CPU_DOWN_PREPARE, a
3272 * new trustee is started with this state.
3274 * IN_CHARGE Once started, trustee will enter this state after
3275 * assuming the manager role and making all existing
3276 * workers rogue. DOWN_PREPARE waits for trustee to
3277 * enter this state. After reaching IN_CHARGE, trustee
3278 * tries to execute the pending worklist until it's empty
3279 * and the state is set to BUTCHER, or the state is set
3280 * to RELEASE.
3282 * BUTCHER Command state which is set by the cpu callback after
3283 * the cpu has gone down. Once this state is set, the trustee
3284 * knows that there will be no new works on the worklist
3285 * and once the worklist is empty it can proceed to
3286 * killing idle workers.
3288 * RELEASE Command state which is set by the cpu callback if the
3289 * cpu down has been canceled or it has come online
3290 * again. After recognizing this state, trustee stops
3291 * trying to drain or butcher and clears ROGUE, rebinds
3292 * all remaining workers back to the cpu and releases
3293 * manager role.
3295 * DONE Trustee will enter this state after BUTCHER or RELEASE
3296 * is complete.
3298 *          trustee                 CPU                draining
3299 *         took over                down               complete
3300 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3301 *                        |                     |                  ^
3302 *                        | CPU is back online  v   return workers |
3303 *                         ----------------> RELEASE --------------
3307 * trustee_wait_event_timeout - timed event wait for trustee
3308 * @cond: condition to wait for
3309 * @timeout: timeout in jiffies
3311 * wait_event_timeout() for trustee to use. Handles locking and
3312 * checks for RELEASE request.
3314 * CONTEXT:
3315 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3316 * multiple times. To be used by trustee.
3318 * RETURNS:
3319 * Positive indicating left time if @cond is satisfied, 0 if timed
3320 * out, -1 if canceled.
3322 #define trustee_wait_event_timeout(cond, timeout) ({ \
3323 long __ret = (timeout); \
3324 while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
3325 __ret) { \
3326 spin_unlock_irq(&gcwq->lock); \
3327 __wait_event_timeout(gcwq->trustee_wait, (cond) || \
3328 (gcwq->trustee_state == TRUSTEE_RELEASE), \
3329 __ret); \
3330 spin_lock_irq(&gcwq->lock); \
3332 gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
3336 * trustee_wait_event - event wait for trustee
3337 * @cond: condition to wait for
3339 * wait_event() for trustee to use. Automatically handles locking and
3340 * checks for RELEASE request.
3342 * CONTEXT:
3343 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3344 * multiple times. To be used by trustee.
3346 * RETURNS:
3347 * 0 if @cond is satisfied, -1 if canceled.
3349 #define trustee_wait_event(cond) ({ \
3350 long __ret1; \
3351 __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3352 __ret1 < 0 ? -1 : 0; \
3355 static int __cpuinit trustee_thread(void *__gcwq)
3357 struct global_cwq *gcwq = __gcwq;
3358 struct worker *worker;
3359 struct work_struct *work;
3360 struct hlist_node *pos;
3361 long rc;
3362 int i;
3364 BUG_ON(gcwq->cpu != smp_processor_id());
3366 spin_lock_irq(&gcwq->lock);
3368 * Claim the manager position and make all workers rogue.
3369 * Trustee must be bound to the target cpu and can't be
3370 * cancelled.
3372 BUG_ON(gcwq->cpu != smp_processor_id());
3373 rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3374 BUG_ON(rc < 0);
3376 gcwq->flags |= GCWQ_MANAGING_WORKERS;
3378 list_for_each_entry(worker, &gcwq->idle_list, entry)
3379 worker->flags |= WORKER_ROGUE;
3381 for_each_busy_worker(worker, i, pos, gcwq)
3382 worker->flags |= WORKER_ROGUE;
3385 * Call schedule() so that we cross rq->lock and thus can
3386 * guarantee sched callbacks see the rogue flag. This is
3387 * necessary as scheduler callbacks may be invoked from other
3388 * cpus.
3390 spin_unlock_irq(&gcwq->lock);
3391 schedule();
3392 spin_lock_irq(&gcwq->lock);
3395 * Sched callbacks are disabled now. Zap nr_running. After
3396 * this, nr_running stays zero and need_more_worker() and
3397 * keep_working() are always true as long as the worklist is
3398 * not empty.
3400 atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3402 spin_unlock_irq(&gcwq->lock);
3403 del_timer_sync(&gcwq->idle_timer);
3404 spin_lock_irq(&gcwq->lock);
3407 * We're now in charge. Notify and proceed to drain. We need
3408 * to keep the gcwq running during the whole CPU down
3409 * procedure as other cpu hotunplug callbacks may need to
3410 * flush currently running tasks.
3412 gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3413 wake_up_all(&gcwq->trustee_wait);
3416 * The original cpu is in the process of dying and may go away
3417 * anytime now. When that happens, we and all workers would
3418 * be migrated to other cpus. Try draining any left work. We
3419 * want to get it over with ASAP - spam rescuers, wake up as
3420 * many idlers as necessary and create new ones till the
3421 * worklist is empty. Note that if the gcwq is frozen, there
3422 * may be frozen works in freezable cwqs. Don't declare
3423 * completion while frozen.
3425 while (gcwq->nr_workers != gcwq->nr_idle ||
3426 gcwq->flags & GCWQ_FREEZING ||
3427 gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3428 int nr_works = 0;
3430 list_for_each_entry(work, &gcwq->worklist, entry) {
3431 send_mayday(work);
3432 nr_works++;
3435 list_for_each_entry(worker, &gcwq->idle_list, entry) {
3436 if (!nr_works--)
3437 break;
3438 wake_up_process(worker->task);
3441 if (need_to_create_worker(gcwq)) {
3442 spin_unlock_irq(&gcwq->lock);
3443 worker = create_worker(gcwq, false);
3444 spin_lock_irq(&gcwq->lock);
3445 if (worker) {
3446 worker->flags |= WORKER_ROGUE;
3447 start_worker(worker);
3451 /* give a breather */
3452 if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3453 break;
3457 * Either all works have been scheduled and cpu is down, or
3458 * cpu down has already been canceled. Wait for and butcher
3459 * all workers till we're canceled.
3461 do {
3462 rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3463 while (!list_empty(&gcwq->idle_list))
3464 destroy_worker(list_first_entry(&gcwq->idle_list,
3465 struct worker, entry));
3466 } while (gcwq->nr_workers && rc >= 0);
3469 * At this point, either draining has completed and no worker
3470 * is left, or cpu down has been canceled or the cpu is being
3471 * brought back up. There shouldn't be any idle one left.
3472 * Tell the remaining busy ones to rebind once it finishes the
3473 * currently scheduled works by scheduling the rebind_work.
3475 WARN_ON(!list_empty(&gcwq->idle_list));
3477 for_each_busy_worker(worker, i, pos, gcwq) {
3478 struct work_struct *rebind_work = &worker->rebind_work;
3479 unsigned long worker_flags = worker->flags;
3482 * Rebind_work may race with future cpu hotplug
3483 * operations. Use a separate flag to mark that
3484 * rebinding is scheduled. The morphing should
3485 * be atomic.
3487 worker_flags |= WORKER_REBIND;
3488 worker_flags &= ~WORKER_ROGUE;
3489 ACCESS_ONCE(worker->flags) = worker_flags;
3491 /* queue rebind_work, wq doesn't matter, use the default one */
3492 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3493 work_data_bits(rebind_work)))
3494 continue;
3496 debug_work_activate(rebind_work);
3497 insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3498 worker->scheduled.next,
3499 work_color_to_flags(WORK_NO_COLOR));
3502 /* relinquish manager role */
3503 gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3505 /* notify completion */
3506 gcwq->trustee = NULL;
3507 gcwq->trustee_state = TRUSTEE_DONE;
3508 wake_up_all(&gcwq->trustee_wait);
3509 spin_unlock_irq(&gcwq->lock);
3510 return 0;
3514 * wait_trustee_state - wait for trustee to enter the specified state
3515 * @gcwq: gcwq the trustee of interest belongs to
3516 * @state: target state to wait for
3518 * Wait for the trustee to reach @state. DONE is already matched.
3520 * CONTEXT:
3521 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3522 * multiple times. To be used by cpu_callback.
3524 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3525 __releases(&gcwq->lock)
3526 __acquires(&gcwq->lock)
3528 if (!(gcwq->trustee_state == state ||
3529 gcwq->trustee_state == TRUSTEE_DONE)) {
3530 spin_unlock_irq(&gcwq->lock);
3531 __wait_event(gcwq->trustee_wait,
3532 gcwq->trustee_state == state ||
3533 gcwq->trustee_state == TRUSTEE_DONE);
3534 spin_lock_irq(&gcwq->lock);
3538 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
3539 unsigned long action,
3540 void *hcpu)
3542 unsigned int cpu = (unsigned long)hcpu;
3543 struct global_cwq *gcwq = get_gcwq(cpu);
3544 struct task_struct *new_trustee = NULL;
3545 struct worker *uninitialized_var(new_worker);
3546 unsigned long flags;
3548 action &= ~CPU_TASKS_FROZEN;
3550 switch (action) {
3551 case CPU_DOWN_PREPARE:
3552 new_trustee = kthread_create(trustee_thread, gcwq,
3553 "workqueue_trustee/%d\n", cpu);
3554 if (IS_ERR(new_trustee))
3555 return notifier_from_errno(PTR_ERR(new_trustee));
3556 kthread_bind(new_trustee, cpu);
3557 /* fall through */
3558 case CPU_UP_PREPARE:
3559 BUG_ON(gcwq->first_idle);
3560 new_worker = create_worker(gcwq, false);
3561 if (!new_worker) {
3562 if (new_trustee)
3563 kthread_stop(new_trustee);
3564 return NOTIFY_BAD;
3568 /* some are called w/ irq disabled, don't disturb irq status */
3569 spin_lock_irqsave(&gcwq->lock, flags);
3571 switch (action) {
3572 case CPU_DOWN_PREPARE:
3573 /* initialize trustee and tell it to acquire the gcwq */
3574 BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3575 gcwq->trustee = new_trustee;
3576 gcwq->trustee_state = TRUSTEE_START;
3577 wake_up_process(gcwq->trustee);
3578 wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3579 /* fall through */
3580 case CPU_UP_PREPARE:
3581 BUG_ON(gcwq->first_idle);
3582 gcwq->first_idle = new_worker;
3583 break;
3585 case CPU_DYING:
3587 * Before this, the trustee and all workers except for
3588 * the ones which are still executing works from
3589 * before the last CPU down must be on the cpu. After
3590 * this, they'll all be diasporas.
3592 gcwq->flags |= GCWQ_DISASSOCIATED;
3593 break;
3595 case CPU_POST_DEAD:
3596 gcwq->trustee_state = TRUSTEE_BUTCHER;
3597 /* fall through */
3598 case CPU_UP_CANCELED:
3599 destroy_worker(gcwq->first_idle);
3600 gcwq->first_idle = NULL;
3601 break;
3603 case CPU_DOWN_FAILED:
3604 case CPU_ONLINE:
3605 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3606 if (gcwq->trustee_state != TRUSTEE_DONE) {
3607 gcwq->trustee_state = TRUSTEE_RELEASE;
3608 wake_up_process(gcwq->trustee);
3609 wait_trustee_state(gcwq, TRUSTEE_DONE);
3613 * Trustee is done and there might be no worker left.
3614 * Put the first_idle in and request a real manager to
3615 * take a look.
3617 spin_unlock_irq(&gcwq->lock);
3618 kthread_bind(gcwq->first_idle->task, cpu);
3619 spin_lock_irq(&gcwq->lock);
3620 gcwq->flags |= GCWQ_MANAGE_WORKERS;
3621 start_worker(gcwq->first_idle);
3622 gcwq->first_idle = NULL;
3623 break;
3626 spin_unlock_irqrestore(&gcwq->lock, flags);
3628 return notifier_from_errno(0);
3632 * Workqueues should be brought up before normal priority CPU notifiers.
3633 * This will be registered high priority CPU notifier.
3635 static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
3636 unsigned long action,
3637 void *hcpu)
3639 switch (action & ~CPU_TASKS_FROZEN) {
3640 case CPU_UP_PREPARE:
3641 case CPU_UP_CANCELED:
3642 case CPU_DOWN_FAILED:
3643 case CPU_ONLINE:
3644 return workqueue_cpu_callback(nfb, action, hcpu);
3646 return NOTIFY_OK;
3650 * Workqueues should be brought down after normal priority CPU notifiers.
3651 * This will be registered as low priority CPU notifier.
3653 static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
3654 unsigned long action,
3655 void *hcpu)
3657 switch (action & ~CPU_TASKS_FROZEN) {
3658 case CPU_DOWN_PREPARE:
3659 case CPU_DYING:
3660 case CPU_POST_DEAD:
3661 return workqueue_cpu_callback(nfb, action, hcpu);
3663 return NOTIFY_OK;
3666 #ifdef CONFIG_SMP
3668 struct work_for_cpu {
3669 struct work_struct work;
3670 long (*fn)(void *);
3671 void *arg;
3672 long ret;
3675 static void work_for_cpu_fn(struct work_struct *work)
3677 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
3679 wfc->ret = wfc->fn(wfc->arg);
3683 * work_on_cpu - run a function in user context on a particular cpu
3684 * @cpu: the cpu to run on
3685 * @fn: the function to run
3686 * @arg: the function arg
3688 * This will return the value @fn returns.
3689 * It is up to the caller to ensure that the cpu doesn't go offline.
3690 * The caller must not hold any locks which would prevent @fn from completing.
3692 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3694 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
3696 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
3697 schedule_work_on(cpu, &wfc.work);
3698 flush_work(&wfc.work);
3699 return wfc.ret;
3701 EXPORT_SYMBOL_GPL(work_on_cpu);
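/*
 * Illustrative sketch (hypothetical names, not part of the original
 * source): reading a CPU-local resource by running the accessor on the
 * target CPU.  The caller pins hotplug state with get_online_cpus() so
 * the CPU cannot go away while the work runs.
 */
static long example_read_local(void *arg)
{
        /* executes on the cpu handed to work_on_cpu() */
        return 0;
}

static long example_query_cpu(unsigned int cpu)
{
        long ret = -ENODEV;

        get_online_cpus();
        if (cpu_online(cpu))
                ret = work_on_cpu(cpu, example_read_local, NULL);
        put_online_cpus();
        return ret;
}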
3702 #endif /* CONFIG_SMP */
3704 #ifdef CONFIG_FREEZER
3707 * freeze_workqueues_begin - begin freezing workqueues
3709 * Start freezing workqueues. After this function returns, all freezable
3710 * workqueues will queue new works to their cwq->delayed_works list instead
3711 * of gcwq->worklist.
3713 * CONTEXT:
3714 * Grabs and releases workqueue_lock and gcwq->lock's.
3716 void freeze_workqueues_begin(void)
3718 unsigned int cpu;
3720 spin_lock(&workqueue_lock);
3722 BUG_ON(workqueue_freezing);
3723 workqueue_freezing = true;
3725 for_each_gcwq_cpu(cpu) {
3726 struct global_cwq *gcwq = get_gcwq(cpu);
3727 struct workqueue_struct *wq;
3729 spin_lock_irq(&gcwq->lock);
3731 BUG_ON(gcwq->flags & GCWQ_FREEZING);
3732 gcwq->flags |= GCWQ_FREEZING;
3734 list_for_each_entry(wq, &workqueues, list) {
3735 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3737 if (cwq && wq->flags & WQ_FREEZABLE)
3738 cwq->max_active = 0;
3741 spin_unlock_irq(&gcwq->lock);
3744 spin_unlock(&workqueue_lock);
3748 * freeze_workqueues_busy - are freezable workqueues still busy?
3750 * Check whether freezing is complete. This function must be called
3751 * between freeze_workqueues_begin() and thaw_workqueues().
3753 * CONTEXT:
3754 * Grabs and releases workqueue_lock.
3756 * RETURNS:
3757 * %true if some freezable workqueues are still busy. %false if freezing
3758 * is complete.
3760 bool freeze_workqueues_busy(void)
3762 unsigned int cpu;
3763 bool busy = false;
3765 spin_lock(&workqueue_lock);
3767 BUG_ON(!workqueue_freezing);
3769 for_each_gcwq_cpu(cpu) {
3770 struct workqueue_struct *wq;
3772 * nr_active is monotonically decreasing. It's safe
3773 * to peek without lock.
3775 list_for_each_entry(wq, &workqueues, list) {
3776 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3778 if (!cwq || !(wq->flags & WQ_FREEZABLE))
3779 continue;
3781 BUG_ON(cwq->nr_active < 0);
3782 if (cwq->nr_active) {
3783 busy = true;
3784 goto out_unlock;
3788 out_unlock:
3789 spin_unlock(&workqueue_lock);
3790 return busy;
3794 * thaw_workqueues - thaw workqueues
3796 * Thaw workqueues. Normal queueing is restored and all collected
3797 * frozen works are transferred to their respective gcwq worklists.
3799 * CONTEXT:
3800 * Grabs and releases workqueue_lock and gcwq->lock's.
3802 void thaw_workqueues(void)
3804 unsigned int cpu;
3806 spin_lock(&workqueue_lock);
3808 if (!workqueue_freezing)
3809 goto out_unlock;
3811 for_each_gcwq_cpu(cpu) {
3812 struct global_cwq *gcwq = get_gcwq(cpu);
3813 struct workqueue_struct *wq;
3815 spin_lock_irq(&gcwq->lock);
3817 BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3818 gcwq->flags &= ~GCWQ_FREEZING;
3820 list_for_each_entry(wq, &workqueues, list) {
3821 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3823 if (!cwq || !(wq->flags & WQ_FREEZABLE))
3824 continue;
3826 /* restore max_active and repopulate worklist */
3827 cwq->max_active = wq->saved_max_active;
3829 while (!list_empty(&cwq->delayed_works) &&
3830 cwq->nr_active < cwq->max_active)
3831 cwq_activate_first_delayed(cwq);
3834 wake_up_worker(gcwq);
3836 spin_unlock_irq(&gcwq->lock);
3839 workqueue_freezing = false;
3840 out_unlock:
3841 spin_unlock(&workqueue_lock);
3843 #endif /* CONFIG_FREEZER */
3845 static int __init init_workqueues(void)
3847 unsigned int cpu;
3848 int i;
3850 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
3851 cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
3853 /* initialize gcwqs */
3854 for_each_gcwq_cpu(cpu) {
3855 struct global_cwq *gcwq = get_gcwq(cpu);
3857 spin_lock_init(&gcwq->lock);
3858 INIT_LIST_HEAD(&gcwq->worklist);
3859 gcwq->cpu = cpu;
3860 gcwq->flags |= GCWQ_DISASSOCIATED;
3862 INIT_LIST_HEAD(&gcwq->idle_list);
3863 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3864 INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3866 init_timer_deferrable(&gcwq->idle_timer);
3867 gcwq->idle_timer.function = idle_worker_timeout;
3868 gcwq->idle_timer.data = (unsigned long)gcwq;
3870 setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3871 (unsigned long)gcwq);
3873 ida_init(&gcwq->worker_ida);
3875 gcwq->trustee_state = TRUSTEE_DONE;
3876 init_waitqueue_head(&gcwq->trustee_wait);
3879 /* create the initial worker */
3880 for_each_online_gcwq_cpu(cpu) {
3881 struct global_cwq *gcwq = get_gcwq(cpu);
3882 struct worker *worker;
3884 if (cpu != WORK_CPU_UNBOUND)
3885 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3886 worker = create_worker(gcwq, true);
3887 BUG_ON(!worker);
3888 spin_lock_irq(&gcwq->lock);
3889 start_worker(worker);
3890 spin_unlock_irq(&gcwq->lock);
3893 system_wq = alloc_workqueue("events", 0, 0);
3894 system_long_wq = alloc_workqueue("events_long", 0, 0);
3895 system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3896 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3897 WQ_UNBOUND_MAX_ACTIVE);
3898 system_freezable_wq = alloc_workqueue("events_freezable",
3899 WQ_FREEZABLE, 0);
3900 system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
3901 WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
3902 BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
3903 !system_unbound_wq || !system_freezable_wq ||
3904 !system_nrt_freezable_wq);
3905 return 0;
3907 early_initcall(init_workqueues);