/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask_types.h>
#include <linux/rcupdate.h>
#include <linux/workqueue_types.h>

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum work_bits {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT,		/* data points to pwq */
	WORK_STRUCT_LINKED_BIT,		/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT,		/* static initializer (debugobjects) */
#endif
	WORK_STRUCT_FLAG_BITS,

	/* color for workqueue flushing */
	WORK_STRUCT_COLOR_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_STRUCT_COLOR_BITS	= 4,

	/*
	 * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/
	 * debugobjects turned off. This makes pwqs aligned to 256 bytes (512
	 * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors.
	 *
	 * MSB
	 * [ pwq pointer ] [ flush color ] [ STRUCT flags ]
	 *                     4 bits        4 or 5 bits
	 */
	WORK_STRUCT_PWQ_SHIFT	= WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS,

	/*
	 * data contains off-queue information when !WORK_STRUCT_PWQ.
	 *
	 * MSB
	 * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ]
	 *                  16 bits          1 bit        4 or 5 bits
	 */
	WORK_OFFQ_FLAG_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_OFFQ_BH_BIT	= WORK_OFFQ_FLAG_SHIFT,
	WORK_OFFQ_FLAG_END,
	WORK_OFFQ_FLAG_BITS	= WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,

	WORK_OFFQ_DISABLE_SHIFT	= WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_DISABLE_BITS	= 16,

	/*
	 * When a work item is off queue, the high bits encode off-queue flags
	 * and the last pool it was on. Cap pool ID to 31 bits and use the
	 * highest number to indicate that no pool is associated.
	 */
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
};

enum work_flags {
	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
};

enum wq_misc_consts {
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_BH		(1ul << WORK_OFFQ_BH_BIT)
#define WORK_OFFQ_FLAG_MASK	(((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT)
#define WORK_OFFQ_DISABLE_MASK	(((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
#define WORK_STRUCT_PWQ_MASK	(~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

enum wq_affn_scope {
	WQ_AFFN_DFL,		/* use system default */
	WQ_AFFN_CPU,		/* one pod per CPU */
	WQ_AFFN_SMT,		/* one pod per SMT */
	WQ_AFFN_CACHE,		/* one pod per LLC */
	WQ_AFFN_NUMA,		/* one pod per NUMA node */
	WQ_AFFN_SYSTEM,		/* one pod across the whole system */

	WQ_AFFN_NR_TYPES,
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @cpumask: allowed CPUs
	 *
	 * Work items in this workqueue are affine to these CPUs and not allowed
	 * to execute on other CPUs. A pool serving a workqueue must have the
	 * same @cpumask.
	 */
	cpumask_var_t cpumask;

	/**
	 * @__pod_cpumask: internal attribute used to create per-pod pools
	 *
	 * Per-pod unbound worker pools are used to improve locality. Always a
	 * subset of ->cpumask. A workqueue can be associated with multiple
	 * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
	 * of a pool's @__pod_cpumask is strict depends on @affn_strict.
	 */
	cpumask_var_t __pod_cpumask;

	/**
	 * @affn_strict: affinity scope is strict
	 *
	 * If clear, workqueue will make a best-effort attempt at starting the
	 * worker inside @__pod_cpumask but the scheduler is free to migrate it
	 * outside.
	 *
	 * If set, workers are only allowed to run inside @__pod_cpumask.
	 */
	bool affn_strict;

	/*
	 * Below fields aren't properties of a worker_pool. They only modify how
	 * :c:func:`apply_workqueue_attrs` select pools and thus don't
	 * participate in pool hash calculations or equality comparisons.
	 *
	 * If @affn_strict is set, @cpumask isn't a property of a worker_pool
	 * either.
	 */

	/**
	 * @affn_scope: unbound CPU affinity scope
	 *
	 * CPU pods are used to improve execution locality of unbound work
	 * items. There are multiple pod types, one for each wq_affn_scope, and
	 * every CPU in the system belongs to one pod in every pod type. CPUs
	 * that belong to the same pod share the worker pool. For example,
	 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
	 * pool for each NUMA node.
	 */
	enum wq_affn_scope affn_scope;

	/**
	 * @ordered: work items must be executed one by one in queueing order
	 */
	bool ordered;
};
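
/*
 * Illustrative sketch (not part of this header): adjusting an unbound
 * workqueue's attributes with the helpers declared further below. The
 * workqueue "my_wq" is hypothetical and error handling is trimmed; a
 * real caller must check apply_workqueue_attrs()'s return value.
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *
 *	if (attrs) {
 *		attrs->affn_scope = WQ_AFFN_NUMA;
 *		attrs->affn_strict = true;
 *		ret = apply_workqueue_attrs(my_wq, attrs);
 *		free_workqueue_attrs(attrs);
 *	}
 */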

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};
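
/*
 * Illustrative sketch: a delayed work handler receives the embedded
 * work_struct and can recover its container with to_delayed_work().
 * my_poll() and struct my_dev are hypothetical.
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_dev *dev = container_of(dwork, struct my_dev,
 *						  poll_work);
 *		...
 *	}
 */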

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
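
/*
 * Illustrative sketch: DECLARE_WORK() defines a fully initialized
 * file-scope work item that can be queued without runtime setup.
 * my_handler() is a hypothetical callback.
 *
 *	static void my_handler(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	schedule_work(&my_work);	(e.g. from an interrupt handler)
 */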

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static __maybe_unused struct lock_class_key __key;	\
									\
		__INIT_WORK_KEY(_work, _func, _onstack, &__key);	\
	} while (0)

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define INIT_WORK_ONSTACK_KEY(_work, _func, _key)			\
	__INIT_WORK_KEY((_work), (_func), 1, _key)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
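
/*
 * Illustrative sketch: work items embedded in dynamically allocated
 * objects are initialized with INIT_WORK()/INIT_DELAYED_WORK() before
 * first use; the _ONSTACK variants pair with destroy_work_on_stack()
 * and friends. struct my_dev, my_irq_handler() and my_poll() are
 * hypothetical.
 *
 *	struct my_dev {
 *		struct work_struct irq_work;
 *		struct delayed_work poll_work;
 *	};
 *
 *	INIT_WORK(&dev->irq_work, my_irq_handler);
 *	INIT_DELAYED_WORK(&dev->poll_work, my_poll);
 */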

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants. For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum wq_flags {
	WQ_BH			= 1 << 0, /* execute in bottom half (softirq) context */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality. Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param is
	 * specified. Per-cpu workqueues which contribute significantly to
	 * power consumption are identified and marked with this flag, and
	 * enabling the power_efficient mode leads to noticeable power saving
	 * at the cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DESTROYING		= 1 << 15, /* internal: workqueue is destroying */
	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */

	/* BH wq only allows the following flags */
	__WQ_BH_ALLOWS		= WQ_BH | WQ_HIGHPRI,
};

enum wq_consts {
	WQ_MAX_ACTIVE		= 2048,	  /* I like 2048, better ideas? */
	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,

	/*
	 * Per-node default cap on min_active. Unless explicitly set, min_active
	 * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
	 * workqueue_struct->min_active definition.
	 */
	WQ_DFL_MIN_ACTIVE	= 8,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded. There are users which expect relatively
 * short queue flush time. Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works. Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue. Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info.
 *
 * system_bh[_highpri]_wq are a convenience interface to softirq. BH work
 * items are executed in the queueing CPU's BH context in the queueing order.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
extern struct workqueue_struct *system_bh_wq;
extern struct workqueue_struct *system_bh_highpri_wq;

void workqueue_softirq_action(bool highpri);
void workqueue_softirq_dead(unsigned int cpu);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @...: args for @fmt
 *
 * For a per-cpu workqueue, @max_active limits the number of in-flight work
 * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
 * executing at most one work item for the workqueue.
 *
 * For unbound workqueues, @max_active limits the number of in-flight work items
 * for the whole system. e.g. @max_active of 16 indicates that there can be
 * at most 16 work items executing for the workqueue in the whole system.
 *
 * As sharing the same active counter for an unbound workqueue across multiple
 * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
 * according to the proportion of the number of online CPUs and enforced
 * independently.
 *
 * Depending on online CPU distribution, a node may end up with per-node
 * max_active which is significantly lower than @max_active, which can lead to
 * deadlocks if the per-node concurrency limit is lower than the maximum number
 * of interdependent work items for the workqueue.
 *
 * To guarantee forward progress regardless of online CPU distribution, the
 * concurrency limit on every node is guaranteed to be equal to or greater than
 * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
 * that the sum of per-node max_active's may be larger than @max_active.
 *
 * For detailed information on %WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
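
/*
 * Illustrative sketch: a driver allocating and tearing down its own
 * workqueue. The name and flags are examples only; max_active of 0
 * selects the default (WQ_DFL_ACTIVE).
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */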

#ifdef CONFIG_LOCKDEP
/**
 * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @lockdep_map: user-defined lockdep_map
 * @...: args for @fmt
 *
 * Same as alloc_workqueue but with a user-defined lockdep_map. Useful for
 * workqueues created with the same purpose and to avoid leaking a lockdep_map
 * on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 5) struct workqueue_struct *
alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
			    struct lockdep_map *lockdep_map, ...);

/**
 * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
 * user-defined lockdep_map
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @lockdep_map: user-defined lockdep_map
 * @args: args for @fmt
 *
 * Same as alloc_ordered_workqueue but with a user-defined lockdep_map.
 * Useful for workqueues created with the same purpose and to avoid leaking a
 * lockdep_map on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)	\
	alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),	\
				    1, lockdep_map, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue. An ordered workqueue executes at
 * most one work item at any given time in the queued order. Ordered
 * workqueues are implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

#define from_work(var, callback_work, work_fieldname)	\
	container_of(callback_work, typeof(*var), work_fieldname)
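
/*
 * Illustrative sketch: from_work() wraps container_of() and infers the
 * container type from @var. struct my_dev and my_handler() are
 * hypothetical.
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		struct my_dev *dev = from_work(dev, work, irq_work);
 *		...
 *	}
 */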

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool disable_work(struct work_struct *work);
extern bool disable_work_sync(struct work_struct *work);
extern bool enable_work(struct work_struct *work);

extern bool disable_delayed_work(struct delayed_work *dwork);
extern bool disable_delayed_work_sync(struct delayed_work *dwork);
extern bool enable_delayed_work(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern void workqueue_set_min_active(struct workqueue_struct *wq,
				     int min_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_freezable_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
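
/*
 * Illustrative sketch: typical teardown ordering for an object with
 * embedded work items. cancel_work_sync() and friends wait for a
 * running handler to finish and cope with self-requeueing work; on
 * return the item is neither pending nor executing, so freeing is safe.
 * dev, poll_work and irq_work are hypothetical.
 *
 *	cancel_delayed_work_sync(&dev->poll_work);
 *	cancel_work_sync(&dev->irq_work);
 *	kfree(dev);
 */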

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties: If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
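
/*
 * Illustrative sketch of the guarantee above: data written before a
 * successful queue_work() is visible to the handler without additional
 * barriers. dev->msg and dev->irq_work are hypothetical.
 *
 *	dev->msg = next_message();
 *	queue_work(dev->wq, &dev->irq_work);
 */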

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
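
/*
 * Illustrative sketch: mod_delayed_work() debounces - each call pushes
 * the execution time back whether or not @dwork was already queued.
 * dev->idle_work and IDLE_TIMEOUT are hypothetical.
 *
 *	mod_delayed_work(system_wq, &dev->idle_work, IDLE_TIMEOUT);
 */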

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * enable_and_queue_work - Enable and queue a work item on a specific workqueue
 * @wq: The target workqueue
 * @work: The work item to be enabled and queued
 *
 * This function combines the operations of enable_work() and queue_work(),
 * providing a convenient way to enable and queue a work item in a single call.
 * It invokes enable_work() on @work and then queues it if the disable depth
 * reached 0. Returns %true if the disable depth reached 0 and @work is queued,
 * and %false otherwise.
 *
 * Note that @work is always queued when disable depth reaches zero. If the
 * desired behavior is queueing only if certain events took place while @work is
 * disabled, the user should implement the necessary state tracking and perform
 * explicit conditional queueing after enable_work().
 */
static inline bool enable_and_queue_work(struct workqueue_struct *wq,
					 struct work_struct *work)
{
	if (enable_work(work)) {
		queue_work(wq, work);
		return true;
	}

	return false;
}
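
/*
 * Illustrative sketch: disable_work() raises the disable count (see
 * WORK_OFFQ_DISABLE_BITS above) and queueing is a no-op while it is
 * non-zero, so paired disable/enable sections nest. dev->irq_work is
 * hypothetical.
 *
 *	disable_work_sync(&dev->irq_work);
 *	... reconfigure hardware ...
 *	enable_and_queue_work(system_wq, &dev->irq_work);
 */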

/*
 * Detect attempts to flush system-wide workqueues at compile time when
 * possible. Warn about attempts to flush system-wide workqueues at runtime.
 *
 * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
 * for reasons and steps for converting system-wide workqueues into local workqueues.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work()						\
({									\
	__warn_flushing_systemwide_wq();				\
	__flush_workqueue(system_wq);					\
})

#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
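
/*
 * Illustrative sketch: a self-rearming poller on the kernel-global
 * workqueue. my_poll() and POLL_PERIOD are hypothetical.
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		...
 *		schedule_delayed_work(dwork, POLL_PERIOD);
 *	}
 */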

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu_key(int cpu, long (*fn)(void *),
		     void *arg, struct lock_class_key *key);

/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu(_cpu, _fn, _arg)			\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
})

long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
			  void *arg, struct lock_class_key *key);

/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu_safe(_cpu, _fn, _arg)		\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_safe_key(_cpu, _fn, _arg, &__key);	\
})
#endif /* CONFIG_SMP */
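
/*
 * Illustrative sketch: work_on_cpu() runs @_fn synchronously on @_cpu
 * and returns its result; on !CONFIG_SMP it just calls @_fn locally.
 * my_fn() and req are hypothetical.
 *
 *	static long my_fn(void *arg)
 *	{
 *		... runs on the target CPU ...
 *		return 0;
 *	}
 *
 *	ret = work_on_cpu(cpu, my_fn, &req);
 */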

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else /* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif /* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);
void __init workqueue_init_topology(void);

#endif /* _LINUX_WORKQUEUE_H */