// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
#include <linux/fs_struct.h>
#include <linux/task_work.h>
#include <linux/blk-cgroup.h>
#include <linux/audit.h>
#include <linux/cpu.h>

#include "../kernel/sched/sched.h"
#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)
enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_FIXED	= 8,	/* static idle worker */
	IO_WORKER_F_BOUND	= 16,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
	IO_WQ_BIT_ERROR		= 1,	/* error on setup */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};
/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct rcu_head rcu;
	struct mm_struct *mm;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
	const struct cred *cur_creds;
	const struct cred *saved_creds;
	struct files_struct *restore_files;
	struct nsproxy *restore_nsproxy;
	struct fs_struct *restore_fs;
};
#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
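
/*
 * Per-type (bound vs unbound) worker accounting, one instance of each in
 * every io_wqe below. Definition follows from the usage of acct->nr_workers,
 * acct->max_workers and acct->nr_running in this file.
 */
struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};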
/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		raw_spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned long hash_map;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};
/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct task_struct *manager;
	struct user_struct *user;
	refcount_t refs;
	struct completion done;

	struct hlist_node cpuhp_node;

	refcount_t use_refs;
};
static enum cpuhp_state io_wq_online;
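
/*
 * Workers are reference counted: io_worker_get() pins a worker while it is
 * being inspected or signalled, and the final io_worker_release() wakes the
 * task so io_worker_exit() can finish tearing it down.
 */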
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		wake_up_process(worker->task);
}
/*
 * Note: drops the wqe->lock if returning true! The caller must re-acquire
 * the lock in that case. Some callers need to restart handling if this
 * happens, so we can't just re-acquire the lock on behalf of the caller.
 */
static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
{
	bool dropped_lock = false;

	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}

	if (current->files != worker->restore_files) {
		__acquire(&wqe->lock);
		raw_spin_unlock_irq(&wqe->lock);
		dropped_lock = true;

		task_lock(current);
		current->files = worker->restore_files;
		current->nsproxy = worker->restore_nsproxy;
		task_unlock(current);
	}

	if (current->fs != worker->restore_fs)
		current->fs = worker->restore_fs;

	/*
	 * If we have an active mm, we need to drop the wq lock before unusing
	 * it. If we do, return true and let the caller retry the idle loop.
	 */
	if (worker->mm) {
		if (!dropped_lock) {
			__acquire(&wqe->lock);
			raw_spin_unlock_irq(&wqe->lock);
			dropped_lock = true;
		}
		__set_current_state(TASK_RUNNING);
		kthread_unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

#ifdef CONFIG_BLK_CGROUP
	if (worker->blkcg_css) {
		kthread_associate_blkcg(NULL);
		worker->blkcg_css = NULL;
	}
#endif
	if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

	return dropped_lock;
}
static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
						  struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}
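
/*
 * Tear down a worker: wait for the last reference to drop, fix up the
 * running/unbound accounting, unlink from the free and all lists, and drop
 * the io_wq reference taken when the worker was created.
 */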
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	/*
	 * If we're not at zero, someone else is holding a brief reference
	 * to the worker. Wait for that to go away.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!refcount_dec_and_test(&worker->ref))
		schedule();
	__set_current_state(TASK_RUNNING);

	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	if (worker->flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);
	if (!(worker->flags & IO_WORKER_F_BOUND))
		atomic_dec(&wqe->wq->user->processes);
	worker->flags = 0;
	preempt_enable();

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	if (__io_worker_unuse(wqe, worker)) {
		__release(&wqe->lock);
		raw_spin_lock_irq(&wqe->lock);
	}
	acct->nr_workers--;
	raw_spin_unlock_irq(&wqe->lock);

	kfree_rcu(worker, rcu);
	if (refcount_dec_and_test(&wqe->wq->refs))
		complete(&wqe->wq->done);
}
static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;

	return false;
}
/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}
/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	WARN_ON_ONCE(!acct->max_workers);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}
static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}
static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
{
	allow_kernel_signal(SIGINT);

	current->flags |= PF_IO_WORKER;

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	worker->restore_files = current->files;
	worker->restore_nsproxy = current->nsproxy;
	worker->restore_fs = current->fs;
	io_wqe_inc_running(wqe, worker);
}
/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(wqe, worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
			atomic_dec(&wqe->wq->user->processes);
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
			atomic_inc(&wqe->wq->user->processes);
		}
		io_wqe_inc_running(wqe, worker);
	}
}
/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}

	return __io_worker_unuse(wqe, worker);
}
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}
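
/*
 * Pull the next runnable work item off the list. Unhashed work can run
 * immediately; hashed work is serialized per hash bucket, and all items
 * with the same hash are spliced out together as [work, tail].
 */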
static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int hash;

	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&wqe->work_list, node, prev);
			return work;
		}

		/* hashed, can run if not already running */
		hash = io_get_work_hash(work);
		if (!(wqe->hash_map & BIT(hash))) {
			wqe->hash_map |= BIT(hash);
			/* all items with this hash lie in [work, tail] */
			tail = wqe->hash_tail[hash];
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&wqe->work_list, &tail->list, prev);
			return work;
		}
	}

	return NULL;
}
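
/*
 * The helpers below switch the worker kthread into the submitting task's
 * context before running a work item: mm, blkcg, credentials, files and
 * nsproxy, fs, RLIMIT_FSIZE and audit fields all come from work->identity.
 */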
static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
{
	if (worker->mm) {
		kthread_unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

	if (mmget_not_zero(work->identity->mm)) {
		kthread_use_mm(work->identity->mm);
		worker->mm = work->identity->mm;
		return;
	}

	/* failed grabbing mm, ensure work gets cancelled */
	work->flags |= IO_WQ_WORK_CANCEL;
}
static inline void io_wq_switch_blkcg(struct io_worker *worker,
				      struct io_wq_work *work)
{
#ifdef CONFIG_BLK_CGROUP
	if (!(work->flags & IO_WQ_WORK_BLKCG))
		return;
	if (work->identity->blkcg_css != worker->blkcg_css) {
		kthread_associate_blkcg(work->identity->blkcg_css);
		worker->blkcg_css = work->identity->blkcg_css;
	}
#endif
}
static void io_wq_switch_creds(struct io_worker *worker,
			       struct io_wq_work *work)
{
	const struct cred *old_creds = override_creds(work->identity->creds);

	worker->cur_creds = work->identity->creds;
	if (worker->saved_creds)
		put_cred(old_creds); /* creds set by previous switch */
	else
		worker->saved_creds = old_creds;
}
static void io_impersonate_work(struct io_worker *worker,
				struct io_wq_work *work)
{
	if ((work->flags & IO_WQ_WORK_FILES) &&
	    current->files != work->identity->files) {
		task_lock(current);
		current->files = work->identity->files;
		current->nsproxy = work->identity->nsproxy;
		task_unlock(current);
		if (!work->identity->files) {
			/* failed grabbing files, ensure work gets cancelled */
			work->flags |= IO_WQ_WORK_CANCEL;
		}
	}
	if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs)
		current->fs = work->identity->fs;
	if ((work->flags & IO_WQ_WORK_MM) && work->identity->mm != worker->mm)
		io_wq_switch_mm(worker, work);
	if ((work->flags & IO_WQ_WORK_CREDS) &&
	    worker->cur_creds != work->identity->creds)
		io_wq_switch_creds(worker, work);
	if (work->flags & IO_WQ_WORK_FSIZE)
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->identity->fsize;
	else if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	io_wq_switch_blkcg(worker, work);
#ifdef CONFIG_AUDIT
	current->loginuid = work->identity->loginuid;
	current->sessionid = work->identity->sessionid;
#endif
}
static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		/* flush pending signals before assigning new work */
		if (signal_pending(current))
			flush_signals(current);
		cond_resched();
	}

#ifdef CONFIG_AUDIT
	current->loginuid = KUIDT_INIT(AUDIT_UID_UNSET);
	current->sessionid = AUDIT_SID_UNSET;
#endif

	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
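
/*
 * Main work processing loop: grab the next item (or mark the wqe stalled if
 * only blocked hashed work remains), then run the whole dependent link,
 * re-queueing linked work and clearing the hash bit once the last item for
 * that hash has completed.
 */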
static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		raw_spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *old_work, *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);
			io_impersonate_work(worker, work);

			old_work = work;
			linked = wq->do_work(work);

			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			wq->free_work(old_work);

			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				raw_spin_lock_irq(&wqe->lock);
				wqe->hash_map &= ~BIT_ULL(hash);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				raw_spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		raw_spin_lock_irq(&wqe->lock);
	} while (1);
}
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	io_worker_start(wqe, worker);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		set_current_state(TASK_INTERRUPTIBLE);
loop:
		raw_spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			__set_current_state(TASK_RUNNING);
			io_worker_handle_work(worker);
			goto loop;
		}
		/* drops the lock on success, retry */
		if (__io_worker_idle(wqe, worker)) {
			__release(&wqe->lock);
			goto loop;
		}
		raw_spin_unlock_irq(&wqe->lock);
		if (signal_pending(current))
			flush_signals(current);
		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
			continue;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		raw_spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			raw_spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(wqe, worker);
}
/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * create one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	raw_spin_lock_irq(&wqe->lock);
	io_wqe_dec_running(wqe, worker);
	raw_spin_unlock_irq(&wqe->lock);
}
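
/*
 * Create a new worker kthread for the given accounting class (bound or
 * unbound), bind it to the wqe's NUMA node and put it on the free and all
 * lists before waking it up.
 */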
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);

	worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
				"io_wqe_worker-%d/%d", index, wqe->node);
	if (IS_ERR(worker->task)) {
		kfree(worker);
		return false;
	}
	kthread_bind_mask(worker->task, cpumask_of_node(wqe->node));

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	raw_spin_unlock_irq(&wqe->lock);

	if (index == IO_WQ_ACCT_UNBOUND)
		atomic_inc(&wq->user->processes);

	refcount_inc(&wq->refs);
	wake_up_process(worker->task);
	return true;
}
static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	wake_up_process(worker->task);
	return false;
}
/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	int node;

	/* create fixed workers */
	refcount_set(&wq->refs, 1);
	for_each_node(node) {
		if (!node_online(node))
			continue;
		if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
			continue;
		set_bit(IO_WQ_BIT_ERROR, &wq->state);
		set_bit(IO_WQ_BIT_EXIT, &wq->state);
		goto out;
	}

	complete(&wq->done);

	while (!kthread_should_stop()) {
		if (current->task_works)
			task_work_run();

		for_each_node(node) {
			struct io_wqe *wqe = wq->wqes[node];
			bool fork_worker[2] = { false, false };

			if (!node_online(node))
				continue;

			raw_spin_lock_irq(&wqe->lock);
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
				fork_worker[IO_WQ_ACCT_BOUND] = true;
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
			raw_spin_unlock_irq(&wqe->lock);
			if (fork_worker[IO_WQ_ACCT_BOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
			if (fork_worker[IO_WQ_ACCT_UNBOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (current->task_works)
		task_work_run();

out:
	if (refcount_dec_and_test(&wq->refs)) {
		complete(&wq->done);
		return 0;
	}
	/* if ERROR is set and we get here, we have workers to wake */
	if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
		rcu_read_lock();
		for_each_node(node)
			io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
		rcu_read_unlock();
	}
	return 0;
}
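
/*
 * Bounded work can always be queued. Unbounded work is allowed while
 * unbound workers are running or idle; beyond that it is limited by the
 * per-user process count unless the caller has CAP_SYS_RESOURCE or
 * CAP_SYS_ADMIN.
 */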
static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
			    struct io_wq_work *work)
{
	bool free_worker;

	if (!(work->flags & IO_WQ_WORK_UNBOUND))
		return true;
	if (atomic_read(&acct->nr_running))
		return true;

	rcu_read_lock();
	free_worker = !hlist_nulls_empty(&wqe->free_list);
	rcu_read_unlock();
	if (free_worker)
		return true;

	if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
	    !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
		return false;

	return true;
}
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *old_work = work;

		work->flags |= IO_WQ_WORK_CANCEL;
		work = wq->do_work(work);
		wq->free_work(old_work);
	} while (work);
}
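
/*
 * Insert work into the pending list. Hashed work is appended after the
 * current tail for its hash bucket so that a whole hash chain stays
 * contiguous and can be spliced out in one go by io_get_next_work().
 */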
static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &wqe->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	int work_flags;
	unsigned long flags;

	/*
	 * Do early check to see if we need a new unbound worker, and if we do,
	 * if we're allowed to do so. This isn't 100% accurate as there's a
	 * gap between this check and incrementing the value, but that's OK.
	 * It's close enough to not be an issue, fork() has the same delay.
	 */
	if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
		io_run_cancel(work, wqe);
		return;
	}

	work_flags = work->flags;
	raw_spin_lock_irqsave(&wqe->lock, flags);
	io_wqe_insert_work(wqe, work);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	raw_spin_unlock_irqrestore(&wqe->lock, flags);

	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))
		io_wqe_wake_worker(wqe, acct);
}
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}
/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
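
/*
 * Typical usage (illustrative, from the io_uring side): hash writes by the
 * target inode so writes to the same file are serialized, e.g. something
 * like io_wq_hash_work(&req->work, file_inode(req->file)).
 */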
struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};
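
/*
 * Cancellation matching state: match->fn() decides per work item. Pending
 * work is removed and cancelled inline; running work gets SIGINT. With
 * cancel_all set, the scan keeps going instead of stopping at the first hit.
 */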
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
	    match->fn(worker->cur_work, match->data)) {
		send_sig(SIGINT, worker->task, 1);
		match->nr_running++;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return match->nr_running && !match->cancel_all;
}
static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}

	wq_list_del(&wqe->work_list, &work->list, prev);
}
static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;

retry:
	raw_spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock_irqrestore(&wqe->lock, flags);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		if (!match->cancel_all)
			return;

		/* not safe to continue after unlock */
		goto retry;
	}
	raw_spin_unlock_irqrestore(&wqe->lock, flags);
}
static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes)
		goto err_wq;

	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wqes;

	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	/* caller must already hold a reference to this */
	wq->user = data->user;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		if (wq->user) {
			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		}
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	init_completion(&wq->done);

	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
	if (!IS_ERR(wq->manager)) {
		wake_up_process(wq->manager);
		wait_for_completion(&wq->done);
		if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
			ret = -ENOMEM;
			goto err;
		}
		refcount_set(&wq->use_refs, 1);
		reinit_completion(&wq->done);
		return wq;
	}

	ret = PTR_ERR(wq->manager);
	complete(&wq->done);
err:
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node)
		kfree(wq->wqes[node]);
err_wqes:
	kfree(wq->wqes);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{
	if (data->free_work != wq->free_work || data->do_work != wq->do_work)
		return false;

	return refcount_inc_not_zero(&wq->use_refs);
}
static void __io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (wq->manager)
		kthread_stop(wq->manager);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	wait_for_completion(&wq->done);

	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
}
void io_wq_destroy(struct io_wq *wq)
{
	if (refcount_dec_and_test(&wq->use_refs))
		__io_wq_destroy(wq);
}
struct task_struct *io_wq_get_task(struct io_wq *wq)
{
	return wq->manager;
}
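
/*
 * CPU hotplug handling: when a CPU comes online, re-apply each worker's
 * affinity so it stays bound to the CPUs of its wqe's NUMA node.
 */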
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct task_struct *task = worker->task;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(task, &rf);
	do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node));
	task->flags |= PF_NO_SETAFFINITY;
	task_rq_unlock(rq, task, &rf);
	return false;
}
static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
	rcu_read_unlock();
	return 0;
}
static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, NULL);
	if (ret < 0)
		return ret;

	io_wq_online = ret;
	return 0;
}

subsys_initcall(io_wq_init);