// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
#include <linux/fs_struct.h>
#include <linux/task_work.h>

#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT     (5 * HZ)
enum {
        IO_WORKER_F_UP          = 1,    /* up and active */
        IO_WORKER_F_RUNNING     = 2,    /* account as running */
        IO_WORKER_F_FREE        = 4,    /* worker on free list */
        IO_WORKER_F_EXITING     = 8,    /* worker exiting */
        IO_WORKER_F_FIXED       = 16,   /* static idle worker */
        IO_WORKER_F_BOUND       = 32,   /* is doing bounded work */
};

enum {
        IO_WQ_BIT_EXIT          = 0,    /* wq exiting */
        IO_WQ_BIT_CANCEL        = 1,    /* cancel work on list */
        IO_WQ_BIT_ERROR         = 2,    /* error on setup */
};

enum {
        IO_WQE_FLAG_STALLED     = 1,    /* stalled on hash */
};
/*
 * One for each thread in a wqe pool
 */
struct io_worker {
        refcount_t ref;
        unsigned flags;
        struct hlist_nulls_node nulls_node;
        struct list_head all_list;
        struct task_struct *task;
        struct io_wqe *wqe;

        struct io_wq_work *cur_work;
        spinlock_t lock;

        struct rcu_head rcu;
        struct mm_struct *mm;
        const struct cred *cur_creds;
        const struct cred *saved_creds;
        struct files_struct *restore_files;
        struct fs_struct *restore_fs;
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER        6
#else
#define IO_WQ_HASH_ORDER        5
#endif

#define IO_WQ_NR_HASH_BUCKETS   (1u << IO_WQ_HASH_ORDER)
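/*
 * Illustrative note (not from the original source): the per-node hash_map
 * in struct io_wqe below is a plain unsigned long bitmap indexed by these
 * bucket numbers, so the bucket count must not exceed BITS_PER_LONG. A
 * defensive check could be written as:
 *
 *      BUILD_BUG_ON(IO_WQ_NR_HASH_BUCKETS > BITS_PER_LONG);
 */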
struct io_wqe_acct {
        unsigned nr_workers;
        unsigned max_workers;
        atomic_t nr_running;
};

enum {
        IO_WQ_ACCT_BOUND,
        IO_WQ_ACCT_UNBOUND,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
        struct {
                spinlock_t lock;
                struct io_wq_work_list work_list;
                unsigned long hash_map;
                unsigned flags;
        } ____cacheline_aligned_in_smp;

        int node;
        struct io_wqe_acct acct[2];

        struct hlist_nulls_head free_list;
        struct list_head all_list;

        struct io_wq *wq;
        struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};

/*
 * Per io_wq state
 */
struct io_wq {
        struct io_wqe **wqes;
        unsigned long state;

        free_work_fn *free_work;
        io_wq_work_fn *do_work;

        struct task_struct *manager;
        struct user_struct *user;
        refcount_t refs;
        struct completion done;

        refcount_t use_refs;
};
static bool io_worker_get(struct io_worker *worker)
{
        return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
        if (refcount_dec_and_test(&worker->ref))
                wake_up_process(worker->task);
}
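/*
 * Usage sketch (illustrative, not part of the original file): code that
 * walks the worker lists pairs these two helpers around each access:
 *
 *      if (io_worker_get(worker)) {
 *              ...use worker->task...
 *              io_worker_release(worker);
 *      }
 *
 * io_wq_for_each_worker() further down follows exactly this pattern.
 */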
/*
 * Note: drops the wqe->lock if returning true! The caller must re-acquire
 * the lock in that case. Some callers need to restart handling if this
 * happens, so we can't just re-acquire the lock on behalf of the caller.
 */
static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
{
        bool dropped_lock = false;

        if (worker->saved_creds) {
                revert_creds(worker->saved_creds);
                worker->cur_creds = worker->saved_creds = NULL;
        }

        if (current->files != worker->restore_files) {
                __acquire(&wqe->lock);
                spin_unlock_irq(&wqe->lock);
                dropped_lock = true;

                task_lock(current);
                current->files = worker->restore_files;
                task_unlock(current);
        }

        if (current->fs != worker->restore_fs)
                current->fs = worker->restore_fs;

        /*
         * If we have an active mm, we need to drop the wq lock before unusing
         * it. If we do, return true and let the caller retry the idle loop.
         */
        if (worker->mm) {
                if (!dropped_lock) {
                        __acquire(&wqe->lock);
                        spin_unlock_irq(&wqe->lock);
                        dropped_lock = true;
                }
                __set_current_state(TASK_RUNNING);
                kthread_unuse_mm(worker->mm);
                mmput(worker->mm);
                worker->mm = NULL;
        }

        return dropped_lock;
}
static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
                                                   struct io_wq_work *work)
{
        if (work->flags & IO_WQ_WORK_UNBOUND)
                return &wqe->acct[IO_WQ_ACCT_UNBOUND];

        return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
                                                  struct io_worker *worker)
{
        if (worker->flags & IO_WORKER_F_BOUND)
                return &wqe->acct[IO_WQ_ACCT_BOUND];

        return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}
static void io_worker_exit(struct io_worker *worker)
{
        struct io_wqe *wqe = worker->wqe;
        struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
        unsigned nr_workers;

        /*
         * If we're not at zero, someone else is holding a brief reference
         * to the worker. Wait for that to go away.
         */
        set_current_state(TASK_INTERRUPTIBLE);
        if (!refcount_dec_and_test(&worker->ref))
                schedule();
        __set_current_state(TASK_RUNNING);

        current->flags &= ~PF_IO_WORKER;
        if (worker->flags & IO_WORKER_F_RUNNING)
                atomic_dec(&acct->nr_running);
        if (!(worker->flags & IO_WORKER_F_BOUND))
                atomic_dec(&wqe->wq->user->processes);

        spin_lock_irq(&wqe->lock);
        hlist_nulls_del_rcu(&worker->nulls_node);
        list_del_rcu(&worker->all_list);
        if (__io_worker_unuse(wqe, worker)) {
                __release(&wqe->lock);
                spin_lock_irq(&wqe->lock);
        }
        acct->nr_workers--;
        nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
                        wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
        spin_unlock_irq(&wqe->lock);

        /* all workers gone, wq exit can proceed */
        if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
                complete(&wqe->wq->done);

        kfree_rcu(worker, rcu);
}
static inline bool io_wqe_run_queue(struct io_wqe *wqe)
        __must_hold(wqe->lock)
{
        if (!wq_list_empty(&wqe->work_list) &&
            !(wqe->flags & IO_WQE_FLAG_STALLED))
                return true;

        return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
        __must_hold(RCU)
{
        struct hlist_nulls_node *n;
        struct io_worker *worker;

        n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
        if (is_a_nulls(n))
                return false;

        worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
        if (io_worker_get(worker)) {
                wake_up_process(worker->task);
                io_worker_release(worker);
                return true;
        }

        return false;
}
/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
        bool ret;

        /*
         * Most likely an attempt to queue unbounded work on an io_wq that
         * wasn't setup with any unbounded workers.
         */
        WARN_ON_ONCE(!acct->max_workers);

        rcu_read_lock();
        ret = io_wqe_activate_free_worker(wqe);
        rcu_read_unlock();

        if (!ret && acct->nr_workers < acct->max_workers)
                wake_up_process(wqe->wq->manager);
}
static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
{
        struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

        atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
        __must_hold(wqe->lock)
{
        struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

        if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
                io_wqe_wake_worker(wqe, acct);
}
static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
{
        allow_kernel_signal(SIGINT);

        current->flags |= PF_IO_WORKER;

        worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
        worker->restore_files = current->files;
        worker->restore_fs = current->fs;
        io_wqe_inc_running(wqe, worker);
}
/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
                             struct io_wq_work *work)
        __must_hold(wqe->lock)
{
        bool worker_bound, work_bound;

        if (worker->flags & IO_WORKER_F_FREE) {
                worker->flags &= ~IO_WORKER_F_FREE;
                hlist_nulls_del_init_rcu(&worker->nulls_node);
        }

        /*
         * If worker is moving from bound to unbound (or vice versa), then
         * ensure we update the running accounting.
         */
        worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
        work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
        if (worker_bound != work_bound) {
                io_wqe_dec_running(wqe, worker);
                if (work_bound) {
                        worker->flags |= IO_WORKER_F_BOUND;
                        wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
                        wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
                        atomic_dec(&wqe->wq->user->processes);
                } else {
                        worker->flags &= ~IO_WORKER_F_BOUND;
                        wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
                        wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
                        atomic_inc(&wqe->wq->user->processes);
                }
                io_wqe_inc_running(wqe, worker);
        }
}
/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
        __must_hold(wqe->lock)
{
        if (!(worker->flags & IO_WORKER_F_FREE)) {
                worker->flags |= IO_WORKER_F_FREE;
                hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
        }

        return __io_worker_unuse(wqe, worker);
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
        return work->flags >> IO_WQ_HASH_SHIFT;
}
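/*
 * Illustrative note (not from the original source): the bucket index is
 * packed into the upper bits of work->flags by io_wq_hash_work() below,
 *
 *      work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 *
 * and io_get_work_hash() simply undoes that shift, so the value is only
 * meaningful once io_wq_is_hashed(work) is true.
 */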
static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
        __must_hold(wqe->lock)
{
        struct io_wq_work_node *node, *prev;
        struct io_wq_work *work, *tail;
        unsigned int hash;

        wq_list_for_each(node, prev, &wqe->work_list) {
                work = container_of(node, struct io_wq_work, list);

                /* not hashed, can run anytime */
                if (!io_wq_is_hashed(work)) {
                        wq_list_del(&wqe->work_list, node, prev);
                        return work;
                }

                /* hashed, can run if not already running */
                hash = io_get_work_hash(work);
                if (!(wqe->hash_map & BIT(hash))) {
                        wqe->hash_map |= BIT(hash);
                        /* all items with this hash lie in [work, tail] */
                        tail = wqe->hash_tail[hash];
                        wqe->hash_tail[hash] = NULL;
                        wq_list_cut(&wqe->work_list, &tail->list, prev);
                        return work;
                }
        }

        return NULL;
}
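/*
 * Illustrative example (assumption, not from the original source): if two
 * writes against the same inode were queued via io_wq_hash_work(), they end
 * up in the same bucket and are cut out of the list together above:
 *
 *      io_wq_hash_work(&req1->work, inode);    // hypothetical requests
 *      io_wq_hash_work(&req2->work, inode);
 *      io_wq_enqueue(wq, &req1->work);
 *      io_wq_enqueue(wq, &req2->work);
 *
 * One worker then runs req1 and req2 back to back while the bucket bit is
 * set in wqe->hash_map, so the two writes never execute in parallel.
 */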
static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
{
        if (worker->mm) {
                kthread_unuse_mm(worker->mm);
                mmput(worker->mm);
                worker->mm = NULL;
        }
        if (!work->mm)
                return;

        if (mmget_not_zero(work->mm)) {
                kthread_use_mm(work->mm);
                worker->mm = work->mm;
                /* hang on to this mm */
                work->mm = NULL;
                return;
        }

        /* failed grabbing mm, ensure work gets cancelled */
        work->flags |= IO_WQ_WORK_CANCEL;
}
static void io_wq_switch_creds(struct io_worker *worker,
                               struct io_wq_work *work)
{
        const struct cred *old_creds = override_creds(work->creds);

        worker->cur_creds = work->creds;
        if (worker->saved_creds)
                put_cred(old_creds); /* creds set by previous switch */
        else
                worker->saved_creds = old_creds;
}
static void io_impersonate_work(struct io_worker *worker,
                                struct io_wq_work *work)
{
        if (work->files && current->files != work->files) {
                task_lock(current);
                current->files = work->files;
                task_unlock(current);
        }
        if (work->fs && current->fs != work->fs)
                current->fs = work->fs;
        if (work->mm != worker->mm)
                io_wq_switch_mm(worker, work);
        if (worker->cur_creds != work->creds)
                io_wq_switch_creds(worker, work);
        current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->fsize;
}
static void io_assign_current_work(struct io_worker *worker,
                                   struct io_wq_work *work)
{
        if (work) {
                /* flush pending signals before assigning new work */
                if (signal_pending(current))
                        flush_signals(current);
                cond_resched();
        }

        spin_lock_irq(&worker->lock);
        worker->cur_work = work;
        spin_unlock_irq(&worker->lock);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);

static void io_worker_handle_work(struct io_worker *worker)
        __releases(wqe->lock)
{
        struct io_wqe *wqe = worker->wqe;
        struct io_wq *wq = wqe->wq;

        do {
                struct io_wq_work *work;
get_next:
                /*
                 * If we got some work, mark us as busy. If we didn't, but
                 * the list isn't empty, it means we stalled on hashed work.
                 * Mark us stalled so we don't keep looking for work when we
                 * can't make progress, any work completion or insertion will
                 * clear the stalled flag.
                 */
                work = io_get_next_work(wqe);
                if (work)
                        __io_worker_busy(wqe, worker, work);
                else if (!wq_list_empty(&wqe->work_list))
                        wqe->flags |= IO_WQE_FLAG_STALLED;

                spin_unlock_irq(&wqe->lock);
                if (!work)
                        break;
                io_assign_current_work(worker, work);

                /* handle a whole dependent link */
                do {
                        struct io_wq_work *old_work, *next_hashed, *linked;
                        unsigned int hash = io_get_work_hash(work);

                        next_hashed = wq_next_work(work);
                        io_impersonate_work(worker, work);
                        /*
                         * OK to set IO_WQ_WORK_CANCEL even for uncancellable
                         * work, the worker function will do the right thing.
                         */
                        if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
                                work->flags |= IO_WQ_WORK_CANCEL;

                        old_work = work;
                        linked = wq->do_work(work);

                        work = next_hashed;
                        if (!work && linked && !io_wq_is_hashed(linked)) {
                                work = linked;
                                linked = NULL;
                        }
                        io_assign_current_work(worker, work);
                        wq->free_work(old_work);

                        if (linked)
                                io_wqe_enqueue(wqe, linked);

                        if (hash != -1U && !next_hashed) {
                                spin_lock_irq(&wqe->lock);
                                wqe->hash_map &= ~BIT_ULL(hash);
                                wqe->flags &= ~IO_WQE_FLAG_STALLED;
                                /* skip unnecessary unlock-lock wqe->lock */
                                if (!work)
                                        goto get_next;
                                spin_unlock_irq(&wqe->lock);
                        }
                } while (work);

                spin_lock_irq(&wqe->lock);
        } while (1);
}
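/*
 * Illustrative note (assumption, not from the original source): do_work()
 * may hand back a dependent (linked) item instead of NULL. A sketch of a
 * conforming handler, with the io_uring-specific details omitted:
 *
 *      static struct io_wq_work *example_do_work(struct io_wq_work *work)
 *      {
 *              run(work);                      // hypothetical helpers
 *              return next_linked(work);       // NULL once the chain is done
 *      }
 *
 * The loop above runs such a returned item locally when it can, and hands
 * it back to io_wqe_enqueue() when it cannot.
 */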
static int io_wqe_worker(void *data)
{
        struct io_worker *worker = data;
        struct io_wqe *wqe = worker->wqe;
        struct io_wq *wq = wqe->wq;

        io_worker_start(wqe, worker);

        while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
                set_current_state(TASK_INTERRUPTIBLE);
loop:
                spin_lock_irq(&wqe->lock);
                if (io_wqe_run_queue(wqe)) {
                        __set_current_state(TASK_RUNNING);
                        io_worker_handle_work(worker);
                        goto loop;
                }
                /* drops the lock on success, retry */
                if (__io_worker_idle(wqe, worker)) {
                        __release(&wqe->lock);
                        goto loop;
                }
                spin_unlock_irq(&wqe->lock);
                if (signal_pending(current))
                        flush_signals(current);
                if (schedule_timeout(WORKER_IDLE_TIMEOUT))
                        continue;
                /* timed out, exit unless we're the fixed worker */
                if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
                    !(worker->flags & IO_WORKER_F_FIXED))
                        break;
        }

        if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
                spin_lock_irq(&wqe->lock);
                if (!wq_list_empty(&wqe->work_list))
                        io_worker_handle_work(worker);
                else
                        spin_unlock_irq(&wqe->lock);
        }

        io_worker_exit(worker);
        return 0;
}
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
        struct io_worker *worker = kthread_data(tsk);
        struct io_wqe *wqe = worker->wqe;

        if (!(worker->flags & IO_WORKER_F_UP))
                return;
        if (worker->flags & IO_WORKER_F_RUNNING)
                return;
        worker->flags |= IO_WORKER_F_RUNNING;
        io_wqe_inc_running(wqe, worker);
}
/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * create one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
        struct io_worker *worker = kthread_data(tsk);
        struct io_wqe *wqe = worker->wqe;

        if (!(worker->flags & IO_WORKER_F_UP))
                return;
        if (!(worker->flags & IO_WORKER_F_RUNNING))
                return;

        worker->flags &= ~IO_WORKER_F_RUNNING;

        spin_lock_irq(&wqe->lock);
        io_wqe_dec_running(wqe, worker);
        spin_unlock_irq(&wqe->lock);
}
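/*
 * Illustrative note (assumption, not verified against this tree): io-wq does
 * not call these two hooks itself; the scheduler invokes them for kthreads
 * flagged with PF_IO_WORKER, roughly along the lines of:
 *
 *      if (tsk->flags & PF_IO_WORKER)
 *              io_wq_worker_sleeping(tsk);     // just before the task blocks
 *
 * with the matching io_wq_worker_running() call when the task is scheduled
 * back in.
 */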
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
        struct io_wqe_acct *acct = &wqe->acct[index];
        struct io_worker *worker;

        worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
        if (!worker)
                return false;

        refcount_set(&worker->ref, 1);
        worker->nulls_node.pprev = NULL;
        worker->wqe = wqe;
        spin_lock_init(&worker->lock);

        worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
                                "io_wqe_worker-%d/%d", index, wqe->node);
        if (IS_ERR(worker->task)) {
                kfree(worker);
                return false;
        }

        spin_lock_irq(&wqe->lock);
        hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
        list_add_tail_rcu(&worker->all_list, &wqe->all_list);
        worker->flags |= IO_WORKER_F_FREE;
        if (index == IO_WQ_ACCT_BOUND)
                worker->flags |= IO_WORKER_F_BOUND;
        if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
                worker->flags |= IO_WORKER_F_FIXED;
        acct->nr_workers++;
        spin_unlock_irq(&wqe->lock);

        if (index == IO_WQ_ACCT_UNBOUND)
                atomic_inc(&wq->user->processes);

        wake_up_process(worker->task);
        return true;
}
static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
        __must_hold(wqe->lock)
{
        struct io_wqe_acct *acct = &wqe->acct[index];

        /* if we have available workers or no work, no need */
        if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
                return false;
        return acct->nr_workers < acct->max_workers;
}
/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
        struct io_wq *wq = data;
        int workers_to_create = num_possible_nodes();
        int node;

        /* create fixed workers */
        refcount_set(&wq->refs, workers_to_create);
        for_each_node(node) {
                if (!node_online(node))
                        continue;
                if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
                        goto err;
                workers_to_create--;
        }

        while (workers_to_create--)
                refcount_dec(&wq->refs);

        complete(&wq->done);

        while (!kthread_should_stop()) {
                if (current->task_works)
                        task_work_run();

                for_each_node(node) {
                        struct io_wqe *wqe = wq->wqes[node];
                        bool fork_worker[2] = { false, false };

                        if (!node_online(node))
                                continue;

                        spin_lock_irq(&wqe->lock);
                        if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
                                fork_worker[IO_WQ_ACCT_BOUND] = true;
                        if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
                                fork_worker[IO_WQ_ACCT_UNBOUND] = true;
                        spin_unlock_irq(&wqe->lock);
                        if (fork_worker[IO_WQ_ACCT_BOUND])
                                create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
                        if (fork_worker[IO_WQ_ACCT_UNBOUND])
                                create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
                }
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (current->task_works)
                task_work_run();

        return 0;
err:
        set_bit(IO_WQ_BIT_ERROR, &wq->state);
        set_bit(IO_WQ_BIT_EXIT, &wq->state);
        if (refcount_sub_and_test(workers_to_create, &wq->refs))
                complete(&wq->done);
        return 0;
}
static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
                            struct io_wq_work *work)
{
        bool free_worker;

        if (!(work->flags & IO_WQ_WORK_UNBOUND))
                return true;
        if (atomic_read(&acct->nr_running))
                return true;

        rcu_read_lock();
        free_worker = !hlist_nulls_empty(&wqe->free_list);
        rcu_read_unlock();
        if (free_worker)
                return true;

        if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
            !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
                return false;

        return true;
}
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
        struct io_wq *wq = wqe->wq;

        do {
                struct io_wq_work *old_work = work;

                work->flags |= IO_WQ_WORK_CANCEL;
                work = wq->do_work(work);
                wq->free_work(old_work);
        } while (work);
}
static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
        struct io_wq_work *tail;
        unsigned int hash;

        if (!io_wq_is_hashed(work)) {
append:
                wq_list_add_tail(&work->list, &wqe->work_list);
                return;
        }

        hash = io_get_work_hash(work);
        tail = wqe->hash_tail[hash];
        wqe->hash_tail[hash] = work;
        if (!tail)
                goto append;

        wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
        struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
        int work_flags;
        unsigned long flags;

        /*
         * Do early check to see if we need a new unbound worker, and if we do,
         * if we're allowed to do so. This isn't 100% accurate as there's a
         * gap between this check and incrementing the value, but that's OK.
         * It's close enough to not be an issue, fork() has the same delay.
         */
        if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
                io_run_cancel(work, wqe);
                return;
        }

        work_flags = work->flags;
        spin_lock_irqsave(&wqe->lock, flags);
        io_wqe_insert_work(wqe, work);
        wqe->flags &= ~IO_WQE_FLAG_STALLED;
        spin_unlock_irqrestore(&wqe->lock, flags);

        if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
            !atomic_read(&acct->nr_running))
                io_wqe_wake_worker(wqe, acct);
}
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
        struct io_wqe *wqe = wq->wqes[numa_node_id()];

        io_wqe_enqueue(wqe, work);
}
/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
        unsigned int bit;

        bit = hash_ptr(val, IO_WQ_HASH_ORDER);
        work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
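/*
 * Usage sketch (illustrative, not part of the original file): a caller that
 * wants writes to the same file serialized hashes by the inode before
 * queueing, e.g. with a hypothetical request structure:
 *
 *      io_wq_hash_work(&req->work, file_inode(req->file));
 *      io_wq_enqueue(wq, &req->work);
 *
 * Work that may run concurrently is queued with io_wq_enqueue() alone.
 */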
static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
{
        send_sig(SIGINT, worker->task, 1);
        return false;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
                                  bool (*func)(struct io_worker *, void *),
                                  void *data)
{
        struct io_worker *worker;
        bool ret = false;

        list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
                if (io_worker_get(worker)) {
                        /* no task if node is/was offline */
                        if (worker->task)
                                ret = func(worker, data);
                        io_worker_release(worker);
                        if (ret)
                                break;
                }
        }

        return ret;
}
void io_wq_cancel_all(struct io_wq *wq)
{
        int node;

        set_bit(IO_WQ_BIT_CANCEL, &wq->state);

        rcu_read_lock();
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];

                io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
        }
        rcu_read_unlock();
}
struct io_cb_cancel_data {
        work_cancel_fn *fn;
        void *data;
        int nr_running;
        int nr_pending;
        bool cancel_all;
};
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
        struct io_cb_cancel_data *match = data;
        unsigned long flags;

        /*
         * Hold the lock to avoid ->cur_work going out of scope, caller
         * may dereference the passed in work.
         */
        spin_lock_irqsave(&worker->lock, flags);
        if (worker->cur_work &&
            !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
            match->fn(worker->cur_work, match->data)) {
                send_sig(SIGINT, worker->task, 1);
                match->nr_running++;
        }
        spin_unlock_irqrestore(&worker->lock, flags);

        return match->nr_running && !match->cancel_all;
}
static inline void io_wqe_remove_pending(struct io_wqe *wqe,
                                         struct io_wq_work *work,
                                         struct io_wq_work_node *prev)
{
        unsigned int hash = io_get_work_hash(work);
        struct io_wq_work *prev_work = NULL;

        if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
                if (prev)
                        prev_work = container_of(prev, struct io_wq_work, list);
                if (prev_work && io_get_work_hash(prev_work) == hash)
                        wqe->hash_tail[hash] = prev_work;
                else
                        wqe->hash_tail[hash] = NULL;
        }

        wq_list_del(&wqe->work_list, &work->list, prev);
}
static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
                                       struct io_cb_cancel_data *match)
{
        struct io_wq_work_node *node, *prev;
        struct io_wq_work *work;
        unsigned long flags;

retry:
        spin_lock_irqsave(&wqe->lock, flags);
        wq_list_for_each(node, prev, &wqe->work_list) {
                work = container_of(node, struct io_wq_work, list);
                if (!match->fn(work, match->data))
                        continue;
                io_wqe_remove_pending(wqe, work, prev);
                spin_unlock_irqrestore(&wqe->lock, flags);
                io_run_cancel(work, wqe);
                match->nr_pending++;
                if (!match->cancel_all)
                        return;

                /* not safe to continue after unlock */
                goto retry;
        }
        spin_unlock_irqrestore(&wqe->lock, flags);
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
                                       struct io_cb_cancel_data *match)
{
        rcu_read_lock();
        io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
        rcu_read_unlock();
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
                                  void *data, bool cancel_all)
{
        struct io_cb_cancel_data match = {
                .fn             = cancel,
                .data           = data,
                .cancel_all     = cancel_all,
        };
        int node;

        /*
         * First check pending list, if we're lucky we can just remove it
         * from there. CANCEL_OK means that the work is returned as-new,
         * no completion will be posted for it.
         */
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];

                io_wqe_cancel_pending_work(wqe, &match);
                if (match.nr_pending && !match.cancel_all)
                        return IO_WQ_CANCEL_OK;
        }

        /*
         * Now check if a free (going busy) or busy worker has the work
         * currently running. If we find it there, we'll return CANCEL_RUNNING
         * as an indication that we attempt to signal cancellation. The
         * completion will run normally in this case.
         */
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];

                io_wqe_cancel_running_work(wqe, &match);
                if (match.nr_running && !match.cancel_all)
                        return IO_WQ_CANCEL_RUNNING;
        }

        if (match.nr_running)
                return IO_WQ_CANCEL_RUNNING;
        if (match.nr_pending)
                return IO_WQ_CANCEL_OK;
        return IO_WQ_CANCEL_NOTFOUND;
}
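/*
 * Usage sketch (illustrative, not part of the original file): the caller
 * supplies a work_cancel_fn that recognises its own requests; the pointer
 * identity matcher used by io_wq_cancel_work() below is the simplest case.
 * A hypothetical matcher keyed on caller-private data might look like:
 *
 *      static bool match_seq(struct io_wq_work *work, void *data)
 *      {
 *              return to_my_req(work)->seq == *(u64 *)data;    // hypothetical
 *      }
 *
 *      enum io_wq_cancel ret = io_wq_cancel_cb(wq, match_seq, &seq, false);
 */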
static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
{
        return work == data;
}

enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
        return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
}
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
        int ret = -ENOMEM, node;
        struct io_wq *wq;

        if (WARN_ON_ONCE(!data->free_work || !data->do_work))
                return ERR_PTR(-EINVAL);

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return ERR_PTR(-ENOMEM);

        wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
        if (!wq->wqes) {
                kfree(wq);
                return ERR_PTR(-ENOMEM);
        }

        wq->free_work = data->free_work;
        wq->do_work = data->do_work;

        /* caller must already hold a reference to this */
        wq->user = data->user;

        for_each_node(node) {
                struct io_wqe *wqe;
                int alloc_node = node;

                if (!node_online(alloc_node))
                        alloc_node = NUMA_NO_NODE;
                wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
                if (!wqe)
                        goto err;
                wq->wqes[node] = wqe;
                wqe->node = alloc_node;
                wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
                atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
                wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
                                task_rlimit(current, RLIMIT_NPROC);
                atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
                wqe->wq = wq;
                spin_lock_init(&wqe->lock);
                INIT_WQ_LIST(&wqe->work_list);
                INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
                INIT_LIST_HEAD(&wqe->all_list);
        }

        init_completion(&wq->done);

        wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
        if (!IS_ERR(wq->manager)) {
                wake_up_process(wq->manager);
                wait_for_completion(&wq->done);
                if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
                        ret = -ENOMEM;
                        goto err;
                }
                refcount_set(&wq->use_refs, 1);
                reinit_completion(&wq->done);
                return wq;
        }

        ret = PTR_ERR(wq->manager);
        complete(&wq->done);
err:
        for_each_node(node)
                kfree(wq->wqes[node]);
        kfree(wq->wqes);
        kfree(wq);
        return ERR_PTR(ret);
}
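/*
 * Usage sketch (illustrative, not part of the original file): the caller
 * fills in struct io_wq_data with its work callbacks and accounting user,
 * then creates the pool with a bound-worker limit:
 *
 *      struct io_wq_data data = {
 *              .user           = ctx_user,     // hypothetical
 *              .free_work      = my_free_work, // hypothetical
 *              .do_work        = my_do_work,   // hypothetical
 *      };
 *      struct io_wq *wq = io_wq_create(concurrency, &data);
 *
 *      if (IS_ERR(wq))
 *              return PTR_ERR(wq);
 */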
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{
        if (data->free_work != wq->free_work || data->do_work != wq->do_work)
                return false;

        return refcount_inc_not_zero(&wq->use_refs);
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
        wake_up_process(worker->task);
        return false;
}
static void __io_wq_destroy(struct io_wq *wq)
{
        int node;

        set_bit(IO_WQ_BIT_EXIT, &wq->state);
        if (wq->manager)
                kthread_stop(wq->manager);

        rcu_read_lock();
        for_each_node(node)
                io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
        rcu_read_unlock();

        wait_for_completion(&wq->done);

        for_each_node(node)
                kfree(wq->wqes[node]);
        kfree(wq->wqes);
        kfree(wq);
}

void io_wq_destroy(struct io_wq *wq)
{
        if (refcount_dec_and_test(&wq->use_refs))
                __io_wq_destroy(wq);
}
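/*
 * Illustrative note (not from the original source): io_wq_get() above and
 * io_wq_destroy() pair up on wq->use_refs, so a second user sharing an
 * already created queue does roughly:
 *
 *      if (io_wq_get(existing_wq, &data))      // callbacks must match
 *              wq = existing_wq;
 *      ...
 *      io_wq_destroy(wq);                      // drops the shared reference
 */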
struct task_struct *io_wq_get_task(struct io_wq *wq)
{
        return wq->manager;
}