// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
#include <linux/fs_struct.h>

#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)
enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_EXITING	= 8,	/* worker exiting */
	IO_WORKER_F_FIXED	= 16,	/* static idle worker */
	IO_WORKER_F_BOUND	= 32,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
	IO_WQ_BIT_CANCEL	= 1,	/* cancel work on list */
	IO_WQ_BIT_ERROR		= 2,	/* error on setup */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};
/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct rcu_head rcu;
	struct mm_struct *mm;
	const struct cred *cur_creds;
	const struct cred *saved_creds;
	struct files_struct *restore_files;
	struct fs_struct *restore_fs;
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif
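
/*
 * IO_WQ_HASH_ORDER bounds the per-node hash space used to serialize hashed
 * work: io_wq_enqueue_hashed() packs hash_ptr(val, IO_WQ_HASH_ORDER) into
 * the upper bits of work->flags (via IO_WQ_HASH_SHIFT from io-wq.h), and
 * io_get_next_work() tracks in-flight hash values as single bits in
 * wqe->hash_map. Order 6 (64 buckets) fits an unsigned long map on 64-bit;
 * 32-bit kernels use order 5 (32 buckets).
 */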
struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned long hash_map;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct io_wq *wq;
};

/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	get_work_fn *get_work;
	put_work_fn *put_work;

	struct task_struct *manager;
	struct user_struct *user;
	refcount_t refs;
	struct completion done;

	refcount_t use_refs;
};
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		wake_up_process(worker->task);
}
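
/*
 * Workers are found via RCU-protected lists (free_list/all_list), so a
 * lookup must pin the worker with io_worker_get() before touching its
 * task. io_worker_release() wakes the task because io_worker_exit() sleeps
 * until the reference count drops to zero before tearing the worker down.
 */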
/*
 * Note: drops the wqe->lock if returning true! The caller must re-acquire
 * the lock in that case. Some callers need to restart handling if this
 * happens, so we can't just re-acquire the lock on behalf of the caller.
 */
static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	bool dropped_lock = false;

	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}

	if (current->files != worker->restore_files) {
		__acquire(&wqe->lock);
		spin_unlock_irq(&wqe->lock);
		dropped_lock = true;

		task_lock(current);
		current->files = worker->restore_files;
		task_unlock(current);
	}

	if (current->fs != worker->restore_fs)
		current->fs = worker->restore_fs;

	/*
	 * If we have an active mm, we need to drop the wq lock before unusing
	 * it. If we do, return true and let the caller retry the idle loop.
	 */
	if (worker->mm) {
		if (!dropped_lock) {
			__acquire(&wqe->lock);
			spin_unlock_irq(&wqe->lock);
			dropped_lock = true;
		}
		__set_current_state(TASK_RUNNING);
		unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

	return dropped_lock;
}
static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
						  struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}
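
/*
 * Two accounting buckets exist per node: bounded work is capped by the
 * 'bounded' argument given to io_wq_create(), while unbounded work is
 * capped by RLIMIT_NPROC and additionally charged to wq->user->processes
 * (see create_io_worker() and io_wq_can_queue() below).
 */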
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
	unsigned nr_workers;

	/*
	 * If we're not at zero, someone else is holding a brief reference
	 * to the worker. Wait for that to go away.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!refcount_dec_and_test(&worker->ref))
		schedule();
	__set_current_state(TASK_RUNNING);

	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	if (worker->flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);
	if (!(worker->flags & IO_WORKER_F_BOUND))
		atomic_dec(&wqe->wq->user->processes);
	worker->flags = 0;
	preempt_enable();

	spin_lock_irq(&wqe->lock);
	hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	if (__io_worker_unuse(wqe, worker)) {
		__release(&wqe->lock);
		spin_lock_irq(&wqe->lock);
	}
	acct->nr_workers--;
	nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
	spin_unlock_irq(&wqe->lock);

	/* all workers gone, wq exit can proceed */
	if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
		complete(&wqe->wq->done);

	kfree_rcu(worker, rcu);
}
static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;

	return false;
}
/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}
/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	WARN_ON_ONCE(!acct->max_workers);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}
static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}
static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
{
	allow_kernel_signal(SIGINT);

	current->flags |= PF_IO_WORKER;

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	worker->restore_files = current->files;
	worker->restore_fs = current->fs;
	io_wqe_inc_running(wqe, worker);
}
/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(wqe, worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
			atomic_dec(&wqe->wq->user->processes);
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
			atomic_inc(&wqe->wq->user->processes);
		}
		io_wqe_inc_running(wqe, worker);
	}
}
/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}

	return __io_worker_unuse(wqe, worker);
}
static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!(work->flags & IO_WQ_WORK_HASHED)) {
			wq_node_del(&wqe->work_list, node, prev);
			return work;
		}

		/* hashed, can run if not already running */
		*hash = work->flags >> IO_WQ_HASH_SHIFT;
		if (!(wqe->hash_map & BIT_ULL(*hash))) {
			wqe->hash_map |= BIT_ULL(*hash);
			wq_node_del(&wqe->work_list, node, prev);
			return work;
		}
	}

	return NULL;
}
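
/*
 * Hashed-work serialization: at most one work item per hash value runs at
 * a time on a given wqe. io_get_next_work() claims the hash bit in
 * wqe->hash_map before handing an item out and skips items whose bit is
 * already set; io_worker_handle_work() clears the bit (and the STALLED
 * flag) once the item has run, letting the next same-hash item proceed.
 */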
static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
{
	if (worker->mm) {
		unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}
	if (!work->mm) {
		set_fs(KERNEL_DS);
		return;
	}
	if (mmget_not_zero(work->mm)) {
		use_mm(work->mm);
		set_fs(USER_DS);
		worker->mm = work->mm;
		/* hang on to this mm */
		return;
	}

	/* failed grabbing mm, ensure work gets cancelled */
	work->flags |= IO_WQ_WORK_CANCEL;
}
static void io_wq_switch_creds(struct io_worker *worker,
			       struct io_wq_work *work)
{
	const struct cred *old_creds = override_creds(work->creds);

	worker->cur_creds = work->creds;
	if (worker->saved_creds)
		put_cred(old_creds); /* creds set by previous switch */
	else
		worker->saved_creds = old_creds;
}
static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wq_work *work, *old_work = NULL, *put_work = NULL;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	do {
		unsigned hash = -1U;

		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe, &hash);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		spin_unlock_irq(&wqe->lock);
		if (put_work && wq->put_work)
			wq->put_work(old_work);
		if (!work)
			break;
next:
		/* flush any pending signals before assigning new work */
		if (signal_pending(current))
			flush_signals(current);

		spin_lock_irq(&worker->lock);
		worker->cur_work = work;
		spin_unlock_irq(&worker->lock);

		if (work->flags & IO_WQ_WORK_CB)
			work->func(&work);

		if (work->files && current->files != work->files) {
			task_lock(current);
			current->files = work->files;
			task_unlock(current);
		}
		if (work->fs && current->fs != work->fs)
			current->fs = work->fs;
		if (work->mm != worker->mm)
			io_wq_switch_mm(worker, work);
		if (worker->cur_creds != work->creds)
			io_wq_switch_creds(worker, work);
		/*
		 * OK to set IO_WQ_WORK_CANCEL even for uncancellable work,
		 * the worker function will do the right thing.
		 */
		if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
			work->flags |= IO_WQ_WORK_CANCEL;
		if (worker->mm)
			work->flags |= IO_WQ_WORK_HAS_MM;

		if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL)) {
			put_work = work;
			wq->get_work(work);
		}

		old_work = work;
		work->func(&work);

		spin_lock_irq(&worker->lock);
		worker->cur_work = NULL;
		spin_unlock_irq(&worker->lock);

		spin_lock_irq(&wqe->lock);

		if (hash != -1U) {
			wqe->hash_map &= ~BIT_ULL(hash);
			wqe->flags &= ~IO_WQE_FLAG_STALLED;
		}
		if (work && work != old_work) {
			spin_unlock_irq(&wqe->lock);

			if (put_work && wq->put_work) {
				wq->put_work(put_work);
				put_work = NULL;
			}

			/* dependent work not hashed */
			hash = -1U;
			goto next;
		}
	} while (1);
}
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	io_worker_start(wqe, worker);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		set_current_state(TASK_INTERRUPTIBLE);
loop:
		spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			__set_current_state(TASK_RUNNING);
			io_worker_handle_work(worker);
			goto loop;
		}
		/* drops the lock on success, retry */
		if (__io_worker_idle(wqe, worker)) {
			__release(&wqe->lock);
			goto loop;
		}
		spin_unlock_irq(&wqe->lock);
		if (signal_pending(current))
			flush_signals(current);
		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
			continue;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(wqe, worker);
}
/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * set one up.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	spin_lock_irq(&wqe->lock);
	io_wqe_dec_running(wqe, worker);
	spin_unlock_irq(&wqe->lock);
}
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);

	worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
				"io_wqe_worker-%d/%d", index, wqe->node);
	if (IS_ERR(worker->task)) {
		kfree(worker);
		return false;
	}

	spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	spin_unlock_irq(&wqe->lock);

	if (index == IO_WQ_ACCT_UNBOUND)
		atomic_inc(&wq->user->processes);

	wake_up_process(worker->task);
	return true;
}
static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}
/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	int workers_to_create = num_possible_nodes();
	int node;

	/* create fixed workers */
	refcount_set(&wq->refs, workers_to_create);
	for_each_node(node) {
		if (!node_online(node))
			continue;
		if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
			goto err;
		workers_to_create--;
	}

	while (workers_to_create--)
		refcount_dec(&wq->refs);

	complete(&wq->done);

	while (!kthread_should_stop()) {
		for_each_node(node) {
			struct io_wqe *wqe = wq->wqes[node];
			bool fork_worker[2] = { false, false };

			if (!node_online(node))
				continue;

			spin_lock_irq(&wqe->lock);
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
				fork_worker[IO_WQ_ACCT_BOUND] = true;
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
			spin_unlock_irq(&wqe->lock);
			if (fork_worker[IO_WQ_ACCT_BOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
			if (fork_worker[IO_WQ_ACCT_UNBOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	return 0;
err:
	set_bit(IO_WQ_BIT_ERROR, &wq->state);
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (refcount_sub_and_test(workers_to_create, &wq->refs))
		complete(&wq->done);
	return 0;
}
static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
			    struct io_wq_work *work)
{
	bool free_worker;

	if (!(work->flags & IO_WQ_WORK_UNBOUND))
		return true;
	if (atomic_read(&acct->nr_running))
		return true;

	rcu_read_lock();
	free_worker = !hlist_nulls_empty(&wqe->free_list);
	rcu_read_unlock();
	if (free_worker)
		return true;

	if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
	    !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
		return false;

	return true;
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	int work_flags;
	unsigned long flags;

	/*
	 * Do early check to see if we need a new unbound worker, and if we do,
	 * if we're allowed to do so. This isn't 100% accurate as there's a
	 * gap between this check and incrementing the value, but that's OK.
	 * It's close enough to not be an issue, fork() has the same delay.
	 */
	if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
		work->flags |= IO_WQ_WORK_CANCEL;
		work->func(&work);
		return;
	}

	work_flags = work->flags;
	spin_lock_irqsave(&wqe->lock, flags);
	wq_list_add_tail(&work->list, &wqe->work_list);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	spin_unlock_irqrestore(&wqe->lock, flags);

	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))
		io_wqe_wake_worker(wqe, acct);
}
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}
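
/*
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * the submitter initializes an io_wq_work with its callback, then hands it
 * to the queue for the local NUMA node. 'req' and 'ctx_io_wq' below are
 * placeholder names for caller state.
 *
 *	static void my_work_fn(struct io_wq_work **workptr)
 *	{
 *		struct io_wq_work *work = *workptr;
 *
 *		// do the blocking part here, honouring IO_WQ_WORK_CANCEL
 *	}
 *
 *	INIT_IO_WORK(&req->work, my_work_fn);
 *	io_wq_enqueue(ctx_io_wq, &req->work);
 */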
/*
 * Enqueue work, hashed by some key. Work items that hash to the same value
 * will not be done in parallel. Used to limit concurrent writes, generally
 * hashed by inode.
 */
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];
	unsigned bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
	io_wqe_enqueue(wqe, work);
}
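
/*
 * Typical use (illustrative): serialize buffered writes to the same file
 * by hashing on the inode, so two writes to one file never run in
 * parallel while writes to different files still can:
 *
 *	io_wq_enqueue_hashed(ctx_io_wq, &req->work, file_inode(req->file));
 *
 * Any stable pointer works as 'val'; only pointer identity matters.
 */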
static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
{
	send_sig(SIGINT, worker->task, 1);
	return false;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}
void io_wq_cancel_all(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_CANCEL, &wq->state);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
	}
	rcu_read_unlock();
}
struct io_cb_cancel_data {
	struct io_wqe *wqe;
	work_cancel_fn *cancel;
	void *caller_data;
};

static bool io_work_cancel(struct io_worker *worker, void *cancel_data)
{
	struct io_cb_cancel_data *data = cancel_data;
	unsigned long flags;
	bool ret = false;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
	    data->cancel(worker->cur_work, data->caller_data)) {
		send_sig(SIGINT, worker->task, 1);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return ret;
}
static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
					       work_cancel_fn *cancel,
					       void *cancel_data)
{
	struct io_cb_cancel_data data = {
		.wqe = wqe,
		.cancel = cancel,
		.caller_data = cancel_data,
	};
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		if (cancel(work, cancel_data)) {
			wq_node_del(&wqe->work_list, node, prev);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&wqe->lock, flags);

	if (found) {
		work->flags |= IO_WQ_WORK_CANCEL;
		work->func(&work);
		return IO_WQ_CANCEL_OK;
	}

	rcu_read_lock();
	found = io_wq_for_each_worker(wqe, io_work_cancel, &data);
	rcu_read_unlock();
	return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data)
{
	enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
	int node;

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		ret = io_wqe_cancel_cb_work(wqe, cancel, data);
		if (ret != IO_WQ_CANCEL_NOTFOUND)
			break;
	}

	return ret;
}
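
/*
 * Illustrative match callback (not from this file): matching a single work
 * item by pointer, which is exactly what io_wq_work_match() below does for
 * io_wq_cancel_work(). The callback runs under the wqe or worker lock and
 * must not sleep.
 *
 *	static bool match_one(struct io_wq_work *work, void *data)
 *	{
 *		return work == data;
 *	}
 *
 *	ret = io_wq_cancel_cb(wq, match_one, some_work);
 */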
struct work_match {
	bool (*fn)(struct io_wq_work *, void *data);
	void *data;
};

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct work_match *match = data;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);
	if (match->fn(worker->cur_work, match->data) &&
	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL)) {
		send_sig(SIGINT, worker->task, 1);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return ret;
}
static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
					    struct work_match *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;
	bool found = false;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		if (match->fn(work, match->data)) {
			wq_node_del(&wqe->work_list, node, prev);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&wqe->lock, flags);

	if (found) {
		work->flags |= IO_WQ_WORK_CANCEL;
		work->func(&work);
		return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	rcu_read_lock();
	found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
	return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
}
static bool io_wq_work_match(struct io_wq_work *work, void *data)
{
	return work == data;
}

enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
	struct work_match match = {
		.fn	= io_wq_work_match,
		.data	= cwork
	};
	enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
	int node;

	cwork->flags |= IO_WQ_WORK_CANCEL;

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		ret = io_wqe_cancel_work(wqe, &match);
		if (ret != IO_WQ_CANCEL_NOTFOUND)
			break;
	}

	return ret;
}
static bool io_wq_pid_match(struct io_wq_work *work, void *data)
{
	pid_t pid = (pid_t) (unsigned long) data;

	if (work)
		return work->task_pid == pid;
	return false;
}

enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
{
	struct work_match match = {
		.fn	= io_wq_pid_match,
		.data	= (void *) (unsigned long) pid
	};
	enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
	int node;

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		ret = io_wqe_cancel_work(wqe, &match);
		if (ret != IO_WQ_CANCEL_NOTFOUND)
			break;
	}

	return ret;
}
struct io_wq_flush_data {
	struct io_wq_work work;
	struct completion done;
};

static void io_wq_flush_func(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;
	struct io_wq_flush_data *data;

	data = container_of(work, struct io_wq_flush_data, work);
	complete(&data->done);
}
/*
 * Doesn't wait for previously queued work to finish. When this completes,
 * it just means that previously queued work was started.
 */
void io_wq_flush(struct io_wq *wq)
{
	struct io_wq_flush_data data;
	int node;

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		if (!node_online(node))
			continue;
		init_completion(&data.done);
		INIT_IO_WORK(&data.work, io_wq_flush_func);
		data.work.flags |= IO_WQ_WORK_INTERNAL;
		io_wqe_enqueue(wqe, &data.work);
		wait_for_completion(&data.done);
	}
}
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes) {
		kfree(wq);
		return ERR_PTR(-ENOMEM);
	}

	wq->get_work = data->get_work;
	wq->put_work = data->put_work;

	/* caller must already hold a reference to this */
	wq->user = data->user;

	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		if (wq->user) {
			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		}
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wq = wq;
		spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	init_completion(&wq->done);

	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
	if (!IS_ERR(wq->manager)) {
		wake_up_process(wq->manager);
		wait_for_completion(&wq->done);
		if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
			ret = -ENOMEM;
			goto err;
		}
		refcount_set(&wq->use_refs, 1);
		reinit_completion(&wq->done);
		return wq;
	}

	ret = PTR_ERR(wq->manager);
	complete(&wq->done);
err:
	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
	return ERR_PTR(ret);
}
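
/*
 * Creation sketch (illustrative, mirroring an io_uring-style caller): fill
 * in struct io_wq_data with the owning user and optional get/put hooks,
 * then bound the number of bounded workers. 'ctx_user', 'my_get_work',
 * 'my_put_work' and 'concurrency' are placeholder names.
 *
 *	struct io_wq_data data = {
 *		.user		= ctx_user,
 *		.get_work	= my_get_work,
 *		.put_work	= my_put_work,
 *	};
 *	struct io_wq *wq = io_wq_create(concurrency, &data);
 *
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 */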
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{
	if (data->get_work != wq->get_work || data->put_work != wq->put_work)
		return false;

	return refcount_inc_not_zero(&wq->use_refs);
}
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	wake_up_process(worker->task);
	return false;
}
static void __io_wq_destroy(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	kthread_stop(wq->manager);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	wait_for_completion(&wq->done);

	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
}
void io_wq_destroy(struct io_wq *wq)
{
	if (refcount_dec_and_test(&wq->use_refs))
		__io_wq_destroy(wq);
}