/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
 */

#include <sys/taskq.h>
#include <sys/kmem.h>
#include <sys/tsd.h>

int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
    "Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
    "Create new taskq threads after N sequential tasks");

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
/* Global dynamic task queue for long delay */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

/* List of all taskqs */
LIST_HEAD(tq_list);
DECLARE_RWSEM(tq_list_sem);
static uint_t taskq_tsd;

static int
task_km_flags(uint_t flags)
{
        if (flags & TQ_NOSLEEP)
                return (KM_NOSLEEP);

        if (flags & TQ_PUSHPAGE)
                return (KM_PUSHPAGE);

        return (KM_SLEEP);
}

/*
 * taskq_find_by_name - Find the largest instance number of a named taskq.
 */
static int
taskq_find_by_name(const char *name)
{
        struct list_head *tql;
        taskq_t *tq;

        list_for_each_prev(tql, &tq_list) {
                tq = list_entry(tql, taskq_t, tq_taskqs);
                if (strcmp(name, tq->tq_name) == 0)
                        return (tq->tq_instance);
        }
        return (-1);
}

/*
 * NOTE: Must be called with tq->tq_lock held, returns a list_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
        taskq_ent_t *t;

        ASSERT(tq);

        /* Acquire taskq_ent_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
                ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
                ASSERT(!timer_pending(&t->tqent_timer));

                list_del_init(&t->tqent_list);
                return (t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                return (NULL);

        /* Hit maximum taskq_ent_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        return (NULL);

                /*
                 * Sleep periodically polling the free list for an available
                 * taskq_ent_t.  Dispatching with TQ_SLEEP should always
                 * succeed but we cannot block forever waiting for a
                 * taskq_ent_t to show up in the free list, otherwise a
                 * deadlock can happen.
                 *
                 * Therefore, we need to allocate a new task even if the
                 * number of allocated tasks is above tq->tq_maxalloc, but we
                 * still end up delaying the task allocation, thereby
                 * throttling the task dispatch rate.
                 */
                spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
                schedule_timeout(HZ / 100);
                spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
                    tq->tq_lock_class);
        }

        spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
        t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
        spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

        if (t) {
                taskq_init_ent(t);
                tq->tq_nalloc++;
        }

        return (t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
        ASSERT(tq);
        ASSERT(t);
        ASSERT(list_empty(&t->tqent_list));
        ASSERT(!timer_pending(&t->tqent_timer));

        kmem_free(t, sizeof (taskq_ent_t));
        tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
        ASSERT(tq);
        ASSERT(t);

        /* Wake tasks blocked in taskq_wait_id() */
        wake_up_all(&t->tqent_waitq);

        list_del_init(&t->tqent_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->tqent_id = TASKQID_INVALID;
                t->tqent_func = NULL;
                t->tqent_arg = NULL;
                t->tqent_flags = 0;

                list_add_tail(&t->tqent_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }
}

/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list in order for immediate processing.
 */
static void
task_expire_impl(taskq_ent_t *t)
{
        taskq_ent_t *w;
        taskq_t *tq = t->tqent_taskq;
        struct list_head *l;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

        if (t->tqent_flags & TQENT_FLAG_CANCEL) {
                ASSERT(list_empty(&t->tqent_list));
                spin_unlock_irqrestore(&tq->tq_lock, flags);
                return;
        }

        t->tqent_birth = jiffies;
        /*
         * The priority list must be maintained in strict task id order
         * from lowest to highest for lowest_id to be easily calculable.
         */
        list_del(&t->tqent_list);
        list_for_each_prev(l, &tq->tq_prio_list) {
                w = list_entry(l, taskq_ent_t, tqent_list);
                if (w->tqent_id < t->tqent_id) {
                        list_add(&t->tqent_list, l);
                        break;
                }
        }
        if (l == &tq->tq_prio_list)
                list_add(&t->tqent_list, &tq->tq_prio_list);

        spin_unlock_irqrestore(&tq->tq_lock, flags);

        wake_up(&tq->tq_work_waitq);
}

#ifdef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST
static void
task_expire(struct timer_list *tl)
{
        taskq_ent_t *t = from_timer(t, tl, tqent_timer);
        task_expire_impl(t);
}
#else
static void
task_expire(unsigned long data)
{
        task_expire_impl((taskq_ent_t *)data);
}
#endif

/*
 * Returns the lowest incomplete taskqid_t.  The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        taskq_ent_t *t;
        taskq_thread_t *tqt;

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_delay_list)) {
                t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_active_list)) {
                tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
                    tqt_active_list);
                ASSERT(tqt->tqt_id != TASKQID_INVALID);
                lowest_id = MIN(lowest_id, tqt->tqt_id);
        }

        return (lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
        taskq_thread_t *w;
        struct list_head *l;

        list_for_each_prev(l, &tq->tq_active_list) {
                w = list_entry(l, taskq_thread_t, tqt_active_list);
                if (w->tqt_id < tqt->tqt_id) {
                        list_add(&tqt->tqt_active_list, l);
                        break;
                }
        }
        if (l == &tq->tq_active_list)
                list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists.  The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
        struct list_head *l;
        taskq_ent_t *t;

        list_for_each(l, lh) {
                t = list_entry(l, taskq_ent_t, tqent_list);

                if (t->tqent_id == id)
                        return (t);

                if (t->tqent_id > id)
                        break;
        }

        return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in.  If a task is still pending it will be returned.
 * If a task is executing, then -EBUSY will be returned instead.
 * If the task has already been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id)
{
        taskq_thread_t *tqt;
        struct list_head *l;
        taskq_ent_t *t;

        t = taskq_find_list(tq, &tq->tq_delay_list, id);
        if (t)
                return (t);

        t = taskq_find_list(tq, &tq->tq_prio_list, id);
        if (t)
                return (t);

        t = taskq_find_list(tq, &tq->tq_pend_list, id);
        if (t)
                return (t);

        list_for_each(l, &tq->tq_active_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_active_list);
                if (tqt->tqt_id == id) {
                        /*
                         * Instead of returning tqt_task, we just return a non
                         * NULL value to prevent misuse, since tqt_task only
                         * has two valid fields.
                         */
                        return (ERR_PTR(-EBUSY));
                }
        }

        return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id.  As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists.  As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads.  This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists.  This value is stored
 * with the taskq as the lowest id.  It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented.  Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones.  Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */

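/*
 * Illustrative sketch (not part of the original implementation): a typical
 * consumer pattern built on the id tracking described above might look
 * like the following, where my_tq, my_func, and my_arg are hypothetical
 * names used only for this example.
 *
 *      taskqid_t id;
 *
 *      id = taskq_dispatch(my_tq, my_func, my_arg, TQ_SLEEP);
 *      if (id != TASKQID_INVALID)
 *              taskq_wait_outstanding(my_tq, id);
 *
 * taskq_wait_outstanding() returns once the lowest outstanding id exceeds
 * the passed id; taskq_wait(my_tq) would instead block until the queue is
 * completely drained.
 */
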
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (taskq_find(tq, id) == NULL);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed.  Note that all
 * task id's are assigned monotonically at dispatch time.  Zero may be
 * passed for the id to indicate that all tasks dispatched up to this point,
 * but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
        id = id ? id : tq->tq_next_id - 1;
        wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (tq->tq_lowest_id == tq->tq_next_id);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);

int
taskq_member(taskq_t *tq, kthread_t *t)
{
        return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
}
EXPORT_SYMBOL(taskq_member);

/*
 * Cancel an already dispatched task given the task id.  Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes.  Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
        taskq_ent_t *t;
        int rc = ENOENT;
        unsigned long flags;

        ASSERT(tq);

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        t = taskq_find(tq, id);
        if (t && t != ERR_PTR(-EBUSY)) {
                list_del_init(&t->tqent_list);
                t->tqent_flags |= TQENT_FLAG_CANCEL;

                /*
                 * When canceling the lowest outstanding task id we
                 * must recalculate the new lowest outstanding id.
                 */
                if (tq->tq_lowest_id == t->tqent_id) {
                        tq->tq_lowest_id = taskq_lowest_id(tq);
                        ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
                }

                /*
                 * The task_expire() function takes the tq->tq_lock so drop
                 * the lock before synchronously cancelling the timer.
                 */
                if (timer_pending(&t->tqent_timer)) {
                        spin_unlock_irqrestore(&tq->tq_lock, flags);
                        del_timer_sync(&t->tqent_timer);
                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                }

                if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
                        task_done(tq, t);

                rc = 0;
        }
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        if (t == ERR_PTR(-EBUSY)) {
                taskq_wait_id(tq, id);
                rc = EBUSY;
        }

        return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);

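/*
 * Illustrative sketch (an assumption for this commentary, not upstream
 * code): based on the semantics above, a caller that dispatched a task and
 * later wants it gone could do something like the following, where my_tq
 * and id are hypothetical.
 *
 *      int error = taskq_cancel_id(my_tq, id);
 *
 *      0      - task was still pending and has been canceled
 *      EBUSY  - task was executing; taskq_cancel_id() waited for it
 *      ENOENT - task already completed or was never dispatched
 */
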
static int taskq_thread_spawn(taskq_t *tq);

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        taskq_ent_t *t;
        taskqid_t rc = TASKQID_INVALID;
        unsigned long irqflags;

        ASSERT(tq);
        ASSERT(func);

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))
                goto out;

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
                /* Dynamic taskq may be able to spawn another thread */
                if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
                    taskq_thread_spawn(tq) == 0)
                        goto out;
        }

        if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
                goto out;

        spin_lock(&t->tqent_lock);

        /* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
        if (flags & TQ_NOQUEUE)
                list_add(&t->tqent_list, &tq->tq_prio_list);
        /* Queue to the priority list instead of the pending list */
        else if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        t->tqent_taskq = tq;
#ifndef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST
        t->tqent_timer.data = 0;
#endif
        t->tqent_timer.function = NULL;
        t->tqent_timer.expires = 0;
        t->tqent_birth = jiffies;

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        /* Spawn additional taskq threads if required. */
        if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);

        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
        return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);

taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
        taskqid_t rc = TASKQID_INVALID;
        taskq_ent_t *t;
        unsigned long irqflags;

        ASSERT(tq);
        ASSERT(func);

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))
                goto out;

        if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
                goto out;

        spin_lock(&t->tqent_lock);

        /* Queue to the delay list for subsequent execution */
        list_add_tail(&t->tqent_list, &tq->tq_delay_list);

        t->tqent_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        t->tqent_taskq = tq;
#ifndef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST
        t->tqent_timer.data = (unsigned long)t;
#endif
        t->tqent_timer.function = task_expire;
        t->tqent_timer.expires = (unsigned long)expire_time;
        add_timer(&t->tqent_timer);

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);
out:
        /* Spawn additional taskq threads if required. */
        if (tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);
        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
        return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);

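/*
 * Illustrative sketch (assumed usage, not upstream code): expire_time is an
 * absolute time in jiffies which is handed to the entry's timer, so a
 * caller that wants my_func() to run roughly five seconds from now might
 * write the following, where my_tq, my_func, and my_arg are hypothetical.
 *
 *      taskqid_t id;
 *
 *      id = taskq_dispatch_delay(my_tq, my_func, my_arg, TQ_SLEEP,
 *          jiffies + 5 * HZ);
 *      if (id == TASKQID_INVALID)
 *              // dispatch failed; my_func() will never run
 */
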
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
        unsigned long irqflags;

        ASSERT(tq);
        ASSERT(func);

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
            tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE)) {
                t->tqent_id = TASKQID_INVALID;
                goto out;
        }

        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
                /* Dynamic taskq may be able to spawn another thread */
                if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
                    taskq_thread_spawn(tq) == 0)
                        goto out;
        }

        spin_lock(&t->tqent_lock);

        /*
         * Make sure the entry is not on some other taskq; it is important to
         * ASSERT() under lock.
         */
        ASSERT(taskq_empty_ent(t));

        /*
         * Mark it as a prealloc'd task.  This is important
         * to ensure that we don't free it later.
         */
        t->tqent_flags |= TQENT_FLAG_PREALLOC;

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        t->tqent_taskq = tq;
        t->tqent_birth = jiffies;

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        /* Spawn additional taskq threads if required. */
        if (tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);

        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);

int
taskq_empty_ent(taskq_ent_t *t)
{
        return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);

void
taskq_init_ent(taskq_ent_t *t)
{
        spin_lock_init(&t->tqent_lock);
        init_waitqueue_head(&t->tqent_waitq);
#ifdef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST
        timer_setup(&t->tqent_timer, NULL, 0);
#else
        init_timer(&t->tqent_timer);
#endif
        INIT_LIST_HEAD(&t->tqent_list);
        t->tqent_id = 0;
        t->tqent_func = NULL;
        t->tqent_arg = NULL;
        t->tqent_flags = 0;
        t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);

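/*
 * Illustrative sketch (assumed usage, not upstream code): a caller that
 * wants to avoid the allocation done by task_alloc() can embed a
 * taskq_ent_t in its own structure, initialize it once, and dispatch it
 * with taskq_dispatch_ent().  The names my_obj_t, my_tq, and my_func are
 * hypothetical.
 *
 *      typedef struct my_obj {
 *              taskq_ent_t     mo_tqent;
 *              int             mo_data;
 *      } my_obj_t;
 *
 *      taskq_init_ent(&obj->mo_tqent);
 *      taskq_dispatch_ent(my_tq, my_func, obj, 0, &obj->mo_tqent);
 *
 * The entry is marked TQENT_FLAG_PREALLOC so the taskq never frees it; the
 * caller owns its lifetime, including after a cancel.
 */
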
/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
        struct list_head *list;

        if (!list_empty(&tq->tq_prio_list))
                list = &tq->tq_prio_list;
        else if (!list_empty(&tq->tq_pend_list))
                list = &tq->tq_pend_list;
        else
                return (NULL);

        return (list_entry(list->next, taskq_ent_t, tqent_list));
}

/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
        taskq_t *tq = (taskq_t *)arg;
        unsigned long flags;

        if (taskq_thread_create(tq) == NULL) {
                /* restore spawning count if failed */
                spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
                tq->tq_nspawn--;
                spin_unlock_irqrestore(&tq->tq_lock, flags);
        }
}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim.  The
 * system_taskq which is also a dynamic taskq cannot be safely used for this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
        int spawning = 0;

        if (!(tq->tq_flags & TASKQ_DYNAMIC))
                return (0);

        if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
            (tq->tq_flags & TASKQ_ACTIVE)) {
                spawning = (++tq->tq_nspawn);
                taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
                    tq, TQ_NOSLEEP);
        }

        return (spawning);
}

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks.  This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
        if (!(tq->tq_flags & TASKQ_DYNAMIC))
                return (0);

        if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
            tqt_thread_list) == tqt)
                return (0);

        return
            ((tq->tq_nspawn == 0) &&    /* No threads are being spawned */
            (tq->tq_nactive == 0) &&    /* No threads are handling tasks */
            (tq->tq_nthreads > 1) &&    /* More than 1 thread is running */
            (!taskq_next_ent(tq)) &&    /* There are no pending tasks */
            (spl_taskq_thread_dynamic));        /* Dynamic taskqs are allowed */
}

static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskq_thread_t *tqt = args;
        taskq_t *tq;
        taskq_ent_t *t;
        int seq_tasks = 0;
        unsigned long flags;
        taskq_ent_t dup_task = {};

        ASSERT(tqt);
        ASSERT(tqt->tqt_tq);
        tq = tqt->tqt_tq;
        current->flags |= PF_NOFREEZE;

        (void) spl_fstrans_mark();

        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        tsd_set(taskq_tsd, tq);
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        /*
         * If we are dynamically spawned, decrease spawning count.  Note that
         * we could be created during taskq_create, in which case we shouldn't
         * do the decrement.  But it's fine because taskq_create will reset
         * it to 0 later.
         */
        if (tq->tq_flags & TASKQ_DYNAMIC)
                tq->tq_nspawn--;

        /* Immediately exit if more threads than allowed were created. */
        if (tq->tq_nthreads >= tq->tq_maxthreads)
                goto error;

        tq->tq_nthreads++;
        list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {

                        if (taskq_thread_should_stop(tq, tqt)) {
                                wake_up_all(&tq->tq_wait_waitq);
                                break;
                        }

                        add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
                        spin_unlock_irqrestore(&tq->tq_lock, flags);

                        schedule();
                        seq_tasks = 0;

                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                        remove_wait_queue(&tq->tq_work_waitq, &wait);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                if ((t = taskq_next_ent(tq)) != NULL) {
                        list_del_init(&t->tqent_list);

                        /*
                         * A TQENT_FLAG_PREALLOC task may be reused or freed
                         * during the task function call.  Store tqent_id and
                         * tqent_flags here.
                         *
                         * Also use an on stack taskq_ent_t for tqt_task
                         * assignment in this case.  We only populate the two
                         * fields used by the only user in taskq proc file.
                         */
                        tqt->tqt_id = t->tqent_id;
                        tqt->tqt_flags = t->tqent_flags;

                        if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
                                dup_task.tqent_func = t->tqent_func;
                                dup_task.tqent_arg = t->tqent_arg;
                                t = &dup_task;
                        }
                        tqt->tqt_task = t;

                        taskq_insert_in_order(tq, tqt);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock, flags);

                        /* Perform the requested task */
                        t->tqent_func(t->tqent_arg);

                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                        tq->tq_nactive--;
                        list_del_init(&tqt->tqt_active_list);
                        tqt->tqt_task = NULL;

                        /* For prealloc'd tasks, we don't free anything. */
                        if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
                                task_done(tq, t);

                        /*
                         * When the current lowest outstanding taskqid is
                         * done calculate the new lowest outstanding id
                         */
                        if (tq->tq_lowest_id == tqt->tqt_id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
                        }

                        /* Spawn additional taskq threads if required. */
                        if ((++seq_tasks) > spl_taskq_thread_sequential &&
                            taskq_thread_spawn(tq))
                                seq_tasks = 0;

                        tqt->tqt_id = TASKQID_INVALID;
                        wake_up_all(&tq->tq_wait_waitq);
                } else {
                        if (taskq_thread_should_stop(tq, tqt))
                                break;
                }

                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        list_del_init(&tqt->tqt_thread_list);
error:
        kmem_free(tqt, sizeof (taskq_thread_t));
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        tsd_set(taskq_tsd, NULL);

        return (0);
}

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
        static int last_used_cpu = 0;
        taskq_thread_t *tqt;

        tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
        INIT_LIST_HEAD(&tqt->tqt_thread_list);
        INIT_LIST_HEAD(&tqt->tqt_active_list);
        tqt->tqt_tq = tq;
        tqt->tqt_id = TASKQID_INVALID;

        tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
            "%s", tq->tq_name);
        if (tqt->tqt_thread == NULL) {
                kmem_free(tqt, sizeof (taskq_thread_t));
                return (NULL);
        }

        if (spl_taskq_thread_bind) {
                last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
                kthread_bind(tqt->tqt_thread, last_used_cpu);
        }

        if (spl_taskq_thread_priority)
                set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

        wake_up_process(tqt->tqt_thread);

        return (tqt);
}

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        taskq_thread_t *tqt;
        int count = 0, rc = 0, i;
        unsigned long irqflags;

        ASSERT(name != NULL);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE)));    /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }

        tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
        if (tq == NULL)
                return (NULL);

        spin_lock_init(&tq->tq_lock);
        INIT_LIST_HEAD(&tq->tq_thread_list);
        INIT_LIST_HEAD(&tq->tq_active_list);
        tq->tq_name = strdup(name);
        tq->tq_nactive = 0;
        tq->tq_nthreads = 0;
        tq->tq_nspawn = 0;
        tq->tq_maxthreads = nthreads;
        tq->tq_pri = pri;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_nalloc = 0;
        tq->tq_flags = (flags | TASKQ_ACTIVE);
        tq->tq_next_id = TASKQID_INITIAL;
        tq->tq_lowest_id = TASKQID_INITIAL;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        INIT_LIST_HEAD(&tq->tq_delay_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);
        tq->tq_lock_class = TQ_LOCK_GENERAL;
        INIT_LIST_HEAD(&tq->tq_taskqs);

        if (flags & TASKQ_PREPOPULATE) {
                spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
                    tq->tq_lock_class);

                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
                            &irqflags));

                spin_unlock_irqrestore(&tq->tq_lock, irqflags);
        }

        if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
                nthreads = 1;

        for (i = 0; i < nthreads; i++) {
                tqt = taskq_thread_create(tq);
                if (tqt == NULL)
                        rc = 1;
                else
                        count++;
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
        /*
         * taskq_thread might have touched nspawn, but we don't want them to
         * because they're not dynamically spawned.  So we reset it to 0 here.
         */
        tq->tq_nspawn = 0;

        if (rc) {
                taskq_destroy(tq);
                tq = NULL;
        } else {
                down_write(&tq_list_sem);
                tq->tq_instance = taskq_find_by_name(name) + 1;
                list_add_tail(&tq->tq_taskqs, &tq_list);
                up_write(&tq_list_sem);
        }

        return (tq);
}
EXPORT_SYMBOL(taskq_create);

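/*
 * Illustrative sketch (assumed usage, not upstream code): a consumer
 * creating and tearing down its own queue might do the following, where
 * my_tq and my_func are hypothetical.  The argument order matches the
 * prototype above: name, nthreads, pri, minalloc, maxalloc, flags.
 *
 *      taskq_t *my_tq;
 *
 *      my_tq = taskq_create("my_taskq", 4, maxclsyspri, 4, INT_MAX,
 *          TASKQ_PREPOPULATE);
 *      if (my_tq != NULL) {
 *              (void) taskq_dispatch(my_tq, my_func, NULL, TQ_SLEEP);
 *              taskq_wait(my_tq);
 *              taskq_destroy(my_tq);
 *      }
 */
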
void
taskq_destroy(taskq_t *tq)
{
        struct task_struct *thread;
        taskq_thread_t *tqt;
        taskq_ent_t *t;
        unsigned long flags;

        ASSERT(tq);
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        tq->tq_flags &= ~TASKQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        /*
         * When TASKQ_ACTIVE is clear new tasks may not be added nor may
         * new worker threads be spawned for dynamic taskq.
         */
        if (dynamic_taskq != NULL)
                taskq_wait_outstanding(dynamic_taskq, 0);

        taskq_wait(tq);

        /* remove taskq from global list used by the kstats */
        down_write(&tq_list_sem);
        list_del(&tq->tq_taskqs);
        up_write(&tq_list_sem);

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        /* wait for spawning threads to insert themselves to the list */
        while (tq->tq_nspawn) {
                spin_unlock_irqrestore(&tq->tq_lock, flags);
                schedule_timeout_interruptible(1);
                spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        }

        /*
         * Signal each thread to exit and block until it does.  Each thread
         * is responsible for removing itself from the list and freeing its
         * taskq_thread_t.  This allows for idle threads to opt to remove
         * themselves from the taskq.  They can be recreated as needed.
         */
        while (!list_empty(&tq->tq_thread_list)) {
                tqt = list_entry(tq->tq_thread_list.next,
                    taskq_thread_t, tqt_thread_list);
                thread = tqt->tqt_thread;
                spin_unlock_irqrestore(&tq->tq_lock, flags);

                kthread_stop(thread);

                spin_lock_irqsave_nested(&tq->tq_lock, flags,
                    tq->tq_lock_class);
        }

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

                list_del_init(&t->tqent_list);
                task_free(tq, t);
        }

        ASSERT0(tq->tq_nthreads);
        ASSERT0(tq->tq_nalloc);
        ASSERT0(tq->tq_nspawn);
        ASSERT(list_empty(&tq->tq_thread_list));
        ASSERT(list_empty(&tq->tq_active_list));
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));
        ASSERT(list_empty(&tq->tq_delay_list));

        spin_unlock_irqrestore(&tq->tq_lock, flags);

        strfree(tq->tq_name);
        kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);

static unsigned int spl_taskq_kick = 0;

/*
 * module_param_cb is introduced to take kernel_param_ops and
 * module_param_call is marked as obsolete.  Also set and get operations
 * were changed to take a 'const struct kernel_param *'.
 */
static int
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
{
        int ret;
        taskq_t *tq;
        taskq_ent_t *t;
        unsigned long flags;

        ret = param_set_uint(val, kp);
        if (ret < 0 || !spl_taskq_kick)
                return (ret);
        /* reset value */
        spl_taskq_kick = 0;

        down_read(&tq_list_sem);
        list_for_each_entry(tq, &tq_list, tq_taskqs) {
                spin_lock_irqsave_nested(&tq->tq_lock, flags,
                    tq->tq_lock_class);
                /* Check if the first pending is older than 5 seconds */
                t = taskq_next_ent(tq);
                if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
                        (void) taskq_thread_spawn(tq);
                        printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
                            tq->tq_name, tq->tq_instance);
                }
                spin_unlock_irqrestore(&tq->tq_lock, flags);
        }
        up_read(&tq_list_sem);

        return (ret);
}

#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
        .set = param_set_taskq_kick,
        .get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
    &spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
    "Write nonzero to kick stuck taskqs to spawn more threads");

int
spl_taskq_init(void)
{
        tsd_create(&taskq_tsd, NULL);

        system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
            maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
        if (system_taskq == NULL)
                return (1);

        system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
            maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
        if (system_delay_taskq == NULL) {
                taskq_destroy(system_taskq);
                return (1);
        }

        dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
            maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
        if (dynamic_taskq == NULL) {
                taskq_destroy(system_taskq);
                taskq_destroy(system_delay_taskq);
                return (1);
        }

        /*
         * This is used to annotate tq_lock, so
         *      taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
         * does not trigger a lockdep warning re: possible recursive locking
         */
        dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

        return (0);
}

void
spl_taskq_fini(void)
{
        taskq_destroy(dynamic_taskq);
        dynamic_taskq = NULL;

        taskq_destroy(system_delay_taskq);
        system_delay_taskq = NULL;

        taskq_destroy(system_taskq);
        system_taskq = NULL;

        tsd_destroy(&taskq_tsd);
}