/*	$NetBSD: taskq.c,v 1.1 2009/03/26 22:11:45 ad Exp $	*/

/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * Kernel task queues: general-purpose asynchronous task scheduling.
 *
 * A common problem in kernel programming is the need to schedule tasks
 * to be performed later, by another thread. There are several reasons
 * you may want or need to do this:
 *
 * (1) The task isn't time-critical, but your current code path is.
 *
 * (2) The task may require grabbing locks that you already hold.
 *
 * (3) The task may need to block (e.g. to wait for memory), but you
 *     cannot block in your current context.
 *
 * (4) Your code path can't complete because of some condition, but you can't
 *     sleep or fail, so you queue the task for later execution when the
 *     condition becomes true.
 *
 * (5) You just want a simple way to launch multiple tasks in parallel.
 *
 * Task queues provide such a facility. In its simplest form (used when
 * performance is not a critical consideration) a task queue consists of a
 * single list of tasks, together with one or more threads to service the
 * list. There are some cases when this simple queue is not sufficient:
 *
 * (1) The task queues are very hot and there is a need to avoid data and lock
 *     contention over global resources.
 *
 * (2) Some tasks may depend on other tasks to complete, so they can't be put
 *     in the same list managed by the same thread.
 *
 * (3) Some tasks may block for a long time, and this should not block other
 *     tasks in the queue.
 *
 * To provide useful service in such cases we define a "dynamic task queue"
 * which has an individual thread for each of the tasks. These threads are
 * dynamically created as they are needed and destroyed when they are not in
 * use. The API for managing task pools is the same as for managing task
 * queues with the exception of a taskq creation flag TASKQ_DYNAMIC which
 * tells that dynamic task pool behavior is desired.
 *
 * Dynamic task queues may also place tasks in the normal queue (called the
 * "backing queue") when the task pool runs out of resources. Users of task
 * queues may disallow such queued scheduling by specifying TQ_NOQUEUE in the
 * dispatch flags.
 *
 * The backing task queue is also used for scheduling internal tasks needed
 * for dynamic task queue maintenance.
 *
 * taskq_t *taskq_create(name, nthreads, pri, minalloc, maxalloc, flags);
 *
 *	Create a taskq with specified properties.
 *
 *	TASKQ_DYNAMIC: Create task pool for task management. If this flag is
 *		specified, 'nthreads' specifies the maximum number of threads
 *		in the task queue. Task execution order for dynamic task
 *		queues is not predictable.
 *
 *		If this flag is not specified (default case) a
 *		single-list task queue is created with 'nthreads' threads
 *		servicing it. Entries in this queue are managed by
 *		taskq_ent_alloc() and taskq_ent_free() which try to keep the
 *		task population between 'minalloc' and 'maxalloc', but the
 *		latter limit is only advisory for TQ_SLEEP dispatches and the
 *		former limit is only advisory for TQ_NOALLOC dispatches. If
 *		TASKQ_PREPOPULATE is set in 'flags', the taskq will be
 *		prepopulated with 'minalloc' task structures.
 *
 *		Since non-DYNAMIC taskqs are queues, tasks are guaranteed to
 *		be executed in the order they are scheduled if nthreads == 1.
 *		If nthreads > 1, task execution order is not predictable.
 *
 *	TASKQ_PREPOPULATE: Prepopulate task queue with threads.
 *		Also prepopulate the task queue with 'minalloc' task
 *		structures.
 *
 *	TASKQ_CPR_SAFE: This flag specifies that users of the task queue will
 *		use their own protocol for handling CPR issues. This flag is
 *		not supported for DYNAMIC task queues.
 *
 *	The 'pri' field specifies the default priority for the threads that
 *	service all scheduled tasks.
 *
 * void taskq_destroy(tq):
 *
 *	Waits for any scheduled tasks to complete, then destroys the taskq.
 *	Caller should guarantee that no new tasks are scheduled in the
 *	closing taskq.
 *
 * taskqid_t taskq_dispatch(tq, func, arg, flags):
 *
 *	Dispatches the task "func(arg)" to taskq. The 'flags' indicates
 *	whether the caller is willing to block for memory. The function
 *	returns an opaque value which is zero iff dispatch fails. If flags is
 *	TQ_NOSLEEP or TQ_NOALLOC and the task can't be dispatched,
 *	taskq_dispatch() fails and returns (taskqid_t)0.
 *
 *	ASSUMES: func != NULL.
 *
 *	TQ_NOSLEEP: Do not wait for resources; may fail.
 *
 *	TQ_NOALLOC: Do not allocate memory; may fail. May only be used with
 *		non-dynamic task queues.
 *
 *	TQ_NOQUEUE: Do not enqueue a task if it can't be dispatched due to
 *		lack of available resources; fail instead. If this flag is
 *		not set, and the task pool is exhausted, the task may be
 *		scheduled in the backing queue. This flag may ONLY be used
 *		with dynamic task queues.
 *
 *		NOTE: This flag should always be used when a task queue is
 *		used for tasks that may depend on each other for completion.
 *		Enqueueing dependent tasks may create deadlocks.
 *
 *	TQ_SLEEP: May block waiting for resources. May still fail for
 *		dynamic task queues if TQ_NOQUEUE is also specified,
 *		otherwise it always succeeds.
 *
 *	NOTE: Dynamic task queues are much more likely to fail in
 *	taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it
 *	is important to have backup strategies handling such failures.
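 *
 * As a concrete illustration (a minimal sketch; the xyz_work() function and
 * its 'xyz' argument are hypothetical), a caller that cannot block typically
 * provides a fallback for a failed dispatch:
 *
 *	if (taskq_dispatch(tq, xyz_work, xyz, TQ_NOSLEEP) == (taskqid_t)0)
 *		xyz_work(xyz);	(dispatch failed, so do the work inline)
 *
 * A TQ_SLEEP dispatch to a non-dynamic task queue is not expected to fail,
 * so such a fallback matters mostly for TQ_NOSLEEP/TQ_NOALLOC dispatches and
 * for dynamic task queues.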
 *
 * void taskq_wait(tq):
 *
 *	Waits for all previously scheduled tasks to complete.
 *
 *	NOTE: It does not stop any new task dispatches.
 *	Do NOT call taskq_wait() from a task: it will cause deadlock.
 *
 * void taskq_suspend(tq)
 *
 *	Suspend all task execution. Tasks already scheduled for a dynamic
 *	task queue will still be executed, but all newly scheduled tasks will
 *	be suspended until taskq_resume() is called.
 *
 * int taskq_suspended(tq)
 *
 *	Returns 1 if taskq is suspended and 0 otherwise. It is intended to
 *	ASSERT that the task queue is suspended.
 *
 * void taskq_resume(tq)
 *
 *	Resume task queue execution.
 *
 * int taskq_member(tq, thread)
 *
 *	Returns 1 if 'thread' belongs to taskq 'tq' and 0 otherwise. The
 *	intended use is to ASSERT that a given function is called in taskq
 *	context only.
 *
 * Global system-wide dynamic task queue for common uses. It may be used by
 * any subsystem that needs to schedule tasks and does not need to manage
 * its own task queues. It is initialized quite early during system boot.
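 *
 * A minimal usage sketch of the interfaces above (illustrative only; the
 * "xyz" names are hypothetical):
 *
 *	taskq_t *xyz_tq;
 *
 *	xyz_tq = taskq_create("xyz_taskq", 1, minclsyspri, 4, 64,
 *	    TASKQ_PREPOPULATE);
 *	(void) taskq_dispatch(xyz_tq, xyz_task, xyz_arg, TQ_SLEEP);
 *	...
 *	taskq_wait(xyz_tq);
 *	taskq_destroy(xyz_tq);
 *
 * and inside xyz_task() it is reasonable to assert that it really runs in
 * taskq context:
 *
 *	ASSERT(taskq_member(xyz_tq, curthread));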
 *
 * This is a schematic representation of the task queue structures.
 *
 * |tq_lock | +---< taskq_ent_free()
 * |... | | tqent: tqent:
 * +-------------+ | +------------+ +------------+
 * | tq_freelist |-->| tqent_next |--> ... ->| tqent_next |
 * +-------------+ +------------+ +------------+
 * |... | | ... | | ... |
 * +-------------+ +------------+ +------------+
 * | | +-------------->taskq_ent_alloc()
 * +--------------------------------------------------------------------------+
 * | | | tqent tqent |
 * | +---------------------+ +--> +------------+ +--> +------------+ |
 * | | ... | | | func, arg | | | func, arg | |
 * +>+---------------------+ <---|-+ +------------+ <---|-+ +------------+ |
 * | tq_taskq.tqent_next | ----+ | | tqent_next | --->+ | | tqent_next |--+
 * +---------------------+ | +------------+ ^ | +------------+
 * +-| tq_task.tqent_prev | +--| tqent_prev | | +--| tqent_prev | ^
 * | +---------------------+ +------------+ | +------------+ |
 * | |... | | ... | | | ... | |
 * | +---------------------+ +------------+ | +------------+ |
 * +--------------------------------------+--------------+ TQ_APPEND() -+
 * |... | taskq_thread()-----+
 * | tq_buckets |--+-------> [ NULL ] (for regular task queues)
 * | DYNAMIC TASK QUEUES:
 * +-> taskq_bucket[nCPU] taskq_bucket_dispatch()
 * +-------------------+ ^
 * +--->| tqbucket_lock | |
 * | +-------------------+ +--------+ +--------+
 * | | tqbucket_freelist |-->| tqent |-->...| tqent | ^
 * | +-------------------+<--+--------+<--...+--------+ |
 * | | ... | | thread | | thread | |
 * | +-------------------+ +--------+ +--------+ |
 * | +-------------------+ |
 * taskq_dispatch()--+--->| tqbucket_lock | TQ_APPEND()------+
 * TQ_HASH() | +-------------------+ +--------+ +--------+
 * | | tqbucket_freelist |-->| tqent |-->...| tqent |
 * | +-------------------+<--+--------+<--...+--------+
 * | | ... | | thread | | thread |
 * | +-------------------+ +--------+ +--------+
 *
 * Task queues use the tq_task field to link new entries into the queue. The
 * queue is a circular doubly-linked list. Entries are put at the end of the
 * list with TQ_APPEND() and processed from the front of the list by
 * taskq_thread() in FIFO order. Task queue entries are cached in the free
 * list managed by the taskq_ent_alloc() and taskq_ent_free() functions.
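 *
 * For illustration, an empty tq_task list is circular and points back to
 * itself, which is exactly what the IS_EMPTY() macro below checks:
 *
 *	tq->tq_task.tqent_next == &tq->tq_task
 *	tq->tq_task.tqent_prev == &tq->tq_task
 *
 * TQ_APPEND() links a new entry in front of the list head (i.e. at the tail
 * of the queue), so taskq_thread(), which always takes tq_task.tqent_next,
 * processes entries in FIFO order.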
 *
 * All threads used by task queues mark the t_taskq field of the thread to
 * point to the task queue.
 *
 * Dynamic Task Queues Implementation.
 *
 * For dynamic task queues there is a 1-to-1 mapping between a thread and a
 * taskq_ent_t structure. Each entry is serviced by its own thread and each
 * thread is controlled by a single entry.
 *
 * Entries are distributed over a set of buckets. To avoid using modulo
 * arithmetic the number of buckets is 2^n and is determined as the nearest
 * power-of-two round-down of the number of CPUs in the system. The tunable
 * variable 'taskq_maxbuckets' limits the maximum number of buckets. Each
 * entry is attached to a bucket for its lifetime and can't migrate to other
 * buckets.
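 *
 * For example, with 6 CPUs the bucket count would be 1 << (highbit(6) - 1),
 * i.e. 4, and with 8 CPUs it would be 8, in both cases subject to the
 * 'taskq_maxbuckets' limit (see the bsize computation in
 * taskq_create_common() below).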
 *
 * Entries that have scheduled tasks are not placed in any list. The dispatch
 * function sets their "func" and "arg" fields and signals the corresponding
 * thread to execute the task. Once the thread executes the task it clears
 * the "func" field and places the entry on the bucket cache of free entries
 * pointed to by the "tqbucket_freelist" field. ALL entries on the free list
 * should have the "func" field equal to NULL. The free list is a circular
 * doubly-linked list identical in structure to the tq_task list above, but
 * entries are taken from it in LIFO order - the last freed entry is the
 * first to be allocated. The taskq_bucket_dispatch() function gets the most
 * recently used entry from the free list, sets its "func" and "arg" fields
 * and signals a worker thread.
 *
 * After executing each task a per-entry thread taskq_d_thread() places its
 * entry on the bucket free list and goes to a timed sleep. If it wakes up
 * without getting a new task it removes the entry from the free list and
 * destroys itself. The thread sleep time is controlled by a tunable variable
 * `taskq_thread_timeout'.
 *
 * Various statistics are kept in the bucket which allow for later analysis
 * of taskq usage patterns. Also, a global copy of taskq creation and death
 * statistics is kept in the global taskq data structure. Since thread
 * creation and death happen rarely, updating such global data does not
 * present a performance problem.
 *
 * NOTE: Threads are not bound to any CPU and there is absolutely no
 *       association between the bucket and the actual thread CPU, so buckets
 *       are used only to split resources and reduce resource contention.
 *       Having threads attached to the CPU denoted by a bucket may reduce
 *       the number of times the job switches between CPUs.
 *
 *       The current algorithm creates a thread whenever a bucket has no free
 *       entries. It would be nice to know how many threads are in the
 *       running state and not create threads if all CPUs are busy with
 *       existing tasks, but it is unclear how such a strategy can be
 *       implemented.
 *
 *       Currently buckets are created statically as an array attached to the
 *       task queue. On some systems with nCPUs < max_ncpus it may waste
 *       system memory. One solution may be allocation of buckets when they
 *       are first touched, but it is not clear how useful it is.
 *
 * SUSPEND/RESUME implementation.
 *
 *	Before executing a task taskq_thread() (executing non-dynamic task
 *	queues) obtains the taskq's thread lock as a reader. The
 *	taskq_suspend() function gets the same lock as a writer blocking all
 *	non-dynamic task execution. The taskq_resume() function releases the
 *	lock allowing taskq_thread() to continue execution.
 *
 *	For dynamic task queues, each bucket is marked as TQBUCKET_SUSPEND by
 *	the taskq_suspend() function. After that taskq_bucket_dispatch()
 *	always fails, so that taskq_dispatch() will either enqueue tasks for
 *	a suspended backing queue or fail if TQ_NOQUEUE is specified in the
 *	dispatch flags.
 *
 *	NOTE: taskq_suspend() does not immediately block any tasks already
 *	      scheduled for dynamic task queues. It only suspends new tasks
 *	      scheduled after taskq_suspend() was called.
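 *
 *	A minimal suspend/resume sketch (illustrative only):
 *
 *		taskq_suspend(tq);
 *		ASSERT(taskq_suspended(tq));
 *		... reconfigure whatever the suspended tasks depend on ...
 *		taskq_resume(tq);
 *
 *	Since taskq_suspend() keeps tq_threadlock held as a writer until
 *	taskq_resume() is called, the two calls are normally made by the same
 *	thread.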
 *
 * The taskq_member() function works by comparing a thread's t_taskq pointer
 * with the passed thread pointer.
 *
 * LOCKS and LOCK Hierarchy:
 *
 *   There are two locks used in task queues.
 *
 *   1) The task queue structure has a lock, protecting global task queue
 *      state.
 *
 *   2) Each per-CPU bucket has a lock for bucket management.
 *
 *   If both locks are needed, the task queue lock should be taken only after
 *   the bucket lock.
 *
 * DEBUG FACILITIES.
 *
 * For DEBUG kernels it is possible to induce random failures in the
 * taskq_dispatch() function when it is given the TQ_NOSLEEP argument. The
 * values of the taskq_dmtbf and taskq_smtbf tunables control the mean time
 * between induced failures for dynamic and static task queues respectively.
 *
 * Setting TASKQ_STATISTIC to 0 will disable per-bucket statistics.
 *
 * TUNABLES
 *
 *	system_taskq_size	- Size of the global system_taskq.
 *				  This value is multiplied by nCPUs to
 *				  determine the actual size.
 *				  Default value: 1
 *
 *	taskq_thread_timeout	- Maximum idle time for taskq_d_thread()
 *				  Default value: 5 minutes
 *
 *	taskq_maxbuckets	- Maximum number of buckets in any task queue
 *				  Default value: 128
 *
 *	taskq_search_depth	- Maximum # of buckets searched for a free
 *				  entry
 *				  Default value: 4
 *
 *	taskq_dmtbf		- Mean time between induced dispatch failures
 *				  for dynamic task queues.
 *				  Default value: UINT_MAX (no induced failures)
 *
 *	taskq_smtbf		- Mean time between induced dispatch failures
 *				  for static task queues.
 *				  Default value: UINT_MAX (no induced failures)
 *
 * CONDITIONAL compilation.
 *
 *    TASKQ_STATISTIC	- If set will enable bucket statistic (default).
 */

#include <sys/kthread.h>
#include <sys/taskq_impl.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/callb.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>

#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/limits.h>

static kmem_cache_t *taskq_ent_cache, *taskq_cache;

/* Global system task queue for common use */
taskq_t	*system_taskq;

/*
 * Maximum number of entries in global system taskq is
 * system_taskq_size * max_ncpus
 */
#define	SYSTEM_TASKQ_SIZE	1
int system_taskq_size = SYSTEM_TASKQ_SIZE;

/*
 * Dynamic task queue threads that don't get any work within
 * taskq_thread_timeout destroy themselves
 */
#define	TASKQ_THREAD_TIMEOUT	(60 * 5)
int taskq_thread_timeout = TASKQ_THREAD_TIMEOUT;

#define	TASKQ_MAXBUCKETS	128
int taskq_maxbuckets = TASKQ_MAXBUCKETS;

/*
 * When a bucket has no available entries other buckets are tried.
 * The taskq_search_depth parameter limits the number of buckets that we
 * search before failing. This is mostly useful in systems with many CPUs
 * where we may spend too much time scanning busy buckets.
 */
#define	TASKQ_SEARCH_DEPTH	4
int taskq_search_depth = TASKQ_SEARCH_DEPTH;

/*
 * Hashing function: mix various bits of x. May be pretty much anything.
 */
#define	TQ_HASH(x)	((x) ^ ((x) >> 11) ^ ((x) >> 17) ^ ((x) ^ 27))

/*
 * We do not create any new threads when the system is low on memory and
 * starts throttling memory allocations. The following macro tries to
 * estimate such conditions.
 */
#define	ENOUGH_MEMORY()		(freemem > throttlefree)

static taskq_t	*taskq_create_common(const char *, int, int, pri_t, int,
    int, uint_t);
static void taskq_thread(void *);
static int taskq_constructor(void *, void *, int);
static void taskq_destructor(void *, void *);
static int taskq_ent_constructor(void *, void *, int);
static void taskq_ent_destructor(void *, void *);
static taskq_ent_t *taskq_ent_alloc(taskq_t *, int);
static void taskq_ent_free(taskq_t *, taskq_ent_t *);

/*
 * Collect per-bucket statistic when TASKQ_STATISTIC is defined.
 */
#define	TASKQ_STATISTIC	1

#if TASKQ_STATISTIC
#define	TQ_STAT(b, x)	b->tqbucket_stat.x++
#else
#define	TQ_STAT(b, x)
#endif

/*
 * Random fault injection.
 */
uint_t taskq_random;
uint_t taskq_dmtbf = UINT_MAX;	/* mean time between injected failures */
uint_t taskq_smtbf = UINT_MAX;	/* mean time between injected failures */

/*
 * TQ_NOSLEEP dispatches on dynamic task queues are always allowed to fail.
 *
 * TQ_NOSLEEP dispatches on static task queues can't arbitrarily fail because
 * they could prepopulate the cache and make sure that they do not use more
 * than minalloc entries. So, fault injection in this case ensures that
 * either TASKQ_PREPOPULATE is not set or there are more entries allocated
 * than is specified by minalloc. TQ_NOALLOC dispatches are always allowed
 * to fail, but for simplicity we treat them identically to TQ_NOSLEEP
 * dispatches.
 */
#ifdef DEBUG
#define	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)		\
	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
	if ((flag & TQ_NOSLEEP) &&				\
	    taskq_random < 1771875 / taskq_dmtbf) {		\
		mutex_exit(&tq->tq_lock);			\
		return ((taskqid_t)0);				\
	}

#define	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)		\
	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
	if ((flag & (TQ_NOSLEEP | TQ_NOALLOC)) &&		\
	    (!(tq->tq_flags & TASKQ_PREPOPULATE) ||		\
	    (tq->tq_nalloc > tq->tq_minalloc)) &&		\
	    (taskq_random < (1771875 / taskq_smtbf))) {		\
		mutex_exit(&tq->tq_lock);			\
		return ((taskqid_t)0);				\
	}
#else
#define	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)
#define	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)
#endif	/* DEBUG */

#define	IS_EMPTY(l)	(((l).tqent_prev == (l).tqent_next) &&	\
	((l).tqent_prev == &(l)))

/*
 * Append `tqe' to the end of the doubly-linked list denoted by l.
 */
#define	TQ_APPEND(l, tqe) {					\
	tqe->tqent_next = &l;					\
	tqe->tqent_prev = l.tqent_prev;				\
	tqe->tqent_next->tqent_prev = tqe;			\
	tqe->tqent_prev->tqent_next = tqe;			\
}

/*
 * Schedule a task specified by func and arg into the task queue entry tqe.
 */
#define	TQ_ENQUEUE(tq, tqe, func, arg) {			\
	ASSERT(MUTEX_HELD(&tq->tq_lock));			\
	TQ_APPEND(tq->tq_task, tqe);				\
	tqe->tqent_func = (func);				\
	tqe->tqent_arg = (arg);					\
	tq->tq_tasks++;						\
	if (tq->tq_tasks - tq->tq_executed > tq->tq_maxtasks)	\
		tq->tq_maxtasks = tq->tq_tasks - tq->tq_executed; \
	cv_signal(&tq->tq_dispatch_cv);				\
	DTRACE_PROBE2(taskq__enqueue, taskq_t *, tq, taskq_ent_t *, tqe); \
}

/*
 * Do-nothing task which may be used to prepopulate thread caches.
 */
static void
nulltask(void *unused)
{
}

static int
taskq_constructor(void *arg, void *obj, int kmflags)
{
	taskq_t *tq = obj;

	memset(tq, 0, sizeof (taskq_t));

	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);

	tq->tq_task.tqent_next = &tq->tq_task;
	tq->tq_task.tqent_prev = &tq->tq_task;

	return (0);
}

static void
taskq_destructor(void *arg, void *obj)
{
	taskq_t *tq = obj;

	mutex_destroy(&tq->tq_lock);
	rw_destroy(&tq->tq_threadlock);
	cv_destroy(&tq->tq_dispatch_cv);
	cv_destroy(&tq->tq_wait_cv);
}

static int
taskq_ent_constructor(void *arg, void *obj, int kmflags)
{
	taskq_ent_t *tqe = obj;

	tqe->tqent_thread = NULL;
	cv_init(&tqe->tqent_cv, NULL, CV_DEFAULT, NULL);

	return (0);
}

static void
taskq_ent_destructor(void *arg, void *obj)
{
	taskq_ent_t *tqe = obj;

	ASSERT(tqe->tqent_thread == NULL);
	cv_destroy(&tqe->tqent_cv);
}

/*
 * Create global system dynamic task queue.
 */
void
system_taskq_init(void)
{

	system_taskq = taskq_create_common("system_taskq", 0,
	    system_taskq_size * max_ncpus, minclsyspri, 4, 512,
	    TASKQ_PREPOPULATE);
}

void
system_taskq_fini(void)
{

	taskq_destroy(system_taskq);
}

	taskq_ent_cache = kmem_cache_create("taskq_ent_cache",
	    sizeof (taskq_ent_t), 0, taskq_ent_constructor,
	    taskq_ent_destructor, NULL, NULL, NULL, 0);
	taskq_cache = kmem_cache_create("taskq_cache", sizeof (taskq_t),
	    0, taskq_constructor, taskq_destructor, NULL, NULL, NULL, 0);

	kmem_cache_destroy(taskq_cache);
	kmem_cache_destroy(taskq_ent_cache);

/*
 * Allocates a new taskq_ent_t structure either from the free list or from
 * the cache. Returns NULL if it can't be allocated.
 *
 * Assumes: tq->tq_lock is held.
 */
static taskq_ent_t *
taskq_ent_alloc(taskq_t *tq, int flags)
{
	int kmflags = KM_NOSLEEP;
	taskq_ent_t *tqe;

	ASSERT(MUTEX_HELD(&tq->tq_lock));

	/*
	 * TQ_NOALLOC allocations are allowed to use the freelist, even if
	 * we are below tq_minalloc.
	 */
	if ((tqe = tq->tq_freelist) != NULL &&
	    ((flags & TQ_NOALLOC) || tq->tq_nalloc >= tq->tq_minalloc)) {
		tq->tq_freelist = tqe->tqent_next;
	} else {
		if (flags & TQ_NOALLOC)
			return (NULL);

		mutex_exit(&tq->tq_lock);
		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (kmflags & KM_NOSLEEP) {
				mutex_enter(&tq->tq_lock);
				return (NULL);
			}
			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller. So, we just delay for one second
			 * to throttle the allocation rate.
			 */
			delay(hz);
		}
		tqe = kmem_cache_alloc(taskq_ent_cache, kmflags);
		mutex_enter(&tq->tq_lock);
		if (tqe != NULL)
			tq->tq_nalloc++;
	}
	return (tqe);
}

/*
 * Free taskq_ent_t structure by either putting it on the free list or
 * freeing it to the cache.
 *
 * Assumes: tq->tq_lock is held.
 */
static void
taskq_ent_free(taskq_t *tq, taskq_ent_t *tqe)
{
	ASSERT(MUTEX_HELD(&tq->tq_lock));

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		tqe->tqent_next = tq->tq_freelist;
		tq->tq_freelist = tqe;
	} else {
		tq->tq_nalloc--;
		mutex_exit(&tq->tq_lock);
		kmem_cache_free(taskq_ent_cache, tqe);
		mutex_enter(&tq->tq_lock);
	}
}

/*
 * Assumes: func != NULL
 *
 * Returns: NULL if dispatch failed.
 *	    non-NULL if task dispatched successfully.
 *	    Actual return value is the pointer to taskq entry that was used
 *	    to dispatch a task. This is useful for debugging.
 */
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *tqe = NULL;

	ASSERT(func != NULL);
	ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));

	/*
	 * TQ_NOQUEUE flag can't be used with non-dynamic task queues.
	 */
	ASSERT(!(flags & TQ_NOQUEUE));

	/*
	 * Enqueue the task to the underlying queue.
	 */
	mutex_enter(&tq->tq_lock);

	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flags);

	if ((tqe = taskq_ent_alloc(tq, flags)) == NULL) {
		mutex_exit(&tq->tq_lock);
		return ((taskqid_t)NULL);
	}
	TQ_ENQUEUE(tq, tqe, func, arg);
	mutex_exit(&tq->tq_lock);
	return ((taskqid_t)tqe);
}

/*
 * Wait for all pending tasks to complete.
 * Calling taskq_wait from a task will cause deadlock.
 */
void
taskq_wait(taskq_t *tq)
{

	mutex_enter(&tq->tq_lock);
	while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
	mutex_exit(&tq->tq_lock);
}

/*
 * Suspend execution of tasks.
 *
 * Tasks in the queue part will be suspended immediately upon return from
 * this function. Pending tasks in the dynamic part will continue to execute,
 * but all new tasks will be suspended.
 */
void
taskq_suspend(taskq_t *tq)
{
	rw_enter(&tq->tq_threadlock, RW_WRITER);

	/*
	 * Mark task queue as being suspended. Needed for taskq_suspended().
	 */
	mutex_enter(&tq->tq_lock);
	ASSERT(!(tq->tq_flags & TASKQ_SUSPENDED));
	tq->tq_flags |= TASKQ_SUSPENDED;
	mutex_exit(&tq->tq_lock);
}

/*
 * returns: 1 if tq is suspended, 0 otherwise.
 */
int
taskq_suspended(taskq_t *tq)
{
	return ((tq->tq_flags & TASKQ_SUSPENDED) != 0);
}

/*
 * Resume taskq execution.
 */
void
taskq_resume(taskq_t *tq)
{
	ASSERT(RW_WRITE_HELD(&tq->tq_threadlock));

	mutex_enter(&tq->tq_lock);
	ASSERT(tq->tq_flags & TASKQ_SUSPENDED);
	tq->tq_flags &= ~TASKQ_SUSPENDED;
	mutex_exit(&tq->tq_lock);

	rw_exit(&tq->tq_threadlock);
}

int
taskq_member(taskq_t *tq, kthread_t *thread)
{
	int i;

	if (tq->tq_nthreads == 1)
		return (tq->tq_thread == thread);

	mutex_enter(&tq->tq_lock);
	for (i = 0; i < tq->tq_nthreads; i++) {
		if (tq->tq_threadlist[i] == thread) {
			mutex_exit(&tq->tq_lock);
			return (1);
		}
	}
	mutex_exit(&tq->tq_lock);
	return (0);
}

/*
 * Worker thread for processing task queue.
 */
static void
taskq_thread(void *arg)
{
	taskq_t *tq = arg;
	taskq_ent_t *tqe;
	callb_cpr_t cprinfo;
	hrtime_t start, end;

	CALLB_CPR_INIT(&cprinfo, &tq->tq_lock, callb_generic_cpr, tq->tq_name);

	mutex_enter(&tq->tq_lock);
	while (tq->tq_flags & TASKQ_ACTIVE) {
		if ((tqe = tq->tq_task.tqent_next) == &tq->tq_task) {
			if (--tq->tq_active == 0)
				cv_broadcast(&tq->tq_wait_cv);
			if (tq->tq_flags & TASKQ_CPR_SAFE) {
				cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
			} else {
				CALLB_CPR_SAFE_BEGIN(&cprinfo);
				cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
				CALLB_CPR_SAFE_END(&cprinfo, &tq->tq_lock);
			}
			tq->tq_active++;
			continue;
		}
		tqe->tqent_prev->tqent_next = tqe->tqent_next;
		tqe->tqent_next->tqent_prev = tqe->tqent_prev;
		mutex_exit(&tq->tq_lock);

		rw_enter(&tq->tq_threadlock, RW_READER);
		start = gethrtime();
		DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
		    taskq_ent_t *, tqe);
		tqe->tqent_func(tqe->tqent_arg);
		DTRACE_PROBE2(taskq__exec__end, taskq_t *, tq,
		    taskq_ent_t *, tqe);
		end = gethrtime();
		rw_exit(&tq->tq_threadlock);

		mutex_enter(&tq->tq_lock);
		tq->tq_totaltime += end - start;
		tq->tq_executed++;

		taskq_ent_free(tq, tqe);
	}
	tq->tq_nthreads--;
	cv_broadcast(&tq->tq_wait_cv);
	ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));
	CALLB_CPR_EXIT(&cprinfo);
	thread_exit();
}

/*
 * Taskq creation. May sleep for memory.
 * Always use automatically generated instances to avoid kstat name space
 * collisions.
 */
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, uint_t flags)
{
	return taskq_create_common(name, 0, nthreads, pri, minalloc,
	    maxalloc, flags | TASKQ_NOINSTANCE);
}

static taskq_t *
taskq_create_common(const char *name, int instance, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq = kmem_cache_alloc(taskq_cache, KM_NOSLEEP);
	uint_t ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
	uint_t bsize;	/* # of buckets - always power of 2 */

	ASSERT(instance == 0);
	ASSERT(flags == TASKQ_PREPOPULATE | TASKQ_NOINSTANCE);

	/*
	 * TASKQ_CPR_SAFE and TASKQ_DYNAMIC flags are mutually exclusive.
	 */
	ASSERT((flags & (TASKQ_DYNAMIC | TASKQ_CPR_SAFE)) !=
	    ((TASKQ_DYNAMIC | TASKQ_CPR_SAFE)));

	ASSERT(tq->tq_buckets == NULL);

	bsize = 1 << (highbit(ncpus) - 1);
	bsize = MIN(bsize, taskq_maxbuckets);

	tq->tq_maxsize = nthreads;

	(void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
	tq->tq_name[TASKQ_NAMELEN] = '\0';
	/* Make sure the name conforms to the rules for C identifiers */
	strident_canon(tq->tq_name, TASKQ_NAMELEN);

	tq->tq_flags = flags | TASKQ_ACTIVE;
	tq->tq_active = nthreads;
	tq->tq_nthreads = nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nbuckets = bsize;

	if (flags & TASKQ_PREPOPULATE) {
		mutex_enter(&tq->tq_lock);
		while (minalloc-- > 0)
			taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
		mutex_exit(&tq->tq_lock);
	}

	if (nthreads == 1) {
		tq->tq_thread = thread_create(NULL, 0, taskq_thread, tq,
		    0, NULL, TS_RUN, pri);
	} else {
		kthread_t **tpp = kmem_alloc(sizeof (kthread_t *) * nthreads,
		    KM_SLEEP);

		tq->tq_threadlist = tpp;

		mutex_enter(&tq->tq_lock);
		while (nthreads-- > 0) {
			*tpp = thread_create(NULL, 0, taskq_thread, tq,
			    0, NULL, TS_RUN, pri);
			tpp++;
		}
		mutex_exit(&tq->tq_lock);
	}

	return (tq);
}

/*
 * Assumes: by the time taskq_destroy is called no one will use this task
 * queue in any way and no one will try to dispatch entries in it.
 */
void
taskq_destroy(taskq_t *tq)
{
	taskq_bucket_t *b = tq->tq_buckets;
	taskq_ent_t *tqe;
	int bid = 0;

	ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));

	/*
	 * Wait for any pending entries to complete.
	 */
	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);
	ASSERT((tq->tq_task.tqent_next == &tq->tq_task) &&
	    (tq->tq_active == 0));

	if ((tq->tq_nthreads > 1) && (tq->tq_threadlist != NULL))
		kmem_free(tq->tq_threadlist, sizeof (kthread_t *) *
		    tq->tq_nthreads);

	tq->tq_flags &= ~TASKQ_ACTIVE;
	cv_broadcast(&tq->tq_dispatch_cv);
	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	while (tq->tq_nalloc != 0)
		taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));

	mutex_exit(&tq->tq_lock);

	/*
	 * Mark each bucket as closing and wakeup all sleeping threads.
	 */
	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
		mutex_enter(&b->tqbucket_lock);

		b->tqbucket_flags |= TQBUCKET_CLOSE;
		/* Wakeup all sleeping threads */
		for (tqe = b->tqbucket_freelist.tqent_next;
		    tqe != &b->tqbucket_freelist; tqe = tqe->tqent_next)
			cv_signal(&tqe->tqent_cv);

		ASSERT(b->tqbucket_nalloc == 0);

		/*
		 * At this point we waited for all pending jobs to complete
		 * (in both the task queue and the bucket) and no new jobs
		 * should arrive. Wait for all threads to die.
		 */
		while (b->tqbucket_nfree > 0)
			cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
		mutex_exit(&b->tqbucket_lock);
		mutex_destroy(&b->tqbucket_lock);
		cv_destroy(&b->tqbucket_cv);
	}

	if (tq->tq_buckets != NULL) {
		ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
		kmem_free(tq->tq_buckets,
		    sizeof (taskq_bucket_t) * tq->tq_nbuckets);

		/* Cleanup fields before returning tq to the cache */
		tq->tq_buckets = NULL;
		tq->tq_tcreates = 0;
	} else {
		ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
	}

	tq->tq_totaltime = 0;
	tq->tq_maxtasks = 0;
	tq->tq_executed = 0;
	kmem_cache_free(taskq_cache, tq);
}