/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>.  All rights reserved.
 * Copyright (c) 2014 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>

taskq_t *system_taskq;
taskq_t *system_delay_taskq;

static pthread_key_t taskq_tsd;

#define	TASKQ_ACTIVE	0x00010000
static taskq_ent_t *
task_alloc(taskq_t *tq, int tqflags)
{
	taskq_ent_t *t;
	int rv;

again:	if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		tq->tq_freelist = t->tqent_next;
	} else {
		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (!(tqflags & KM_SLEEP))
				return (NULL);

			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller.  So, we just delay for one second
			 * to throttle the allocation rate.  If a task
			 * completes before the one-second timeout expires,
			 * task_free() will signal us and we will retry the
			 * allocation immediately.
			 */
			tq->tq_maxalloc_wait++;
			rv = cv_timedwait(&tq->tq_maxalloc_cv,
			    &tq->tq_lock, ddi_get_lbolt() + hz);
			tq->tq_maxalloc_wait--;
			if (rv > 0)
				goto again;		/* signaled */
		}
		mutex_exit(&tq->tq_lock);

		t = kmem_alloc(sizeof (taskq_ent_t), tqflags);

		mutex_enter(&tq->tq_lock);
		if (t != NULL) {
			/* Make sure we start without any flags */
			t->tqent_flags = 0;
			tq->tq_nalloc++;
		}
	}
	return (t);
}
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_next = tq->tq_freelist;
		tq->tq_freelist = t;
	} else {
		tq->tq_nalloc--;
		mutex_exit(&tq->tq_lock);
		kmem_free(t, sizeof (taskq_ent_t));
		mutex_enter(&tq->tq_lock);
	}

	if (tq->tq_maxalloc_wait)
		cv_signal(&tq->tq_maxalloc_cv);
}
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags)
{
	taskq_ent_t *t;

	mutex_enter(&tq->tq_lock);
	ASSERT(tq->tq_flags & TASKQ_ACTIVE);
	if ((t = task_alloc(tq, tqflags)) == NULL) {
		mutex_exit(&tq->tq_lock);
		return (0);
	}
	if (tqflags & TQ_FRONT) {
		t->tqent_next = tq->tq_task.tqent_next;
		t->tqent_prev = &tq->tq_task;
	} else {
		t->tqent_next = &tq->tq_task;
		t->tqent_prev = tq->tq_task.tqent_prev;
	}
	t->tqent_next->tqent_prev = t;
	t->tqent_prev->tqent_next = t;
	t->tqent_func = func;
	t->tqent_arg = arg;
	cv_signal(&tq->tq_dispatch_cv);
	mutex_exit(&tq->tq_lock);
	return (1);
}
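
/*
 * Illustrative usage sketch (an assumption added for clarity, not part of the
 * original source): create a queue, dispatch a callback, and drain it.  The
 * names example_cb and example_arg and the queue parameters are hypothetical;
 * TQ_SLEEP is assumed to map to KM_SLEEP as in sys/zfs_context.h, in which
 * case task_alloc() above cannot fail and the dispatch return value may be
 * ignored.
 *
 *	taskq_t *tq = taskq_create("example", 4, maxclsyspri, 4, 64,
 *	    TASKQ_PREPOPULATE);
 *	(void) taskq_dispatch(tq, example_cb, example_arg, TQ_SLEEP);
 *	taskq_wait(tq);
 *	taskq_destroy(tq);
 */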
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags,
    clock_t expire_time)
{
	return (0);
}
int
taskq_empty_ent(taskq_ent_t *t)
{
	return (t->tqent_next == NULL);
}

void
taskq_init_ent(taskq_ent_t *t)
{
	t->tqent_next = NULL;
	t->tqent_prev = NULL;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
}
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	ASSERT(func != NULL);

	/*
	 * Mark it as a prealloc'd task.  This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/*
	 * Enqueue the task to the underlying queue.
	 */
	mutex_enter(&tq->tq_lock);

	if (flags & TQ_FRONT) {
		t->tqent_next = tq->tq_task.tqent_next;
		t->tqent_prev = &tq->tq_task;
	} else {
		t->tqent_next = &tq->tq_task;
		t->tqent_prev = tq->tq_task.tqent_prev;
	}
	t->tqent_next->tqent_prev = t;
	t->tqent_prev->tqent_next = t;
	t->tqent_func = func;
	t->tqent_arg = arg;
	cv_signal(&tq->tq_dispatch_cv);
	mutex_exit(&tq->tq_lock);
}
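
/*
 * Illustrative usage sketch (an assumption added for clarity, not part of the
 * original source): a caller that wants allocation-free dispatch embeds a
 * taskq_ent_t in its own structure, initializes it once with
 * taskq_init_ent(), and dispatches it with taskq_dispatch_ent().  Because
 * TQENT_FLAG_PREALLOC is set above, taskq_thread() will not hand the entry
 * to task_free().  The struct example_work and example_cb names are
 * hypothetical.
 *
 *	struct example_work {
 *		taskq_ent_t	ew_tqe;
 *		void		*ew_data;
 *	};
 *
 *	taskq_init_ent(&ew->ew_tqe);
 *	taskq_dispatch_ent(tq, example_cb, ew, 0, &ew->ew_tqe);
 */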
void
taskq_wait(taskq_t *tq)
{
	mutex_enter(&tq->tq_lock);
	while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
	mutex_exit(&tq->tq_lock);
}
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	taskq_wait(tq);
}

void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	taskq_wait(tq);
}
static void
taskq_thread(void *arg)
{
	taskq_t *tq = arg;
	taskq_ent_t *t;
	boolean_t prealloc;

	VERIFY0(pthread_setspecific(taskq_tsd, tq));

	mutex_enter(&tq->tq_lock);
	while (tq->tq_flags & TASKQ_ACTIVE) {
		if ((t = tq->tq_task.tqent_next) == &tq->tq_task) {
			if (--tq->tq_active == 0)
				cv_broadcast(&tq->tq_wait_cv);
			cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
			tq->tq_active++;
			continue;
		}
		t->tqent_prev->tqent_next = t->tqent_next;
		t->tqent_next->tqent_prev = t->tqent_prev;
		t->tqent_next = NULL;
		t->tqent_prev = NULL;
		prealloc = t->tqent_flags & TQENT_FLAG_PREALLOC;
		mutex_exit(&tq->tq_lock);

		rw_enter(&tq->tq_threadlock, RW_READER);
		t->tqent_func(t->tqent_arg);
		rw_exit(&tq->tq_threadlock);

		mutex_enter(&tq->tq_lock);
		if (!prealloc)
			task_free(tq, t);
	}
	tq->tq_nthreads--;
	cv_broadcast(&tq->tq_wait_cv);
	mutex_exit(&tq->tq_lock);
}
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq = kmem_zalloc(sizeof (taskq_t), KM_SLEEP);
	int t;

	if (flags & TASKQ_THREADS_CPU_PCT) {
		int pct;
		ASSERT3S(nthreads, >=, 0);
		ASSERT3S(nthreads, <=, 100);
		pct = MIN(nthreads, 100);
		pct = MAX(pct, 0);

		nthreads = (sysconf(_SC_NPROCESSORS_ONLN) * pct) / 100;
		nthreads = MAX(nthreads, 1);	/* need at least 1 thread */
	} else {
		ASSERT3S(nthreads, >=, 1);
	}

	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
	(void) strncpy(tq->tq_name, name, TASKQ_NAMELEN);
	tq->tq_flags = flags | TASKQ_ACTIVE;
	tq->tq_active = nthreads;
	tq->tq_nthreads = nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_task.tqent_next = &tq->tq_task;
	tq->tq_task.tqent_prev = &tq->tq_task;
	tq->tq_threadlist = kmem_alloc(nthreads * sizeof (kthread_t *),
	    KM_SLEEP);

	if (flags & TASKQ_PREPOPULATE) {
		mutex_enter(&tq->tq_lock);
		while (minalloc-- > 0)
			task_free(tq, task_alloc(tq, KM_SLEEP));
		mutex_exit(&tq->tq_lock);
	}

	for (t = 0; t < nthreads; t++)
		VERIFY((tq->tq_threadlist[t] = thread_create(NULL, 0,
		    taskq_thread, tq, 0, &p0, TS_RUN, pri)) != NULL);

	return (tq);
}
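
/*
 * Worked example of the TASKQ_THREADS_CPU_PCT computation above (the CPU
 * counts are hypothetical): with 8 online CPUs and nthreads = 75, the queue
 * gets (8 * 75) / 100 = 6 threads; with 1 CPU and nthreads = 10, the result
 * of (1 * 10) / 100 is 0, so the MAX() clamp raises it to 1 thread.
 */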
void
taskq_destroy(taskq_t *tq)
{
	int nthreads = tq->tq_nthreads;

	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);

	tq->tq_flags &= ~TASKQ_ACTIVE;
	cv_broadcast(&tq->tq_dispatch_cv);

	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0) {
		ASSERT(tq->tq_freelist != NULL);
		task_free(tq, task_alloc(tq, KM_SLEEP));
	}

	mutex_exit(&tq->tq_lock);

	kmem_free(tq->tq_threadlist, nthreads * sizeof (kthread_t *));

	rw_destroy(&tq->tq_threadlock);
	mutex_destroy(&tq->tq_lock);
	cv_destroy(&tq->tq_dispatch_cv);
	cv_destroy(&tq->tq_wait_cv);
	cv_destroy(&tq->tq_maxalloc_cv);

	kmem_free(tq, sizeof (taskq_t));
}
int
taskq_member(taskq_t *tq, kthread_t *t)
{
	int i;

	for (i = 0; i < tq->tq_nthreads; i++)
		if (tq->tq_threadlist[i] == t)
			return (1);

	return (0);
}

taskq_t *
taskq_of_curthread(void)
{
	return (pthread_getspecific(taskq_tsd));
}
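
/*
 * Illustrative usage sketch (an assumption added for clarity, not part of the
 * original source): a task callback can check which queue it is running on,
 * e.g.
 *
 *	ASSERT3P(taskq_of_curthread(), ==, expected_tq);
 *
 * where expected_tq is a hypothetical queue pointer.  The call returns NULL
 * for threads that are not taskq workers, since taskq_tsd is only set in
 * taskq_thread().
 */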
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	return (ENOENT);
}
void
system_taskq_init(void)
{
	VERIFY0(pthread_key_create(&taskq_tsd, NULL));
	system_taskq = taskq_create("system_taskq", 64, maxclsyspri, 4, 512,
	    TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
	system_delay_taskq = taskq_create("delay_taskq", 4, maxclsyspri, 4,
	    512, TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
}
void
system_taskq_fini(void)
{
	taskq_destroy(system_taskq);
	system_taskq = NULL;	/* defensive */
	taskq_destroy(system_delay_taskq);
	system_delay_taskq = NULL;
	VERIFY0(pthread_key_delete(taskq_tsd));
}