/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>.  All rights reserved.
 * Copyright (c) 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>

taskq_t *system_taskq;

#define	TASKQ_ACTIVE	0x00010000
#define	TASKQ_NAMELEN	31
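
/*
 * Userland emulation of the kernel taskq interface: a fixed pool of
 * bound worker threads services a circular, doubly-linked list of
 * pending tasks headed by tq_task.
 */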
struct taskq {
	char		tq_name[TASKQ_NAMELEN + 1];
	kmutex_t	tq_lock;
	krwlock_t	tq_threadlock;
	kcondvar_t	tq_dispatch_cv;
	kcondvar_t	tq_wait_cv;
	thread_t	*tq_threadlist;
	int		tq_flags;
	int		tq_active;
	int		tq_nthreads;
	int		tq_nalloc;
	int		tq_minalloc;
	int		tq_maxalloc;
	kcondvar_t	tq_maxalloc_cv;
	int		tq_maxalloc_wait;
	taskq_ent_t	*tq_freelist;
	taskq_ent_t	tq_task;
};
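
/*
 * Allocate a task entry, preferring the queue's free list.  Called with
 * tq_lock held; the lock is dropped and reacquired around kmem_alloc().
 */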
static taskq_ent_t *
task_alloc(taskq_t *tq, int tqflags)
{
	taskq_ent_t *t;
	int rv;

again:	if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
		tq->tq_freelist = t->tqent_next;
	} else {
		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (!(tqflags & KM_SLEEP))
				return (NULL);

			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller.  So, we just delay for one second
			 * to throttle the allocation rate.  If tasks
			 * complete before the one-second timeout expires,
			 * task_free() will signal us and we will
			 * immediately retry the allocation.
			 */
			tq->tq_maxalloc_wait++;
			rv = cv_timedwait(&tq->tq_maxalloc_cv,
			    &tq->tq_lock, ddi_get_lbolt() + hz);
			tq->tq_maxalloc_wait--;
			if (rv > 0)
				goto again;	/* signaled */
		}
		mutex_exit(&tq->tq_lock);

		t = kmem_alloc(sizeof (taskq_ent_t), tqflags);

		mutex_enter(&tq->tq_lock);
		if (t != NULL)
			tq->tq_nalloc++;
	}
	return (t);
}
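
/*
 * Return a task entry to the free list, or free it outright once
 * tq_minalloc entries are already cached.  Called with tq_lock held;
 * wakes any allocator throttled in task_alloc().
 */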
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_next = tq->tq_freelist;
		tq->tq_freelist = t;
	} else {
		tq->tq_nalloc--;
		mutex_exit(&tq->tq_lock);
		kmem_free(t, sizeof (taskq_ent_t));
		mutex_enter(&tq->tq_lock);
	}

	if (tq->tq_maxalloc_wait)
		cv_signal(&tq->tq_maxalloc_cv);
}
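
/*
 * Queue func(arg) for asynchronous execution.  Returns a nonzero task
 * id on success and 0 if task_alloc() fails.  Typical usage (a sketch;
 * my_func and my_arg are hypothetical):
 *
 *	if (taskq_dispatch(tq, my_func, my_arg, KM_NOSLEEP) == 0)
 *		... no entry available; retry or run my_func inline ...
 */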
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags)
{
	taskq_ent_t *t;

	mutex_enter(&tq->tq_lock);
	ASSERT(tq->tq_flags & TASKQ_ACTIVE);
	if ((t = task_alloc(tq, tqflags)) == NULL) {
		mutex_exit(&tq->tq_lock);
		return (0);
	}
	if (tqflags & TQ_FRONT) {
		t->tqent_next = tq->tq_task.tqent_next;
		t->tqent_prev = &tq->tq_task;
	} else {
		t->tqent_next = &tq->tq_task;
		t->tqent_prev = tq->tq_task.tqent_prev;
	}
	t->tqent_next->tqent_prev = t;
	t->tqent_prev->tqent_next = t;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_flags = 0;
	cv_signal(&tq->tq_dispatch_cv);
	mutex_exit(&tq->tq_lock);
	return (1);
}
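
/*
 * Like taskq_dispatch(), but the caller supplies the task entry, so
 * enqueueing can neither block on nor fail allocation.
 */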
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	ASSERT(func != NULL);
	ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));

	/*
	 * Mark it as a prealloc'd task.  This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/*
	 * Enqueue the task to the underlying queue.
	 */
	mutex_enter(&tq->tq_lock);

	if (flags & TQ_FRONT) {
		t->tqent_next = tq->tq_task.tqent_next;
		t->tqent_prev = &tq->tq_task;
	} else {
		t->tqent_next = &tq->tq_task;
		t->tqent_prev = tq->tq_task.tqent_prev;
	}
	t->tqent_next->tqent_prev = t;
	t->tqent_prev->tqent_next = t;
	t->tqent_func = func;
	t->tqent_arg = arg;
	cv_signal(&tq->tq_dispatch_cv);
	mutex_exit(&tq->tq_lock);
}
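
/*
 * Block until the task list is empty and all workers are idle.
 */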
void
taskq_wait(taskq_t *tq)
{
	mutex_enter(&tq->tq_lock);
	while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
	mutex_exit(&tq->tq_lock);
}
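
/*
 * Worker loop: dequeue tasks and run them, sleeping on tq_dispatch_cv
 * while the queue is empty.  Entries dispatched via taskq_dispatch_ent()
 * are preallocated by the caller and are not passed to task_free().
 */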
static void *
taskq_thread(void *arg)
{
	taskq_t *tq = arg;
	taskq_ent_t *t;
	boolean_t prealloc;

	mutex_enter(&tq->tq_lock);
	while (tq->tq_flags & TASKQ_ACTIVE) {
		if ((t = tq->tq_task.tqent_next) == &tq->tq_task) {
			if (--tq->tq_active == 0)
				cv_broadcast(&tq->tq_wait_cv);
			cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
			tq->tq_active++;
			continue;
		}
		t->tqent_prev->tqent_next = t->tqent_next;
		t->tqent_next->tqent_prev = t->tqent_prev;
		t->tqent_next = NULL;
		t->tqent_prev = NULL;
		prealloc = t->tqent_flags & TQENT_FLAG_PREALLOC;
		mutex_exit(&tq->tq_lock);

		rw_enter(&tq->tq_threadlock, RW_READER);
		t->tqent_func(t->tqent_arg);
		rw_exit(&tq->tq_threadlock);

		mutex_enter(&tq->tq_lock);
		if (!prealloc)
			task_free(tq, t);
	}
	tq->tq_nthreads--;
	cv_broadcast(&tq->tq_wait_cv);
	mutex_exit(&tq->tq_lock);
	return (NULL);
}
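
/*
 * Create a taskq running nthreads bound worker threads.  With
 * TASKQ_THREADS_CPU_PCT, nthreads is a percentage of the online CPUs
 * rather than an absolute thread count.
 */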
/*ARGSUSED*/
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq = kmem_zalloc(sizeof (taskq_t), KM_SLEEP);
	int t;

	if (flags & TASKQ_THREADS_CPU_PCT) {
		int pct;

		ASSERT3S(nthreads, >=, 0);
		ASSERT3S(nthreads, <=, 100);
		pct = MIN(nthreads, 100);

		nthreads = (sysconf(_SC_NPROCESSORS_ONLN) * pct) / 100;
		nthreads = MAX(nthreads, 1);	/* need at least 1 thread */
	} else {
		ASSERT3S(nthreads, >=, 1);
	}

	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
	(void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
	tq->tq_flags = flags | TASKQ_ACTIVE;
	tq->tq_active = nthreads;
	tq->tq_nthreads = nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_task.tqent_next = &tq->tq_task;
	tq->tq_task.tqent_prev = &tq->tq_task;
	tq->tq_threadlist = kmem_alloc(nthreads * sizeof (thread_t), KM_SLEEP);

	if (flags & TASKQ_PREPOPULATE) {
		mutex_enter(&tq->tq_lock);
		while (minalloc-- > 0)
			task_free(tq, task_alloc(tq, KM_SLEEP));
		mutex_exit(&tq->tq_lock);
	}

	for (t = 0; t < nthreads; t++)
		(void) thr_create(0, 0, taskq_thread,
		    tq, THR_BOUND, &tq->tq_threadlist[t]);

	return (tq);
}
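
/*
 * Drain the queue, stop and join the workers, and free all resources.
 */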
void
taskq_destroy(taskq_t *tq)
{
	int t;
	int nthreads = tq->tq_nthreads;

	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);

	tq->tq_flags &= ~TASKQ_ACTIVE;
	cv_broadcast(&tq->tq_dispatch_cv);

	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0) {
		ASSERT(tq->tq_freelist != NULL);
		task_free(tq, task_alloc(tq, KM_SLEEP));
	}

	mutex_exit(&tq->tq_lock);

	for (t = 0; t < nthreads; t++)
		(void) thr_join(tq->tq_threadlist[t], NULL, NULL);

	kmem_free(tq->tq_threadlist, nthreads * sizeof (thread_t));

	rw_destroy(&tq->tq_threadlock);
	mutex_destroy(&tq->tq_lock);
	cv_destroy(&tq->tq_dispatch_cv);
	cv_destroy(&tq->tq_wait_cv);
	cv_destroy(&tq->tq_maxalloc_cv);

	kmem_free(tq, sizeof (taskq_t));
}
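
/*
 * Return nonzero if t identifies one of this taskq's worker threads.
 */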
int
taskq_member(taskq_t *tq, void *t)
{
	int i;

	for (i = 0; i < tq->tq_nthreads; i++)
		if (tq->tq_threadlist[i] == (thread_t)(uintptr_t)t)
			return (1);

	return (0);
}
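
/*
 * Create the global system_taskq declared above.
 */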
void
system_taskq_init(void)
{
	system_taskq = taskq_create("system_taskq", 64, minclsyspri, 4, 512,
	    TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
}
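
/*
 * Tear down the global system_taskq.
 */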
void
system_taskq_fini(void)
{
	taskq_destroy(system_taskq);
	system_taskq = NULL;	/* defensive */
}