4 * Copyright (C) 2004-2009 Internet Systems Consortium, Inc. ("ISC")
5 * Copyright (C) 1998-2003 Internet Software Consortium.
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
12 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
13 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
14 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
15 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
16 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
20 /* Id: task.c,v 1.111 2009/10/05 17:30:49 fdupont Exp */
23 * \author Principal Author: Bob Halley
27 * XXXRTH Need to document the states a task can be in, and the rules
28 * for changing states.
33 #include <isc/condition.h>
34 #include <isc/event.h>
35 #include <isc/magic.h>
38 #include <isc/platform.h>
39 #include <isc/string.h>
41 #include <isc/thread.h>
46 #include <openssl/err.h>
50 * For BIND9 internal applications:
51 * when built with threads we use multiple worker threads shared by the whole
53 * when built without threads we share a single global task manager and use
54 * an integrated event loop for socket, timer, and other generic task events.
55 * For generic library:
56 * we don't use either of them: an application can have multiple task managers
57 * whether or not it's threaded, and if the application is threaded each thread
58 * is expected to have a separate manager; no "worker threads" are shared by
59 * the application threads.
62 #ifdef ISC_PLATFORM_USETHREADS
63 #define USE_WORKER_THREADS
65 #define USE_SHARED_MANAGER
66 #endif /* ISC_PLATFORM_USETHREADS */
69 #ifndef USE_WORKER_THREADS
71 #endif /* USE_WORKER_THREADS */
/*
 * Debug tracing macros: compiled to fprintf(stderr) calls when task
 * tracing is enabled, and to nothing otherwise.  XTRACE expects a local
 * variable named 'task' to be in scope; XTTRACE takes the task explicitly.
 */
#ifdef ISC_TASK_TRACE
#define XTRACE(m)		fprintf(stderr, "task %p thread %lu: %s\n", \
				       task, isc_thread_self(), (m))
#define XTTRACE(t, m)		fprintf(stderr, "task %p thread %lu: %s\n", \
				       (t), isc_thread_self(), (m))
#define XTHREADTRACE(m)		fprintf(stderr, "thread %lu: %s\n", \
				       isc_thread_self(), (m))
#else
#define XTRACE(m)
#define XTTRACE(t, m)
#define XTHREADTRACE(m)
#endif
/*%
 * Task states.  A task is idle (no events, not queued), ready (queued for
 * a worker), running (a worker is dispatching its events), or done
 * (shutting-down task with no references left; about to be destroyed).
 */
typedef enum {
	task_state_idle, task_state_ready, task_state_running,
	task_state_done
} task_state_t;

#if defined(HAVE_LIBXML2) && defined(BIND9)
/* Human-readable names, indexed by task_state_t, for XML statistics. */
static const char *statenames[] = {
	"idle", "ready", "running", "done",
};
#endif
101 #define TASK_MAGIC ISC_MAGIC('T', 'A', 'S', 'K')
102 #define VALID_TASK(t) ISC_MAGIC_VALID(t, TASK_MAGIC)
104 typedef struct isc__task isc__task_t
;
105 typedef struct isc__taskmgr isc__taskmgr_t
;
110 isc__taskmgr_t
* manager
;
112 /* Locked by task lock. */
114 unsigned int references
;
115 isc_eventlist_t events
;
116 isc_eventlist_t on_shutdown
;
117 unsigned int quantum
;
122 /* Locked by task manager lock. */
123 LINK(isc__task_t
) link
;
124 LINK(isc__task_t
) ready_link
;
127 #define TASK_F_SHUTTINGDOWN 0x01
129 #define TASK_SHUTTINGDOWN(t) (((t)->flags & TASK_F_SHUTTINGDOWN) \
132 #define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
133 #define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
135 typedef ISC_LIST(isc__task_t
) isc__tasklist_t
;
137 struct isc__taskmgr
{
139 isc_taskmgr_t common
;
142 #ifdef ISC_PLATFORM_USETHREADS
143 unsigned int workers
;
144 isc_thread_t
* threads
;
145 #endif /* ISC_PLATFORM_USETHREADS */
146 /* Locked by task manager lock. */
147 unsigned int default_quantum
;
148 LIST(isc__task_t
) tasks
;
149 isc__tasklist_t ready_tasks
;
150 #ifdef ISC_PLATFORM_USETHREADS
151 isc_condition_t work_available
;
152 isc_condition_t exclusive_granted
;
153 #endif /* ISC_PLATFORM_USETHREADS */
154 unsigned int tasks_running
;
155 isc_boolean_t exclusive_requested
;
156 isc_boolean_t exiting
;
157 #ifdef USE_SHARED_MANAGER
159 #endif /* ISC_PLATFORM_USETHREADS */
/* Events dispatched per isc__taskmgr_dispatch() call (non-threaded mode). */
#define DEFAULT_TASKMGR_QUANTUM		10
/* Per-task quantum used when the caller passes 0 to isc_task_create(). */
#define DEFAULT_DEFAULT_QUANTUM		5
/* Manager is shutting down and every task has been destroyed. */
#define FINISHED(m)			((m)->exiting && EMPTY((m)->tasks))

#ifdef USE_SHARED_MANAGER
/* Single process-wide manager shared by all callers (non-threaded builds). */
static isc__taskmgr_t *taskmgr = NULL;
#endif /* USE_SHARED_MANAGER */
/*
 * The following can be either static or public, depending on build environment.
 */
#ifdef BIND9
#define ISC_TASKFUNC_SCOPE
#else
#define ISC_TASKFUNC_SCOPE static
#endif
180 ISC_TASKFUNC_SCOPE isc_result_t
181 isc__task_create(isc_taskmgr_t
*manager0
, unsigned int quantum
,
183 ISC_TASKFUNC_SCOPE
void
184 isc__task_attach(isc_task_t
*source0
, isc_task_t
**targetp
);
185 ISC_TASKFUNC_SCOPE
void
186 isc__task_detach(isc_task_t
**taskp
);
187 ISC_TASKFUNC_SCOPE
void
188 isc__task_send(isc_task_t
*task0
, isc_event_t
**eventp
);
189 ISC_TASKFUNC_SCOPE
void
190 isc__task_sendanddetach(isc_task_t
**taskp
, isc_event_t
**eventp
);
191 ISC_TASKFUNC_SCOPE
unsigned int
192 isc__task_purgerange(isc_task_t
*task0
, void *sender
, isc_eventtype_t first
,
193 isc_eventtype_t last
, void *tag
);
194 ISC_TASKFUNC_SCOPE
unsigned int
195 isc__task_purge(isc_task_t
*task
, void *sender
, isc_eventtype_t type
,
197 ISC_TASKFUNC_SCOPE isc_boolean_t
198 isc__task_purgeevent(isc_task_t
*task0
, isc_event_t
*event
);
199 ISC_TASKFUNC_SCOPE
unsigned int
200 isc__task_unsendrange(isc_task_t
*task
, void *sender
, isc_eventtype_t first
,
201 isc_eventtype_t last
, void *tag
,
202 isc_eventlist_t
*events
);
203 ISC_TASKFUNC_SCOPE
unsigned int
204 isc__task_unsend(isc_task_t
*task
, void *sender
, isc_eventtype_t type
,
205 void *tag
, isc_eventlist_t
*events
);
206 ISC_TASKFUNC_SCOPE isc_result_t
207 isc__task_onshutdown(isc_task_t
*task0
, isc_taskaction_t action
,
209 ISC_TASKFUNC_SCOPE
void
210 isc__task_shutdown(isc_task_t
*task0
);
211 ISC_TASKFUNC_SCOPE
void
212 isc__task_destroy(isc_task_t
**taskp
);
213 ISC_TASKFUNC_SCOPE
void
214 isc__task_setname(isc_task_t
*task0
, const char *name
, void *tag
);
215 ISC_TASKFUNC_SCOPE
const char *
216 isc__task_getname(isc_task_t
*task0
);
217 ISC_TASKFUNC_SCOPE
void *
218 isc__task_gettag(isc_task_t
*task0
);
219 ISC_TASKFUNC_SCOPE
void
220 isc__task_getcurrenttime(isc_task_t
*task0
, isc_stdtime_t
*t
);
221 ISC_TASKFUNC_SCOPE isc_result_t
222 isc__taskmgr_create(isc_mem_t
*mctx
, unsigned int workers
,
223 unsigned int default_quantum
, isc_taskmgr_t
**managerp
);
224 ISC_TASKFUNC_SCOPE
void
225 isc__taskmgr_destroy(isc_taskmgr_t
**managerp
);
226 ISC_TASKFUNC_SCOPE isc_result_t
227 isc__task_beginexclusive(isc_task_t
*task
);
228 ISC_TASKFUNC_SCOPE
void
229 isc__task_endexclusive(isc_task_t
*task0
);
231 static struct isc__taskmethods
{
232 isc_taskmethods_t methods
;
235 * The following are defined just for avoiding unused static functions.
238 void *purgeevent
, *unsendrange
,
239 *getname
, *gettag
, *getcurrenttime
, *beginexclusive
,
248 isc__task_sendanddetach
,
250 isc__task_onshutdown
,
258 (void *)isc__task_purgeevent
, (void *)isc__task_unsendrange
,
259 (void *)isc__task_getname
, (void *)isc__task_gettag
,
260 (void *)isc__task_getcurrenttime
, (void *)isc__task_beginexclusive
,
261 (void *)isc__task_endexclusive
265 static isc_taskmgrmethods_t taskmgrmethods
= {
266 isc__taskmgr_destroy
,
275 task_finished(isc__task_t
*task
) {
276 isc__taskmgr_t
*manager
= task
->manager
;
278 REQUIRE(EMPTY(task
->events
));
279 REQUIRE(EMPTY(task
->on_shutdown
));
280 REQUIRE(task
->references
== 0);
281 REQUIRE(task
->state
== task_state_done
);
283 XTRACE("task_finished");
285 LOCK(&manager
->lock
);
286 UNLINK(manager
->tasks
, task
, link
);
287 #ifdef USE_WORKER_THREADS
288 if (FINISHED(manager
)) {
290 * All tasks have completed and the
291 * task manager is exiting. Wake up
292 * any idle worker threads so they
295 BROADCAST(&manager
->work_available
);
297 #endif /* USE_WORKER_THREADS */
298 UNLOCK(&manager
->lock
);
300 DESTROYLOCK(&task
->lock
);
301 task
->common
.impmagic
= 0;
302 task
->common
.magic
= 0;
303 isc_mem_put(manager
->mctx
, task
, sizeof(*task
));
306 ISC_TASKFUNC_SCOPE isc_result_t
307 isc__task_create(isc_taskmgr_t
*manager0
, unsigned int quantum
,
310 isc__taskmgr_t
*manager
= (isc__taskmgr_t
*)manager0
;
312 isc_boolean_t exiting
;
315 REQUIRE(VALID_MANAGER(manager
));
316 REQUIRE(taskp
!= NULL
&& *taskp
== NULL
);
318 task
= isc_mem_get(manager
->mctx
, sizeof(*task
));
320 return (ISC_R_NOMEMORY
);
321 XTRACE("isc_task_create");
322 task
->manager
= manager
;
323 result
= isc_mutex_init(&task
->lock
);
324 if (result
!= ISC_R_SUCCESS
) {
325 isc_mem_put(manager
->mctx
, task
, sizeof(*task
));
328 task
->state
= task_state_idle
;
329 task
->references
= 1;
330 INIT_LIST(task
->events
);
331 INIT_LIST(task
->on_shutdown
);
332 task
->quantum
= quantum
;
335 memset(task
->name
, 0, sizeof(task
->name
));
337 INIT_LINK(task
, link
);
338 INIT_LINK(task
, ready_link
);
341 LOCK(&manager
->lock
);
342 if (!manager
->exiting
) {
343 if (task
->quantum
== 0)
344 task
->quantum
= manager
->default_quantum
;
345 APPEND(manager
->tasks
, task
, link
);
348 UNLOCK(&manager
->lock
);
351 DESTROYLOCK(&task
->lock
);
352 isc_mem_put(manager
->mctx
, task
, sizeof(*task
));
353 return (ISC_R_SHUTTINGDOWN
);
356 task
->common
.methods
= (isc_taskmethods_t
*)&taskmethods
;
357 task
->common
.magic
= ISCAPI_TASK_MAGIC
;
358 task
->common
.impmagic
= TASK_MAGIC
;
359 *taskp
= (isc_task_t
*)task
;
361 return (ISC_R_SUCCESS
);
364 ISC_TASKFUNC_SCOPE
void
365 isc__task_attach(isc_task_t
*source0
, isc_task_t
**targetp
) {
366 isc__task_t
*source
= (isc__task_t
*)source0
;
369 * Attach *targetp to source.
372 REQUIRE(VALID_TASK(source
));
373 REQUIRE(targetp
!= NULL
&& *targetp
== NULL
);
375 XTTRACE(source
, "isc_task_attach");
378 source
->references
++;
379 UNLOCK(&source
->lock
);
381 *targetp
= (isc_task_t
*)source
;
384 static inline isc_boolean_t
385 task_shutdown(isc__task_t
*task
) {
386 isc_boolean_t was_idle
= ISC_FALSE
;
387 isc_event_t
*event
, *prev
;
390 * Caller must be holding the task's lock.
393 XTRACE("task_shutdown");
395 if (! TASK_SHUTTINGDOWN(task
)) {
396 XTRACE(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_GENERAL
,
397 ISC_MSG_SHUTTINGDOWN
, "shutting down"));
398 task
->flags
|= TASK_F_SHUTTINGDOWN
;
399 if (task
->state
== task_state_idle
) {
400 INSIST(EMPTY(task
->events
));
401 task
->state
= task_state_ready
;
404 INSIST(task
->state
== task_state_ready
||
405 task
->state
== task_state_running
);
407 * Note that we post shutdown events LIFO.
409 for (event
= TAIL(task
->on_shutdown
);
412 prev
= PREV(event
, ev_link
);
413 DEQUEUE(task
->on_shutdown
, event
, ev_link
);
414 ENQUEUE(task
->events
, event
, ev_link
);
422 task_ready(isc__task_t
*task
) {
423 isc__taskmgr_t
*manager
= task
->manager
;
425 REQUIRE(VALID_MANAGER(manager
));
426 REQUIRE(task
->state
== task_state_ready
);
428 XTRACE("task_ready");
430 LOCK(&manager
->lock
);
432 ENQUEUE(manager
->ready_tasks
, task
, ready_link
);
433 #ifdef USE_WORKER_THREADS
434 SIGNAL(&manager
->work_available
);
435 #endif /* USE_WORKER_THREADS */
437 UNLOCK(&manager
->lock
);
440 static inline isc_boolean_t
441 task_detach(isc__task_t
*task
) {
444 * Caller must be holding the task lock.
447 REQUIRE(task
->references
> 0);
452 if (task
->references
== 0 && task
->state
== task_state_idle
) {
453 INSIST(EMPTY(task
->events
));
455 * There are no references to this task, and no
456 * pending events. We could try to optimize and
457 * either initiate shutdown or clean up the task,
458 * depending on its state, but it's easier to just
459 * make the task ready and allow run() or the event
460 * loop to deal with shutting down and termination.
462 task
->state
= task_state_ready
;
469 ISC_TASKFUNC_SCOPE
void
470 isc__task_detach(isc_task_t
**taskp
) {
472 isc_boolean_t was_idle
;
475 * Detach *taskp from its task.
478 REQUIRE(taskp
!= NULL
);
479 task
= (isc__task_t
*)*taskp
;
480 REQUIRE(VALID_TASK(task
));
482 XTRACE("isc_task_detach");
485 was_idle
= task_detach(task
);
494 static inline isc_boolean_t
495 task_send(isc__task_t
*task
, isc_event_t
**eventp
) {
496 isc_boolean_t was_idle
= ISC_FALSE
;
500 * Caller must be holding the task lock.
503 REQUIRE(eventp
!= NULL
);
505 REQUIRE(event
!= NULL
);
506 REQUIRE(event
->ev_type
> 0);
507 REQUIRE(task
->state
!= task_state_done
);
511 if (task
->state
== task_state_idle
) {
513 INSIST(EMPTY(task
->events
));
514 task
->state
= task_state_ready
;
516 INSIST(task
->state
== task_state_ready
||
517 task
->state
== task_state_running
);
518 ENQUEUE(task
->events
, event
, ev_link
);
524 ISC_TASKFUNC_SCOPE
void
525 isc__task_send(isc_task_t
*task0
, isc_event_t
**eventp
) {
526 isc__task_t
*task
= (isc__task_t
*)task0
;
527 isc_boolean_t was_idle
;
530 * Send '*event' to 'task'.
533 REQUIRE(VALID_TASK(task
));
535 XTRACE("isc_task_send");
538 * We're trying hard to hold locks for as short a time as possible.
539 * We're also trying to hold as few locks as possible. This is why
540 * some processing is deferred until after the lock is released.
543 was_idle
= task_send(task
, eventp
);
548 * We need to add this task to the ready queue.
550 * We've waited until now to do it because making a task
551 * ready requires locking the manager. If we tried to do
552 * this while holding the task lock, we could deadlock.
554 * We've changed the state to ready, so no one else will
555 * be trying to add this task to the ready queue. The
556 * only way to leave the ready state is by executing the
557 * task. It thus doesn't matter if events are added,
558 * removed, or a shutdown is started in the interval
559 * between the time we released the task lock, and the time
560 * we add the task to the ready queue.
566 ISC_TASKFUNC_SCOPE
void
567 isc__task_sendanddetach(isc_task_t
**taskp
, isc_event_t
**eventp
) {
568 isc_boolean_t idle1
, idle2
;
572 * Send '*event' to '*taskp' and then detach '*taskp' from its
576 REQUIRE(taskp
!= NULL
);
577 task
= (isc__task_t
*)*taskp
;
578 REQUIRE(VALID_TASK(task
));
580 XTRACE("isc_task_sendanddetach");
583 idle1
= task_send(task
, eventp
);
584 idle2
= task_detach(task
);
588 * If idle1, then idle2 shouldn't be true as well since we're holding
589 * the task lock, and thus the task cannot switch from ready back to
592 INSIST(!(idle1
&& idle2
));
/* An event may be purged unless it carries the NOPURGE attribute. */
#define PURGE_OK(event)	(((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
603 dequeue_events(isc__task_t
*task
, void *sender
, isc_eventtype_t first
,
604 isc_eventtype_t last
, void *tag
,
605 isc_eventlist_t
*events
, isc_boolean_t purging
)
607 isc_event_t
*event
, *next_event
;
608 unsigned int count
= 0;
610 REQUIRE(VALID_TASK(task
));
611 REQUIRE(last
>= first
);
613 XTRACE("dequeue_events");
616 * Events matching 'sender', whose type is >= first and <= last, and
617 * whose tag is 'tag' will be dequeued. If 'purging', matching events
618 * which are marked as unpurgable will not be dequeued.
620 * sender == NULL means "any sender", and tag == NULL means "any tag".
625 for (event
= HEAD(task
->events
); event
!= NULL
; event
= next_event
) {
626 next_event
= NEXT(event
, ev_link
);
627 if (event
->ev_type
>= first
&& event
->ev_type
<= last
&&
628 (sender
== NULL
|| event
->ev_sender
== sender
) &&
629 (tag
== NULL
|| event
->ev_tag
== tag
) &&
630 (!purging
|| PURGE_OK(event
))) {
631 DEQUEUE(task
->events
, event
, ev_link
);
632 ENQUEUE(*events
, event
, ev_link
);
642 ISC_TASKFUNC_SCOPE
unsigned int
643 isc__task_purgerange(isc_task_t
*task0
, void *sender
, isc_eventtype_t first
,
644 isc_eventtype_t last
, void *tag
)
646 isc__task_t
*task
= (isc__task_t
*)task0
;
648 isc_eventlist_t events
;
649 isc_event_t
*event
, *next_event
;
652 * Purge events from a task's event queue.
655 XTRACE("isc_task_purgerange");
657 ISC_LIST_INIT(events
);
659 count
= dequeue_events(task
, sender
, first
, last
, tag
, &events
,
662 for (event
= HEAD(events
); event
!= NULL
; event
= next_event
) {
663 next_event
= NEXT(event
, ev_link
);
664 isc_event_free(&event
);
668 * Note that purging never changes the state of the task.
674 ISC_TASKFUNC_SCOPE
unsigned int
675 isc__task_purge(isc_task_t
*task
, void *sender
, isc_eventtype_t type
,
679 * Purge events from a task's event queue.
682 XTRACE("isc_task_purge");
684 return (isc__task_purgerange(task
, sender
, type
, type
, tag
));
687 ISC_TASKFUNC_SCOPE isc_boolean_t
688 isc__task_purgeevent(isc_task_t
*task0
, isc_event_t
*event
) {
689 isc__task_t
*task
= (isc__task_t
*)task0
;
690 isc_event_t
*curr_event
, *next_event
;
693 * Purge 'event' from a task's event queue.
695 * XXXRTH: WARNING: This method may be removed before beta.
698 REQUIRE(VALID_TASK(task
));
701 * If 'event' is on the task's event queue, it will be purged,
702 * unless it is marked as unpurgeable. 'event' does not have to be
703 * on the task's event queue; in fact, it can even be an invalid
704 * pointer. Purging only occurs if the event is actually on the task's
707 * Purging never changes the state of the task.
711 for (curr_event
= HEAD(task
->events
);
713 curr_event
= next_event
) {
714 next_event
= NEXT(curr_event
, ev_link
);
715 if (curr_event
== event
&& PURGE_OK(event
)) {
716 DEQUEUE(task
->events
, curr_event
, ev_link
);
722 if (curr_event
== NULL
)
725 isc_event_free(&curr_event
);
730 ISC_TASKFUNC_SCOPE
unsigned int
731 isc__task_unsendrange(isc_task_t
*task
, void *sender
, isc_eventtype_t first
,
732 isc_eventtype_t last
, void *tag
,
733 isc_eventlist_t
*events
)
736 * Remove events from a task's event queue.
739 XTRACE("isc_task_unsendrange");
741 return (dequeue_events((isc__task_t
*)task
, sender
, first
,
742 last
, tag
, events
, ISC_FALSE
));
745 ISC_TASKFUNC_SCOPE
unsigned int
746 isc__task_unsend(isc_task_t
*task
, void *sender
, isc_eventtype_t type
,
747 void *tag
, isc_eventlist_t
*events
)
750 * Remove events from a task's event queue.
753 XTRACE("isc_task_unsend");
755 return (dequeue_events((isc__task_t
*)task
, sender
, type
,
756 type
, tag
, events
, ISC_FALSE
));
759 ISC_TASKFUNC_SCOPE isc_result_t
760 isc__task_onshutdown(isc_task_t
*task0
, isc_taskaction_t action
,
763 isc__task_t
*task
= (isc__task_t
*)task0
;
764 isc_boolean_t disallowed
= ISC_FALSE
;
765 isc_result_t result
= ISC_R_SUCCESS
;
769 * Send a shutdown event with action 'action' and argument 'arg' when
770 * 'task' is shutdown.
773 REQUIRE(VALID_TASK(task
));
774 REQUIRE(action
!= NULL
);
776 event
= isc_event_allocate(task
->manager
->mctx
,
778 ISC_TASKEVENT_SHUTDOWN
,
783 return (ISC_R_NOMEMORY
);
786 if (TASK_SHUTTINGDOWN(task
)) {
787 disallowed
= ISC_TRUE
;
788 result
= ISC_R_SHUTTINGDOWN
;
790 ENQUEUE(task
->on_shutdown
, event
, ev_link
);
794 isc_mem_put(task
->manager
->mctx
, event
, sizeof(*event
));
799 ISC_TASKFUNC_SCOPE
void
800 isc__task_shutdown(isc_task_t
*task0
) {
801 isc__task_t
*task
= (isc__task_t
*)task0
;
802 isc_boolean_t was_idle
;
808 REQUIRE(VALID_TASK(task
));
811 was_idle
= task_shutdown(task
);
818 ISC_TASKFUNC_SCOPE
void
819 isc__task_destroy(isc_task_t
**taskp
) {
825 REQUIRE(taskp
!= NULL
);
827 isc_task_shutdown(*taskp
);
828 isc_task_detach(taskp
);
831 ISC_TASKFUNC_SCOPE
void
832 isc__task_setname(isc_task_t
*task0
, const char *name
, void *tag
) {
833 isc__task_t
*task
= (isc__task_t
*)task0
;
839 REQUIRE(VALID_TASK(task
));
842 memset(task
->name
, 0, sizeof(task
->name
));
843 strncpy(task
->name
, name
, sizeof(task
->name
) - 1);
848 ISC_TASKFUNC_SCOPE
const char *
849 isc__task_getname(isc_task_t
*task0
) {
850 isc__task_t
*task
= (isc__task_t
*)task0
;
852 REQUIRE(VALID_TASK(task
));
857 ISC_TASKFUNC_SCOPE
void *
858 isc__task_gettag(isc_task_t
*task0
) {
859 isc__task_t
*task
= (isc__task_t
*)task0
;
861 REQUIRE(VALID_TASK(task
));
866 ISC_TASKFUNC_SCOPE
void
867 isc__task_getcurrenttime(isc_task_t
*task0
, isc_stdtime_t
*t
) {
868 isc__task_t
*task
= (isc__task_t
*)task0
;
870 REQUIRE(VALID_TASK(task
));
884 dispatch(isc__taskmgr_t
*manager
) {
886 #ifndef USE_WORKER_THREADS
887 unsigned int total_dispatch_count
= 0;
888 isc__tasklist_t ready_tasks
;
889 #endif /* USE_WORKER_THREADS */
891 REQUIRE(VALID_MANAGER(manager
));
894 * Again we're trying to hold the lock for as short a time as possible
895 * and to do as little locking and unlocking as possible.
897 * In both while loops, the appropriate lock must be held before the
898 * while body starts. Code which acquired the lock at the top of
899 * the loop would be more readable, but would result in a lot of
900 * extra locking. Compare:
907 * while (expression) {
912 * Unlocked part here...
919 * Note how if the loop continues we unlock and then immediately lock.
920 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
921 * unlocks. Also note that the lock is not held when the while
922 * condition is tested, which may or may not be important, depending
928 * while (expression) {
932 * Unlocked part here...
939 * For N iterations of the loop, this code does N+1 locks and N+1
940 * unlocks. The while expression is always protected by the lock.
943 #ifndef USE_WORKER_THREADS
944 ISC_LIST_INIT(ready_tasks
);
946 LOCK(&manager
->lock
);
947 while (!FINISHED(manager
)) {
948 #ifdef USE_WORKER_THREADS
950 * For reasons similar to those given in the comment in
951 * isc_task_send() above, it is safe for us to dequeue
952 * the task while only holding the manager lock, and then
953 * change the task to running state while only holding the
956 while ((EMPTY(manager
->ready_tasks
) ||
957 manager
->exclusive_requested
) &&
960 XTHREADTRACE(isc_msgcat_get(isc_msgcat
,
962 ISC_MSG_WAIT
, "wait"));
963 WAIT(&manager
->work_available
, &manager
->lock
);
964 XTHREADTRACE(isc_msgcat_get(isc_msgcat
,
966 ISC_MSG_AWAKE
, "awake"));
968 #else /* USE_WORKER_THREADS */
969 if (total_dispatch_count
>= DEFAULT_TASKMGR_QUANTUM
||
970 EMPTY(manager
->ready_tasks
))
972 #endif /* USE_WORKER_THREADS */
973 XTHREADTRACE(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_TASK
,
974 ISC_MSG_WORKING
, "working"));
976 task
= HEAD(manager
->ready_tasks
);
978 unsigned int dispatch_count
= 0;
979 isc_boolean_t done
= ISC_FALSE
;
980 isc_boolean_t requeue
= ISC_FALSE
;
981 isc_boolean_t finished
= ISC_FALSE
;
984 INSIST(VALID_TASK(task
));
987 * Note we only unlock the manager lock if we actually
988 * have a task to do. We must reacquire the manager
989 * lock before exiting the 'if (task != NULL)' block.
991 DEQUEUE(manager
->ready_tasks
, task
, ready_link
);
992 manager
->tasks_running
++;
993 UNLOCK(&manager
->lock
);
996 INSIST(task
->state
== task_state_ready
);
997 task
->state
= task_state_running
;
998 XTRACE(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_GENERAL
,
999 ISC_MSG_RUNNING
, "running"));
1000 isc_stdtime_get(&task
->now
);
1002 if (!EMPTY(task
->events
)) {
1003 event
= HEAD(task
->events
);
1004 DEQUEUE(task
->events
, event
, ev_link
);
1007 * Execute the event action.
1009 XTRACE(isc_msgcat_get(isc_msgcat
,
1013 if (event
->ev_action
!= NULL
) {
1014 UNLOCK(&task
->lock
);
1021 #ifndef USE_WORKER_THREADS
1022 total_dispatch_count
++;
1023 #endif /* USE_WORKER_THREADS */
1026 if (task
->references
== 0 &&
1027 EMPTY(task
->events
) &&
1028 !TASK_SHUTTINGDOWN(task
)) {
1029 isc_boolean_t was_idle
;
1032 * There are no references and no
1033 * pending events for this task,
1034 * which means it will not become
1035 * runnable again via an external
1036 * action (such as sending an event
1039 * We initiate shutdown to prevent
1040 * it from becoming a zombie.
1042 * We do this here instead of in
1043 * the "if EMPTY(task->events)" block
1046 * If we post no shutdown events,
1047 * we want the task to finish.
1049 * If we did post shutdown events,
1050 * will still want the task's
1051 * quantum to be applied.
1053 was_idle
= task_shutdown(task
);
1057 if (EMPTY(task
->events
)) {
1059 * Nothing else to do for this task
1062 XTRACE(isc_msgcat_get(isc_msgcat
,
1066 if (task
->references
== 0 &&
1067 TASK_SHUTTINGDOWN(task
)) {
1071 XTRACE(isc_msgcat_get(
1076 finished
= ISC_TRUE
;
1077 task
->state
= task_state_done
;
1079 task
->state
= task_state_idle
;
1081 } else if (dispatch_count
>= task
->quantum
) {
1083 * Our quantum has expired, but
1084 * there is more work to be done.
1085 * We'll requeue it to the ready
1088 * We don't check quantum until
1089 * dispatching at least one event,
1090 * so the minimum quantum is one.
1092 XTRACE(isc_msgcat_get(isc_msgcat
,
1096 task
->state
= task_state_ready
;
1101 UNLOCK(&task
->lock
);
1104 task_finished(task
);
1106 LOCK(&manager
->lock
);
1107 manager
->tasks_running
--;
1108 #ifdef USE_WORKER_THREADS
1109 if (manager
->exclusive_requested
&&
1110 manager
->tasks_running
== 1) {
1111 SIGNAL(&manager
->exclusive_granted
);
1113 #endif /* USE_WORKER_THREADS */
1116 * We know we're awake, so we don't have
1117 * to wakeup any sleeping threads if the
1118 * ready queue is empty before we requeue.
1120 * A possible optimization if the queue is
1121 * empty is to 'goto' the 'if (task != NULL)'
1122 * block, avoiding the ENQUEUE of the task
1123 * and the subsequent immediate DEQUEUE
1124 * (since it is the only executable task).
1125 * We don't do this because then we'd be
1126 * skipping the exit_requested check. The
1127 * cost of ENQUEUE is low anyway, especially
1128 * when you consider that we'd have to do
1129 * an extra EMPTY check to see if we could
1130 * do the optimization. If the ready queue
1131 * were usually nonempty, the 'optimization'
1132 * might even hurt rather than help.
1134 #ifdef USE_WORKER_THREADS
1135 ENQUEUE(manager
->ready_tasks
, task
,
1138 ENQUEUE(ready_tasks
, task
, ready_link
);
1143 #ifndef USE_WORKER_THREADS
1144 ISC_LIST_APPENDLIST(manager
->ready_tasks
, ready_tasks
, ready_link
);
1146 UNLOCK(&manager
->lock
);
#ifdef USE_WORKER_THREADS
/*
 * Worker-thread entry point: run the dispatcher until the manager exits.
 * NOTE(review): the WINAPI calling-convention line between the return type
 * and the function name was lost in extraction — confirm against upstream.
 */
static isc_threadresult_t
#ifdef _WIN32
WINAPI
#endif
run(void *uap) {
	isc__taskmgr_t *manager = uap;

	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				    ISC_MSG_STARTING, "starting"));

	dispatch(manager);

	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				    ISC_MSG_EXITING, "exiting"));

#ifdef OPENSSL_LEAKS
	/* Free this thread's OpenSSL error state to avoid leak reports. */
	ERR_remove_state(0);
#endif

	return ((isc_threadresult_t)0);
}
#endif /* USE_WORKER_THREADS */
1174 manager_free(isc__taskmgr_t
*manager
) {
1177 #ifdef USE_WORKER_THREADS
1178 (void)isc_condition_destroy(&manager
->exclusive_granted
);
1179 (void)isc_condition_destroy(&manager
->work_available
);
1180 isc_mem_free(manager
->mctx
, manager
->threads
);
1181 #endif /* USE_WORKER_THREADS */
1182 DESTROYLOCK(&manager
->lock
);
1183 manager
->common
.impmagic
= 0;
1184 manager
->common
.magic
= 0;
1185 mctx
= manager
->mctx
;
1186 isc_mem_put(mctx
, manager
, sizeof(*manager
));
1187 isc_mem_detach(&mctx
);
1189 #ifdef USE_SHARED_MANAGER
1191 #endif /* USE_SHARED_MANAGER */
1194 ISC_TASKFUNC_SCOPE isc_result_t
1195 isc__taskmgr_create(isc_mem_t
*mctx
, unsigned int workers
,
1196 unsigned int default_quantum
, isc_taskmgr_t
**managerp
)
1198 isc_result_t result
;
1199 unsigned int i
, started
= 0;
1200 isc__taskmgr_t
*manager
;
1203 * Create a new task manager.
1206 REQUIRE(workers
> 0);
1207 REQUIRE(managerp
!= NULL
&& *managerp
== NULL
);
1209 #ifndef USE_WORKER_THREADS
1214 #ifdef USE_SHARED_MANAGER
1215 if (taskmgr
!= NULL
) {
1217 *managerp
= (isc_taskmgr_t
*)taskmgr
;
1218 return (ISC_R_SUCCESS
);
1220 #endif /* USE_SHARED_MANAGER */
1222 manager
= isc_mem_get(mctx
, sizeof(*manager
));
1223 if (manager
== NULL
)
1224 return (ISC_R_NOMEMORY
);
1225 manager
->common
.methods
= &taskmgrmethods
;
1226 manager
->common
.impmagic
= TASK_MANAGER_MAGIC
;
1227 manager
->common
.magic
= ISCAPI_TASKMGR_MAGIC
;
1228 manager
->mctx
= NULL
;
1229 result
= isc_mutex_init(&manager
->lock
);
1230 if (result
!= ISC_R_SUCCESS
)
1233 #ifdef USE_WORKER_THREADS
1234 manager
->workers
= 0;
1235 manager
->threads
= isc_mem_allocate(mctx
,
1236 workers
* sizeof(isc_thread_t
));
1237 if (manager
->threads
== NULL
) {
1238 result
= ISC_R_NOMEMORY
;
1241 if (isc_condition_init(&manager
->work_available
) != ISC_R_SUCCESS
) {
1242 UNEXPECTED_ERROR(__FILE__
, __LINE__
,
1243 "isc_condition_init() %s",
1244 isc_msgcat_get(isc_msgcat
, ISC_MSGSET_GENERAL
,
1245 ISC_MSG_FAILED
, "failed"));
1246 result
= ISC_R_UNEXPECTED
;
1247 goto cleanup_threads
;
1249 if (isc_condition_init(&manager
->exclusive_granted
) != ISC_R_SUCCESS
) {
1250 UNEXPECTED_ERROR(__FILE__
, __LINE__
,
1251 "isc_condition_init() %s",
1252 isc_msgcat_get(isc_msgcat
, ISC_MSGSET_GENERAL
,
1253 ISC_MSG_FAILED
, "failed"));
1254 result
= ISC_R_UNEXPECTED
;
1255 goto cleanup_workavailable
;
1257 #endif /* USE_WORKER_THREADS */
1258 if (default_quantum
== 0)
1259 default_quantum
= DEFAULT_DEFAULT_QUANTUM
;
1260 manager
->default_quantum
= default_quantum
;
1261 INIT_LIST(manager
->tasks
);
1262 INIT_LIST(manager
->ready_tasks
);
1263 manager
->tasks_running
= 0;
1264 manager
->exclusive_requested
= ISC_FALSE
;
1265 manager
->exiting
= ISC_FALSE
;
1267 isc_mem_attach(mctx
, &manager
->mctx
);
1269 #ifdef USE_WORKER_THREADS
1270 LOCK(&manager
->lock
);
1274 for (i
= 0; i
< workers
; i
++) {
1275 if (isc_thread_create(run
, manager
,
1276 &manager
->threads
[manager
->workers
]) ==
1282 UNLOCK(&manager
->lock
);
1285 manager_free(manager
);
1286 return (ISC_R_NOTHREADS
);
1288 isc_thread_setconcurrency(workers
);
1289 #endif /* USE_WORKER_THREADS */
1290 #ifdef USE_SHARED_MANAGER
1293 #endif /* USE_SHARED_MANAGER */
1295 *managerp
= (isc_taskmgr_t
*)manager
;
1297 return (ISC_R_SUCCESS
);
1299 #ifdef USE_WORKER_THREADS
1300 cleanup_workavailable
:
1301 (void)isc_condition_destroy(&manager
->work_available
);
1303 isc_mem_free(mctx
, manager
->threads
);
1305 DESTROYLOCK(&manager
->lock
);
1308 isc_mem_put(mctx
, manager
, sizeof(*manager
));
1312 ISC_TASKFUNC_SCOPE
void
1313 isc__taskmgr_destroy(isc_taskmgr_t
**managerp
) {
1314 isc__taskmgr_t
*manager
;
1319 * Destroy '*managerp'.
1322 REQUIRE(managerp
!= NULL
);
1323 manager
= (isc__taskmgr_t
*)*managerp
;
1324 REQUIRE(VALID_MANAGER(manager
));
1326 #ifndef USE_WORKER_THREADS
1328 #endif /* USE_WORKER_THREADS */
1330 #ifdef USE_SHARED_MANAGER
1331 if (manager
->refs
> 1) {
1338 XTHREADTRACE("isc_taskmgr_destroy");
1340 * Only one non-worker thread may ever call this routine.
1341 * If a worker thread wants to initiate shutdown of the
1342 * task manager, it should ask some non-worker thread to call
1343 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1344 * that the startup thread is sleeping on.
1348 * Unlike elsewhere, we're going to hold this lock a long time.
1349 * We need to do so, because otherwise the list of tasks could
1350 * change while we were traversing it.
1352 * This is also the only function where we will hold both the
1353 * task manager lock and a task lock at the same time.
1356 LOCK(&manager
->lock
);
1359 * Make sure we only get called once.
1361 INSIST(!manager
->exiting
);
1362 manager
->exiting
= ISC_TRUE
;
1365 * Post shutdown event(s) to every task (if they haven't already been
1368 for (task
= HEAD(manager
->tasks
);
1370 task
= NEXT(task
, link
)) {
1372 if (task_shutdown(task
))
1373 ENQUEUE(manager
->ready_tasks
, task
, ready_link
);
1374 UNLOCK(&task
->lock
);
1376 #ifdef USE_WORKER_THREADS
1378 * Wake up any sleeping workers. This ensures we get work done if
1379 * there's work left to do, and if there are already no tasks left
1380 * it will cause the workers to see manager->exiting.
1382 BROADCAST(&manager
->work_available
);
1383 UNLOCK(&manager
->lock
);
1386 * Wait for all the worker threads to exit.
1388 for (i
= 0; i
< manager
->workers
; i
++)
1389 (void)isc_thread_join(manager
->threads
[i
], NULL
);
1390 #else /* USE_WORKER_THREADS */
1392 * Dispatch the shutdown events.
1394 UNLOCK(&manager
->lock
);
1395 while (isc__taskmgr_ready((isc_taskmgr_t
*)manager
))
1396 (void)isc__taskmgr_dispatch((isc_taskmgr_t
*)manager
);
1398 if (!ISC_LIST_EMPTY(manager
->tasks
))
1399 isc_mem_printallactive(stderr
);
1401 INSIST(ISC_LIST_EMPTY(manager
->tasks
));
1402 #endif /* USE_WORKER_THREADS */
1404 manager_free(manager
);
1409 #ifndef USE_WORKER_THREADS
1411 isc__taskmgr_ready(isc_taskmgr_t
*manager0
) {
1412 isc__taskmgr_t
*manager
= (isc__taskmgr_t
*)manager0
;
1414 #ifdef USE_SHARED_MANAGER
1415 if (manager
== NULL
)
1418 if (manager
== NULL
)
1420 return (ISC_TF(!ISC_LIST_EMPTY(manager
->ready_tasks
)));
1424 isc__taskmgr_dispatch(isc_taskmgr_t
*manager0
) {
1425 isc__taskmgr_t
*manager
= (isc__taskmgr_t
*)manager0
;
1427 #ifdef USE_SHARED_MANAGER
1428 if (manager
== NULL
)
1431 if (manager
== NULL
)
1432 return (ISC_R_NOTFOUND
);
1436 return (ISC_R_SUCCESS
);
1439 #endif /* USE_WORKER_THREADS */
1441 ISC_TASKFUNC_SCOPE isc_result_t
1442 isc__task_beginexclusive(isc_task_t
*task0
) {
1443 #ifdef USE_WORKER_THREADS
1444 isc__task_t
*task
= (isc__task_t
*)task0
;
1445 isc__taskmgr_t
*manager
= task
->manager
;
1446 REQUIRE(task
->state
== task_state_running
);
1447 LOCK(&manager
->lock
);
1448 if (manager
->exclusive_requested
) {
1449 UNLOCK(&manager
->lock
);
1450 return (ISC_R_LOCKBUSY
);
1452 manager
->exclusive_requested
= ISC_TRUE
;
1453 while (manager
->tasks_running
> 1) {
1454 WAIT(&manager
->exclusive_granted
, &manager
->lock
);
1456 UNLOCK(&manager
->lock
);
1460 return (ISC_R_SUCCESS
);
1463 ISC_TASKFUNC_SCOPE
void
1464 isc__task_endexclusive(isc_task_t
*task0
) {
1465 #ifdef USE_WORKER_THREADS
1466 isc__task_t
*task
= (isc__task_t
*)task0
;
1467 isc__taskmgr_t
*manager
= task
->manager
;
1469 REQUIRE(task
->state
== task_state_running
);
1470 LOCK(&manager
->lock
);
1471 REQUIRE(manager
->exclusive_requested
);
1472 manager
->exclusive_requested
= ISC_FALSE
;
1473 BROADCAST(&manager
->work_available
);
1474 UNLOCK(&manager
->lock
);
#ifdef USE_SOCKETIMPREGISTER
/*%
 * Register this file's task-manager implementation
 * (isc__taskmgr_create) as the default with the generic isc_task
 * dispatch layer.
 *
 * NOTE(review): the guard macro here reads USE_SOCKETIMPREGISTER; the
 * task module conventionally uses USE_TASKIMPREGISTER (the socket
 * macro belongs in socket.c) — confirm against the build configuration
 * before changing it.
 */
isc_result_t
isc__task_register() {
	return (isc_task_register(isc__taskmgr_create));
}
#endif /* USE_SOCKETIMPREGISTER */
#if defined(HAVE_LIBXML2) && defined(BIND9)
/*%
 * Render the task manager's statistics as XML via libxml2's
 * xmlTextWriter: the thread model (with worker count or reference
 * count depending on the build), the default quantum, the number of
 * running tasks, and one <task> element per task (name, references,
 * id, state, quantum).
 *
 * Holds mgr->lock for the whole traversal, and each task's lock while
 * that task's element is written.
 *
 * NOTE(review): libxml2 writer return codes are ignored throughout,
 * matching the module's existing style; errors surface only as
 * truncated output.
 */
void
isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, xmlTextWriterPtr writer) {
	isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
	isc__task_t *task;

	LOCK(&mgr->lock);

	/*
	 * Write out the thread-model, and some details about each depending
	 * on which type is enabled.
	 */
	xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model");
#ifdef ISC_PLATFORM_USETHREADS
	xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
	xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded");
	xmlTextWriterEndElement(writer); /* type */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->workers);
	xmlTextWriterEndElement(writer); /* worker-threads */
#else /* ISC_PLATFORM_USETHREADS */
	xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
	xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded");
	xmlTextWriterEndElement(writer); /* type */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->refs);
	xmlTextWriterEndElement(writer); /* references */
#endif /* ISC_PLATFORM_USETHREADS */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->default_quantum);
	xmlTextWriterEndElement(writer); /* default-quantum */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running);
	xmlTextWriterEndElement(writer); /* tasks-running */

	xmlTextWriterEndElement(writer); /* thread-model */

	/* One <task> element per task currently owned by the manager. */
	xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks");
	task = ISC_LIST_HEAD(mgr->tasks);
	while (task != NULL) {
		LOCK(&task->lock);
		xmlTextWriterStartElement(writer, ISC_XMLCHAR "task");

		/* <name> is emitted only for named tasks. */
		if (task->name[0] != 0) {
			xmlTextWriterStartElement(writer, ISC_XMLCHAR "name");
			xmlTextWriterWriteFormatString(writer, "%s",
						       task->name);
			xmlTextWriterEndElement(writer); /* name */
		}

		xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
		xmlTextWriterWriteFormatString(writer, "%d", task->references);
		xmlTextWriterEndElement(writer); /* references */

		/* The task's address serves as its unique id. */
		xmlTextWriterStartElement(writer, ISC_XMLCHAR "id");
		xmlTextWriterWriteFormatString(writer, "%p", task);
		xmlTextWriterEndElement(writer); /* id */

		xmlTextWriterStartElement(writer, ISC_XMLCHAR "state");
		xmlTextWriterWriteFormatString(writer, "%s",
					       statenames[task->state]);
		xmlTextWriterEndElement(writer); /* state */

		xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum");
		xmlTextWriterWriteFormatString(writer, "%d", task->quantum);
		xmlTextWriterEndElement(writer); /* quantum */

		xmlTextWriterEndElement(writer); /* task */

		UNLOCK(&task->lock);
		task = ISC_LIST_NEXT(task, link);
	}
	xmlTextWriterEndElement(writer); /* tasks */

	UNLOCK(&mgr->lock);
}
#endif /* HAVE_LIBXML2 && BIND9 */