4 * Copyright (C) 2004-2008 Internet Systems Consortium, Inc. ("ISC")
5 * Copyright (C) 1998-2003 Internet Software Consortium.
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
12 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
13 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
14 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
15 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
16 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
20 /* Id: task.c,v 1.107 2008/03/27 23:46:57 tbox Exp */
23 * \author Principal Author: Bob Halley
27 * XXXRTH Need to document the states a task can be in, and the rules
28 * for changing states.
33 #include <isc/condition.h>
34 #include <isc/event.h>
35 #include <isc/magic.h>
38 #include <isc/platform.h>
39 #include <isc/string.h>
41 #include <isc/thread.h>
45 #ifndef ISC_PLATFORM_USETHREADS
47 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Debug tracing macros.  NOTE(review): the enclosing #ifdef ISC_TASK_TRACE /
 * #else lines are missing from this extraction; the first three definitions
 * are the tracing variants (XTRACE implicitly uses a local 'task' variable),
 * the final XTHREADTRACE is the no-op branch.
 */
50 #define XTRACE(m) fprintf(stderr, "task %p thread %lu: %s\n", \
51 task, isc_thread_self(), (m))
52 #define XTTRACE(t, m) fprintf(stderr, "task %p thread %lu: %s\n", \
53 (t), isc_thread_self(), (m))
54 #define XTHREADTRACE(m) fprintf(stderr, "thread %lu: %s\n", \
55 isc_thread_self(), (m))
/* No-op variant when tracing is disabled. */
59 #define XTHREADTRACE(m)
/*
 * Task lifecycle states.  NOTE(review): the enum header and the final
 * task_state_done enumerator appear to be missing from this extraction;
 * statenames[] below indexes by state and includes "done".
 */
67 task_state_idle
, task_state_ready
, task_state_running
,
/* Printable names, indexed by task_state value (used by renderxml). */
72 static const char *statenames
[] = {
73 "idle", "ready", "running", "done",
/* Magic-number validation for task objects. */
77 #define TASK_MAGIC ISC_MAGIC('T', 'A', 'S', 'K')
78 #define VALID_TASK(t) ISC_MAGIC_VALID(t, TASK_MAGIC)
/*
 * isc_task structure fields.  NOTE(review): the struct header and several
 * fields (magic, lock, state, quantum, flags, name, tag, now) are missing
 * from this extraction; they are referenced elsewhere in this file.
 */
83 isc_taskmgr_t
* manager
;
85 /* Locked by task lock. */
87 unsigned int references
;
/* Pending events, delivered FIFO by dispatch(). */
88 isc_eventlist_t events
;
/* Events posted when the task shuts down (delivered LIFO). */
89 isc_eventlist_t on_shutdown
;
95 /* Locked by task manager lock. */
96 LINK(isc_task_t
) link
;
97 LINK(isc_task_t
) ready_link
;
/* Flag bit: task is shutting down; tested via TASK_SHUTTINGDOWN(). */
100 #define TASK_F_SHUTTINGDOWN 0x01
102 #define TASK_SHUTTINGDOWN(t) (((t)->flags & TASK_F_SHUTTINGDOWN) \
/* Magic-number validation for task manager objects. */
105 #define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
106 #define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
/*
 * isc_taskmgr structure fields.  NOTE(review): the struct header and some
 * fields (magic, mctx, lock, refs) are missing from this extraction.
 */
113 #ifdef ISC_PLATFORM_USETHREADS
/* Number of worker threads actually started. */
114 unsigned int workers
;
115 isc_thread_t
* threads
;
116 #endif /* ISC_PLATFORM_USETHREADS */
117 /* Locked by task manager lock. */
118 unsigned int default_quantum
;
/* All tasks owned by this manager. */
119 LIST(isc_task_t
) tasks
;
/* Tasks with pending work, consumed by dispatch(). */
120 isc_tasklist_t ready_tasks
;
121 #ifdef ISC_PLATFORM_USETHREADS
/* Signalled when a task becomes ready; workers WAIT on it. */
122 isc_condition_t work_available
;
/* Signalled when an exclusive-mode requester may proceed. */
123 isc_condition_t exclusive_granted
;
124 #endif /* ISC_PLATFORM_USETHREADS */
125 unsigned int tasks_running
;
126 isc_boolean_t exclusive_requested
;
127 isc_boolean_t exiting
;
128 #ifndef ISC_PLATFORM_USETHREADS
130 #endif /* ISC_PLATFORM_USETHREADS */
/* Dispatch quanta: per-pass cap (non-threaded) and per-task default. */
133 #define DEFAULT_TASKMGR_QUANTUM 10
134 #define DEFAULT_DEFAULT_QUANTUM 5
/* True once the manager is exiting and its task list has drained. */
135 #define FINISHED(m) ((m)->exiting && EMPTY((m)->tasks))
137 #ifndef ISC_PLATFORM_USETHREADS
/* Singleton manager used in the non-threaded build. */
138 static isc_taskmgr_t
*taskmgr
= NULL
;
139 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Final teardown of a task: unlink it from its manager, destroy its lock,
 * and free its memory.  Caller must guarantee the task is done, unreferenced,
 * and has no queued events (enforced by the REQUIREs below).
 * NOTE(review): some lines of this function are missing from this extraction.
 */
146 task_finished(isc_task_t
*task
) {
147 isc_taskmgr_t
*manager
= task
->manager
;
149 REQUIRE(EMPTY(task
->events
));
150 REQUIRE(EMPTY(task
->on_shutdown
));
151 REQUIRE(task
->references
== 0);
152 REQUIRE(task
->state
== task_state_done
);
154 XTRACE("task_finished");
156 LOCK(&manager
->lock
);
157 UNLINK(manager
->tasks
, task
, link
);
158 #ifdef ISC_PLATFORM_USETHREADS
159 if (FINISHED(manager
)) {
161 * All tasks have completed and the
162 * task manager is exiting. Wake up
163 * any idle worker threads so they
166 BROADCAST(&manager
->work_available
);
168 #endif /* ISC_PLATFORM_USETHREADS */
169 UNLOCK(&manager
->lock
);
171 DESTROYLOCK(&task
->lock
);
173 isc_mem_put(manager
->mctx
, task
, sizeof(*task
));
/*
 * Create a new task attached to 'manager' with the given event quantum
 * (0 means "use the manager's default").  On success the task starts idle
 * with one reference.  Fails with ISC_R_NOMEMORY on allocation failure and
 * ISC_R_SHUTTINGDOWN if the manager is exiting.
 * NOTE(review): several lines (taskp parameter, NULL check after
 * isc_mem_get, *taskp assignment) are missing from this extraction.
 */
177 isc_task_create(isc_taskmgr_t
*manager
, unsigned int quantum
,
181 isc_boolean_t exiting
;
184 REQUIRE(VALID_MANAGER(manager
));
185 REQUIRE(taskp
!= NULL
&& *taskp
== NULL
);
187 task
= isc_mem_get(manager
->mctx
, sizeof(*task
));
189 return (ISC_R_NOMEMORY
);
190 XTRACE("isc_task_create");
191 task
->manager
= manager
;
192 result
= isc_mutex_init(&task
->lock
);
193 if (result
!= ISC_R_SUCCESS
) {
/* Mutex init failed: release the partially-built task. */
194 isc_mem_put(manager
->mctx
, task
, sizeof(*task
));
197 task
->state
= task_state_idle
;
198 task
->references
= 1;
199 INIT_LIST(task
->events
);
200 INIT_LIST(task
->on_shutdown
);
201 task
->quantum
= quantum
;
204 memset(task
->name
, 0, sizeof(task
->name
));
206 INIT_LINK(task
, link
);
207 INIT_LINK(task
, ready_link
);
/* Register with the manager, unless it is already exiting. */
210 LOCK(&manager
->lock
);
211 if (!manager
->exiting
) {
212 if (task
->quantum
== 0)
213 task
->quantum
= manager
->default_quantum
;
214 APPEND(manager
->tasks
, task
, link
);
217 UNLOCK(&manager
->lock
);
/* Manager was exiting: undo the construction and report it. */
220 DESTROYLOCK(&task
->lock
);
221 isc_mem_put(manager
->mctx
, task
, sizeof(*task
));
222 return (ISC_R_SHUTTINGDOWN
);
225 task
->magic
= TASK_MAGIC
;
228 return (ISC_R_SUCCESS
);
/*
 * Attach *targetp to 'source', incrementing source's reference count
 * under the task lock.  NOTE(review): the LOCK() call and the *targetp
 * assignment are missing from this extraction.
 */
232 isc_task_attach(isc_task_t
*source
, isc_task_t
**targetp
) {
235 * Attach *targetp to source.
238 REQUIRE(VALID_TASK(source
));
239 REQUIRE(targetp
!= NULL
&& *targetp
== NULL
);
241 XTTRACE(source
, "isc_task_attach");
244 source
->references
++;
245 UNLOCK(&source
->lock
);
/*
 * Mark 'task' as shutting down and move its on_shutdown events (LIFO)
 * onto the regular event queue.  Returns whether the task was idle and
 * therefore must be queued by the caller (task lock must be held).
 * NOTE(review): the was_idle assignment and the loop condition/decrement
 * lines are missing from this extraction.
 */
250 static inline isc_boolean_t
251 task_shutdown(isc_task_t
*task
) {
252 isc_boolean_t was_idle
= ISC_FALSE
;
253 isc_event_t
*event
, *prev
;
256 * Caller must be holding the task's lock.
259 XTRACE("task_shutdown");
261 if (! TASK_SHUTTINGDOWN(task
)) {
262 XTRACE(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_GENERAL
,
263 ISC_MSG_SHUTTINGDOWN
, "shutting down"));
264 task
->flags
|= TASK_F_SHUTTINGDOWN
;
265 if (task
->state
== task_state_idle
) {
266 INSIST(EMPTY(task
->events
));
267 task
->state
= task_state_ready
;
270 INSIST(task
->state
== task_state_ready
||
271 task
->state
== task_state_running
);
273 * Note that we post shutdown events LIFO.
275 for (event
= TAIL(task
->on_shutdown
);
278 prev
= PREV(event
, ev_link
);
279 DEQUEUE(task
->on_shutdown
, event
, ev_link
);
280 ENQUEUE(task
->events
, event
, ev_link
);
/*
 * Add a ready task to its manager's ready queue and (threaded build)
 * signal one waiting worker.  Takes the manager lock; the task lock must
 * NOT be held (see the deadlock note in isc_task_send()).
 */
288 task_ready(isc_task_t
*task
) {
289 isc_taskmgr_t
*manager
= task
->manager
;
291 REQUIRE(VALID_MANAGER(manager
));
292 REQUIRE(task
->state
== task_state_ready
);
294 XTRACE("task_ready");
296 LOCK(&manager
->lock
);
298 ENQUEUE(manager
->ready_tasks
, task
, ready_link
);
299 #ifdef ISC_PLATFORM_USETHREADS
300 SIGNAL(&manager
->work_available
);
301 #endif /* ISC_PLATFORM_USETHREADS */
303 UNLOCK(&manager
->lock
);
/*
 * Drop one reference from 'task' (task lock held by caller).  If that was
 * the last reference and the task is idle with no pending events, make it
 * ready so the event loop can shut it down; returns whether the caller
 * must queue the task.  NOTE(review): the decrement itself and the return
 * statements are missing from this extraction.
 */
306 static inline isc_boolean_t
307 task_detach(isc_task_t
*task
) {
310 * Caller must be holding the task lock.
313 REQUIRE(task
->references
> 0);
318 if (task
->references
== 0 && task
->state
== task_state_idle
) {
319 INSIST(EMPTY(task
->events
));
321 * There are no references to this task, and no
322 * pending events. We could try to optimize and
323 * either initiate shutdown or clean up the task,
324 * depending on its state, but it's easier to just
325 * make the task ready and allow run() or the event
326 * loop to deal with shutting down and termination.
328 task
->state
= task_state_ready
;
/*
 * Public wrapper: detach *taskp, NULLing the caller's pointer, and queue
 * the task if task_detach() reported it became ready.  NOTE(review): the
 * task = *taskp assignment, LOCK/UNLOCK, task_ready() call, and *taskp =
 * NULL lines are missing from this extraction.
 */
336 isc_task_detach(isc_task_t
**taskp
) {
338 isc_boolean_t was_idle
;
341 * Detach *taskp from its task.
344 REQUIRE(taskp
!= NULL
);
346 REQUIRE(VALID_TASK(task
));
348 XTRACE("isc_task_detach");
351 was_idle
= task_detach(task
);
/*
 * Enqueue *eventp on 'task' (task lock held by caller), consuming the
 * caller's event pointer.  Returns whether the task was idle and must be
 * queued by the caller.  NOTE(review): the event = *eventp assignment,
 * was_idle = ISC_TRUE, *eventp = NULL, and return lines are missing from
 * this extraction.
 */
360 static inline isc_boolean_t
361 task_send(isc_task_t
*task
, isc_event_t
**eventp
) {
362 isc_boolean_t was_idle
= ISC_FALSE
;
366 * Caller must be holding the task lock.
369 REQUIRE(eventp
!= NULL
);
371 REQUIRE(event
!= NULL
);
372 REQUIRE(event
->ev_type
> 0);
373 REQUIRE(task
->state
!= task_state_done
);
377 if (task
->state
== task_state_idle
) {
379 INSIST(EMPTY(task
->events
));
380 task
->state
= task_state_ready
;
382 INSIST(task
->state
== task_state_ready
||
383 task
->state
== task_state_running
);
384 ENQUEUE(task
->events
, event
, ev_link
);
/*
 * Public entry: send '*eventp' to 'task'.  The task lock is taken only
 * around task_send(); task_ready() (which needs the manager lock) is
 * deliberately deferred until after the task lock is released to avoid
 * lock-order deadlock — see the long comment below.  NOTE(review): the
 * LOCK/UNLOCK and task_ready() call lines are missing from this
 * extraction.
 */
391 isc_task_send(isc_task_t
*task
, isc_event_t
**eventp
) {
392 isc_boolean_t was_idle
;
395 * Send '*event' to 'task'.
398 REQUIRE(VALID_TASK(task
));
400 XTRACE("isc_task_send");
403 * We're trying hard to hold locks for as short a time as possible.
404 * We're also trying to hold as few locks as possible. This is why
405 * some processing is deferred until after the lock is released.
408 was_idle
= task_send(task
, eventp
);
413 * We need to add this task to the ready queue.
415 * We've waited until now to do it because making a task
416 * ready requires locking the manager. If we tried to do
417 * this while holding the task lock, we could deadlock.
419 * We've changed the state to ready, so no one else will
420 * be trying to add this task to the ready queue. The
421 * only way to leave the ready state is by executing the
422 * task. It thus doesn't matter if events are added,
423 * removed, or a shutdown is started in the interval
424 * between the time we released the task lock, and the time
425 * we add the task to the ready queue.
/*
 * Atomically (under one task-lock hold) send '*eventp' to '*taskp' and
 * drop the caller's reference.  At most one of the two operations can
 * leave the task needing to be queued — the INSIST below checks that.
 * NOTE(review): the task = *taskp assignment, LOCK/UNLOCK, task_ready()
 * call, and *taskp = NULL lines are missing from this extraction.
 */
432 isc_task_sendanddetach(isc_task_t
**taskp
, isc_event_t
**eventp
) {
433 isc_boolean_t idle1
, idle2
;
437 * Send '*event' to '*taskp' and then detach '*taskp' from its
441 REQUIRE(taskp
!= NULL
);
443 REQUIRE(VALID_TASK(task
));
445 XTRACE("isc_task_sendanddetach");
448 idle1
= task_send(task
, eventp
);
449 idle2
= task_detach(task
);
453 * If idle1, then idle2 shouldn't be true as well since we're holding
454 * the task lock, and thus the task cannot switch from ready back to
457 INSIST(!(idle1
&& idle2
));
/* An event may be purged unless it carries ISC_EVENTATTR_NOPURGE. */
465 #define PURGE_OK(event) (((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
/*
 * Move events matching (sender, [first..last], tag) from the task's queue
 * onto '*events', returning the number moved.  If 'purging', events marked
 * unpurgable are skipped.  next_event is captured before DEQUEUE so the
 * traversal survives list surgery.  NOTE(review): the LOCK/UNLOCK, the
 * count++ line, and the return are missing from this extraction.
 */
468 dequeue_events(isc_task_t
*task
, void *sender
, isc_eventtype_t first
,
469 isc_eventtype_t last
, void *tag
,
470 isc_eventlist_t
*events
, isc_boolean_t purging
)
472 isc_event_t
*event
, *next_event
;
473 unsigned int count
= 0;
475 REQUIRE(VALID_TASK(task
));
476 REQUIRE(last
>= first
);
478 XTRACE("dequeue_events");
481 * Events matching 'sender', whose type is >= first and <= last, and
482 * whose tag is 'tag' will be dequeued. If 'purging', matching events
483 * which are marked as unpurgable will not be dequeued.
485 * sender == NULL means "any sender", and tag == NULL means "any tag".
490 for (event
= HEAD(task
->events
); event
!= NULL
; event
= next_event
) {
491 next_event
= NEXT(event
, ev_link
);
492 if (event
->ev_type
>= first
&& event
->ev_type
<= last
&&
493 (sender
== NULL
|| event
->ev_sender
== sender
) &&
494 (tag
== NULL
|| event
->ev_tag
== tag
) &&
495 (!purging
|| PURGE_OK(event
))) {
496 DEQUEUE(task
->events
, event
, ev_link
);
497 ENQUEUE(*events
, event
, ev_link
);
/*
 * Purge (dequeue and free) matching events from 'task'.  Delegates the
 * matching to dequeue_events() with purging=ISC_TRUE, then frees each
 * collected event.  Purging never changes the task's state.
 * NOTE(review): the count declaration, the purging argument line, and the
 * return of 'count' are missing from this extraction.
 */
508 isc_task_purgerange(isc_task_t
*task
, void *sender
, isc_eventtype_t first
,
509 isc_eventtype_t last
, void *tag
)
512 isc_eventlist_t events
;
513 isc_event_t
*event
, *next_event
;
516 * Purge events from a task's event queue.
519 XTRACE("isc_task_purgerange");
521 ISC_LIST_INIT(events
);
523 count
= dequeue_events(task
, sender
, first
, last
, tag
, &events
,
/* Free every event that was dequeued; capture next before freeing. */
526 for (event
= HEAD(events
); event
!= NULL
; event
= next_event
) {
527 next_event
= NEXT(event
, ev_link
);
528 isc_event_free(&event
);
532 * Note that purging never changes the state of the task.
/*
 * Convenience wrapper: purge events of exactly one type by calling
 * isc_task_purgerange() with first == last == type.
 */
539 isc_task_purge(isc_task_t
*task
, void *sender
, isc_eventtype_t type
,
543 * Purge events from a task's event queue.
546 XTRACE("isc_task_purge");
548 return (isc_task_purgerange(task
, sender
, type
, type
, tag
));
/*
 * Purge one specific event pointer from the task's queue, if present and
 * purgeable.  'event' may be a dangling/invalid pointer: it is only
 * compared against queue entries, never dereferenced until a match is
 * found.  NOTE(review): LOCK/UNLOCK and the return statements are missing
 * from this extraction.
 */
552 isc_task_purgeevent(isc_task_t
*task
, isc_event_t
*event
) {
553 isc_event_t
*curr_event
, *next_event
;
556 * Purge 'event' from a task's event queue.
558 * XXXRTH: WARNING: This method may be removed before beta.
561 REQUIRE(VALID_TASK(task
));
564 * If 'event' is on the task's event queue, it will be purged,
565 * unless it is marked as unpurgeable. 'event' does not have to be
566 * on the task's event queue; in fact, it can even be an invalid
567 * pointer. Purging only occurs if the event is actually on the task's
570 * Purging never changes the state of the task.
574 for (curr_event
= HEAD(task
->events
);
576 curr_event
= next_event
) {
577 next_event
= NEXT(curr_event
, ev_link
);
578 if (curr_event
== event
&& PURGE_OK(event
)) {
579 DEQUEUE(task
->events
, curr_event
, ev_link
);
/* No match found: nothing to free. */
585 if (curr_event
== NULL
)
588 isc_event_free(&curr_event
);
/*
 * Remove (but do not free) matching events, returning them to the caller
 * on '*events'.  Same matching rules as dequeue_events() with
 * purging=ISC_FALSE, so NOPURGE events are removed too.
 */
594 isc_task_unsendrange(isc_task_t
*task
, void *sender
, isc_eventtype_t first
,
595 isc_eventtype_t last
, void *tag
,
596 isc_eventlist_t
*events
)
599 * Remove events from a task's event queue.
602 XTRACE("isc_task_unsendrange");
604 return (dequeue_events(task
, sender
, first
, last
, tag
, events
,
/*
 * Convenience wrapper: unsend events of exactly one type (first == last
 * == type); removed events are returned on '*events', not freed.
 */
609 isc_task_unsend(isc_task_t
*task
, void *sender
, isc_eventtype_t type
,
610 void *tag
, isc_eventlist_t
*events
)
613 * Remove events from a task's event queue.
616 XTRACE("isc_task_unsend");
618 return (dequeue_events(task
, sender
, type
, type
, tag
, events
,
/*
 * Register a shutdown callback: allocate an ISC_TASKEVENT_SHUTDOWN event
 * with 'action'/'arg' and queue it on the task's on_shutdown list.  If
 * the task is already shutting down, the event is freed instead and
 * ISC_R_SHUTTINGDOWN is returned.  NOTE(review): LOCK/UNLOCK and parts of
 * the isc_event_allocate() argument list are missing from this extraction.
 */
623 isc_task_onshutdown(isc_task_t
*task
, isc_taskaction_t action
, const void *arg
)
625 isc_boolean_t disallowed
= ISC_FALSE
;
626 isc_result_t result
= ISC_R_SUCCESS
;
630 * Send a shutdown event with action 'action' and argument 'arg' when
631 * 'task' is shutdown.
634 REQUIRE(VALID_TASK(task
));
635 REQUIRE(action
!= NULL
);
637 event
= isc_event_allocate(task
->manager
->mctx
,
639 ISC_TASKEVENT_SHUTDOWN
,
644 return (ISC_R_NOMEMORY
);
647 if (TASK_SHUTTINGDOWN(task
)) {
648 disallowed
= ISC_TRUE
;
649 result
= ISC_R_SHUTTINGDOWN
;
651 ENQUEUE(task
->on_shutdown
, event
, ev_link
);
/* Registration was disallowed: release the unused event. */
655 isc_mem_put(task
->manager
->mctx
, event
, sizeof(*event
));
/*
 * Public entry: initiate shutdown of 'task'.  Calls task_shutdown() under
 * the task lock; if the task was idle it must then be queued via
 * task_ready().  NOTE(review): the LOCK/UNLOCK and task_ready() lines are
 * missing from this extraction.
 */
661 isc_task_shutdown(isc_task_t
*task
) {
662 isc_boolean_t was_idle
;
668 REQUIRE(VALID_TASK(task
));
671 was_idle
= task_shutdown(task
);
/*
 * Destroy '*taskp': shut it down and drop the caller's reference.
 * Actual teardown happens later via task_finished() once the event loop
 * drains the task.
 */
679 isc_task_destroy(isc_task_t
**taskp
) {
685 REQUIRE(taskp
!= NULL
);
687 isc_task_shutdown(*taskp
);
688 isc_task_detach(taskp
);
/*
 * Set the task's debug name (truncated to fit, always NUL-terminated
 * because the buffer is zeroed first) and tag.  NOTE(review): the
 * LOCK/UNLOCK and the task->tag assignment are missing from this
 * extraction.
 */
692 isc_task_setname(isc_task_t
*task
, const char *name
, void *tag
) {
698 REQUIRE(VALID_TASK(task
));
/* Zero-fill first so strncpy's lack of termination is harmless. */
701 memset(task
->name
, 0, sizeof(task
->name
));
702 strncpy(task
->name
, name
, sizeof(task
->name
) - 1);
/*
 * Trivial accessors for the task's name, tag, and cached current time.
 * NOTE(review): the return statements / locking in getcurrenttime are
 * missing from this extraction.
 */
708 isc_task_getname(isc_task_t
*task
) {
713 isc_task_gettag(isc_task_t
*task
) {
718 isc_task_getcurrenttime(isc_task_t
*task
, isc_stdtime_t
*t
) {
719 REQUIRE(VALID_TASK(task
));
/*
 * Core event loop.  Threaded build: each worker thread runs this until
 * FINISHED(manager); it waits on work_available, dequeues a ready task,
 * and executes events up to the task's quantum.  Non-threaded build: one
 * pass dispatches up to DEFAULT_TASKMGR_QUANTUM events, accumulating
 * requeued tasks on a local list that is appended back at the end.
 * NOTE(review): many lines (local declarations of task/event, several
 * closing braces, the done/requeue/finished transitions, task_finished()
 * call) are missing from this extraction.
 */
733 dispatch(isc_taskmgr_t
*manager
) {
735 #ifndef ISC_PLATFORM_USETHREADS
736 unsigned int total_dispatch_count
= 0;
737 isc_tasklist_t ready_tasks
;
738 #endif /* ISC_PLATFORM_USETHREADS */
740 REQUIRE(VALID_MANAGER(manager
));
743 * Again we're trying to hold the lock for as short a time as possible
744 * and to do as little locking and unlocking as possible.
746 * In both while loops, the appropriate lock must be held before the
747 * while body starts. Code which acquired the lock at the top of
748 * the loop would be more readable, but would result in a lot of
749 * extra locking. Compare:
756 * while (expression) {
761 * Unlocked part here...
768 * Note how if the loop continues we unlock and then immediately lock.
769 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
770 * unlocks. Also note that the lock is not held when the while
771 * condition is tested, which may or may not be important, depending
777 * while (expression) {
781 * Unlocked part here...
788 * For N iterations of the loop, this code does N+1 locks and N+1
789 * unlocks. The while expression is always protected by the lock.
792 #ifndef ISC_PLATFORM_USETHREADS
793 ISC_LIST_INIT(ready_tasks
);
795 LOCK(&manager
->lock
);
796 while (!FINISHED(manager
)) {
797 #ifdef ISC_PLATFORM_USETHREADS
799 * For reasons similar to those given in the comment in
800 * isc_task_send() above, it is safe for us to dequeue
801 * the task while only holding the manager lock, and then
802 * change the task to running state while only holding the
/* Sleep until work arrives (and exclusive mode is not pending). */
805 while ((EMPTY(manager
->ready_tasks
) ||
806 manager
->exclusive_requested
) &&
809 XTHREADTRACE(isc_msgcat_get(isc_msgcat
,
811 ISC_MSG_WAIT
, "wait"));
812 WAIT(&manager
->work_available
, &manager
->lock
);
813 XTHREADTRACE(isc_msgcat_get(isc_msgcat
,
815 ISC_MSG_AWAKE
, "awake"));
817 #else /* ISC_PLATFORM_USETHREADS */
/* Non-threaded: stop after the pass quantum or when idle. */
818 if (total_dispatch_count
>= DEFAULT_TASKMGR_QUANTUM
||
819 EMPTY(manager
->ready_tasks
))
821 #endif /* ISC_PLATFORM_USETHREADS */
822 XTHREADTRACE(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_TASK
,
823 ISC_MSG_WORKING
, "working"));
825 task
= HEAD(manager
->ready_tasks
);
827 unsigned int dispatch_count
= 0;
828 isc_boolean_t done
= ISC_FALSE
;
829 isc_boolean_t requeue
= ISC_FALSE
;
830 isc_boolean_t finished
= ISC_FALSE
;
833 INSIST(VALID_TASK(task
));
836 * Note we only unlock the manager lock if we actually
837 * have a task to do. We must reacquire the manager
838 * lock before exiting the 'if (task != NULL)' block.
840 DEQUEUE(manager
->ready_tasks
, task
, ready_link
);
841 manager
->tasks_running
++;
842 UNLOCK(&manager
->lock
);
845 INSIST(task
->state
== task_state_ready
);
846 task
->state
= task_state_running
;
847 XTRACE(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_GENERAL
,
848 ISC_MSG_RUNNING
, "running"));
/* Cache "now" once per task activation. */
849 isc_stdtime_get(&task
->now
);
851 if (!EMPTY(task
->events
)) {
852 event
= HEAD(task
->events
);
853 DEQUEUE(task
->events
, event
, ev_link
);
856 * Execute the event action.
858 XTRACE(isc_msgcat_get(isc_msgcat
,
862 if (event
->ev_action
!= NULL
) {
/* Run the callback with the task lock released. */
864 (event
->ev_action
)(task
,event
);
868 #ifndef ISC_PLATFORM_USETHREADS
869 total_dispatch_count
++;
870 #endif /* ISC_PLATFORM_USETHREADS */
873 if (task
->references
== 0 &&
874 EMPTY(task
->events
) &&
875 !TASK_SHUTTINGDOWN(task
)) {
876 isc_boolean_t was_idle
;
879 * There are no references and no
880 * pending events for this task,
881 * which means it will not become
882 * runnable again via an external
883 * action (such as sending an event
886 * We initiate shutdown to prevent
887 * it from becoming a zombie.
889 * We do this here instead of in
890 * the "if EMPTY(task->events)" block
893 * If we post no shutdown events,
894 * we want the task to finish.
896 * If we did post shutdown events,
897 * will still want the task's
898 * quantum to be applied.
900 was_idle
= task_shutdown(task
);
904 if (EMPTY(task
->events
)) {
906 * Nothing else to do for this task
909 XTRACE(isc_msgcat_get(isc_msgcat
,
913 if (task
->references
== 0 &&
914 TASK_SHUTTINGDOWN(task
)) {
918 XTRACE(isc_msgcat_get(
/* Unreferenced and shutting down: the task is finished. */
924 task
->state
= task_state_done
;
926 task
->state
= task_state_idle
;
928 } else if (dispatch_count
>= task
->quantum
) {
930 * Our quantum has expired, but
931 * there is more work to be done.
932 * We'll requeue it to the ready
935 * We don't check quantum until
936 * dispatching at least one event,
937 * so the minimum quantum is one.
939 XTRACE(isc_msgcat_get(isc_msgcat
,
943 task
->state
= task_state_ready
;
953 LOCK(&manager
->lock
);
954 manager
->tasks_running
--;
955 #ifdef ISC_PLATFORM_USETHREADS
/* Last other runner dropping out: let the exclusive waiter go. */
956 if (manager
->exclusive_requested
&&
957 manager
->tasks_running
== 1) {
958 SIGNAL(&manager
->exclusive_granted
);
960 #endif /* ISC_PLATFORM_USETHREADS */
963 * We know we're awake, so we don't have
964 * to wakeup any sleeping threads if the
965 * ready queue is empty before we requeue.
967 * A possible optimization if the queue is
968 * empty is to 'goto' the 'if (task != NULL)'
969 * block, avoiding the ENQUEUE of the task
970 * and the subsequent immediate DEQUEUE
971 * (since it is the only executable task).
972 * We don't do this because then we'd be
973 * skipping the exit_requested check. The
974 * cost of ENQUEUE is low anyway, especially
975 * when you consider that we'd have to do
976 * an extra EMPTY check to see if we could
977 * do the optimization. If the ready queue
978 * were usually nonempty, the 'optimization'
979 * might even hurt rather than help.
981 #ifdef ISC_PLATFORM_USETHREADS
982 ENQUEUE(manager
->ready_tasks
, task
,
985 ENQUEUE(ready_tasks
, task
, ready_link
);
990 #ifndef ISC_PLATFORM_USETHREADS
/* Put back any tasks requeued during this pass. */
991 ISC_LIST_APPENDLIST(manager
->ready_tasks
, ready_tasks
, ready_link
);
993 UNLOCK(&manager
->lock
);
996 #ifdef ISC_PLATFORM_USETHREADS
/*
 * Worker-thread entry point: casts its argument to the manager and runs
 * dispatch() until the manager finishes.  NOTE(review): the function
 * header line and the dispatch(manager) call are missing from this
 * extraction.
 */
997 static isc_threadresult_t
1002 isc_taskmgr_t
*manager
= uap
;
1004 XTHREADTRACE(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_GENERAL
,
1005 ISC_MSG_STARTING
, "starting"));
1009 XTHREADTRACE(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_GENERAL
,
1010 ISC_MSG_EXITING
, "exiting"));
1012 return ((isc_threadresult_t
)0);
1014 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Release all manager resources: condition variables and the thread
 * array (threaded build), the manager lock, the manager itself, and the
 * manager's memory-context reference.  mctx is saved locally because
 * isc_mem_put() frees the struct that holds manager->mctx.
 * NOTE(review): the magic-reset line appears missing from this
 * extraction.
 */
1017 manager_free(isc_taskmgr_t
*manager
) {
1020 #ifdef ISC_PLATFORM_USETHREADS
1021 (void)isc_condition_destroy(&manager
->exclusive_granted
);
1022 (void)isc_condition_destroy(&manager
->work_available
);
1023 isc_mem_free(manager
->mctx
, manager
->threads
);
1024 #endif /* ISC_PLATFORM_USETHREADS */
1025 DESTROYLOCK(&manager
->lock
);
1027 mctx
= manager
->mctx
;
1028 isc_mem_put(mctx
, manager
, sizeof(*manager
));
1029 isc_mem_detach(&mctx
);
/*
 * Create a task manager with 'workers' threads (threaded build) and the
 * given default quantum (0 selects DEFAULT_DEFAULT_QUANTUM).  In the
 * non-threaded build the manager is a reference-counted singleton.
 * On thread-creation failure the partially-built manager is torn down
 * and ISC_R_NOTHREADS is returned; condition-variable init failures
 * unwind through the cleanup_* labels.  NOTE(review): several lines
 * (refs handling, goto cleanup_mgr, loop-success branch incrementing
 * manager->workers/started, cleanup_threads/cleanup_lock/cleanup_mgr
 * labels, the error return) are missing from this extraction.
 */
1033 isc_taskmgr_create(isc_mem_t
*mctx
, unsigned int workers
,
1034 unsigned int default_quantum
, isc_taskmgr_t
**managerp
)
1036 isc_result_t result
;
1037 unsigned int i
, started
= 0;
1038 isc_taskmgr_t
*manager
;
1041 * Create a new task manager.
1044 REQUIRE(workers
> 0);
1045 REQUIRE(managerp
!= NULL
&& *managerp
== NULL
);
1047 #ifndef ISC_PLATFORM_USETHREADS
/* Non-threaded: reuse the singleton if it already exists. */
1052 if (taskmgr
!= NULL
) {
1054 *managerp
= taskmgr
;
1055 return (ISC_R_SUCCESS
);
1057 #endif /* ISC_PLATFORM_USETHREADS */
1059 manager
= isc_mem_get(mctx
, sizeof(*manager
));
1060 if (manager
== NULL
)
1061 return (ISC_R_NOMEMORY
);
1062 manager
->magic
= TASK_MANAGER_MAGIC
;
1063 manager
->mctx
= NULL
;
1064 result
= isc_mutex_init(&manager
->lock
);
1065 if (result
!= ISC_R_SUCCESS
)
1068 #ifdef ISC_PLATFORM_USETHREADS
1069 manager
->workers
= 0;
1070 manager
->threads
= isc_mem_allocate(mctx
,
1071 workers
* sizeof(isc_thread_t
));
1072 if (manager
->threads
== NULL
) {
1073 result
= ISC_R_NOMEMORY
;
1076 if (isc_condition_init(&manager
->work_available
) != ISC_R_SUCCESS
) {
1077 UNEXPECTED_ERROR(__FILE__
, __LINE__
,
1078 "isc_condition_init() %s",
1079 isc_msgcat_get(isc_msgcat
, ISC_MSGSET_GENERAL
,
1080 ISC_MSG_FAILED
, "failed"));
1081 result
= ISC_R_UNEXPECTED
;
1082 goto cleanup_threads
;
1084 if (isc_condition_init(&manager
->exclusive_granted
) != ISC_R_SUCCESS
) {
1085 UNEXPECTED_ERROR(__FILE__
, __LINE__
,
1086 "isc_condition_init() %s",
1087 isc_msgcat_get(isc_msgcat
, ISC_MSGSET_GENERAL
,
1088 ISC_MSG_FAILED
, "failed"));
1089 result
= ISC_R_UNEXPECTED
;
1090 goto cleanup_workavailable
;
1092 #endif /* ISC_PLATFORM_USETHREADS */
1093 if (default_quantum
== 0)
1094 default_quantum
= DEFAULT_DEFAULT_QUANTUM
;
1095 manager
->default_quantum
= default_quantum
;
1096 INIT_LIST(manager
->tasks
);
1097 INIT_LIST(manager
->ready_tasks
);
1098 manager
->tasks_running
= 0;
1099 manager
->exclusive_requested
= ISC_FALSE
;
1100 manager
->exiting
= ISC_FALSE
;
1102 isc_mem_attach(mctx
, &manager
->mctx
);
1104 #ifdef ISC_PLATFORM_USETHREADS
/* Hold the lock while spawning so workers see a consistent manager. */
1105 LOCK(&manager
->lock
);
1109 for (i
= 0; i
< workers
; i
++) {
1110 if (isc_thread_create(run
, manager
,
1111 &manager
->threads
[manager
->workers
]) ==
/* No threads could be started: tear everything down. */
1117 UNLOCK(&manager
->lock
);
1120 manager_free(manager
);
1121 return (ISC_R_NOTHREADS
);
1123 isc_thread_setconcurrency(workers
);
1124 #else /* ISC_PLATFORM_USETHREADS */
1127 #endif /* ISC_PLATFORM_USETHREADS */
1129 *managerp
= manager
;
1131 return (ISC_R_SUCCESS
);
1133 #ifdef ISC_PLATFORM_USETHREADS
/* Error unwinding, in reverse order of construction. */
1134 cleanup_workavailable
:
1135 (void)isc_condition_destroy(&manager
->work_available
);
1137 isc_mem_free(mctx
, manager
->threads
);
1139 DESTROYLOCK(&manager
->lock
);
1142 isc_mem_put(mctx
, manager
, sizeof(*manager
));
/*
 * Destroy '*managerp': mark the manager exiting, post shutdown events to
 * every task, then (threaded) wake and join all workers, or
 * (non-threaded) drain the ready queue inline.  Must be called exactly
 * once, from a non-worker thread.  NOTE(review): local declarations of
 * task/i, the refs decrement in the non-threaded branch, the for-loop
 * condition, per-task LOCK, and the *managerp = NULL at the end are
 * missing from this extraction.
 */
1147 isc_taskmgr_destroy(isc_taskmgr_t
**managerp
) {
1148 isc_taskmgr_t
*manager
;
1153 * Destroy '*managerp'.
1156 REQUIRE(managerp
!= NULL
);
1157 manager
= *managerp
;
1158 REQUIRE(VALID_MANAGER(manager
));
1160 #ifndef ISC_PLATFORM_USETHREADS
/* Non-threaded singleton: only the last reference really destroys. */
1163 if (manager
->refs
> 1) {
1168 #endif /* ISC_PLATFORM_USETHREADS */
1170 XTHREADTRACE("isc_taskmgr_destroy");
1172 * Only one non-worker thread may ever call this routine.
1173 * If a worker thread wants to initiate shutdown of the
1174 * task manager, it should ask some non-worker thread to call
1175 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1176 * that the startup thread is sleeping on.
1180 * Unlike elsewhere, we're going to hold this lock a long time.
1181 * We need to do so, because otherwise the list of tasks could
1182 * change while we were traversing it.
1184 * This is also the only function where we will hold both the
1185 * task manager lock and a task lock at the same time.
1188 LOCK(&manager
->lock
);
1191 * Make sure we only get called once.
1193 INSIST(!manager
->exiting
);
1194 manager
->exiting
= ISC_TRUE
;
1197 * Post shutdown event(s) to every task (if they haven't already been
1200 for (task
= HEAD(manager
->tasks
);
1202 task
= NEXT(task
, link
)) {
1204 if (task_shutdown(task
))
1205 ENQUEUE(manager
->ready_tasks
, task
, ready_link
);
1206 UNLOCK(&task
->lock
);
1208 #ifdef ISC_PLATFORM_USETHREADS
1210 * Wake up any sleeping workers. This ensures we get work done if
1211 * there's work left to do, and if there are already no tasks left
1212 * it will cause the workers to see manager->exiting.
1214 BROADCAST(&manager
->work_available
);
1215 UNLOCK(&manager
->lock
);
1218 * Wait for all the worker threads to exit.
1220 for (i
= 0; i
< manager
->workers
; i
++)
1221 (void)isc_thread_join(manager
->threads
[i
], NULL
);
1222 #else /* ISC_PLATFORM_USETHREADS */
1224 * Dispatch the shutdown events.
1226 UNLOCK(&manager
->lock
);
1227 while (isc__taskmgr_ready())
1228 (void)isc__taskmgr_dispatch();
/* Any surviving task at this point is a leak; dump allocations. */
1229 if (!ISC_LIST_EMPTY(manager
->tasks
))
1230 isc_mem_printallactive(stderr
);
1231 INSIST(ISC_LIST_EMPTY(manager
->tasks
));
1232 #endif /* ISC_PLATFORM_USETHREADS */
1234 manager_free(manager
);
1239 #ifndef ISC_PLATFORM_USETHREADS
/*
 * Non-threaded helpers for the application's event loop:
 * isc__taskmgr_ready() reports whether the singleton manager has ready
 * tasks; isc__taskmgr_dispatch() runs one dispatch() pass.
 * NOTE(review): the "return ISC_FALSE" for the NULL case and the
 * dispatch(manager) call are missing from this extraction.
 */
1241 isc__taskmgr_ready(void) {
1242 if (taskmgr
== NULL
)
1244 return (ISC_TF(!ISC_LIST_EMPTY(taskmgr
->ready_tasks
)));
1248 isc__taskmgr_dispatch(void) {
1249 isc_taskmgr_t
*manager
= taskmgr
;
1251 if (taskmgr
== NULL
)
1252 return (ISC_R_NOTFOUND
);
1256 return (ISC_R_SUCCESS
);
1259 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Request exclusive execution for the calling task: set
 * exclusive_requested (failing with ISC_R_LOCKBUSY if already set) and
 * wait until this task is the only one running.  Must be called from a
 * running task.  In the non-threaded build this is trivially exclusive
 * (the UNUSED/#else branch is missing from this extraction).
 */
1262 isc_task_beginexclusive(isc_task_t
*task
) {
1263 #ifdef ISC_PLATFORM_USETHREADS
1264 isc_taskmgr_t
*manager
= task
->manager
;
1265 REQUIRE(task
->state
== task_state_running
);
1266 LOCK(&manager
->lock
);
1267 if (manager
->exclusive_requested
) {
1268 UNLOCK(&manager
->lock
);
1269 return (ISC_R_LOCKBUSY
);
1271 manager
->exclusive_requested
= ISC_TRUE
;
/* Wait for every other running task to drain (we are the 1). */
1272 while (manager
->tasks_running
> 1) {
1273 WAIT(&manager
->exclusive_granted
, &manager
->lock
);
1275 UNLOCK(&manager
->lock
);
1279 return (ISC_R_SUCCESS
);
/*
 * End exclusive mode started by isc_task_beginexclusive(): clear the
 * flag and broadcast work_available so blocked workers resume.  Must be
 * called from the same running task.
 */
1283 isc_task_endexclusive(isc_task_t
*task
) {
1284 #ifdef ISC_PLATFORM_USETHREADS
1285 isc_taskmgr_t
*manager
= task
->manager
;
1286 REQUIRE(task
->state
== task_state_running
);
1287 LOCK(&manager
->lock
);
1288 REQUIRE(manager
->exclusive_requested
);
1289 manager
->exclusive_requested
= ISC_FALSE
;
1290 BROADCAST(&manager
->work_available
);
1291 UNLOCK(&manager
->lock
);
/*
 * Render the manager's state as XML via libxml2's xmlTextWriter:
 * thread-model info, then one <task> element per task (name, references,
 * id, state, quantum).  NOTE(review): the manager/task LOCK calls, the
 * task->name argument of the name element, and the surrounding #ifdef
 * HAVE_LIBXML2 opening are missing from this extraction; writer return
 * codes are not checked (pre-existing style).
 */
1300 isc_taskmgr_renderxml(isc_taskmgr_t
*mgr
, xmlTextWriterPtr writer
)
1307 * Write out the thread-model, and some details about each depending
1308 * on which type is enabled.
1310 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"thread-model");
1311 #ifdef ISC_PLATFORM_USETHREADS
1312 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"type");
1313 xmlTextWriterWriteString(writer
, ISC_XMLCHAR
"threaded");
1314 xmlTextWriterEndElement(writer
); /* type */
1316 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"worker-threads");
1317 xmlTextWriterWriteFormatString(writer
, "%d", mgr
->workers
);
1318 xmlTextWriterEndElement(writer
); /* worker-threads */
1319 #else /* ISC_PLATFORM_USETHREADS */
1320 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"type");
1321 xmlTextWriterWriteString(writer
, ISC_XMLCHAR
"non-threaded");
1322 xmlTextWriterEndElement(writer
); /* type */
1324 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"references");
1325 xmlTextWriterWriteFormatString(writer
, "%d", mgr
->refs
);
1326 xmlTextWriterEndElement(writer
); /* references */
1327 #endif /* ISC_PLATFORM_USETHREADS */
1329 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"default-quantum");
1330 xmlTextWriterWriteFormatString(writer
, "%d", mgr
->default_quantum
);
1331 xmlTextWriterEndElement(writer
); /* default-quantum */
1333 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"tasks-running");
1334 xmlTextWriterWriteFormatString(writer
, "%d", mgr
->tasks_running
);
1335 xmlTextWriterEndElement(writer
); /* tasks-running */
1337 xmlTextWriterEndElement(writer
); /* thread-model */
/* One <task> element per task on the manager's list. */
1339 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"tasks");
1340 task
= ISC_LIST_HEAD(mgr
->tasks
);
1341 while (task
!= NULL
) {
1343 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"task");
1345 if (task
->name
[0] != 0) {
1346 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"name");
1347 xmlTextWriterWriteFormatString(writer
, "%s",
1349 xmlTextWriterEndElement(writer
); /* name */
1352 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"references");
1353 xmlTextWriterWriteFormatString(writer
, "%d", task
->references
);
1354 xmlTextWriterEndElement(writer
); /* references */
1356 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"id");
1357 xmlTextWriterWriteFormatString(writer
, "%p", task
);
1358 xmlTextWriterEndElement(writer
); /* id */
1360 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"state");
1361 xmlTextWriterWriteFormatString(writer
, "%s",
1362 statenames
[task
->state
]);
1363 xmlTextWriterEndElement(writer
); /* state */
1365 xmlTextWriterStartElement(writer
, ISC_XMLCHAR
"quantum");
1366 xmlTextWriterWriteFormatString(writer
, "%d", task
->quantum
);
1367 xmlTextWriterEndElement(writer
); /* quantum */
1369 xmlTextWriterEndElement(writer
);
1371 UNLOCK(&task
->lock
);
1372 task
= ISC_LIST_NEXT(task
, link
);
1374 xmlTextWriterEndElement(writer
); /* tasks */
1378 #endif /* HAVE_LIBXML2 */