/*	$NetBSD: task.c,v 1.11 2014/12/10 04:37:59 christos Exp $	*/

/*
 * Copyright (C) 2004-2014  Internet Systems Consortium, Inc. ("ISC")
 * Copyright (C) 1998-2003  Internet Software Consortium.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/*! \file
 * \author Principal Author: Bob Halley
 */

/*
 * XXXRTH  Need to document the states a task can be in, and the rules
 * for changing states.
 */
#include <config.h>

#include <isc/app.h>
#include <isc/condition.h>
#include <isc/event.h>
#include <isc/json.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/msgs.h>
#include <isc/once.h>
#include <isc/platform.h>
#include <isc/string.h>
#include <isc/task.h>
#include <isc/thread.h>
#include <isc/time.h>
#include <isc/util.h>
#include <isc/xml.h>

#ifdef OPENSSL_LEAKS
#include <openssl/err.h>
#endif
/*%
 * For BIND9 internal applications:
 * when built with threads we use multiple worker threads shared by the whole
 * application.
 * when built without threads we share a single global task manager and use
 * an integrated event loop for socket, timer, and other generic task events.
 * For generic library:
 * we don't use either of them: an application can have multiple task managers
 * whether or not it's threaded, and if the application is threaded each thread
 * is expected to have a separate manager; no "worker threads" are shared by
 * the application threads.
 */
#ifdef ISC_PLATFORM_USETHREADS
#define USE_WORKER_THREADS
#else
#define USE_SHARED_MANAGER
#endif	/* ISC_PLATFORM_USETHREADS */
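
#if 0
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * the typical life cycle of a task manager and a task through the public
 * wrappers defined at the bottom of this file.  The helper name
 * example_task_usage() is hypothetical.
 */
static void
example_task_usage(isc_mem_t *mctx) {
	isc_taskmgr_t *taskmgr = NULL;
	isc_task_t *task = NULL;

	/* One worker thread, default quantum. */
	RUNTIME_CHECK(isc_taskmgr_create(mctx, 1, 0, &taskmgr)
		      == ISC_R_SUCCESS);
	RUNTIME_CHECK(isc_task_create(taskmgr, 0, &task) == ISC_R_SUCCESS);

	/*
	 * ... allocate events with isc_event_allocate() and post them with
	 * isc_task_send(); the task runs their actions in FIFO order ...
	 */

	isc_task_detach(&task);		/* task finishes once it goes idle */
	isc_taskmgr_destroy(&taskmgr);	/* waits for the workers to exit */
}
#endif /* usage sketch */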
#ifdef ISC_TASK_TRACE
#define XTRACE(m)		fprintf(stderr, "task %p thread %lu: %s\n", \
					task, isc_thread_self(), (m))
#define XTTRACE(t, m)		fprintf(stderr, "task %p thread %lu: %s\n", \
					(t), isc_thread_self(), (m))
#define XTHREADTRACE(m)		fprintf(stderr, "thread %lu: %s\n", \
					isc_thread_self(), (m))
#else
#define XTRACE(m)
#define XTTRACE(t, m)
#define XTHREADTRACE(m)
#endif
typedef enum {
	task_state_idle, task_state_ready, task_state_running,
	task_state_done
} task_state_t;
#if defined(HAVE_LIBXML2) || defined(HAVE_JSON)
static const char *statenames[] = {
	"idle", "ready", "running", "done",
};
#endif
#define TASK_MAGIC			ISC_MAGIC('T', 'A', 'S', 'K')
#define VALID_TASK(t)			ISC_MAGIC_VALID(t, TASK_MAGIC)
typedef struct isc__task isc__task_t;
typedef struct isc__taskmgr isc__taskmgr_t;
struct isc__task {
	/* Not locked. */
	isc_task_t			common;
	isc__taskmgr_t *		manager;
	isc_mutex_t			lock;
	/* Locked by task lock. */
	task_state_t			state;
	unsigned int			references;
	isc_eventlist_t			events;
	isc_eventlist_t			on_shutdown;
	unsigned int			nevents;
	unsigned int			quantum;
	unsigned int			flags;
	isc_stdtime_t			now;
	char				name[16];
	void *				tag;
	/* Locked by task manager lock. */
	LINK(isc__task_t)		link;
	LINK(isc__task_t)		ready_link;
	LINK(isc__task_t)		ready_priority_link;
};
#define TASK_F_SHUTTINGDOWN		0x01
#define TASK_F_PRIVILEGED		0x02

#define TASK_SHUTTINGDOWN(t)		(((t)->flags & TASK_F_SHUTTINGDOWN) \
					 != 0)
#define TASK_MANAGER_MAGIC		ISC_MAGIC('T', 'S', 'K', 'M')
#define VALID_MANAGER(m)		ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
typedef ISC_LIST(isc__task_t) isc__tasklist_t;
struct isc__taskmgr {
	/* Not locked. */
	isc_taskmgr_t			common;
	isc_mem_t *			mctx;
	isc_mutex_t			lock;
#ifdef ISC_PLATFORM_USETHREADS
	unsigned int			workers;
	isc_thread_t *			threads;
#endif /* ISC_PLATFORM_USETHREADS */
	/* Locked by task manager lock. */
	unsigned int			default_quantum;
	LIST(isc__task_t)		tasks;
	isc__tasklist_t			ready_tasks;
	isc__tasklist_t			ready_priority_tasks;
	isc_taskmgrmode_t		mode;
#ifdef ISC_PLATFORM_USETHREADS
	isc_condition_t			work_available;
	isc_condition_t			exclusive_granted;
	isc_condition_t			paused;
#endif /* ISC_PLATFORM_USETHREADS */
	unsigned int			tasks_running;
	unsigned int			tasks_ready;
	isc_boolean_t			pause_requested;
	isc_boolean_t			exclusive_requested;
	isc_boolean_t			exiting;
	isc__task_t *			excl;
#ifdef USE_SHARED_MANAGER
	unsigned int			refs;
#endif /* USE_SHARED_MANAGER */
};
#define DEFAULT_TASKMGR_QUANTUM		10
#define DEFAULT_DEFAULT_QUANTUM		5
#define FINISHED(m)			((m)->exiting && EMPTY((m)->tasks))
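
/*
 * Illustration (editor's sketch, not part of the original source): the
 * per-task 'quantum' bounds how many events a task may dispatch in one
 * scheduling slot before it is pushed back onto the ready queue; a task
 * created with quantum 0 inherits the manager's default_quantum.  For
 * example, with a hypothetical 'taskmgr':
 *
 *	isc_task_t *task = NULL;
 *	result = isc_task_create(taskmgr, 2, &task);
 *
 * Such a task yields after at most two consecutive events, letting other
 * ready tasks run.
 */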
#ifdef USE_SHARED_MANAGER
static isc__taskmgr_t *taskmgr = NULL;
#endif /* USE_SHARED_MANAGER */
/*%
 * The following are intended for internal use (indicated by "isc__"
 * prefix) but are not declared as static, allowing direct access from
 * unit tests etc.
 */

isc_result_t
isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
		 isc_task_t **taskp);
void
isc__task_attach(isc_task_t *source0, isc_task_t **targetp);
void
isc__task_detach(isc_task_t **taskp);
void
isc__task_send(isc_task_t *task0, isc_event_t **eventp);
void
isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp);
unsigned int
isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
		     isc_eventtype_t last, void *tag);
unsigned int
isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
		void *tag);
isc_boolean_t
isc_task_purgeevent(isc_task_t *task0, isc_event_t *event);
unsigned int
isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
		      isc_eventtype_t last, void *tag,
		      isc_eventlist_t *events);
unsigned int
isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
		 void *tag, isc_eventlist_t *events);
isc_result_t
isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
		     void *arg);
void
isc__task_shutdown(isc_task_t *task0);
void
isc__task_destroy(isc_task_t **taskp);
void
isc__task_setname(isc_task_t *task0, const char *name, void *tag);
const char *
isc__task_getname(isc_task_t *task0);
void *
isc__task_gettag(isc_task_t *task0);
void
isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t);
isc_result_t
isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
		    unsigned int default_quantum, isc_taskmgr_t **managerp);
void
isc__taskmgr_destroy(isc_taskmgr_t **managerp);
void
isc_taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0);
isc_result_t
isc_taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp);
isc_result_t
isc__task_beginexclusive(isc_task_t *task);
void
isc__task_endexclusive(isc_task_t *task0);
void
isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv);
isc_boolean_t
isc__task_privilege(isc_task_t *task0);
void
isc__taskmgr_setmode(isc_taskmgr_t *manager0, isc_taskmgrmode_t mode);
isc_taskmgrmode_t
isc__taskmgr_mode(isc_taskmgr_t *manager0);

static inline isc_boolean_t
empty_readyq(isc__taskmgr_t *manager);

static inline isc__task_t *
pop_readyq(isc__taskmgr_t *manager);

static inline void
push_readyq(isc__taskmgr_t *manager, isc__task_t *task);
static struct isc__taskmethods {
	isc_taskmethods_t methods;

	/*%
	 * The following are defined just for avoiding unused static functions.
	 */
	void *purgeevent, *unsendrange, *getname, *gettag, *getcurrenttime;
} taskmethods = {
	{
		isc__task_attach,
		isc__task_detach,
		isc__task_destroy,
		isc__task_send,
		isc__task_sendanddetach,
		isc__task_unsend,
		isc__task_onshutdown,
		isc__task_shutdown,
		isc__task_setname,
		isc__task_purge,
		isc__task_purgerange,
		isc__task_beginexclusive,
		isc__task_endexclusive,
		isc__task_setprivilege,
		isc__task_privilege
	},
	(void *)isc_task_purgeevent,
	(void *)isc__task_unsendrange,
	(void *)isc__task_getname,
	(void *)isc__task_gettag,
	(void *)isc__task_getcurrenttime
};
static isc_taskmgrmethods_t taskmgrmethods = {
	isc__taskmgr_destroy,
	isc__taskmgr_setmode,
	isc__taskmgr_mode,
	isc__task_create,
	isc_taskmgr_setexcltask,
	isc_taskmgr_excltask
};
/***
 *** Tasks.
 ***/

static void
task_finished(isc__task_t *task) {
	isc__taskmgr_t *manager = task->manager;

	REQUIRE(EMPTY(task->events));
	REQUIRE(task->nevents == 0);
	REQUIRE(EMPTY(task->on_shutdown));
	REQUIRE(task->references == 0);
	REQUIRE(task->state == task_state_done);

	XTRACE("task_finished");

	LOCK(&manager->lock);
	UNLINK(manager->tasks, task, link);
#ifdef USE_WORKER_THREADS
	if (FINISHED(manager)) {
		/*
		 * All tasks have completed and the
		 * task manager is exiting.  Wake up
		 * any idle worker threads so they
		 * can exit.
		 */
		BROADCAST(&manager->work_available);
	}
#endif /* USE_WORKER_THREADS */
	UNLOCK(&manager->lock);

	DESTROYLOCK(&task->lock);
	task->common.impmagic = 0;
	task->common.magic = 0;
	isc_mem_put(manager->mctx, task, sizeof(*task));
}
isc_result_t
isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
		 isc_task_t **taskp)
{
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
	isc__task_t *task;
	isc_boolean_t exiting;
	isc_result_t result;

	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(taskp != NULL && *taskp == NULL);

	task = isc_mem_get(manager->mctx, sizeof(*task));
	if (task == NULL)
		return (ISC_R_NOMEMORY);
	XTRACE("isc_task_create");
	task->manager = manager;
	result = isc_mutex_init(&task->lock);
	if (result != ISC_R_SUCCESS) {
		isc_mem_put(manager->mctx, task, sizeof(*task));
		return (result);
	}
	task->state = task_state_idle;
	task->references = 1;
	INIT_LIST(task->events);
	INIT_LIST(task->on_shutdown);
	task->nevents = 0;
	task->quantum = quantum;
	task->flags = 0;
	task->now = 0;
	memset(task->name, 0, sizeof(task->name));
	task->tag = NULL;
	INIT_LINK(task, link);
	INIT_LINK(task, ready_link);
	INIT_LINK(task, ready_priority_link);

	exiting = ISC_FALSE;
	LOCK(&manager->lock);
	if (!manager->exiting) {
		if (task->quantum == 0)
			task->quantum = manager->default_quantum;
		APPEND(manager->tasks, task, link);
	} else
		exiting = ISC_TRUE;
	UNLOCK(&manager->lock);

	if (exiting) {
		DESTROYLOCK(&task->lock);
		isc_mem_put(manager->mctx, task, sizeof(*task));
		return (ISC_R_SHUTTINGDOWN);
	}

	task->common.methods = (isc_taskmethods_t *)&taskmethods;
	task->common.magic = ISCAPI_TASK_MAGIC;
	task->common.impmagic = TASK_MAGIC;
	*taskp = (isc_task_t *)task;

	return (ISC_R_SUCCESS);
}
void
isc__task_attach(isc_task_t *source0, isc_task_t **targetp) {
	isc__task_t *source = (isc__task_t *)source0;

	/*
	 * Attach *targetp to source.
	 */

	REQUIRE(VALID_TASK(source));
	REQUIRE(targetp != NULL && *targetp == NULL);

	XTTRACE(source, "isc_task_attach");

	LOCK(&source->lock);
	source->references++;
	UNLOCK(&source->lock);

	*targetp = (isc_task_t *)source;
}
static inline isc_boolean_t
task_shutdown(isc__task_t *task) {
	isc_boolean_t was_idle = ISC_FALSE;
	isc_event_t *event, *prev;

	/*
	 * Caller must be holding the task's lock.
	 */

	XTRACE("task_shutdown");

	if (! TASK_SHUTTINGDOWN(task)) {
		XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				      ISC_MSG_SHUTTINGDOWN, "shutting down"));
		task->flags |= TASK_F_SHUTTINGDOWN;
		if (task->state == task_state_idle) {
			INSIST(EMPTY(task->events));
			task->state = task_state_ready;
			was_idle = ISC_TRUE;
		}
		INSIST(task->state == task_state_ready ||
		       task->state == task_state_running);

		/*
		 * Note that we post shutdown events LIFO.
		 */
		for (event = TAIL(task->on_shutdown);
		     event != NULL;
		     event = prev) {
			prev = PREV(event, ev_link);
			DEQUEUE(task->on_shutdown, event, ev_link);
			ENQUEUE(task->events, event, ev_link);
			task->nevents++;
		}
	}

	return (was_idle);
}
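
/*
 * Illustration (editor's sketch, not part of the original source): because
 * shutdown events are posted LIFO, on-shutdown actions run in the reverse
 * order of registration.  With hypothetical actions free_context and
 * close_socket:
 *
 *	isc_task_onshutdown(task, free_context, ctx);	(registered first)
 *	isc_task_onshutdown(task, close_socket, sock);	(registered second)
 *	isc_task_shutdown(task);
 *
 * close_socket runs before free_context.
 */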
/*
 * Moves a task onto the appropriate run queue.
 *
 * Caller must NOT hold manager lock.
 */
static inline void
task_ready(isc__task_t *task) {
	isc__taskmgr_t *manager = task->manager;
#ifdef USE_WORKER_THREADS
	isc_boolean_t has_privilege = isc__task_privilege((isc_task_t *) task);
#endif /* USE_WORKER_THREADS */

	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(task->state == task_state_ready);

	XTRACE("task_ready");

	LOCK(&manager->lock);
	push_readyq(manager, task);
#ifdef USE_WORKER_THREADS
	if (manager->mode == isc_taskmgrmode_normal || has_privilege)
		SIGNAL(&manager->work_available);
#endif /* USE_WORKER_THREADS */
	UNLOCK(&manager->lock);
}
static inline isc_boolean_t
task_detach(isc__task_t *task) {

	/*
	 * Caller must be holding the task lock.
	 */

	REQUIRE(task->references > 0);

	XTRACE("detach");

	task->references--;
	if (task->references == 0 && task->state == task_state_idle) {
		INSIST(EMPTY(task->events));
		/*
		 * There are no references to this task, and no
		 * pending events.  We could try to optimize and
		 * either initiate shutdown or clean up the task,
		 * depending on its state, but it's easier to just
		 * make the task ready and allow run() or the event
		 * loop to deal with shutting down and termination.
		 */
		task->state = task_state_ready;
		return (ISC_TRUE);
	}

	return (ISC_FALSE);
}
void
isc__task_detach(isc_task_t **taskp) {
	isc__task_t *task;
	isc_boolean_t was_idle;

	/*
	 * Detach *taskp from its task.
	 */

	REQUIRE(taskp != NULL);
	task = (isc__task_t *)*taskp;
	REQUIRE(VALID_TASK(task));

	XTRACE("isc_task_detach");

	LOCK(&task->lock);
	was_idle = task_detach(task);
	UNLOCK(&task->lock);

	if (was_idle)
		task_ready(task);

	*taskp = NULL;
}
static inline isc_boolean_t
task_send(isc__task_t *task, isc_event_t **eventp) {
	isc_boolean_t was_idle = ISC_FALSE;
	isc_event_t *event;

	/*
	 * Caller must be holding the task lock.
	 */

	REQUIRE(eventp != NULL);
	event = *eventp;
	REQUIRE(event != NULL);
	REQUIRE(event->ev_type > 0);
	REQUIRE(task->state != task_state_done);

	XTRACE("task_send");

	if (task->state == task_state_idle) {
		was_idle = ISC_TRUE;
		INSIST(EMPTY(task->events));
		task->state = task_state_ready;
	}
	INSIST(task->state == task_state_ready ||
	       task->state == task_state_running);
	ENQUEUE(task->events, event, ev_link);
	task->nevents++;
	*eventp = NULL;

	return (was_idle);
}
void
isc__task_send(isc_task_t *task0, isc_event_t **eventp) {
	isc__task_t *task = (isc__task_t *)task0;
	isc_boolean_t was_idle;

	/*
	 * Send '*event' to 'task'.
	 */

	REQUIRE(VALID_TASK(task));

	XTRACE("isc_task_send");

	/*
	 * We're trying hard to hold locks for as short a time as possible.
	 * We're also trying to hold as few locks as possible.  This is why
	 * some processing is deferred until after the lock is released.
	 */
	LOCK(&task->lock);
	was_idle = task_send(task, eventp);
	UNLOCK(&task->lock);

	if (was_idle) {
		/*
		 * We need to add this task to the ready queue.
		 *
		 * We've waited until now to do it because making a task
		 * ready requires locking the manager.  If we tried to do
		 * this while holding the task lock, we could deadlock.
		 *
		 * We've changed the state to ready, so no one else will
		 * be trying to add this task to the ready queue.  The
		 * only way to leave the ready state is by executing the
		 * task.  It thus doesn't matter if events are added,
		 * removed, or a shutdown is started in the interval
		 * between the time we released the task lock, and the time
		 * we add the task to the ready queue.
		 */
		task_ready(task);
	}
}
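
#if 0
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * posting an event transfers ownership to the task; on return *eventp is
 * NULL and the task will run (and free) the event later.  The event type,
 * action and argument names below are hypothetical.
 */
static isc_result_t
example_post_work(isc_mem_t *mctx, isc_task_t *task,
		  isc_taskaction_t work_action, void *work_arg)
{
	isc_event_t *event;

	event = isc_event_allocate(mctx, NULL, MYAPP_EVENT_WORK,
				   work_action, work_arg, sizeof(*event));
	if (event == NULL)
		return (ISC_R_NOMEMORY);

	isc_task_send(task, &event);	/* consumes the event */
	INSIST(event == NULL);

	return (ISC_R_SUCCESS);
}
#endif /* usage sketch */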
void
isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
	isc_boolean_t idle1, idle2;
	isc__task_t *task;

	/*
	 * Send '*event' to '*taskp' and then detach '*taskp' from its
	 * task.
	 */

	REQUIRE(taskp != NULL);
	task = (isc__task_t *)*taskp;
	REQUIRE(VALID_TASK(task));

	XTRACE("isc_task_sendanddetach");

	LOCK(&task->lock);
	idle1 = task_send(task, eventp);
	idle2 = task_detach(task);
	UNLOCK(&task->lock);

	/*
	 * If idle1, then idle2 shouldn't be true as well since we're holding
	 * the task lock, and thus the task cannot switch from ready back to
	 * idle.
	 */
	INSIST(!(idle1 && idle2));

	if (idle1 || idle2)
		task_ready(task);

	*taskp = NULL;
}
#define PURGE_OK(event)	(((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
static unsigned int
dequeue_events(isc__task_t *task, void *sender, isc_eventtype_t first,
	       isc_eventtype_t last, void *tag,
	       isc_eventlist_t *events, isc_boolean_t purging)
{
	isc_event_t *event, *next_event;
	unsigned int count = 0;

	REQUIRE(VALID_TASK(task));
	REQUIRE(last >= first);

	XTRACE("dequeue_events");

	/*
	 * Events matching 'sender', whose type is >= first and <= last, and
	 * whose tag is 'tag' will be dequeued.  If 'purging', matching events
	 * which are marked as unpurgable will not be dequeued.
	 *
	 * sender == NULL means "any sender", and tag == NULL means "any tag".
	 */

	LOCK(&task->lock);

	for (event = HEAD(task->events); event != NULL; event = next_event) {
		next_event = NEXT(event, ev_link);
		if (event->ev_type >= first && event->ev_type <= last &&
		    (sender == NULL || event->ev_sender == sender) &&
		    (tag == NULL || event->ev_tag == tag) &&
		    (!purging || PURGE_OK(event))) {
			DEQUEUE(task->events, event, ev_link);
			task->nevents--;
			ENQUEUE(*events, event, ev_link);
			count++;
		}
	}

	UNLOCK(&task->lock);

	return (count);
}
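
/*
 * Usage sketch (editor's illustration, not part of the original source): the
 * NULL-wildcard matching above is what the public purge/unsend calls build
 * on.  For example, to drop every purgeable event of one type regardless of
 * sender or tag (MYAPP_EVENT_TIMEOUT is a hypothetical event type):
 *
 *	unsigned int n;
 *
 *	n = isc_task_purge(task, NULL, MYAPP_EVENT_TIMEOUT, NULL);
 *
 * n is the number of events that were dequeued and freed.
 */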
unsigned int
isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
		     isc_eventtype_t last, void *tag)
{
	isc__task_t *task = (isc__task_t *)task0;
	unsigned int count;
	isc_eventlist_t events;
	isc_event_t *event, *next_event;

	/*
	 * Purge events from a task's event queue.
	 */

	XTRACE("isc_task_purgerange");

	ISC_LIST_INIT(events);

	count = dequeue_events(task, sender, first, last, tag, &events,
			       ISC_TRUE);

	for (event = HEAD(events); event != NULL; event = next_event) {
		next_event = NEXT(event, ev_link);
		isc_event_free(&event);
	}

	/*
	 * Note that purging never changes the state of the task.
	 */

	return (count);
}

unsigned int
isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
		void *tag)
{
	/*
	 * Purge events from a task's event queue.
	 */

	XTRACE("isc_task_purge");

	return (isc__task_purgerange(task, sender, type, type, tag));
}
isc_boolean_t
isc_task_purgeevent(isc_task_t *task0, isc_event_t *event) {
	isc__task_t *task = (isc__task_t *)task0;
	isc_event_t *curr_event, *next_event;

	/*
	 * Purge 'event' from a task's event queue.
	 *
	 * XXXRTH:  WARNING:  This method may be removed before beta.
	 */

	REQUIRE(VALID_TASK(task));

	/*
	 * If 'event' is on the task's event queue, it will be purged,
	 * unless it is marked as unpurgeable.  'event' does not have to be
	 * on the task's event queue; in fact, it can even be an invalid
	 * pointer.  Purging only occurs if the event is actually on the task's
	 * event queue.
	 *
	 * Purging never changes the state of the task.
	 */

	LOCK(&task->lock);
	for (curr_event = HEAD(task->events);
	     curr_event != NULL;
	     curr_event = next_event) {
		next_event = NEXT(curr_event, ev_link);
		if (curr_event == event && PURGE_OK(event)) {
			DEQUEUE(task->events, curr_event, ev_link);
			task->nevents--;
			break;
		}
	}
	UNLOCK(&task->lock);

	if (curr_event == NULL)
		return (ISC_FALSE);

	isc_event_free(&curr_event);

	return (ISC_TRUE);
}
unsigned int
isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
		      isc_eventtype_t last, void *tag,
		      isc_eventlist_t *events)
{
	/*
	 * Remove events from a task's event queue.
	 */

	XTRACE("isc_task_unsendrange");

	return (dequeue_events((isc__task_t *)task, sender, first,
			       last, tag, events, ISC_FALSE));
}

unsigned int
isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
		 void *tag, isc_eventlist_t *events)
{
	/*
	 * Remove events from a task's event queue.
	 */

	XTRACE("isc_task_unsend");

	return (dequeue_events((isc__task_t *)task, sender, type,
			       type, tag, events, ISC_FALSE));
}
isc_result_t
isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
		     void *arg)
{
	isc__task_t *task = (isc__task_t *)task0;
	isc_boolean_t disallowed = ISC_FALSE;
	isc_result_t result = ISC_R_SUCCESS;
	isc_event_t *event;

	/*
	 * Send a shutdown event with action 'action' and argument 'arg' when
	 * 'task' is shutdown.
	 */

	REQUIRE(VALID_TASK(task));
	REQUIRE(action != NULL);

	event = isc_event_allocate(task->manager->mctx,
				   NULL,
				   ISC_TASKEVENT_SHUTDOWN,
				   action,
				   arg,
				   sizeof(*event));
	if (event == NULL)
		return (ISC_R_NOMEMORY);

	LOCK(&task->lock);
	if (TASK_SHUTTINGDOWN(task)) {
		disallowed = ISC_TRUE;
		result = ISC_R_SHUTTINGDOWN;
	} else
		ENQUEUE(task->on_shutdown, event, ev_link);
	UNLOCK(&task->lock);

	if (disallowed)
		isc_mem_put(task->manager->mctx, event, sizeof(*event));

	return (result);
}
void
isc__task_shutdown(isc_task_t *task0) {
	isc__task_t *task = (isc__task_t *)task0;
	isc_boolean_t was_idle;

	/*
	 * Shutdown 'task'.
	 */

	REQUIRE(VALID_TASK(task));

	LOCK(&task->lock);
	was_idle = task_shutdown(task);
	UNLOCK(&task->lock);

	if (was_idle)
		task_ready(task);
}
void
isc__task_destroy(isc_task_t **taskp) {

	/*
	 * Destroy '*taskp'.
	 */

	REQUIRE(taskp != NULL);

	isc_task_shutdown(*taskp);
	isc_task_detach(taskp);
}
void
isc__task_setname(isc_task_t *task0, const char *name, void *tag) {
	isc__task_t *task = (isc__task_t *)task0;

	/*
	 * Name 'task'.
	 */

	REQUIRE(VALID_TASK(task));

	LOCK(&task->lock);
	memset(task->name, 0, sizeof(task->name));
	strncpy(task->name, name, sizeof(task->name) - 1);
	task->tag = tag;
	UNLOCK(&task->lock);
}
const char *
isc__task_getname(isc_task_t *task0) {
	isc__task_t *task = (isc__task_t *)task0;

	REQUIRE(VALID_TASK(task));

	return (task->name);
}

void *
isc__task_gettag(isc_task_t *task0) {
	isc__task_t *task = (isc__task_t *)task0;

	REQUIRE(VALID_TASK(task));

	return (task->tag);
}

void
isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t) {
	isc__task_t *task = (isc__task_t *)task0;

	REQUIRE(VALID_TASK(task));
	REQUIRE(t != NULL);

	LOCK(&task->lock);
	*t = task->now;
	UNLOCK(&task->lock);
}
/***
 *** Task Manager.
 ***/

/*
 * Return ISC_TRUE if the current ready list for the manager (either
 * ready_tasks or ready_priority_tasks, depending on whether the manager
 * is currently in normal or privileged execution mode) is empty.
 *
 * Caller must hold the task manager lock.
 */
static inline isc_boolean_t
empty_readyq(isc__taskmgr_t *manager) {
	isc__tasklist_t queue;

	if (manager->mode == isc_taskmgrmode_normal)
		queue = manager->ready_tasks;
	else
		queue = manager->ready_priority_tasks;

	return (ISC_TF(EMPTY(queue)));
}
/*
 * Dequeue and return a pointer to the first task on the current ready
 * list for the manager.
 * If the task is privileged, dequeue it from the other ready list
 * as well.
 *
 * Caller must hold the task manager lock.
 */
static inline isc__task_t *
pop_readyq(isc__taskmgr_t *manager) {
	isc__task_t *task;

	if (manager->mode == isc_taskmgrmode_normal)
		task = HEAD(manager->ready_tasks);
	else
		task = HEAD(manager->ready_priority_tasks);

	if (task != NULL) {
		DEQUEUE(manager->ready_tasks, task, ready_link);
		if (ISC_LINK_LINKED(task, ready_priority_link))
			DEQUEUE(manager->ready_priority_tasks, task,
				ready_priority_link);
	}

	return (task);
}
/*
 * Push 'task' onto the ready_tasks queue.  If 'task' has the privilege
 * flag set, then also push it onto the ready_priority_tasks queue.
 *
 * Caller must hold the task manager lock.
 */
static inline void
push_readyq(isc__taskmgr_t *manager, isc__task_t *task) {
	ENQUEUE(manager->ready_tasks, task, ready_link);
	if ((task->flags & TASK_F_PRIVILEGED) != 0)
		ENQUEUE(manager->ready_priority_tasks, task,
			ready_priority_link);
	manager->tasks_ready++;
}
static void
dispatch(isc__taskmgr_t *manager) {
	isc__task_t *task;
#ifndef USE_WORKER_THREADS
	unsigned int total_dispatch_count = 0;
	isc__tasklist_t new_ready_tasks;
	isc__tasklist_t new_priority_tasks;
	unsigned int tasks_ready = 0;
#endif /* USE_WORKER_THREADS */

	REQUIRE(VALID_MANAGER(manager));

	/*
	 * Again we're trying to hold the lock for as short a time as possible
	 * and to do as little locking and unlocking as possible.
	 *
	 * In both while loops, the appropriate lock must be held before the
	 * while body starts.  Code which acquired the lock at the top of
	 * the loop would be more readable, but would result in a lot of
	 * extra locking.  Compare:
	 *
	 * Straightforward:
	 *
	 *	LOCK();
	 *	...
	 *	UNLOCK();
	 *	while (expression) {
	 *		LOCK();
	 *		...
	 *		UNLOCK();
	 *
	 *		Unlocked part here...
	 *
	 *		LOCK();
	 *		...
	 *		UNLOCK();
	 *	}
	 *
	 * Note how if the loop continues we unlock and then immediately lock.
	 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
	 * unlocks.  Also note that the lock is not held when the while
	 * condition is tested, which may or may not be important, depending
	 * on the expression.
	 *
	 * As written:
	 *
	 *	LOCK();
	 *	while (expression) {
	 *		...
	 *		UNLOCK();
	 *
	 *		Unlocked part here...
	 *
	 *		LOCK();
	 *		...
	 *	}
	 *	UNLOCK();
	 *
	 * For N iterations of the loop, this code does N+1 locks and N+1
	 * unlocks.  The while expression is always protected by the lock.
	 */

#ifndef USE_WORKER_THREADS
	ISC_LIST_INIT(new_ready_tasks);
	ISC_LIST_INIT(new_priority_tasks);
#endif
	LOCK(&manager->lock);

	while (!FINISHED(manager)) {
#ifdef USE_WORKER_THREADS
		/*
		 * For reasons similar to those given in the comment in
		 * isc_task_send() above, it is safe for us to dequeue
		 * the task while only holding the manager lock, and then
		 * change the task to running state while only holding the
		 * task lock.
		 *
		 * If a pause has been requested, don't do any work
		 * until it's been released.
		 */
		while ((empty_readyq(manager) || manager->pause_requested ||
			manager->exclusive_requested) && !FINISHED(manager))
		{
			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
						    ISC_MSGSET_GENERAL,
						    ISC_MSG_WAIT, "wait"));
			WAIT(&manager->work_available, &manager->lock);
			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
						    ISC_MSGSET_TASK,
						    ISC_MSG_AWAKE, "awake"));
		}
#else /* USE_WORKER_THREADS */
		if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
		    empty_readyq(manager))
			break;
#endif /* USE_WORKER_THREADS */
		XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
					    ISC_MSG_WORKING, "working"));

		task = pop_readyq(manager);
		if (task != NULL) {
			unsigned int dispatch_count = 0;
			isc_boolean_t done = ISC_FALSE;
			isc_boolean_t requeue = ISC_FALSE;
			isc_boolean_t finished = ISC_FALSE;
			isc_event_t *event;

			INSIST(VALID_TASK(task));

			/*
			 * Note we only unlock the manager lock if we actually
			 * have a task to do.  We must reacquire the manager
			 * lock before exiting the 'if (task != NULL)' block.
			 */
			manager->tasks_ready--;
			manager->tasks_running++;
			UNLOCK(&manager->lock);

			LOCK(&task->lock);
			INSIST(task->state == task_state_ready);
			task->state = task_state_running;
			XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
					      ISC_MSG_RUNNING, "running"));
			isc_stdtime_get(&task->now);
			do {
				if (!EMPTY(task->events)) {
					event = HEAD(task->events);
					DEQUEUE(task->events, event, ev_link);
					task->nevents--;

					/*
					 * Execute the event action.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_EXECUTE,
							      "execute action"));
					if (event->ev_action != NULL) {
						UNLOCK(&task->lock);
						(event->ev_action)(
							(isc_task_t *)task,
							event);
						LOCK(&task->lock);
					}
					dispatch_count++;
#ifndef USE_WORKER_THREADS
					total_dispatch_count++;
#endif /* USE_WORKER_THREADS */
				}

				if (task->references == 0 &&
				    EMPTY(task->events) &&
				    !TASK_SHUTTINGDOWN(task)) {
					isc_boolean_t was_idle;

					/*
					 * There are no references and no
					 * pending events for this task,
					 * which means it will not become
					 * runnable again via an external
					 * action (such as sending an event
					 * or detaching).
					 *
					 * We initiate shutdown to prevent
					 * it from becoming a zombie.
					 *
					 * We do this here instead of in
					 * the "if EMPTY(task->events)" block
					 * below because:
					 *
					 *	If we post no shutdown events,
					 *	we want the task to finish.
					 *
					 *	If we did post shutdown events,
					 *	will still want the task's
					 *	quantum to be applied.
					 */
					was_idle = task_shutdown(task);
					INSIST(!was_idle);
				}

				if (EMPTY(task->events)) {
					/*
					 * Nothing else to do for this task
					 * right now.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_EMPTY,
							      "empty"));
					if (task->references == 0 &&
					    TASK_SHUTTINGDOWN(task)) {
						/*
						 * The task is done.
						 */
						XTRACE(isc_msgcat_get(
							       isc_msgcat,
							       ISC_MSGSET_TASK,
							       ISC_MSG_DONE,
							       "done"));
						finished = ISC_TRUE;
						task->state = task_state_done;
					} else
						task->state = task_state_idle;
					done = ISC_TRUE;
				} else if (dispatch_count >= task->quantum) {
					/*
					 * Our quantum has expired, but
					 * there is more work to be done.
					 * We'll requeue it to the ready
					 * queue later.
					 *
					 * We don't check quantum until
					 * dispatching at least one event,
					 * so the minimum quantum is one.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_QUANTUM,
							      "quantum"));
					task->state = task_state_ready;
					requeue = ISC_TRUE;
					done = ISC_TRUE;
				}
			} while (!done);
			UNLOCK(&task->lock);

			if (finished)
				task_finished(task);

			LOCK(&manager->lock);
			manager->tasks_running--;
#ifdef USE_WORKER_THREADS
			if (manager->exclusive_requested &&
			    manager->tasks_running == 1) {
				SIGNAL(&manager->exclusive_granted);
			} else if (manager->pause_requested &&
				   manager->tasks_running == 0) {
				SIGNAL(&manager->paused);
			}
#endif /* USE_WORKER_THREADS */
			if (requeue) {
				/*
				 * We know we're awake, so we don't have
				 * to wakeup any sleeping threads if the
				 * ready queue is empty before we requeue.
				 *
				 * A possible optimization if the queue is
				 * empty is to 'goto' the 'if (task != NULL)'
				 * block, avoiding the ENQUEUE of the task
				 * and the subsequent immediate DEQUEUE
				 * (since it is the only executable task).
				 * We don't do this because then we'd be
				 * skipping the exit_requested check.  The
				 * cost of ENQUEUE is low anyway, especially
				 * when you consider that we'd have to do
				 * an extra EMPTY check to see if we could
				 * do the optimization.  If the ready queue
				 * were usually nonempty, the 'optimization'
				 * might even hurt rather than help.
				 */
#ifdef USE_WORKER_THREADS
				push_readyq(manager, task);
#else
				ENQUEUE(new_ready_tasks, task, ready_link);
				if ((task->flags & TASK_F_PRIVILEGED) != 0)
					ENQUEUE(new_priority_tasks, task,
						ready_priority_link);
				tasks_ready++;
#endif
			}
		}

#ifdef USE_WORKER_THREADS
		/*
		 * If we are in privileged execution mode and there are no
		 * tasks remaining on the current ready queue, then
		 * we're stuck.  Automatically drop privileges at that
		 * point and continue with the regular ready queue.
		 */
		if (manager->tasks_running == 0 && empty_readyq(manager)) {
			manager->mode = isc_taskmgrmode_normal;
			if (!empty_readyq(manager))
				BROADCAST(&manager->work_available);
		}
#endif
	}

#ifndef USE_WORKER_THREADS
	ISC_LIST_APPENDLIST(manager->ready_tasks, new_ready_tasks, ready_link);
	ISC_LIST_APPENDLIST(manager->ready_priority_tasks, new_priority_tasks,
			    ready_priority_link);
	manager->tasks_ready += tasks_ready;
	if (empty_readyq(manager))
		manager->mode = isc_taskmgrmode_normal;
#endif

	UNLOCK(&manager->lock);
}
#ifdef USE_WORKER_THREADS
static isc_threadresult_t
#ifdef _WIN32
WINAPI
#endif
run(void *uap) {
	isc__taskmgr_t *manager = uap;

	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				    ISC_MSG_STARTING, "starting"));

	dispatch(manager);

	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				    ISC_MSG_EXITING, "exiting"));

#ifdef OPENSSL_LEAKS
	ERR_remove_state(0);
#endif

	return ((isc_threadresult_t)0);
}
#endif /* USE_WORKER_THREADS */
static void
manager_free(isc__taskmgr_t *manager) {
	isc_mem_t *mctx;

#ifdef USE_WORKER_THREADS
	(void)isc_condition_destroy(&manager->exclusive_granted);
	(void)isc_condition_destroy(&manager->work_available);
	(void)isc_condition_destroy(&manager->paused);
	isc_mem_free(manager->mctx, manager->threads);
#endif /* USE_WORKER_THREADS */
	DESTROYLOCK(&manager->lock);
	manager->common.impmagic = 0;
	manager->common.magic = 0;
	mctx = manager->mctx;
	isc_mem_put(mctx, manager, sizeof(*manager));
	isc_mem_detach(&mctx);

#ifdef USE_SHARED_MANAGER
	taskmgr = NULL;
#endif /* USE_SHARED_MANAGER */
}
isc_result_t
isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
		    unsigned int default_quantum, isc_taskmgr_t **managerp)
{
	isc_result_t result;
	unsigned int i, started = 0;
	isc__taskmgr_t *manager;

	/*
	 * Create a new task manager.
	 */

	REQUIRE(workers > 0);
	REQUIRE(managerp != NULL && *managerp == NULL);

#ifndef USE_WORKER_THREADS
	UNUSED(i);
	UNUSED(started);
#endif

#ifdef USE_SHARED_MANAGER
	if (taskmgr != NULL) {
		if (taskmgr->refs == 0)
			return (ISC_R_SHUTTINGDOWN);
		taskmgr->refs++;
		*managerp = (isc_taskmgr_t *)taskmgr;
		return (ISC_R_SUCCESS);
	}
#endif /* USE_SHARED_MANAGER */

	manager = isc_mem_get(mctx, sizeof(*manager));
	if (manager == NULL)
		return (ISC_R_NOMEMORY);
	manager->common.methods = &taskmgrmethods;
	manager->common.impmagic = TASK_MANAGER_MAGIC;
	manager->common.magic = ISCAPI_TASKMGR_MAGIC;
	manager->mode = isc_taskmgrmode_normal;
	manager->mctx = NULL;
	result = isc_mutex_init(&manager->lock);
	if (result != ISC_R_SUCCESS)
		goto cleanup_mgr;

#ifdef USE_WORKER_THREADS
	manager->workers = 0;
	manager->threads = isc_mem_allocate(mctx,
					    workers * sizeof(isc_thread_t));
	if (manager->threads == NULL) {
		result = ISC_R_NOMEMORY;
		goto cleanup_lock;
	}
	if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		result = ISC_R_UNEXPECTED;
		goto cleanup_threads;
	}
	if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		result = ISC_R_UNEXPECTED;
		goto cleanup_workavailable;
	}
	if (isc_condition_init(&manager->paused) != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		result = ISC_R_UNEXPECTED;
		goto cleanup_exclusivegranted;
	}
#endif /* USE_WORKER_THREADS */
	if (default_quantum == 0)
		default_quantum = DEFAULT_DEFAULT_QUANTUM;
	manager->default_quantum = default_quantum;
	INIT_LIST(manager->tasks);
	INIT_LIST(manager->ready_tasks);
	INIT_LIST(manager->ready_priority_tasks);
	manager->tasks_running = 0;
	manager->tasks_ready = 0;
	manager->exclusive_requested = ISC_FALSE;
	manager->pause_requested = ISC_FALSE;
	manager->exiting = ISC_FALSE;
	manager->excl = NULL;

	isc_mem_attach(mctx, &manager->mctx);

#ifdef USE_WORKER_THREADS
	LOCK(&manager->lock);
	/*
	 * Start workers.
	 */
	for (i = 0; i < workers; i++) {
		if (isc_thread_create(run, manager,
				      &manager->threads[manager->workers]) ==
		    ISC_R_SUCCESS) {
			manager->workers++;
			started++;
		}
	}
	UNLOCK(&manager->lock);

	if (started == 0) {
		manager_free(manager);
		return (ISC_R_NOTHREADS);
	}
	isc_thread_setconcurrency(workers);
#endif /* USE_WORKER_THREADS */
#ifdef USE_SHARED_MANAGER
	manager->refs = 1;
	taskmgr = manager;
#endif /* USE_SHARED_MANAGER */

	*managerp = (isc_taskmgr_t *)manager;

	return (ISC_R_SUCCESS);

#ifdef USE_WORKER_THREADS
 cleanup_exclusivegranted:
	(void)isc_condition_destroy(&manager->exclusive_granted);
 cleanup_workavailable:
	(void)isc_condition_destroy(&manager->work_available);
 cleanup_threads:
	isc_mem_free(mctx, manager->threads);
 cleanup_lock:
	DESTROYLOCK(&manager->lock);
#endif
 cleanup_mgr:
	isc_mem_put(mctx, manager, sizeof(*manager));
	return (result);
}
void
isc__taskmgr_destroy(isc_taskmgr_t **managerp) {
	isc__taskmgr_t *manager;
	isc__task_t *task;
	unsigned int i;

	/*
	 * Destroy '*managerp'.
	 */

	REQUIRE(managerp != NULL);
	manager = (isc__taskmgr_t *)*managerp;
	REQUIRE(VALID_MANAGER(manager));

#ifndef USE_WORKER_THREADS
	UNUSED(i);
#endif /* USE_WORKER_THREADS */

#ifdef USE_SHARED_MANAGER
	manager->refs--;
	if (manager->refs > 0) {
		*managerp = NULL;
		return;
	}
#endif

	XTHREADTRACE("isc_taskmgr_destroy");
	/*
	 * Only one non-worker thread may ever call this routine.
	 * If a worker thread wants to initiate shutdown of the
	 * task manager, it should ask some non-worker thread to call
	 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
	 * that the startup thread is sleeping on.
	 */

	/*
	 * Detach the exclusive task before acquiring the manager lock.
	 */
	if (manager->excl != NULL)
		isc__task_detach((isc_task_t **) &manager->excl);

	/*
	 * Unlike elsewhere, we're going to hold this lock a long time.
	 * We need to do so, because otherwise the list of tasks could
	 * change while we were traversing it.
	 *
	 * This is also the only function where we will hold both the
	 * task manager lock and a task lock at the same time.
	 */

	LOCK(&manager->lock);

	/*
	 * Make sure we only get called once.
	 */
	INSIST(!manager->exiting);
	manager->exiting = ISC_TRUE;

	/*
	 * If privileged mode was on, turn it off.
	 */
	manager->mode = isc_taskmgrmode_normal;

	/*
	 * Post shutdown event(s) to every task (if they haven't already been
	 * posted).
	 */
	for (task = HEAD(manager->tasks);
	     task != NULL;
	     task = NEXT(task, link)) {
		LOCK(&task->lock);
		if (task_shutdown(task))
			push_readyq(manager, task);
		UNLOCK(&task->lock);
	}
#ifdef USE_WORKER_THREADS
	/*
	 * Wake up any sleeping workers.  This ensures we get work done if
	 * there's work left to do, and if there are already no tasks left
	 * it will cause the workers to see manager->exiting.
	 */
	BROADCAST(&manager->work_available);
	UNLOCK(&manager->lock);

	/*
	 * Wait for all the worker threads to exit.
	 */
	for (i = 0; i < manager->workers; i++)
		(void)isc_thread_join(manager->threads[i], NULL);
#else /* USE_WORKER_THREADS */
	/*
	 * Dispatch the shutdown events.
	 */
	UNLOCK(&manager->lock);
	while (isc__taskmgr_ready((isc_taskmgr_t *)manager))
		(void)isc__taskmgr_dispatch((isc_taskmgr_t *)manager);
	if (!ISC_LIST_EMPTY(manager->tasks))
		isc_mem_printallactive(stderr);
	INSIST(ISC_LIST_EMPTY(manager->tasks));
#ifdef USE_SHARED_MANAGER
	taskmgr = NULL;
#endif
#endif /* USE_WORKER_THREADS */

	manager_free(manager);

	*managerp = NULL;
}
void
isc__taskmgr_setmode(isc_taskmgr_t *manager0, isc_taskmgrmode_t mode) {
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;

	LOCK(&manager->lock);
	manager->mode = mode;
	UNLOCK(&manager->lock);
}

isc_taskmgrmode_t
isc__taskmgr_mode(isc_taskmgr_t *manager0) {
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
	isc_taskmgrmode_t mode;

	LOCK(&manager->lock);
	mode = manager->mode;
	UNLOCK(&manager->lock);

	return (mode);
}
#ifndef USE_WORKER_THREADS
isc_boolean_t
isc__taskmgr_ready(isc_taskmgr_t *manager0) {
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
	isc_boolean_t is_ready;

#ifdef USE_SHARED_MANAGER
	if (manager == NULL)
		manager = taskmgr;
#endif
	if (manager == NULL)
		return (ISC_FALSE);

	LOCK(&manager->lock);
	is_ready = !empty_readyq(manager);
	UNLOCK(&manager->lock);

	return (is_ready);
}

isc_result_t
isc__taskmgr_dispatch(isc_taskmgr_t *manager0) {
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;

#ifdef USE_SHARED_MANAGER
	if (manager == NULL)
		manager = taskmgr;
#endif
	if (manager == NULL)
		return (ISC_R_NOTFOUND);

	dispatch(manager);

	return (ISC_R_SUCCESS);
}
#else /* USE_WORKER_THREADS */
void
isc__taskmgr_pause(isc_taskmgr_t *manager0) {
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;

	LOCK(&manager->lock);
	while (manager->tasks_running > 0) {
		WAIT(&manager->paused, &manager->lock);
	}
	manager->pause_requested = ISC_TRUE;
	UNLOCK(&manager->lock);
}

void
isc__taskmgr_resume(isc_taskmgr_t *manager0) {
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;

	LOCK(&manager->lock);
	if (manager->pause_requested) {
		manager->pause_requested = ISC_FALSE;
		BROADCAST(&manager->work_available);
	}
	UNLOCK(&manager->lock);
}
#endif /* USE_WORKER_THREADS */
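
/*
 * Usage sketch (editor's illustration, not part of the original source): in
 * a non-threaded build the application drives the task manager itself from
 * its event loop, using the two calls above ('mgr' is a hypothetical
 * isc_taskmgr_t pointer):
 *
 *	while (isc__taskmgr_ready(mgr))
 *		(void)isc__taskmgr_dispatch(mgr);
 *
 * isc__taskmgr_destroy() above uses the same pattern to drain the shutdown
 * events it posts.
 */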
void
isc_taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0) {
	isc__taskmgr_t *mgr = (isc__taskmgr_t *) mgr0;
	isc__task_t *task = (isc__task_t *) task0;

	REQUIRE(VALID_MANAGER(mgr));
	REQUIRE(VALID_TASK(task));
	if (mgr->excl != NULL)
		isc__task_detach((isc_task_t **) &mgr->excl);
	isc__task_attach(task0, (isc_task_t **) &mgr->excl);
}
isc_result_t
isc_taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp) {
	isc__taskmgr_t *mgr = (isc__taskmgr_t *) mgr0;

	REQUIRE(VALID_MANAGER(mgr));
	REQUIRE(taskp != NULL && *taskp == NULL);

	if (mgr->excl == NULL)
		return (ISC_R_NOTFOUND);

	isc__task_attach((isc_task_t *) mgr->excl, taskp);
	return (ISC_R_SUCCESS);
}
isc_result_t
isc__task_beginexclusive(isc_task_t *task0) {
#ifdef USE_WORKER_THREADS
	isc__task_t *task = (isc__task_t *)task0;
	isc__taskmgr_t *manager = task->manager;

	REQUIRE(task->state == task_state_running);
	/* XXX: Require task == manager->excl? */

	LOCK(&manager->lock);
	if (manager->exclusive_requested) {
		UNLOCK(&manager->lock);
		return (ISC_R_LOCKBUSY);
	}
	manager->exclusive_requested = ISC_TRUE;
	while (manager->tasks_running > 1) {
		WAIT(&manager->exclusive_granted, &manager->lock);
	}
	UNLOCK(&manager->lock);
#else
	UNUSED(task0);
#endif
	return (ISC_R_SUCCESS);
}
void
isc__task_endexclusive(isc_task_t *task0) {
#ifdef USE_WORKER_THREADS
	isc__task_t *task = (isc__task_t *)task0;
	isc__taskmgr_t *manager = task->manager;

	REQUIRE(task->state == task_state_running);
	LOCK(&manager->lock);
	REQUIRE(manager->exclusive_requested);
	manager->exclusive_requested = ISC_FALSE;
	BROADCAST(&manager->work_available);
	UNLOCK(&manager->lock);
#else
	UNUSED(task0);
#endif
}
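
#if 0
/*
 * Usage sketch (editor's illustration, not part of the original source): an
 * event action running on the manager's exclusive task can block out all
 * other running tasks around a critical section.  reconfigure_everything()
 * is a hypothetical helper.
 */
static void
example_exclusive_action(isc_task_t *task, isc_event_t *event) {
	if (isc_task_beginexclusive(task) == ISC_R_SUCCESS) {
		reconfigure_everything();
		isc_task_endexclusive(task);
	}
	isc_event_free(&event);
}
#endif /* usage sketch */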
void
isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv) {
	isc__task_t *task = (isc__task_t *)task0;
	isc__taskmgr_t *manager = task->manager;
	isc_boolean_t oldpriv;

	LOCK(&task->lock);
	oldpriv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
	if (priv)
		task->flags |= TASK_F_PRIVILEGED;
	else
		task->flags &= ~TASK_F_PRIVILEGED;
	UNLOCK(&task->lock);

	if (priv == oldpriv)
		return;

	LOCK(&manager->lock);
	if (priv && ISC_LINK_LINKED(task, ready_link))
		ENQUEUE(manager->ready_priority_tasks, task,
			ready_priority_link);
	else if (!priv && ISC_LINK_LINKED(task, ready_priority_link))
		DEQUEUE(manager->ready_priority_tasks, task,
			ready_priority_link);
	UNLOCK(&manager->lock);
}
isc_boolean_t
isc__task_privilege(isc_task_t *task0) {
	isc__task_t *task = (isc__task_t *)task0;
	isc_boolean_t priv;

	LOCK(&task->lock);
	priv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
	UNLOCK(&task->lock);

	return (priv);
}
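
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * privileged tasks are only scheduled ahead of others while the manager is
 * in privileged mode; dispatch() above drops back to normal mode once no
 * privileged work remains.  'loadzone_task' and 'taskmgr' are hypothetical:
 *
 *	isc_task_setprivilege(loadzone_task, ISC_TRUE);
 *	isc_taskmgr_setmode(taskmgr, isc_taskmgrmode_privileged);
 *
 * Only privileged tasks run until their ready queue drains.
 */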
isc_result_t
isc__task_register(void) {
	return (isc_task_register(isc__taskmgr_create));
}
isc_boolean_t
isc_task_exiting(isc_task_t *t) {
	isc__task_t *task = (isc__task_t *)t;

	REQUIRE(VALID_TASK(task));
	return (TASK_SHUTTINGDOWN(task));
}
#ifdef HAVE_LIBXML2
#define TRY0(a) do { xmlrc = (a); if (xmlrc < 0) goto error; } while(/*CONSTCOND*/0)
int
isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, xmlTextWriterPtr writer) {
	isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
	isc__task_t *task = NULL;
	int xmlrc;

	LOCK(&mgr->lock);

	/*
	 * Write out the thread-model, and some details about each depending
	 * on which type is enabled.
	 */
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model"));
#ifdef ISC_PLATFORM_USETHREADS
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "type"));
	TRY0(xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded"));
	TRY0(xmlTextWriterEndElement(writer)); /* type */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->workers));
	TRY0(xmlTextWriterEndElement(writer)); /* worker-threads */
#else /* ISC_PLATFORM_USETHREADS */
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "type"));
	TRY0(xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded"));
	TRY0(xmlTextWriterEndElement(writer)); /* type */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "references"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->refs));
	TRY0(xmlTextWriterEndElement(writer)); /* references */
#endif /* ISC_PLATFORM_USETHREADS */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d",
					    mgr->default_quantum));
	TRY0(xmlTextWriterEndElement(writer)); /* default-quantum */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running));
	TRY0(xmlTextWriterEndElement(writer)); /* tasks-running */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-ready"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_ready));
	TRY0(xmlTextWriterEndElement(writer)); /* tasks-ready */

	TRY0(xmlTextWriterEndElement(writer)); /* thread-model */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks"));
	task = ISC_LIST_HEAD(mgr->tasks);
	while (task != NULL) {
		LOCK(&task->lock);
		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "task"));

		if (task->name[0] != 0) {
			TRY0(xmlTextWriterStartElement(writer,
						       ISC_XMLCHAR "name"));
			TRY0(xmlTextWriterWriteFormatString(writer, "%s",
							    task->name));
			TRY0(xmlTextWriterEndElement(writer)); /* name */
		}

		TRY0(xmlTextWriterStartElement(writer,
					       ISC_XMLCHAR "references"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%d",
						    task->references));
		TRY0(xmlTextWriterEndElement(writer)); /* references */

		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "id"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%p", task));
		TRY0(xmlTextWriterEndElement(writer)); /* id */

		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "state"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%s",
						    statenames[task->state]));
		TRY0(xmlTextWriterEndElement(writer)); /* state */

		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%d",
						    task->quantum));
		TRY0(xmlTextWriterEndElement(writer)); /* quantum */

		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "events"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%d",
						    task->nevents));
		TRY0(xmlTextWriterEndElement(writer)); /* events */

		TRY0(xmlTextWriterEndElement(writer));

		UNLOCK(&task->lock);
		task = ISC_LIST_NEXT(task, link);
	}
	TRY0(xmlTextWriterEndElement(writer)); /* tasks */

 error:
	if (task != NULL)
		UNLOCK(&task->lock);
	UNLOCK(&mgr->lock);

	return (xmlrc);
}
#endif /* HAVE_LIBXML2 */
#ifdef HAVE_JSON
#define CHECKMEM(m) do { \
	if (m == NULL) { \
		result = ISC_R_NOMEMORY;\
		goto error;\
	} \
} while(/*CONSTCOND*/0)
isc_result_t
isc_taskmgr_renderjson(isc_taskmgr_t *mgr0, json_object *tasks) {
	isc_result_t result = ISC_R_SUCCESS;
	isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
	isc__task_t *task = NULL;
	json_object *obj = NULL, *array = NULL, *taskobj = NULL;

	LOCK(&mgr->lock);

	/*
	 * Write out the thread-model, and some details about each depending
	 * on which type is enabled.
	 */
#ifdef ISC_PLATFORM_USETHREADS
	obj = json_object_new_string("threaded");
	CHECKMEM(obj);
	json_object_object_add(tasks, "thread-model", obj);

	obj = json_object_new_int(mgr->workers);
	CHECKMEM(obj);
	json_object_object_add(tasks, "worker-threads", obj);
#else /* ISC_PLATFORM_USETHREADS */
	obj = json_object_new_string("non-threaded");
	CHECKMEM(obj);
	json_object_object_add(tasks, "thread-model", obj);

	obj = json_object_new_int(mgr->refs);
	CHECKMEM(obj);
	json_object_object_add(tasks, "references", obj);
#endif /* ISC_PLATFORM_USETHREADS */

	obj = json_object_new_int(mgr->default_quantum);
	CHECKMEM(obj);
	json_object_object_add(tasks, "default-quantum", obj);

	obj = json_object_new_int(mgr->tasks_running);
	CHECKMEM(obj);
	json_object_object_add(tasks, "tasks-running", obj);

	obj = json_object_new_int(mgr->tasks_ready);
	CHECKMEM(obj);
	json_object_object_add(tasks, "tasks-ready", obj);

	array = json_object_new_array();
	CHECKMEM(array);

	for (task = ISC_LIST_HEAD(mgr->tasks);
	     task != NULL;
	     task = ISC_LIST_NEXT(task, link))
	{
		char buf[255];

		LOCK(&task->lock);

		taskobj = json_object_new_object();
		CHECKMEM(taskobj);
		json_object_array_add(array, taskobj);

		sprintf(buf, "%p", task);
		obj = json_object_new_string(buf);
		CHECKMEM(obj);
		json_object_object_add(taskobj, "id", obj);

		if (task->name[0] != 0) {
			obj = json_object_new_string(task->name);
			CHECKMEM(obj);
			json_object_object_add(taskobj, "name", obj);
		}

		obj = json_object_new_int(task->references);
		CHECKMEM(obj);
		json_object_object_add(taskobj, "references", obj);

		obj = json_object_new_string(statenames[task->state]);
		CHECKMEM(obj);
		json_object_object_add(taskobj, "state", obj);

		obj = json_object_new_int(task->quantum);
		CHECKMEM(obj);
		json_object_object_add(taskobj, "quantum", obj);

		obj = json_object_new_int(task->nevents);
		CHECKMEM(obj);
		json_object_object_add(taskobj, "events", obj);

		UNLOCK(&task->lock);
	}

	json_object_object_add(tasks, "tasks", array);
	array = NULL;
	result = ISC_R_SUCCESS;

 error:
	if (array != NULL)
		json_object_put(array);

	if (task != NULL)
		UNLOCK(&task->lock);
	UNLOCK(&mgr->lock);

	return (result);
}
#endif /* HAVE_JSON */
;
1992 static isc_once_t once
= ISC_ONCE_INIT
;
1993 static isc_taskmgrcreatefunc_t taskmgr_createfunc
= NULL
;
1997 RUNTIME_CHECK(isc_mutex_init(&createlock
) == ISC_R_SUCCESS
);
isc_result_t
isc_task_register(isc_taskmgrcreatefunc_t createfunc) {
	isc_result_t result = ISC_R_SUCCESS;

	RUNTIME_CHECK(isc_once_do(&once, initialize) == ISC_R_SUCCESS);

	LOCK(&createlock);
	if (taskmgr_createfunc == NULL)
		taskmgr_createfunc = createfunc;
	else
		result = ISC_R_EXISTS;
	UNLOCK(&createlock);

	return (result);
}
*mctx
, isc_appctx_t
*actx
,
2018 unsigned int workers
, unsigned int default_quantum
,
2019 isc_taskmgr_t
**managerp
)
2021 isc_result_t result
;
2025 REQUIRE(taskmgr_createfunc
!= NULL
);
2026 result
= (*taskmgr_createfunc
)(mctx
, workers
, default_quantum
,
2029 UNLOCK(&createlock
);
2031 if (result
== ISC_R_SUCCESS
)
2032 isc_appctx_settaskmgr(actx
, *managerp
);
isc_result_t
isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers,
		   unsigned int default_quantum, isc_taskmgr_t **managerp)
{
	isc_result_t result;

	if (isc_bind9)
		return (isc__taskmgr_create(mctx, workers,
					    default_quantum, managerp));

	LOCK(&createlock);

	REQUIRE(taskmgr_createfunc != NULL);
	result = (*taskmgr_createfunc)(mctx, workers, default_quantum,
				       managerp);

	UNLOCK(&createlock);

	return (result);
}
void
isc_taskmgr_destroy(isc_taskmgr_t **managerp) {
	REQUIRE(managerp != NULL && ISCAPI_TASKMGR_VALID(*managerp));

	if (isc_bind9)
		isc__taskmgr_destroy(managerp);
	else
		(*managerp)->methods->destroy(managerp);

	ENSURE(*managerp == NULL);
}
void
isc_taskmgr_setmode(isc_taskmgr_t *manager, isc_taskmgrmode_t mode) {
	REQUIRE(ISCAPI_TASKMGR_VALID(manager));

	if (isc_bind9)
		isc__taskmgr_setmode(manager, mode);
	else
		manager->methods->setmode(manager, mode);
}
isc_taskmgrmode_t
isc_taskmgr_mode(isc_taskmgr_t *manager) {
	REQUIRE(ISCAPI_TASKMGR_VALID(manager));

	if (isc_bind9)
		return (isc__taskmgr_mode(manager));

	return (manager->methods->mode(manager));
}
isc_result_t
isc_task_create(isc_taskmgr_t *manager, unsigned int quantum,
		isc_task_t **taskp)
{
	REQUIRE(ISCAPI_TASKMGR_VALID(manager));
	REQUIRE(taskp != NULL && *taskp == NULL);

	if (isc_bind9)
		return (isc__task_create(manager, quantum, taskp));

	return (manager->methods->taskcreate(manager, quantum, taskp));
}
void
isc_task_attach(isc_task_t *source, isc_task_t **targetp) {
	REQUIRE(ISCAPI_TASK_VALID(source));
	REQUIRE(targetp != NULL && *targetp == NULL);

	if (isc_bind9)
		isc__task_attach(source, targetp);
	else
		source->methods->attach(source, targetp);

	ENSURE(*targetp == source);
}
void
isc_task_detach(isc_task_t **taskp) {
	REQUIRE(taskp != NULL && ISCAPI_TASK_VALID(*taskp));

	if (isc_bind9)
		isc__task_detach(taskp);
	else
		(*taskp)->methods->detach(taskp);

	ENSURE(*taskp == NULL);
}
void
isc_task_send(isc_task_t *task, isc_event_t **eventp) {
	REQUIRE(ISCAPI_TASK_VALID(task));
	REQUIRE(eventp != NULL && *eventp != NULL);

	if (isc_bind9)
		isc__task_send(task, eventp);
	else {
		task->methods->send(task, eventp);
		ENSURE(*eventp == NULL);
	}
}
void
isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
	REQUIRE(taskp != NULL && ISCAPI_TASK_VALID(*taskp));
	REQUIRE(eventp != NULL && *eventp != NULL);

	if (isc_bind9)
		isc__task_sendanddetach(taskp, eventp);
	else {
		(*taskp)->methods->sendanddetach(taskp, eventp);
		ENSURE(*eventp == NULL);
	}

	ENSURE(*taskp == NULL);
}
unsigned int
isc_task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
		void *tag, isc_eventlist_t *events)
{
	REQUIRE(ISCAPI_TASK_VALID(task));

	if (isc_bind9)
		return (isc__task_unsend(task, sender, type, tag, events));

	return (task->methods->unsend(task, sender, type, tag, events));
}
isc_result_t
isc_task_onshutdown(isc_task_t *task, isc_taskaction_t action, void *arg)
{
	REQUIRE(ISCAPI_TASK_VALID(task));

	if (isc_bind9)
		return (isc__task_onshutdown(task, action, arg));

	return (task->methods->onshutdown(task, action, arg));
}
void
isc_task_shutdown(isc_task_t *task) {
	REQUIRE(ISCAPI_TASK_VALID(task));

	if (isc_bind9)
		isc__task_shutdown(task);
	else
		task->methods->shutdown(task);
}
void
isc_task_destroy(isc_task_t **taskp) {
	REQUIRE(taskp != NULL && ISCAPI_TASK_VALID(*taskp));

	if (isc_bind9)
		isc__task_destroy(taskp);
	else
		(*taskp)->methods->destroy(taskp);
}
void
isc_task_setname(isc_task_t *task, const char *name, void *tag) {
	REQUIRE(ISCAPI_TASK_VALID(task));

	if (isc_bind9)
		isc__task_setname(task, name, tag);
	else
		task->methods->setname(task, name, tag);
}
unsigned int
isc_task_purge(isc_task_t *task, void *sender, isc_eventtype_t type, void *tag)
{
	REQUIRE(ISCAPI_TASK_VALID(task));

	if (isc_bind9)
		return (isc__task_purge(task, sender, type, tag));

	return (task->methods->purgeevents(task, sender, type, tag));
}
isc_result_t
isc_task_beginexclusive(isc_task_t *task) {
	REQUIRE(ISCAPI_TASK_VALID(task));

	if (isc_bind9)
		return (isc__task_beginexclusive(task));

	return (task->methods->beginexclusive(task));
}
void
isc_task_endexclusive(isc_task_t *task) {
	REQUIRE(ISCAPI_TASK_VALID(task));

	if (isc_bind9)
		isc__task_endexclusive(task);
	else
		task->methods->endexclusive(task);
}
void
isc_task_setprivilege(isc_task_t *task, isc_boolean_t priv) {
	REQUIRE(ISCAPI_TASK_VALID(task));

	if (isc_bind9)
		isc__task_setprivilege(task, priv);
	else
		task->methods->setprivilege(task, priv);
}
isc_boolean_t
isc_task_privilege(isc_task_t *task) {
	REQUIRE(ISCAPI_TASK_VALID(task));

	if (isc_bind9)
		return (isc__task_privilege(task));

	return (task->methods->privilege(task));
}
void
isc_task_getcurrenttime(isc_task_t *task, isc_stdtime_t *t) {
	REQUIRE(ISCAPI_TASK_VALID(task));

	if (isc_bind9)
		isc__task_getcurrenttime(task, t);
}
/*%
 * This is necessary for libisc's internal timer implementation.  Other
 * implementations might skip implementing this.
 */
unsigned int
isc_task_purgerange(isc_task_t *task, void *sender, isc_eventtype_t first,
		    isc_eventtype_t last, void *tag)
{
	REQUIRE(ISCAPI_TASK_VALID(task));

	if (isc_bind9)
		return (isc__task_purgerange(task, sender, first, last, tag));

	return (task->methods->purgerange(task, sender, first, last, tag));
}