/*
 * Copyright 2009, Colin Günther, coling@gmx.de
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Hugo Santos, hugosantos@gmail.com
 */

#include <compat/sys/taskqueue.h>
#include <compat/sys/haiku-module.h>

#define TQ_FLAGS_ACTIVE		(1 << 0)
#define TQ_FLAGS_BLOCKED	(1 << 1)
#define TQ_FLAGS_PENDING	(1 << 2)
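
/* TQ_FLAGS_ACTIVE is set for the lifetime of a queue. TQ_FLAGS_BLOCKED
 * suppresses the enqueue callback (see taskqueue_block()), and
 * TQ_FLAGS_PENDING records that a callback was suppressed so that
 * taskqueue_unblock() can issue it later. */
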
struct taskqueue {
	char tq_name[64];
	mutex tq_mutex;
	struct list tq_list;
	taskqueue_enqueue_fn tq_enqueue;
	void *tq_arg;
	int tq_fast;
	spinlock tq_spinlock;
	sem_id tq_sem;
	thread_id *tq_threads;
	thread_id tq_thread_storage;
	int tq_threadcount;
	int tq_flags;
};

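/* The global queues below correspond to FreeBSD's standard "fast" and
 * software-interrupt ("swi") task queues; they are created on demand by the
 * module initialization code at the end of this file. */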
struct taskqueue *taskqueue_fast = NULL;
struct taskqueue *taskqueue_swi = NULL;

static struct taskqueue *
_taskqueue_create(const char *name, int mflags, int fast,
	taskqueue_enqueue_fn enqueueFunction, void *context)
{
	struct taskqueue *tq = malloc(sizeof(struct taskqueue));
	if (tq == NULL)
		return NULL;

	tq->tq_fast = fast;

	/* "fast" queues may be used from interrupt context and therefore use a
	 * spinlock; regular queues use a mutex. */
	if (fast) {
		B_INITIALIZE_SPINLOCK(&tq->tq_spinlock);
	} else {
		mutex_init_etc(&tq->tq_mutex, name, MUTEX_FLAG_CLONE_NAME);
	}

	strlcpy(tq->tq_name, name, sizeof(tq->tq_name));
	list_init_etc(&tq->tq_list, offsetof(struct task, ta_link));
	tq->tq_enqueue = enqueueFunction;
	tq->tq_arg = context;

	tq->tq_sem = -1;
	tq->tq_threads = NULL;
	tq->tq_threadcount = 0;
	tq->tq_flags = TQ_FLAGS_ACTIVE;

	return tq;
}

static void
tq_lock(struct taskqueue *taskQueue, cpu_status *status)
{
	if (taskQueue->tq_fast) {
		*status = disable_interrupts();
		acquire_spinlock(&taskQueue->tq_spinlock);
	} else {
		mutex_lock(&taskQueue->tq_mutex);
	}
}

static void
tq_unlock(struct taskqueue *taskQueue, cpu_status status)
{
	if (taskQueue->tq_fast) {
		release_spinlock(&taskQueue->tq_spinlock);
		restore_interrupts(status);
	} else {
		mutex_unlock(&taskQueue->tq_mutex);
	}
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
	taskqueue_enqueue_fn enqueueFunction, void *context)
{
	return _taskqueue_create(name, mflags, 0, enqueueFunction, context);
}

static int32
tq_handle_thread(void *data)
{
	struct taskqueue *tq = data;
	cpu_status cpu_state;
	struct task *t;
	int pending;
	sem_id sem;

	/* just a synchronization point */
	tq_lock(tq, &cpu_state);
	sem = tq->tq_sem;
	tq_unlock(tq, cpu_state);

	while (acquire_sem(sem) == B_NO_ERROR) {
		tq_lock(tq, &cpu_state);
		t = list_remove_head_item(&tq->tq_list);
		tq_unlock(tq, cpu_state);
		if (t == NULL)
			continue;

		pending = t->ta_pending;
		t->ta_pending = 0;

		t->ta_handler(t->ta_argument, pending);
	}

	return 0;
}

static int
_taskqueue_start_threads(struct taskqueue **taskQueue, int count, int priority,
	const char *name)
{
	struct taskqueue *tq = (*taskQueue);
	int i, j;

	if (count == 0)
		return -1;

	if (tq->tq_threads != NULL)
		return -1;

	/* a single thread uses the in-object storage, more than one needs an
	 * allocated array */
	if (count == 1) {
		tq->tq_threads = &tq->tq_thread_storage;
	} else {
		tq->tq_threads = malloc(sizeof(thread_id) * count);
		if (tq->tq_threads == NULL)
			return B_NO_MEMORY;
	}

	tq->tq_sem = create_sem(0, tq->tq_name);
	if (tq->tq_sem < B_OK) {
		if (count > 1)
			free(tq->tq_threads);
		tq->tq_threads = NULL;
		return tq->tq_sem;
	}

	for (i = 0; i < count; i++) {
		tq->tq_threads[i] = spawn_kernel_thread(tq_handle_thread, tq->tq_name,
			priority, tq);
		if (tq->tq_threads[i] < B_OK) {
			status_t status = tq->tq_threads[i];
			for (j = 0; j < i; j++)
				kill_thread(tq->tq_threads[j]);
			if (count > 1)
				free(tq->tq_threads);
			tq->tq_threads = NULL;
			delete_sem(tq->tq_sem);
			return status;
		}
	}

	tq->tq_threadcount = count;

	for (i = 0; i < count; i++)
		resume_thread(tq->tq_threads[i]);

	return 0;
}

int
taskqueue_start_threads(struct taskqueue **taskQueue, int count, int priority,
	const char *format, ...)
{
	/* we assume that start_threads is called in a sane place, and thus
	 * doesn't need to be locked. This is mostly due to the fact that if
	 * the TQ is 'fast', locking the TQ disables interrupts... and then
	 * we can't create semaphores, threads and bananas. */

	/* cpu_status state; */
	char name[64];
	int result;
	va_list vl;

	va_start(vl, format);
	vsnprintf(name, sizeof(name), format, vl);
	va_end(vl);

	/*tq_lock(*tqp, &state);*/
	result = _taskqueue_start_threads(taskQueue, count, priority, name);
	/*tq_unlock(*tqp, state);*/

	return result;
}

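/*
 * Usage sketch (illustrative only; sDriverQueue and the queue names below are
 * placeholder driver-side values, not part of this file): a driver typically
 * pairs taskqueue_create() with taskqueue_thread_enqueue and the address of
 * its own queue pointer, then starts a single worker thread:
 *
 *	static struct taskqueue *sDriverQueue = NULL;
 *
 *	sDriverQueue = taskqueue_create("my driver taskq", 0,
 *		taskqueue_thread_enqueue, &sDriverQueue);
 *	if (sDriverQueue == NULL)
 *		return B_NO_MEMORY;
 *	if (taskqueue_start_threads(&sDriverQueue, 1, B_NORMAL_PRIORITY,
 *			"my driver taskq") < 0)
 *		taskqueue_free(sDriverQueue);
 */
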
void
taskqueue_free(struct taskqueue *taskQueue)
{
	/* lock and drain list? */
	taskQueue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	if (!taskQueue->tq_fast)
		mutex_destroy(&taskQueue->tq_mutex);

	if (taskQueue->tq_sem != -1) {
		int i;

		/* deleting the semaphore wakes the worker threads so they can exit */
		delete_sem(taskQueue->tq_sem);

		for (i = 0; i < taskQueue->tq_threadcount; i++) {
			status_t status;
			wait_for_thread(taskQueue->tq_threads[i], &status);
		}

		if (taskQueue->tq_threadcount > 1)
			free(taskQueue->tq_threads);
	}

	free(taskQueue);
}

void
taskqueue_drain(struct taskqueue *taskQueue, struct task *task)
{
	cpu_status status;

	tq_lock(taskQueue, &status);
	while (task->ta_pending != 0) {
		tq_unlock(taskQueue, status);
		snooze(0);
		tq_lock(taskQueue, &status);
	}
	tq_unlock(taskQueue, status);
}

int
taskqueue_enqueue(struct taskqueue *taskQueue, struct task *task)
{
	cpu_status status;

	tq_lock(taskQueue, &status);

	/* we don't really support priorities */
	if (task->ta_pending) {
		task->ta_pending++;
	} else {
		list_add_item(&taskQueue->tq_list, task);
		task->ta_pending = 1;
		if ((taskQueue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
			taskQueue->tq_enqueue(taskQueue->tq_arg);
		else
			taskQueue->tq_flags |= TQ_FLAGS_PENDING;
	}

	tq_unlock(taskQueue, status);
	return 0;
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp = context;
	release_sem_etc((*tqp)->tq_sem, 1, B_DO_NOT_RESCHEDULE);
}

int
taskqueue_enqueue_fast(struct taskqueue *taskQueue, struct task *task)
{
	return taskqueue_enqueue(taskQueue, task);
}

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
	taskqueue_enqueue_fn enqueueFunction, void *context)
{
	return _taskqueue_create(name, mflags, 1, enqueueFunction, context);
}

void
task_init(struct task *task, int prio, task_handler_t handler, void *context)
{
	task->ta_priority = prio;
	task->ta_handler = handler;
	task->ta_argument = context;
	task->ta_pending = 0;
}

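/*
 * Example (illustrative; my_task_handler, driverCookie and sDriverQueue are
 * hypothetical driver-side names): a task is initialized once and may then
 * be queued repeatedly. Re-queueing a task that has not run yet only bumps
 * ta_pending; the handler still runs once and receives the pending count.
 *
 *	static struct task sRxTask;
 *
 *	task_init(&sRxTask, 0, my_task_handler, driverCookie);
 *	taskqueue_enqueue(sDriverQueue, &sRxTask);
 *	taskqueue_drain(sDriverQueue, &sRxTask);
 */
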
status_t
init_taskqueues()
{
	status_t status = B_NO_MEMORY;

	if (HAIKU_DRIVER_REQUIRES(FBSD_FAST_TASKQUEUE)) {
		taskqueue_fast = taskqueue_create_fast("fast taskq", 0,
			taskqueue_thread_enqueue, &taskqueue_fast);
		if (taskqueue_fast == NULL)
			return B_NO_MEMORY;

		status = taskqueue_start_threads(&taskqueue_fast, 1,
			B_REAL_TIME_PRIORITY, "fast taskq thread");
		if (status < B_OK)
			goto err_1;
	}

	if (HAIKU_DRIVER_REQUIRES(FBSD_SWI_TASKQUEUE)) {
		taskqueue_swi = taskqueue_create_fast("swi taskq", 0,
			taskqueue_thread_enqueue, &taskqueue_swi);
		if (taskqueue_swi == NULL) {
			status = B_NO_MEMORY;
			goto err_1;
		}

		status = taskqueue_start_threads(&taskqueue_swi, 1,
			B_REAL_TIME_PRIORITY, "swi taskq");
		if (status < B_OK)
			goto err_2;
	}

	return B_OK;

err_2:
	if (taskqueue_swi != NULL)
		taskqueue_free(taskqueue_swi);

err_1:
	if (taskqueue_fast != NULL)
		taskqueue_free(taskqueue_fast);

	return status;
}

void
uninit_taskqueues()
{
	if (HAIKU_DRIVER_REQUIRES(FBSD_SWI_TASKQUEUE))
		taskqueue_free(taskqueue_swi);

	if (HAIKU_DRIVER_REQUIRES(FBSD_FAST_TASKQUEUE))
		taskqueue_free(taskqueue_fast);
}

void
taskqueue_block(struct taskqueue *taskQueue)
{
	cpu_status status;

	tq_lock(taskQueue, &status);
	taskQueue->tq_flags |= TQ_FLAGS_BLOCKED;
	tq_unlock(taskQueue, status);
}

void
taskqueue_unblock(struct taskqueue *taskQueue)
{
	cpu_status status;

	tq_lock(taskQueue, &status);
	taskQueue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (taskQueue->tq_flags & TQ_FLAGS_PENDING) {
		taskQueue->tq_flags &= ~TQ_FLAGS_PENDING;
		taskQueue->tq_enqueue(taskQueue->tq_arg);
	}
	tq_unlock(taskQueue, status);
}