Expose confdb write to the library.
[openais.git] / exec / wthread.c
/*
 * Copyright (c) 2005 MontaVista Software, Inc.
 * Copyright (c) 2006 Red Hat, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@mvista.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Add work to a work group and have threads process the work
 * Provide blocking for all work to complete
 */
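/*
 * Example usage (an illustrative sketch; the worker function, item
 * type, and sizing constants below are hypothetical, not part of this
 * file):
 *
 *	static void print_item (void *thread_state, void *work_item)
 *	{
 *		printf ("%s", (char *)work_item);
 *	}
 *
 *	struct worker_thread_group group;
 *	char item[32] = "hello\n";
 *
 *	worker_thread_group_init (&group, 4, 128, sizeof (item), 0,
 *		NULL, print_item);
 *	worker_thread_group_work_add (&group, item);
 *	worker_thread_group_wait (&group);
 *	worker_thread_group_exit (&group);
 */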
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include "wthread.h"
#include "../include/queue.h"
struct thread_data {
	void *thread_state;
	void *data;
};
struct worker_thread {
	struct worker_thread_group *worker_thread_group;
	pthread_mutex_t new_work_mutex;
	pthread_cond_t new_work_cond;
	pthread_cond_t cond;
	pthread_mutex_t done_work_mutex;
	pthread_cond_t done_work_cond;
	pthread_t thread_id;
	struct queue queue;
	void *thread_state;
	struct thread_data thread_data;
};
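/*
 * Per-thread dispatch loop: sleep until work is queued, then run the
 * group's worker_fn on each queued item.  The queue entry is removed
 * only after worker_fn returns, so worker_thread_group_wait () can use
 * queue emptiness to mean "all work complete".
 */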
void *worker_thread (void *thread_data_in) {
	struct thread_data *thread_data = (struct thread_data *)thread_data_in;
	struct worker_thread *worker_thread =
		(struct worker_thread *)thread_data->data;
	void *data_for_worker_fn;

	for (;;) {
		pthread_mutex_lock (&worker_thread->new_work_mutex);
		/*
		 * Wait in a loop: pthread_cond_wait () may wake spuriously,
		 * so recheck the queue before dispatching
		 */
		while (queue_is_empty (&worker_thread->queue) == 1) {
			pthread_cond_wait (&worker_thread->new_work_cond,
				&worker_thread->new_work_mutex);
		}

		/*
		 * We unlock then relock the new_work_mutex to allow the
		 * worker function to execute and also allow new work to be
		 * added to the work queue
		 */
		data_for_worker_fn = queue_item_get (&worker_thread->queue);
		pthread_mutex_unlock (&worker_thread->new_work_mutex);
		worker_thread->worker_thread_group->worker_fn (worker_thread->thread_state, data_for_worker_fn);
		pthread_mutex_lock (&worker_thread->new_work_mutex);
		queue_item_remove (&worker_thread->queue);
		pthread_mutex_unlock (&worker_thread->new_work_mutex);

		pthread_mutex_lock (&worker_thread->done_work_mutex);
		if (queue_is_empty (&worker_thread->queue) == 1) {
			pthread_cond_signal (&worker_thread->done_work_cond);
		}
		pthread_mutex_unlock (&worker_thread->done_work_mutex);
	}
	return (0);
}
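/*
 * Create a group of worker threads, each with its own bounded work
 * queue.  If thread_state_size is nonzero, that many bytes of
 * per-thread state are allocated and passed through
 * thread_state_constructor (when given) before the thread starts.
 * Returns 0 on success, -1 if the thread array cannot be allocated.
 */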
int worker_thread_group_init (
	struct worker_thread_group *worker_thread_group,
	int threads,
	int items_max,
	int item_size,
	int thread_state_size,
	void (*thread_state_constructor)(void *),
	void (*worker_fn)(void *thread_state, void *work_item))
{
	int i;

	worker_thread_group->threadcount = threads;
	worker_thread_group->last_scheduled = 0;
	worker_thread_group->worker_fn = worker_fn;
	worker_thread_group->threads = malloc (sizeof (struct worker_thread) *
		threads);
	if (worker_thread_group->threads == 0) {
		return (-1);
	}

	for (i = 0; i < threads; i++) {
		if (thread_state_size) {
			worker_thread_group->threads[i].thread_state = malloc (thread_state_size);
		} else {
			worker_thread_group->threads[i].thread_state = NULL;
		}
		if (thread_state_constructor) {
			thread_state_constructor (worker_thread_group->threads[i].thread_state);
		}
		worker_thread_group->threads[i].worker_thread_group = worker_thread_group;
		pthread_mutex_init (&worker_thread_group->threads[i].new_work_mutex, NULL);
		pthread_cond_init (&worker_thread_group->threads[i].new_work_cond, NULL);
		pthread_mutex_init (&worker_thread_group->threads[i].done_work_mutex, NULL);
		pthread_cond_init (&worker_thread_group->threads[i].done_work_cond, NULL);
		queue_init (&worker_thread_group->threads[i].queue, items_max,
			item_size);

		worker_thread_group->threads[i].thread_data.thread_state =
			worker_thread_group->threads[i].thread_state;
		worker_thread_group->threads[i].thread_data.data = &worker_thread_group->threads[i];
		pthread_create (&worker_thread_group->threads[i].thread_id,
			NULL, worker_thread, &worker_thread_group->threads[i].thread_data);
	}
	return (0);
}
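/*
 * Dispatch one work item, round-robin across the group's threads.
 * Returns -1 without queueing if the selected thread's queue is full.
 */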
int worker_thread_group_work_add (
	struct worker_thread_group *worker_thread_group,
	void *item)
{
	int schedule;

	schedule = (worker_thread_group->last_scheduled + 1) % (worker_thread_group->threadcount);
	worker_thread_group->last_scheduled = schedule;

	pthread_mutex_lock (&worker_thread_group->threads[schedule].new_work_mutex);
	if (queue_is_full (&worker_thread_group->threads[schedule].queue)) {
		pthread_mutex_unlock (&worker_thread_group->threads[schedule].new_work_mutex);
		return (-1);
	}
	queue_item_add (&worker_thread_group->threads[schedule].queue, item);
	pthread_cond_signal (&worker_thread_group->threads[schedule].new_work_cond);
	pthread_mutex_unlock (&worker_thread_group->threads[schedule].new_work_mutex);
	return (0);
}
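/*
 * Block until every thread's queue has drained.  Relies on the
 * dispatch loop signalling done_work_cond when its queue empties.
 */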
void worker_thread_group_wait (
	struct worker_thread_group *worker_thread_group)
{
	int i;

	for (i = 0; i < worker_thread_group->threadcount; i++) {
		pthread_mutex_lock (&worker_thread_group->threads[i].done_work_mutex);
		/*
		 * Loop to guard against spurious condition variable wakeups
		 */
		while (queue_is_empty (&worker_thread_group->threads[i].queue) == 0) {
			pthread_cond_wait (&worker_thread_group->threads[i].done_work_cond,
				&worker_thread_group->threads[i].done_work_mutex);
		}
		pthread_mutex_unlock (&worker_thread_group->threads[i].done_work_mutex);
	}
}
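/*
 * Cancel the group's threads and destroy their synchronization
 * primitives.  Per-thread state and the thread array itself are not
 * freed here.
 */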
void worker_thread_group_exit (
	struct worker_thread_group *worker_thread_group)
{
	int i;

	for (i = 0; i < worker_thread_group->threadcount; i++) {
		pthread_cancel (worker_thread_group->threads[i].thread_id);
		pthread_mutex_destroy (&worker_thread_group->threads[i].new_work_mutex);
		pthread_cond_destroy (&worker_thread_group->threads[i].new_work_cond);
		pthread_mutex_destroy (&worker_thread_group->threads[i].done_work_mutex);
		pthread_cond_destroy (&worker_thread_group->threads[i].done_work_cond);
	}
}
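/*
 * Crash-time drain: run any still-queued work in the calling thread.
 * Intended for a fatal-signal handler, where the worker threads can no
 * longer be trusted to make progress; deliberately takes no locks.
 */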
void worker_thread_group_atsegv (
	struct worker_thread_group *worker_thread_group)
{
	void *data_for_worker_fn;
	struct worker_thread *worker_thread;
	unsigned int i;

	for (i = 0; i < worker_thread_group->threadcount; i++) {
		worker_thread = &worker_thread_group->threads[i];
		while (queue_is_empty (&worker_thread->queue) == 0) {
			data_for_worker_fn = queue_item_get (&worker_thread->queue);
			worker_thread->worker_thread_group->worker_fn (worker_thread->thread_state, data_for_worker_fn);
			queue_item_remove (&worker_thread->queue);
		}
	}
}